diff --git a/.gitignore b/.gitignore index fa397f98d2..8dacf57dc7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,7 @@ build tmp *.gcov + +# Configuration and build directories for CLion +.idea +cmake-build-debug \ No newline at end of file diff --git a/.gitmodules b/.gitmodules index 5efc3b0fb8..1bceead3d6 100644 --- a/.gitmodules +++ b/.gitmodules @@ -4,12 +4,9 @@ [submodule "aux/binpac"] path = aux/binpac url = https://github.com/zeek/binpac -[submodule "aux/broccoli"] - path = aux/broccoli - url = https://github.com/zeek/broccoli -[submodule "aux/broctl"] - path = aux/broctl - url = https://github.com/zeek/broctl +[submodule "aux/zeekctl"] + path = aux/zeekctl + url = https://github.com/zeek/zeekctl [submodule "aux/btest"] path = aux/btest url = https://github.com/zeek/btest @@ -31,3 +28,6 @@ [submodule "doc"] path = doc url = https://github.com/zeek/zeek-docs +[submodule "aux/paraglob"] + path = aux/paraglob + url = https://github.com/zeek/paraglob diff --git a/.travis.yml b/.travis.yml index 56d41de17d..304377486e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,10 +16,10 @@ branches: notifications: email: recipients: - - bro-commits-internal@bro.org + - zeek-commits-internal@zeek.org -# Build Bro and run tests in the following Linux distros (specifying "travis" -# builds bro in Travis without using docker). +# Build Zeek and run tests in the following Linux distros (specifying "travis" +# builds Zeek in Travis without using docker). env: - distro: centos_7 - distro: debian_9 diff --git a/.update-changes.cfg b/.update-changes.cfg index e3d04b7422..ed23fb4565 100644 --- a/.update-changes.cfg +++ b/.update-changes.cfg @@ -7,15 +7,7 @@ function new_version_hook # test suite repos to check out on a CI system. version=$1 - if [ -d testing/external/zeek-testing ]; then - echo "Updating testing/external/commit-hash.zeek-testing" - ( cd testing/external/zeek-testing && git fetch origin && git rev-parse origin/master ) > testing/external/commit-hash.zeek-testing - git add testing/external/commit-hash.zeek-testing - fi + ./testing/scripts/update-external-repo-pointer.sh testing/external/zeek-testing testing/external/commit-hash.zeek-testing - if [ -d testing/external/zeek-testing-private ]; then - echo "Updating testing/external/commit-hash.zeek-testing-private" - ( cd testing/external/zeek-testing-private && git fetch origin && git rev-parse origin/master ) > testing/external/commit-hash.zeek-testing-private - git add testing/external/commit-hash.zeek-testing-private - fi + ./testing/scripts/update-external-repo-pointer.sh testing/external/zeek-testing-private testing/external/commit-hash.zeek-testing-private } diff --git a/CHANGES b/CHANGES index f2beb944fc..47f258684c 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,711 @@ +2.6-526 | 2019-06-25 12:45:31 -0700 + + * Make a paraglob unit test parallelizable (Jon Siwek, Corelight) + +2.6-523 | 2019-06-25 10:38:24 -0700 + + * GH-427: improve default ID values shown by Zeekygen + + The default value of an ID is now truly the one used to initialize it, + unaltered by any subsequent redefs. + + Redefs are now shown separately, along with the expression that + modifies the ID's value. (Jon Siwek, Corelight) + + * Unbreak build on Linux (Johanna Amann, Corelight) + +2.6-519 | 2019-06-24 15:25:08 -0700 + + * GH-435: fix null pointer deref in RPC analyzer. (Jon Siwek, Corelight) + +2.6-517 | 2019-06-24 15:20:39 -0700 + + * Add paraglob, a fairly quick data structure for matching a string against a large list of patterns. 
+ (Zeke Medley, Corelight) + + * GH-171: support warning messages alongside deprecated attributes (Tim Wojtulewicz, Corelight) + +2.6-503 | 2019-06-21 11:17:58 -0700 + + * GH-417: Remove old, unmaintained p0f support. (Johanna Amann, Corelight) + +2.6-500 | 2019-06-20 20:54:15 -0700 + + * Add new RDP event: rdp_client_cluster_data (Jeff Atkinson) + + * Added "options" field to RDP::ClientChannelDef (Jeff Atkinson) + +2.6-494 | 2019-06-20 20:24:38 -0700 + + * Renaming src/StateAccess.{h,cc} to src/Notifier.{h,cc}. + + The old names did not reflect the content of the files anymore. (Robin Sommer, Corelight) + + * Remove MutableVal, StateAccess classes, enum Opcode. (Robin Sommer, Corelight) + + * Redo API for notifiers. + + There's now an notifier::Modifiable interface class that class + supposed to signal modifications are to be derived from. This takes + the place of the former MutableValue class and also unifies how Val + and IDs signal modifications. (Robin Sommer, Corelight) + + * Redo NotfifierRegistry to no longer rely on StateAccess. + + We simplify the API to a simple Modified() operation. (Robin Sommer, Corelight) + + * Add new test for when-statement watching global variables. (Robin Sommer, Corelight) + +2.6-482 | 2019-06-20 19:57:20 -0700 + + * Make configure complain if submodules are not checked out. (Johanna Amann, Corelight) + + * Improve C++ header includes to improve build time (Jon Siwek, Corelight) + +2.6-479 | 2019-06-20 18:31:58 -0700 + + * Fix TableVal::DoClone to use CloneState cache (Jon Siwek, Corelight) + +2.6-478 | 2019-06-20 14:19:11 -0700 + + * Remove old Broccoli SSL options (Jon Siwek, Corelight) + + - ssl_ca_certificate + - ssl_private_key + - ssl_passphrase + +2.6-477 | 2019-06-20 14:00:22 -0700 + + * Remove unused SerialInfo.h and SerialTypes.h headers (Jon Siwek, Corelight) + +2.6-476 | 2019-06-20 13:23:22 -0700 + + * Remove opaque of ocsp_resp. (Johanna Amann, Corelight) + + Only used in one event, without any way to use the opaque for anything + else. At this point this just seems like a complication that has no + reason to be there. + + * Remove remnants of event serializer. (Johanna Amann, Corelight) + + * Reimplement serialization infrastructure for OpaqueVals. + (Robin Sommer, Corelight & Johanna Amann, Corelight) + + We need this to sender through Broker, and we also leverage it for + cloning opaques. The serialization methods now produce Broker data + instances directly, and no longer go through the binary formatter. + + Summary of the new API for types derived from OpaqueVal: + + - Add DECLARE_OPAQUE_VALUE() to the class declaration + - Add IMPLEMENT_OPAQUE_VALUE() to the class' implementation file + - Implement these two methods (which are declated by the 1st macro): + - broker::data DoSerialize() const + - bool DoUnserialize(const broker::data& data) + + This machinery should work correctly from dynamic plugins as well. + + OpaqueVal provides a default implementation of DoClone() as well that + goes through serialization. Derived classes can provide a more + efficient version if they want. + + The declaration of the "OpaqueVal" class has moved into the header + file "OpaqueVal.h", along with the new serialization infrastructure. + This is breaking existing code that relies on the location, but + because the API is changing anyways that seems fine. + + * Implement a Shallow Clone operation for types. (Johanna Amann, Corelight) + + This is needed to track name changes for the documentation. + + * Remove old serialization infrastrucutre. 
(Johanna Amann, Corelight) + +2.6-454 | 2019-06-19 09:39:06 -0700 + + * GH-393: Add slice notation for vectors (Tim Wojtulewicz, Corelight & Jon Siwek, Corelight) + + Example Syntax: + + local v = vector(1, 2, 3, 4, 5); + v[2:4] = vector(6, 7, 8); # v is now [1, 2, 6, 7, 8, 5] + print v[:4]; # prints [1, 2, 6, 7] + +2.6-446 | 2019-06-17 20:26:49 -0700 + + * Rename bro to zeek in error messages (Daniel Thayer) + +2.6-444 | 2019-06-15 19:09:03 -0700 + + * Add/rewrite NTP support (Vlad Grigorescu and Mauro Palumbo) + +2.6-416 | 2019-06-14 20:57:57 -0700 + + * DNS: Add support for SPF response records (Vlad Grigorescu) + +2.6-413 | 2019-06-14 19:51:28 -0700 + + * GH-406: rename bro.bif to zeek.bif (Jon Siwek, Corelight) + +2.6-412 | 2019-06-14 19:26:21 -0700 + + * GH-387: update Broker topic names to use "zeek/" prefix (Jon Siwek, Corelight) + + * GH-323: change builtin plugin namespaces to Zeek (Jon Siwek, Corelight) + +2.6-408 | 2019-06-13 11:19:50 -0700 + + * Fix potential null-dereference in current_time() (Tim Wojtulewicz, Corelight) + + * Add --sanitizers configure script to enable Clang sanitizers (Tim Wojtulewicz, Corelight) + +2.6-404 | 2019-06-12 15:10:19 -0700 + + * Rename directories from bro to zeek (Daniel Thayer) + + The new default installation prefix is /usr/local/zeek + +2.6-400 | 2019-06-07 20:06:33 -0700 + + * Adapt bro_plugin CMake macros to use zeek_plugin (Jon Siwek, Corelight) + +2.6-399 | 2019-06-07 14:02:18 -0700 + + * Update SSL documentation. (Johanna Amann) + + * Support the newer TLS 1.3 key_share extension. (Johanna Amann) + + * Include all data of the server-hello random (Johanna Amann) + + Before we cut the first 4 bytes, which makes it impossible to recognize + several newer packets (like the hello retry). + + * Parse TLS 1.3 pre-shared-key extension. (Johanna Amann) + + + Adds new events: + + - ssl_extension_pre_shared_key_client_hello + - ssl_extension_pre_shared_key_server_hello + +2.6-391 | 2019-06-07 17:29:28 +1000 + + * GH-209: replace "remote_ip" field of radius.log with "tunnel_client". + Also changes type from addr to string. (Jon Siwek, Corelight) + +2.6-389 | 2019-06-06 20:02:19 -0700 + + * Update plugin unit tests to use --zeek-dist (Jon Siwek, Corelight) + +2.6-388 | 2019-06-06 19:48:55 -0700 + + * Change default value of peer_description "zeek" (Jon Siwek, Corelight) + +2.6-387 | 2019-06-06 18:51:09 -0700 + + * Rename Bro to Zeek in Zeekygen-generated documentation (Jon Siwek, Corelight) + +2.6-386 | 2019-06-06 17:17:55 -0700 + + * Add new RDP event: rdp_native_encrytped_data (Anthony Kasza, Corelight) + +2.6-384 | 2019-06-06 16:49:14 -0700 + + * Add new RDP event: rdp_client_security_data (Jeff Atkinson) + +2.6-379 | 2019-06-06 11:56:58 -0700 + + * Improve sqlite logging unit tests (Jon Siwek, Corelight) + +2.6-378 | 2019-06-05 16:23:04 -0700 + + * Rename BRO_DEPRECATED macro to ZEEK_DEPRECATED (Jon Siwek, Corelight) + +2.6-377 | 2019-06-05 16:15:58 -0700 + + * Deprecate functions with "bro" in them. (Jon Siwek, Corelight) + + * "bro_is_terminating" is now "zeek_is_terminating" + + * "bro_version" is now "zeek_version" + + The old functions still exist for now, but are deprecated. + +2.6-376 | 2019-06-05 13:29:57 -0700 + + * GH-379: move catch-and-release and unified2 scripts to policy/ (Jon Siwek, Corelight) + + These are no longer loaded by default due to the performance impact they + cause simply by being loaded (they have event handlers for commonly + generated events) and they aren't generally useful enough to justify it. 
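    To keep the previous behavior, the affected scripts (and the notice
    "drop" action they provided) can still be loaded explicitly, using the
    same @load paths described in NEWS:

      @load policy/frameworks/netcontrol/catch-and-release
      @load policy/frameworks/notice/actions/drop
      @load policy/files/unified2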
+ +2.6-375 | 2019-06-04 19:28:06 -0700 + + * Simplify threading::Value destructor (Jon Siwek, Corelight) + + * Add pattern support to input framework. (Zeke Medley, Corelight) + +2.6-369 | 2019-06-04 17:53:10 -0700 + + * GH-155: Improve coercion of expression lists to vector types (Tim Wojtulewicz, Corelight) + + * GH-159: Allow coercion of numeric record field values to other types (Tim Wojtulewicz, Corelight) + + * Allow passing a location to BroObj::Warning and BroObj::Error. (Tim Wojtulewicz, Corelight) + + This allows callers (such as check_and_promote) to pass an expression + location to be logged if the location doesn't exist in the value being + promoted. + + * Add CLion directories to gitignore (Tim Wojtulewicz, Corelight) + + * Move #define outside of max_type for clarity (Tim Wojtulewicz, Corelight) + +2.6-361 | 2019-06-04 10:30:21 -0700 + + * GH-293: Protect copy() against reference cycles. (Robin Sommer, Corelight) + + Reference cycles shouldn't occur but there's nothing really preventing + people from creating them, so may just as well be safe and deal with + them when cloning values. + +2.6-359 | 2019-05-31 13:37:17 -0700 + + * Remove old documentation reference to rotate_interval (Jon Siwek, Corelight) + +2.6-357 | 2019-05-30 10:57:54 -0700 + + * Tweak to ASCII reader warning suppression (Christian Kreibich, Corelight) + + Warnings in the ASCII reader so far remained suppressed even + when an input file changed. It's helpful to learn about problems + in the data when putting in place new data files, so this change + maintains the existing warning suppression while processing a file, + but re-enables warnings after updates to a file. + +2.6-354 | 2019-05-29 09:46:19 -0700 + + * Add weird: "RDP_channels_requested_exceeds_max" (Vlad Grigorescu) + +2.6-352 | 2019-05-28 17:57:36 -0700 + + * Reduce data copying in Broker message processing (Jon Siwek, Corelight) + + * Improve Broker I/O loop integration: less mutex locking (Jon Siwek, Corelight) + + Checking a subscriber for available messages required locking a mutex, + but we should never actually need to do that in the main-loop to check + for Broker readiness since we can rely on file descriptor polling. + + * Improve processing of broker data store responses (Jon Siwek, Corelight) + + Now retrieves and processes all N available responses at once instead + of one-by-one-until-empty. + +2.6-345 | 2019-05-28 11:32:16 -0700 + + * RDP: Add parsing and logging of channels requested by the client. (Vlad Grigorescu) + + Can determine capabilities requested by the client, as well as attacks such + as CVE-2019-0708. + +2.6-342 | 2019-05-28 10:48:37 -0700 + + * GH-168: Improve type-checking for table/set list assignment. 
(Zeke Medley and Jon Siwek, Corelight) + +2.6-340 | 2019-05-24 18:02:43 -0700 + + * Add support for parsing additional DHCP options (Jay Wren) + + The following optional fields were added to the DHCP::Options record: + + - time_offset (Option 2) + - time_servers (Option 4) + - name_servers (Option 5) + - ntp_servers (Option 42) + +2.6-338 | 2019-05-24 17:06:08 -0700 + + * Add input file name to additional ASCII reader warning messages (Christian Kreibich, Corelight) + +2.6-336 | 2019-05-24 10:23:20 -0700 + + * GH-378: check validity of missing 'val' field in Input::add_table (Jon Siwek, Corelight) + +2.6-335 | 2019-05-24 08:58:59 -0700 + + * Fix memory leak when no protocol_violation event handler exists (Jon Siwek, Corelight) + +2.6-334 | 2019-05-23 20:40:03 -0700 + + * Add an internal getenv wrapper function: zeekenv (Jon Siwek, Corelight) + + It maps newer environment variable names starting with ZEEK to the + legacy names starting with BRO. + + * Rename all BRO-prefixed environment variables (Daniel Thayer) + + For backward compatibility when reading values, we first check + the ZEEK-prefixed value, and if not set, then check the corresponding + BRO-prefixed value. + +2.6-331 | 2019-05-23 18:03:42 -0700 + + * Update broker unit test output. (Jon Siwek, Corelight) + + Due to string representation of Broker vectors changing (they now + use parentheses instead of square brackets). + +2.6-330 | 2019-05-23 13:04:26 -0700 + + * GH-173: Support ranges of values for value_list elements in the signature parser + (Tim Wojtulewicz, Corelight) + + * GH-173: Modify the signature parser so ID components can't start with numbers + (Tim Wojtulewicz, Corelight) + +2.6-327 | 2019-05-23 11:56:11 -0700 + + * Remove redundant RecordVal::record_type member (Jon Siwek, Corelight) + +2.6-326 | 2019-05-23 10:49:38 -0700 + + * Fix parse-time RecordVal tracking containing duplicates (Jon Siwek, Corelight) + +2.6-325 | 2019-05-22 23:56:23 -0700 + + * Add leak-checks for new copy operations (Johanna Amann, Corelight) + + * Finish implementation of new copy method. (Johanna Amann, Corelight) + + All types (besides EntropyVal) now support a native copy operation, + which uses primitives of the underlying datatypes to perform a quick + copy, without serialization. + + EntropyVal is the one exception - since that type is rather complex + (many members) and will probably not be copied a lot, if at all, it + makes sense to just use the serialization function. + + This will have to be slightly re-written in the near-term-future to use + the new serialization function for that opaque type. + + This change also introduces a new x509_from_der bif, which allows to + parse a der into an opaque of x509. + + This change removes the d2i_X509_ wrapper function; this was a remnant + when d2i_X509 took non-const arguments. We directly use d2i_X509 at + several places assuming const-ness, so there does not seem to ba a + reason to keep the wrapper. + + This change also exposed a problem in the File cache - cases in which an + object was brought back into the cache, and writing occurred in the + file_open event were never correctly handeled as far as I can tell. + + * Reimplement copy(). (Robin Sommer, Corelight) + + The old implementation used the serialization framework, which is + going away. This is a new standalone implementation that should also + be quite a bit faster. 
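    For example (illustrative values), a copied value remains fully
    independent of the original:

      local orig: table[string] of count = table(["a"] = 1);
      local dup: table[string] of count = copy(orig);
      dup["b"] = 2;
      # orig still only contains "a"; dup contains "a" and "b".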
+ +2.6-318 | 2019-05-21 09:17:53 -0700 + + * Remove state_dir and state_write_delay options (Jon Siwek, Corelight) + + * Remove a reference to &synchronized from docs (Jon Siwek, Corelight) + +2.6-316 | 2019-05-20 20:56:46 -0700 + + * Additional Bro to Zeek renaming (Daniel Thayer) + + * Added a new unit test for legacy Bro Plugins (Daniel Thayer) + + * Added a symlink bro-path-dev.in for use by legacy Bro packages (Daniel Thayer) + +2.6-314 | 2019-05-20 16:20:33 -0700 + + * Remove deprecated attributes. (Johanna Amann, Corelight) + To be more exact: &encrypt, &mergeable, &rotate_interval, &rotate_size + + Also removes no longer used redef-able constants: + log_rotate_interval, log_max_size, log_encryption_key + +2.6-311 | 2019-05-20 09:07:58 -0700 + + * Add missing &optional attr to KRB record fields; also add existence + checks to scripts (Jon Siwek, Corelight). + +2.6-308 | 2019-05-17 14:13:46 -0700 + + * Always emit scripting errors to stderr during zeek_init (Jon Siwek, Corelight) + +2.6-307 | 2019-05-16 13:37:24 -0700 + + * More bro-to-zeek renaming in scripts and other files (Daniel Thayer) + + * More bro-to-zeek renaming in the unit tests (Daniel Thayer) + +2.6-303 | 2019-05-15 15:03:11 -0700 + + * Changes needed due to bro-to-zeek renaming in broker (Daniel Thayer) + +2.6-301 | 2019-05-15 10:05:53 -0700 + + * Fix potential race in openflow broker plugin (Jon Siwek, Corelight) + +2.6-300 | 2019-05-15 09:00:57 -0700 + + * Fixes to DNS lookup, including ref-counting bugs, preventing starvation + of the DNS_Mgr in the I/O loop, dead code removal, and a fix that + prevents the timeout of already resolved DNS lookups (Jon Siwek, Corelight) + +2.6-292 | 2019-05-14 19:01:05 -0700 + + * Fix maybe-uninitialized compiler warning (Jon Siwek, Corelight) + +2.6-290 | 2019-05-14 18:35:25 -0700 + + * Update btest.cfg path to use zeek-aux (Jon Siwek, Corelight) + +2.6-288 | 2019-05-14 17:47:55 -0700 + + * Update CMake to use aux/zeekctl and aux/zeek-aux submodules (Jon Siwek, Corelight) + +2.6-287 | 2019-05-14 17:40:40 -0700 + + * Rename broctl submodule to zeekctl (Jon Siwek, Corelight) + +2.6-286 | 2019-05-14 13:19:12 -0700 + + * Undo an unintentional change to btest.cfg from a recent commit (Daniel Thayer) + + * Fix zeek-wrapper and improve error messages (Daniel Thayer) + + The script was not passing command-line arguments to the new program. + + * Update for renaming BroControl to ZeekControl. (Robin Sommer, Corelight) + + * GH-239: Rename bro to zeek, bro-config to zeek-config, and bro-path-dev to zeek-path-dev. + (Robin Sommer, Corelight) + + This also installs symlinks from "zeek" and "bro-config" to a wrapper + script that prints a deprecation warning. + +2.6-279 | 2019-05-13 20:02:59 -0700 + + * GH-365: improve un-indexable type error message (Jon Siwek, Corelight) + +2.6-277 | 2019-05-08 12:42:18 -0700 + + * Allow tuning Broker log batching via scripts (Jon Siwek, Corelight) + + Via redefining "Broker::log_batch_size" or "Broker::log_batch_interval" + +2.6-276 | 2019-05-08 09:03:27 -0700 + + * Force the Broker IOSource to idle periodically, preventing packet + IOSource starvation. (Jon Siwek, Corelight). + +2.6-274 | 2019-05-08 08:58:25 -0700 + + * GH-353: Add `//i` case-insensitive signature syntax (Jon Siwek, Corelight) + +2.6-272 | 2019-05-06 18:43:13 -0700 + + * Remove support for using && and || with patterns. (Johanna Amann, Corelight) + + This was never documented and previously deprecated. + + * Remove RemoteSerializer and related code/types. 
(Johanna Amann, Corelight) + + Also removes broccoli from the source tree. + + * Remove PersistenceSerializer. (Johanna Amann, Corelight) + + * Remove &synchronized and &persistent attributes. (Johanna Amann, Corelight) + +2.6-264 | 2019-05-03 11:16:38 -0700 + + * Fix sporadic openflow/broker test failure (Jon Siwek, Corelight) + +2.6-263 | 2019-05-02 22:49:40 -0700 + + * Install local.zeek as symlink to pre-existing local.bro (Jon Siwek, Corelight) + + This a convenience for those that are upgrading. If we didn't do + this, then deployments can silently break until the user intervenes + since BroControl now prefers to load the initially-vanilla local.zeek + instead of the formerly-customized local.bro. + +2.6-262 | 2019-05-02 21:39:01 -0700 + + * Rename Zeexygen to Zeekygen (Jon Siwek, Corelight) + +2.6-261 | 2019-05-02 20:49:23 -0700 + + * Remove previously deprecated policy/protocols/smb/__load__ (Jon Siwek, Corelight) + +2.6-260 | 2019-05-02 19:16:48 -0700 + + * GH-243: Remove deprecated functions/events from 2.6 and earlier (Johanna Amann, Corelight) + +2.6-258 | 2019-05-02 12:26:54 -0700 + + * GH-340: Improve IPv4/IPv6 regexes, extraction, and validity functions. + + is_valid_ip() is not a BIF, the IP regular expressions are improved and + extract_ip_addresses should give better results due to this. + (Jon Siwek, Corelight) + +2.6-255 | 2019-05-01 08:38:49 -0700 + + * Add methods to queue events without handler existence check + + Added ConnectionEventFast() and QueueEventFast() methods to avoid + redundant event handler existence checks. + + It's common practice for caller to already check for event handler + existence before doing all the work of constructing the arguments, so + it's desirable to not have to check for existence again. + + E.g. going through ConnectionEvent() means 3 existence checks: + one you do yourself before calling it, one in ConnectionEvent(), and then + another in QueueEvent(). + + The existence check itself can be more than a few operations sometimes + as it needs to check a few flags that determine if it's enabled, has + a local body, or has any remote receivers in the old comm. system or + has been flagged as something to publish in the new comm. system. (Jon Siwek, Corelight) + + * Cleanup/improve PList usage and Event API + + Majority of PLists are now created as automatic/stack objects, + rather than on heap and initialized either with the known-capacity + reserved upfront or directly from an initializer_list (so there's no + wasted slack in the memory that gets allocated for lists containing + a fixed/known number of elements). + + Added versions of the ConnectionEvent/QueueEvent methods that take + a val_list by value. + + Added a move ctor/assign-operator to Plists to allow passing them + around without having to copy the underlying array of pointers. (Jon Siwek, Corelight) + +2.6-250 | 2019-04-29 18:09:29 -0700 + + * Remove 'dns_resolver' option, replace w/ ZEEK_DNS_RESOLVER env. var. 
(Jon Siwek, Corelight) + +2.6-249 | 2019-04-26 19:26:44 -0700 + + * Fix parsing of hybrid IPv6-IPv4 addr literals with no zero compression (Jon Siwek, Corelight) + +2.6-246 | 2019-04-25 10:22:11 -0700 + + * Add Zeexygen cross-reference links for some events (Jon Siwek, Corelight) + +2.6-245 | 2019-04-23 18:42:02 -0700 + + * Expose TCP analyzer utility functions to derived classes (Vern Paxson, Corelight) + +2.6-243 | 2019-04-22 19:42:52 -0700 + + * GH-234: rename Broxygen to Zeexygen along with roles/directives (Jon Siwek, Corelight) + + * All "Broxygen" usages have been replaced in + code, documentation, filenames, etc. + + * Sphinx roles/directives like ":bro:see" are now ":zeek:see" + + * The "--broxygen" command-line option is now "--zeexygen" + +2.6-242 | 2019-04-22 22:43:09 +0200 + + * update SSL consts from TLS 1.3 (Johanna Amann) + +2.6-241 | 2019-04-22 12:38:06 -0700 + + * Add 'g' character to conn.log history field to flag content gaps (Vern Paxson, Corelight) + + There's also a small change to TCP state machine that distrusts ACKs + appearing at the end of connections (in FIN or RST) such that they won't + count towards revealing a true content gap. + +2.6-237 | 2019-04-19 12:00:37 -0700 + + * GH-236: Add zeek_script_loaded event, deprecate bro_script_loaded (Jon Siwek, Corelight) + + Existing handlers for bro_script_loaded automatically alias to the new + zeek_script_loaded event, but emit a deprecation warning. + +2.6-236 | 2019-04-19 11:16:35 -0700 + + * Add zeek_init/zeek_done events and deprecate bro_init/bro_done (Seth Hall, Corelight) + + Any existing handlers for bro_init and bro_done will automatically alias + to the new zeek_init and zeek_done events such that code will not break, + but will emit a deprecation warning. + +2.6-232 | 2019-04-18 09:34:13 +0200 + + * Prevent topk_merge from crashing when second argument is empty set (Jeff Barber) + +2.6-230 | 2019-04-17 16:44:16 -0700 + + * Fix unit test failures on case-insensitive file systems (Jon Siwek, Corelight) + +2.6-227 | 2019-04-16 17:44:31 -0700 + + * GH-237: add `@load foo.bro` -> foo.zeek fallback (Jon Siwek, Corelight) + + When failing to locate a script with explicit .bro suffix, check for + whether one with a .zeek suffix exists and use it instead. + +2.6-225 | 2019-04-16 16:07:49 -0700 + + * Use .zeek file suffix in unit tests (Jon Siwek, Corelight) + +2.6-223 | 2019-04-16 11:56:00 -0700 + + * Update tests and baselines due to renaming all scripts (Daniel Thayer) + + * Rename all scripts to have ".zeek" file extension (Daniel Thayer) + + * Add test cases to verify new file extension is recognized (Daniel Thayer) + + * Fix the core/load-duplicates.bro test (Daniel Thayer) + + * Update script search logic for new .zeek file extension (Daniel Thayer) + + When searching for script files, look for both the new and old file + extensions. If a file with ".zeek" can't be found, then search for + a file with ".bro" as a fallback. 
+ + * Remove unnecessary ".bro" from @load directives (Daniel Thayer) + +2.6-212 | 2019-04-12 10:12:31 -0700 + + * smb2_write_response event added (Mauro Palumbo) + +2.6-210 | 2019-04-10 09:54:27 -0700 + + * Add options to tune BinPAC flowbuffer policy (Jon Siwek, Corelight) + +2.6-208 | 2019-04-10 11:36:17 +0000 + + * Improve PE file analysis (Jon Siwek, Corelight) + + * Set PE analyzer CMake dependencies correctly (Jon Siwek, Corelight) + +2.6-205 | 2019-04-05 17:06:26 -0700 + + * Add script to update external test repo commit pointers (Jon Siwek, Corelight) + +2.6-203 | 2019-04-04 16:35:52 -0700 + + * Update DTLS error handling (Johanna Amann, Corelight) + + - Adds tuning options: SSL::dtls_max_version_errors and + SSL::dtls_max_reported_version_errors + 2.6-200 | 2019-04-03 09:44:53 -0700 * Fix reporter net_weird API usage for unknown_mobility_type diff --git a/CMakeLists.txt b/CMakeLists.txt index cfe0b29ed9..217e741148 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,7 +1,7 @@ -project(Bro C CXX) +project(Zeek C CXX) # When changing the minimum version here, also adapt -# aux/bro-aux/plugin-support/skeleton/CMakeLists.txt +# aux/zeek-aux/plugin-support/skeleton/CMakeLists.txt cmake_minimum_required(VERSION 2.8.12 FATAL_ERROR) include(cmake/CommonCMakeConfig.cmake) @@ -21,34 +21,34 @@ if ( ENABLE_CCACHE ) set(CMAKE_CXX_COMPILER_LAUNCHER ${CCACHE_PROGRAM}) endif () -set(BRO_ROOT_DIR ${CMAKE_INSTALL_PREFIX}) -if (NOT BRO_SCRIPT_INSTALL_PATH) - # set the default Bro script installation path (user did not specify one) - set(BRO_SCRIPT_INSTALL_PATH ${BRO_ROOT_DIR}/share/bro) +set(ZEEK_ROOT_DIR ${CMAKE_INSTALL_PREFIX}) +if (NOT ZEEK_SCRIPT_INSTALL_PATH) + # set the default Zeek script installation path (user did not specify one) + set(ZEEK_SCRIPT_INSTALL_PATH ${ZEEK_ROOT_DIR}/share/zeek) endif () -if (NOT BRO_MAN_INSTALL_PATH) - # set the default Bro man page installation path (user did not specify one) - set(BRO_MAN_INSTALL_PATH ${BRO_ROOT_DIR}/share/man) +if (NOT ZEEK_MAN_INSTALL_PATH) + # set the default Zeek man page installation path (user did not specify one) + set(ZEEK_MAN_INSTALL_PATH ${ZEEK_ROOT_DIR}/share/man) endif () -# sanitize the Bro script install directory into an absolute path +# sanitize the Zeek script install directory into an absolute path # (CMake is confused by ~ as a representation of home directory) -get_filename_component(BRO_SCRIPT_INSTALL_PATH ${BRO_SCRIPT_INSTALL_PATH} +get_filename_component(ZEEK_SCRIPT_INSTALL_PATH ${ZEEK_SCRIPT_INSTALL_PATH} ABSOLUTE) -set(BRO_PLUGIN_INSTALL_PATH ${BRO_ROOT_DIR}/lib/bro/plugins CACHE STRING "Installation path for plugins" FORCE) +set(BRO_PLUGIN_INSTALL_PATH ${ZEEK_ROOT_DIR}/lib/zeek/plugins CACHE STRING "Installation path for plugins" FORCE) -configure_file(bro-path-dev.in ${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev) +configure_file(zeek-path-dev.in ${CMAKE_CURRENT_BINARY_DIR}/zeek-path-dev) -file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev.sh - "export BROPATH=`${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev`\n" - "export BRO_PLUGIN_PATH=\"${CMAKE_CURRENT_BINARY_DIR}/src\":${BRO_PLUGIN_PATH}\n" +file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/zeek-path-dev.sh + "export ZEEKPATH=`${CMAKE_CURRENT_BINARY_DIR}/zeek-path-dev`\n" + "export ZEEK_PLUGIN_PATH=\"${CMAKE_CURRENT_BINARY_DIR}/src\":${ZEEK_PLUGIN_PATH}\n" "export PATH=\"${CMAKE_CURRENT_BINARY_DIR}/src\":$PATH\n") -file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev.csh - "setenv BROPATH `${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev`\n" - "setenv BRO_PLUGIN_PATH 
\"${CMAKE_CURRENT_BINARY_DIR}/src\":${BRO_PLUGIN_PATH}\n" +file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/zeek-path-dev.csh + "setenv ZEEKPATH `${CMAKE_CURRENT_BINARY_DIR}/zeek-path-dev`\n" + "setenv ZEEK_PLUGIN_PATH \"${CMAKE_CURRENT_BINARY_DIR}/src\":${ZEEK_PLUGIN_PATH}\n" "setenv PATH \"${CMAKE_CURRENT_BINARY_DIR}/src\":$PATH\n") file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/VERSION" VERSION LIMIT_COUNT 1) @@ -72,6 +72,40 @@ if(${ENABLE_DEBUG}) set(VERSION_C_IDENT "${VERSION_C_IDENT}_debug") endif() +if ( NOT BINARY_PACKAGING_MODE ) + macro(_make_install_dir_symlink _target _link) + install(CODE " + if ( \"\$ENV{DESTDIR}\" STREQUAL \"\" ) + if ( EXISTS \"${_target}\" AND NOT EXISTS \"${_link}\" ) + message(STATUS \"WARNING: installed ${_link} as symlink to ${_target}\") + execute_process(COMMAND \"${CMAKE_COMMAND}\" -E create_symlink + \"${_target}\" \"${_link}\") + endif () + endif () + ") + endmacro() + + if ( "${CMAKE_INSTALL_PREFIX}" STREQUAL "/usr/local/zeek" ) + # If we're installing into the default prefix, check if the + # old default prefix already exists and symlink to it. + # This is done to help keep custom user configuration/installation + # if they're upgrading from a version before Zeek 3.0. + _make_install_dir_symlink("/usr/local/bro" "/usr/local/zeek") + endif () + + # Check whether we need to symlink directories used by versions + # before Zeek 3.0. + _make_install_dir_symlink("${CMAKE_INSTALL_PREFIX}/include/bro" "${CMAKE_INSTALL_PREFIX}/include/zeek") + _make_install_dir_symlink("${CMAKE_INSTALL_PREFIX}/share/bro" "${CMAKE_INSTALL_PREFIX}/share/zeek") + _make_install_dir_symlink("${CMAKE_INSTALL_PREFIX}/lib/bro" "${CMAKE_INSTALL_PREFIX}/lib/zeek") +endif () + +if ( SANITIZERS ) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=${SANITIZERS} -fno-omit-frame-pointer") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=${SANITIZERS} -fno-omit-frame-pointer") + set(CMAKE_LD_FLAGS "${CMAKE_LD_FLAGS} -fsanitize=${SANITIZERS} -fno-omit-frame-pointer") +endif() + ######################################################################## ## Dependency Configuration @@ -211,7 +245,7 @@ if ( ${CMAKE_SYSTEM_NAME} MATCHES Linux AND EXISTS /etc/os-release ) endif () endif () -set(brodeps +set(zeekdeps ${BinPAC_LIBRARY} ${PCAP_LIBRARY} ${OPENSSL_LIBRARIES} @@ -241,49 +275,56 @@ include(GetArchitecture) include(RequireCXX11) if ( (OPENSSL_VERSION VERSION_EQUAL "1.1.0") OR (OPENSSL_VERSION VERSION_GREATER "1.1.0") ) - set(BRO_HAVE_OPENSSL_1_1 true CACHE INTERNAL "" FORCE) + set(ZEEK_HAVE_OPENSSL_1_1 true CACHE INTERNAL "" FORCE) endif() # Tell the plugin code that we're building as part of the main tree. 
-set(BRO_PLUGIN_INTERNAL_BUILD true CACHE INTERNAL "" FORCE) +set(ZEEK_PLUGIN_INTERNAL_BUILD true CACHE INTERNAL "" FORCE) -set(DEFAULT_BROPATH .:${BRO_SCRIPT_INSTALL_PATH}:${BRO_SCRIPT_INSTALL_PATH}/policy:${BRO_SCRIPT_INSTALL_PATH}/site) +set(DEFAULT_ZEEKPATH .:${ZEEK_SCRIPT_INSTALL_PATH}:${ZEEK_SCRIPT_INSTALL_PATH}/policy:${ZEEK_SCRIPT_INSTALL_PATH}/site) if ( NOT BINARY_PACKAGING_MODE ) - set(BRO_DIST ${CMAKE_SOURCE_DIR}) + set(ZEEK_DIST ${CMAKE_SOURCE_DIR}) endif () string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/bro-config.h.in - ${CMAKE_CURRENT_BINARY_DIR}/bro-config.h) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/zeek-config.h.in + ${CMAKE_CURRENT_BINARY_DIR}/zeek-config.h) include_directories(${CMAKE_CURRENT_BINARY_DIR}) -install(FILES ${CMAKE_CURRENT_BINARY_DIR}/bro-config.h DESTINATION include/bro) +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/zeek-config.h DESTINATION include/zeek) if ( CAF_ROOT_DIR ) - set(BRO_CONFIG_CAF_ROOT_DIR ${CAF_ROOT_DIR}) + set(ZEEK_CONFIG_CAF_ROOT_DIR ${CAF_ROOT_DIR}) else () - set(BRO_CONFIG_CAF_ROOT_DIR ${BRO_ROOT_DIR}) + set(ZEEK_CONFIG_CAF_ROOT_DIR ${ZEEK_ROOT_DIR}) endif () if ( BinPAC_ROOT_DIR ) - set(BRO_CONFIG_BINPAC_ROOT_DIR ${BinPAC_ROOT_DIR}) + set(ZEEK_CONFIG_BINPAC_ROOT_DIR ${BinPAC_ROOT_DIR}) else () - set(BRO_CONFIG_BINPAC_ROOT_DIR ${BRO_ROOT_DIR}) + set(ZEEK_CONFIG_BINPAC_ROOT_DIR ${ZEEK_ROOT_DIR}) endif () if ( BROKER_ROOT_DIR ) - set(BRO_CONFIG_BROKER_ROOT_DIR ${BROKER_ROOT_DIR}) + set(ZEEK_CONFIG_BROKER_ROOT_DIR ${BROKER_ROOT_DIR}) else () - set(BRO_CONFIG_BROKER_ROOT_DIR ${BRO_ROOT_DIR}) + set(ZEEK_CONFIG_BROKER_ROOT_DIR ${ZEEK_ROOT_DIR}) endif () -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/bro-config.in - ${CMAKE_CURRENT_BINARY_DIR}/bro-config @ONLY) -install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/bro-config DESTINATION bin) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/zeek-config.in + ${CMAKE_CURRENT_BINARY_DIR}/zeek-config @ONLY) +install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/zeek-config DESTINATION bin) -install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/cmake DESTINATION share/bro +install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/cmake DESTINATION share/zeek USE_SOURCE_PERMISSIONS) +# Install wrapper script for Bro-to-Zeek renaming. 
+include(InstallShellScript) +include(InstallSymlink) +InstallShellScript("bin" "zeek-wrapper.in" "zeek-wrapper") +InstallSymlink("${CMAKE_INSTALL_PREFIX}/bin/zeek-wrapper" "${CMAKE_INSTALL_PREFIX}/bin/bro-config") +InstallSymlink("${CMAKE_INSTALL_PREFIX}/include/zeek/zeek-config.h" "${CMAKE_INSTALL_PREFIX}/include/zeek/bro-config.h") + ######################################################################## ## Recurse on sub-directories @@ -291,7 +332,7 @@ if ( BROKER_ROOT_DIR ) find_package(Broker REQUIRED) find_package(CAF COMPONENTS core io openssl REQUIRED) - set(brodeps ${brodeps} ${BROKER_LIBRARY} ${CAF_LIBRARIES}) + set(zeekdeps ${zeekdeps} ${BROKER_LIBRARY} ${CAF_LIBRARIES}) include_directories(BEFORE ${BROKER_INCLUDE_DIR}) else () set(ENABLE_STATIC_ONLY_SAVED ${ENABLE_STATIC_ONLY}) @@ -304,9 +345,9 @@ else () set(ENABLE_STATIC_ONLY ${ENABLE_STATIC_ONLY_SAVED}) if ( BUILD_STATIC_BROKER ) - set(brodeps ${brodeps} broker_static) + set(zeekdeps ${zeekdeps} broker_static) else() - set(brodeps ${brodeps} broker) + set(zeekdeps ${zeekdeps} broker) endif() include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/aux/broker ${CMAKE_CURRENT_BINARY_DIR}/aux/broker) @@ -318,20 +359,23 @@ include_directories(BEFORE ${CAF_INCLUDE_DIR_CORE}) include_directories(BEFORE ${CAF_INCLUDE_DIR_IO}) include_directories(BEFORE ${CAF_INCLUDE_DIR_OPENSSL}) +add_subdirectory(aux/paraglob) +set(zeekdeps ${zeekdeps} paraglob) +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/aux/paraglob) + add_subdirectory(src) add_subdirectory(scripts) add_subdirectory(man) include(CheckOptionalBuildSources) -CheckOptionalBuildSources(aux/broctl Broctl INSTALL_BROCTL) -CheckOptionalBuildSources(aux/bro-aux Bro-Aux INSTALL_AUX_TOOLS) -CheckOptionalBuildSources(aux/broccoli Broccoli INSTALL_BROCCOLI) +CheckOptionalBuildSources(aux/zeekctl ZeekControl INSTALL_ZEEKCTL) +CheckOptionalBuildSources(aux/zeek-aux Zeek-Aux INSTALL_AUX_TOOLS) ######################################################################## ## Packaging Setup -if (INSTALL_BROCTL) +if (INSTALL_ZEEKCTL) # CPack RPM Generator may not automatically detect this set(CPACK_RPM_PACKAGE_REQUIRES "python >= 2.6.0") endif () @@ -352,12 +396,12 @@ if (CMAKE_BUILD_TYPE) endif () message( - "\n====================| Bro Build Summary |=====================" + "\n====================| Zeek Build Summary |====================" "\n" "\nBuild type: ${CMAKE_BUILD_TYPE}" "\nBuild dir: ${CMAKE_BINARY_DIR}" "\nInstall prefix: ${CMAKE_INSTALL_PREFIX}" - "\nBro Script Path: ${BRO_SCRIPT_INSTALL_PATH}" + "\nZeek Script Path: ${ZEEK_SCRIPT_INSTALL_PATH}" "\nDebug mode: ${ENABLE_DEBUG}" "\n" "\nCC: ${CMAKE_C_COMPILER}" @@ -366,8 +410,7 @@ message( "\nCXXFLAGS: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${BuildType}}" "\nCPP: ${CMAKE_CXX_COMPILER}" "\n" - "\nBroccoli: ${INSTALL_BROCCOLI}" - "\nBroctl: ${INSTALL_BROCTL}" + "\nZeekControl: ${INSTALL_ZEEKCTL}" "\nAux. 
Tools: ${INSTALL_AUX_TOOLS}" "\n" "\nlibmaxminddb: ${USE_GEOIP}" diff --git a/Makefile b/Makefile index 8e9d77e3cf..c83ef22a54 100644 --- a/Makefile +++ b/Makefile @@ -55,9 +55,9 @@ test: -@( cd testing && make ) test-aux: - -test -d aux/broctl && ( cd aux/broctl && make test-all ) + -test -d aux/zeekctl && ( cd aux/zeekctl && make test-all ) -test -d aux/btest && ( cd aux/btest && make test ) - -test -d aux/bro-aux && ( cd aux/bro-aux && make test ) + -test -d aux/zeek-aux && ( cd aux/zeek-aux && make test ) -test -d aux/plugins && ( cd aux/plugins && make test-all ) test-all: test test-aux diff --git a/NEWS b/NEWS index 13f05baa3b..477a532f91 100644 --- a/NEWS +++ b/NEWS @@ -1,10 +1,10 @@ -This document summarizes the most important changes in the current Bro +This document summarizes the most important changes in the current Zeek release. For an exhaustive list of changes, see the ``CHANGES`` file (note that submodules, such as Broker, come with their own ``CHANGES``.) -Bro 2.7 -======= +Zeek 3.0.0 +========== New Functionality ----------------- @@ -18,7 +18,10 @@ New Functionality - dns_NSEC - dns_NSEC3 -- Bro's Plugin framework now allows a patch version. If a patch version is not +- Added support for parsing and logging DNS SPF resource records. + A new ``dns_SPF_reply`` event is also available. + +- Zeek's Plugin framework now allows a patch version. If a patch version is not provided, it will default to 0. To specify this, modify the plugin Configuration class in your ``src/Plugin.cc`` and set ``config.version.patch``. Note that the default plugin skeleton @@ -68,9 +71,103 @@ New Functionality - Added a new event for weirdness found via file analysis: ``file_weird``. +- The conn.log "history" field supports a new character 'G' or 'g' + (capital for originator, lowercase responder) to indicate a content + gap in the TCP stream. These are recorded logarithmically. + +- The ``ZEEK_DNS_RESOLVER`` environment variable now controls + the DNS resolver to use by setting it to an IPv4 or IPv6 address. If + not set, then the first IPv4 address from /etc/resolv.conf gets used. + +- The ``//i`` convenience syntax for case-insensitive patterns is now + also allowed when specifying patterns used in signature files. + +- New RDP functionality. + + - New events: + - rdp_client_network_data + - rdp_client_security_data + - rdp_client_cluster_data + - rdp_native_encrypted_data + + - Add a new "client_channels" field to rdp.log based on data parsed from + the Client Network Data (TS_UD_CS_NET) packet. The channel list is also + available in the new ``rdp_client_network_data`` event. + +- Add parsing support for TLS 1.3 pre-shared key extension. This info + is available in the events: ``ssl_extension_pre_shared_key_client_hello`` + and ``ssl_extension_pre_shared_key_server_hello``. + +- Added/re-wrote support for NTP. + + - Parsing support for modes 1-7, with parsed structures available in + the ``ntp_message`` event. + + - An ntp.log is produced by default, containing data extracted from + NTP messages with modes 1-5. + +- Add support for vector slicing operations. For example:: + + local v = vector(1, 2, 3, 4, 5); + v[2:4] = vector(6, 7, 8); # v is now [1, 2, 6, 7, 8, 5] + print v[:4]; # prints [1, 2, 6, 7] + +- Add support for paraglob, a fairly quick data structure for matching a string + against a large list of patterns. 
For example:: + + local v1 = vector("*", "d?g", "*og", "d?", "d[!wl]g"); + local p1 = paraglob_init(v1); + print paraglob_match(p1, "dog"); + Changed Functionality --------------------- +- The following executable names have changed (the old names will + continue to work, but emit a deprecation warning): + + - ``bro`` is now ``zeek`` + + - ``bro-config`` is now ``zeek-config`` + + - ``broctl`` is now ``zeekctl`` + + - ``bro-cut`` is now ``zeek-cut`` + +- BroControl has been completely renamed to ZeekControl. Many installation + directories and files with "broctl" in their name have been changed + to use "zeekctl" instead. It's expected this has been done in a way + that's backwards compatible with previous Bro installations. E.g. + if you made customizations to the ``broctl.cfg`` file of a previous + installation, installing the newer Zeek version over it will retain that + file and even symlink the new ``zeekctl.cfg`` to it. + +- The default install prefix is now ``/usr/local/zeek`` instead of + ``/usr/local/bro``. If you have an existing installation that used + the previous default and are still using the new default when upgrading, + we'll crate ``/usr/local/zeek`` as a symlink to ``/usr/local/bro``. + Certain subdirectories will also get similar treatment: ``share/bro``, + ``include/bro``, and ``lib/bro``. + +- ``$prefix/share/bro/site/local.bro`` has been renamed to + ``local.zeek``. If you have a ``local.bro`` file from a previous + installation, possibly with customizations made to it, the new + version of Zeek will install a ``local.zeek`` file that is a symlink + to the pre-existing ``local.bro``. In that case, you may want to + just copy ``local.bro`` into the new ``local.zeek`` location to + avoid confusion, but things are otherwise meant to work properly + without intervention. + +- All scripts ending in ``.bro`` that ship with the Zeek source tree have + been renamed to ``.zeek``. + +- The search logic for the ``@load`` script directive now prefers files + ending in ``.zeek``, but will fallback to loading a ``.bro`` file if + it exists. E.g. ``@load foo`` will first check for a ``foo.zeek`` + file to load and then otherwise ``foo.bro``. Note that + ``@load foo.bro`` (with the explicit ``.bro`` file suffix) prefers + in the opposite order: it first checks for ``foo.bro`` and then + falls back to a ``foo.zeek``, if it exists. + - The for-loop index variable for vectors has been changed from 'int' to 'count' type. It's unlikely this would alter/break any script behavior unless they were explicitly inspecting the variable's @@ -149,16 +246,249 @@ Changed Functionality - "unknown_gre_version_%d" -> unknown_gre_version - "unknown_gre_protocol_%u16" -> unknown_gre_protocol +- The "missed_bytes" field of conn.log can be calculated slightly differently + in some cases: ACKs that reveal a content gap, but also come at + the end of a connection (in a FIN or RST) are considered unreliable + and aren't counted as true gaps. + +- The Broxygen component, which is used to generate our Doxygen-like + scripting API documentation has been renamed to Zeekygen. This likely has + no breaking or visible changes for most users, except in the case one + used it to generate their own documentation via the ``--broxygen`` flag, + which is now named ``--zeekygen``. Besides that, the various documentation + in scripts has also been updated to replace Sphinx cross-referencing roles + and directives like ":bro:see:" with ":zeek:zee:". 
+ +- The catch-and-release and unified2 scripts are no longer loaded by + default. Because there was a performance impact simply from loading + them and it's unlikely a majority of user make use of their features, + they've been moved from the scripts/base/ directory into + scripts/policy/ and must be manually loaded to use their + functionality. The "drop" action for the notice framework is likewise + moved since it was implemented via catch-and-release. As a result, + the default notice.log no longer contains a "dropped" field. + + If you previously used the catch-and-release functionality add this: + + @load policy/frameworks/netcontrol/catch-and-release + + If you previously used Notice::ACTION_DROP add: + + @load policy/frameworks/notice/actions/drop + + If you previously used the Unified2 file analysis support add: + + @load policy/files/unified2 + +- The default value of ``peer_description`` has changed from "bro" + to "zeek". This won't effect most users, except for the fact that + this value may appear in several log files, so any external plugins + that have written unit tests that compare baselines of such log + files may need to be updated. + +- The "remote_ip" field of "addr" type was removed from radius.log and + replaced with a field named "tunnel_client" of "string" type. The + reason for this is that the Tunnel-Client-Endpoint RADIUS attribute + this data is derived from may also be a FQDN, not just an IP address. + +- The ``ssl_server_hello`` event's ``server_random`` parameter has been + changed to always include the full 32-byte field from the + ServerHello. Previously a 4-byte timestamp and 28-byte random data + were parsed separately as some TLS protocol versions specified a + separate timestamp field as part of the full 32-byte random sequence. + +- The namespace used by all the builtin plugins that ship with Zeek have + changed to use "Zeek::" instead of "Bro::". + +- Any Broker topic names used in scripts shipped with Zeek that + previously were prefixed with "bro/" are now prefixed with "zeek/" + instead. + + In the case where external applications were using a "bro/" topic + to send data into a Bro process, a Zeek process still subscribes + to those topics in addition to the equivalently named "zeek/" topic. + + In the case where external applications were using a "bro/" topic + to subscribe to remote messages or query data stores, there's no + backwards compatibility and external applications must be changed + to use the new "zeek/" topic. The thought is this change will have + low impact since most data published under "bro/" topic names is + intended for use only as a detail of implementing cluster-enabled + versions of various scripts. + + A list of the most relevant/common topic names that could potentially + be used in external applications to consume/query remote data that + one may need to change: + + - store names + - bro/known/services + - bro/known/hosts + - bro/known/certs + + - cluster nodes + - bro/cluster/ + - bro/cluster/node/ + - bro/cluster/nodeid/ + + - logging + - bro/logs/ + +- The ``resp_ref`` argument was removed from the ``ocsp_response_bytes`` + event. ``resp_ref`` was not used by anything in the codebase and could not be + passed to any other functions for further processing. The remainder of the + ``ocsp_response_bytes`` is unchanged. + Removed Functionality --------------------- +- A number of functions that were deprecated in version 2.6 or below and completely + removed from this release. 
Most of the functions were used for the old communication + code. + + - ``find_ip_addresses`` + - ``cat_string_array`` + - ``cat_string_array_n`` + - ``complete_handshake`` + - ``connect`` + - ``decode_base64_custom`` + - ``disconnect`` + - ``enable_communication`` + - ``encode_base64_custom`` + - ``get_event_peer`` + - ``get_local_event_peer`` + - ``join_string_array`` + - ``listen`` + - ``merge_pattern`` + - ``request_remote_events`` + - ``request_remote_logs`` + - ``request_remote_sync`` + - ``resume_state_updates`` + - ``send_capture_filter`` + - ``send_current_packet`` + - ``send_id`` + - ``send_ping`` + - ``set_accept_state`` + - ``set_compression_level`` + - ``sort_string_array`` + - ``split1`` + - ``split_all`` + - ``split`` + - ``suspend_state_updates`` + - ``terminate_communication`` + - ``split`` + - ``send_state`` + - ``checkpoint_state`` + - ``rescan_state`` + +- The following events were deprecated in version 2.6 or below and are completely + removed from this release: + + - ``ssl_server_curve`` + - ``dhcp_ack`` + - ``dhcp_decline`` + - ``dhcp_discover`` + - ``dhcp_inform`` + - ``dhcp_nak`` + - ``dhcp_offer`` + - ``dhcp_release`` + - ``dhcp_request`` + - ``remote_state_access_performed`` + - ``remote_state_inconsistency`` + - ``remote_connection_established`` + - ``remote_connection_closed`` + - ``remote_connection_handshake_done`` + - ``remote_event_registered`` + - ``remote_connection_error`` + - ``remote_capture_filter`` + - ``remote_log_peer`` + - ``remote_log`` + - ``finished_send_state`` + - ``remote_pong`` + +- The following types/records were deprecated in version 2.6 or below and are + removed from this release: + + - ``peer_id`` + - ``event_peer`` + +- The following configuration options were deprecated in version 2.6 or below and are + removed from this release: + + - ``max_remote_events_processed`` + - ``forward_remote_events`` + - ``forward_remote_state_changes`` + - ``enable_syslog`` + - ``remote_trace_sync_interval`` + - ``remote_trace_sync_peers`` + - ``remote_check_sync_consistency`` + - ``log_rotate_interval`` + - ``log_max_size`` + - ``log_encryption_key`` + - ``state_dir`` + - ``state_write_delay`` + - ``ssl_ca_certificate`` + - ``ssl_private_key`` + - ``ssl_passphrase`` + +- The following constants were used as part of deprecated functionality in version 2.6 + or below and are removed from this release: + + - ``PEER_ID_NONE`` + - ``REMOTE_LOG_INFO`` + - ``REMOTE_SRC_CHILD`` + - ``REMOTE_SRC_PARENT`` + - ``REMOTE_SRC_SCRIPT`` + +- The deprecated script ``policy/protocols/smb/__load__.bro`` was removed. + Instead of ``@load policy/protocols/smb`` use ``@load base/protocols/smb``. + +- Broccoli, which had been deprecated in version 2.6 and was no longer built by default + was removed from the source tree. + +- Support for the &persistent, &synchronized, &mergeable, &encrypt, &rotate_interval, + and &rotate_size attributes, which were deprecated in Bro 2.6, was removed. The ``-g`` + command-line option (dump-config) which relied on this functionality was also removed. + +- Functionality for writing state updates for variables with the + &synchronized attribute was removed. This entails the + ``-x`` command-line option (print-state) as well as the + ``capture_state_updates`` function. + +- Removed the BroControl ``update`` command, which was deprecated in Bro 2.6. + +- Functionality for writing/reading binary event streams was + removed. This functionality relied on the old communication code + anc was basically untested. 
The ``-R`` command-line option (replay) + as well as the ``capture_events`` function were removed. + +- Removed p0f (passive OS fingerprinting) support. The version of + p0f shipped with zeek was ancient, probably did not give + any reliable support anymore and did not offer a clear + upgrade path. The ``OS_version_found`` event as well as the + ``generate_OS_version_event`` configuration option were removed. + Deprecated Functionality ------------------------ -- The ``str_shell_escape` function is now deprecated, use ``safe_shell_quote`` +- The ``str_shell_escape`` function is now deprecated, use ``safe_shell_quote`` instead. The later will automatically return a value that is enclosed in double-quotes. +- The ``bro_init``, ``bro_done``, and ``bro_script_loaded`` events are now + deprecated, use ``zeek_init``, ``zeek_done``, and + ``zeek_script_loaded`` instead. Any existing event handlers for + the deprecated versions will automatically alias to the new events + such that existing code will not break, but will emit a deprecation + warning. + +- The ``bro_is_terminating`` and ``bro_version`` function are deprecated and + replaced by functions named ``zeek_is_terminating`` and ``zeek_version``. + +- The ``rotate_file``, ``rotate_file_by_name`` and ``calc_next_rotate`` functions + were marked as deprecated. These functions were used with the old pre-2.0 logging + framework and are no longer used. They also were marked as deprecated in their + documentation, however the functions themselves did not carry the deprecation marker. + Bro 2.6 ======= @@ -530,7 +860,7 @@ New Functionality Each has the same form, e.g.:: event tcp_multiple_retransmissions(c: connection, is_orig: bool, - threshold: count); + threshold: count); - Added support for set union, intersection, difference, and comparison operations. 
The corresponding operators for the first three are diff --git a/VERSION b/VERSION index 879e9b62f6..b40f65b0fa 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.6-200 +2.6-526 diff --git a/aux/bifcl b/aux/bifcl index 44622332fb..699ffb13c9 160000 --- a/aux/bifcl +++ b/aux/bifcl @@ -1 +1 @@ -Subproject commit 44622332fb1361383799be33e365704caacce199 +Subproject commit 699ffb13c986aca599b70735b368a515c2149982 diff --git a/aux/binpac b/aux/binpac index bb2476465e..baabe22a2b 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit bb2476465e304a00c368bd73d40cc6f734be5311 +Subproject commit baabe22a2b8a68fac448e862e1c2acc46f89c5fc diff --git a/aux/broccoli b/aux/broccoli deleted file mode 160000 index 41841d8f64..0000000000 --- a/aux/broccoli +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 41841d8f64bdb062860309f7b36513212e81befa diff --git a/aux/broctl b/aux/broctl deleted file mode 160000 index afc0260abf..0000000000 --- a/aux/broctl +++ /dev/null @@ -1 +0,0 @@ -Subproject commit afc0260abf663f4b44d535d66d378fde7b0d5206 diff --git a/aux/broctl b/aux/broctl new file mode 120000 index 0000000000..d17a55b030 --- /dev/null +++ b/aux/broctl @@ -0,0 +1 @@ +zeekctl \ No newline at end of file diff --git a/aux/broker b/aux/broker index 7dab576984..3f827567ed 160000 --- a/aux/broker +++ b/aux/broker @@ -1 +1 @@ -Subproject commit 7dab576984dee1f58fe5ceb81f36b63128d58860 +Subproject commit 3f827567edca20eb0fe9ad071519f305699296ea diff --git a/aux/btest b/aux/btest index 6ece47ba64..bcda130bfa 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 6ece47ba6438e7a6db5c7b85a68b3c16f0911871 +Subproject commit bcda130bfae106b3d071c76cd9a3f0bde66e66da diff --git a/aux/netcontrol-connectors b/aux/netcontrol-connectors index 6501fef1ff..8a6f3f7c50 160000 --- a/aux/netcontrol-connectors +++ b/aux/netcontrol-connectors @@ -1 +1 @@ -Subproject commit 6501fef1fffc0b49dda59b3716b03034edcfeee6 +Subproject commit 8a6f3f7c506ac483265afc77d3c1b0861db79601 diff --git a/aux/paraglob b/aux/paraglob new file mode 160000 index 0000000000..1a8d674d2c --- /dev/null +++ b/aux/paraglob @@ -0,0 +1 @@ +Subproject commit 1a8d674d2ccbef06a6e4e6f1a9c8747a2eadf026 diff --git a/aux/zeek-aux b/aux/zeek-aux index 96c787cb39..e0689c1c95 160000 --- a/aux/zeek-aux +++ b/aux/zeek-aux @@ -1 +1 @@ -Subproject commit 96c787cb396a5aad2d3ea3b2087f2a1fcd6b7216 +Subproject commit e0689c1c9565ba7ffcab011e9f22f6a17a67e40a diff --git a/aux/zeekctl b/aux/zeekctl new file mode 160000 index 0000000000..b6642b80f1 --- /dev/null +++ b/aux/zeekctl @@ -0,0 +1 @@ +Subproject commit b6642b80f1436751884e0dd12fa16930fabc66ca diff --git a/bro-config.h.in b/bro-config.h.in deleted file mode 100644 index 356e790c8e..0000000000 --- a/bro-config.h.in +++ /dev/null @@ -1,239 +0,0 @@ -/* Old libpcap versions (< 0.6.1) need defining pcap_freecode and - pcap_compile_nopcap */ -#cmakedefine DONT_HAVE_LIBPCAP_PCAP_FREECODE - -/* should explicitly declare socket() and friends */ -#cmakedefine DO_SOCK_DECL - -/* Define if you have the header file. */ -#cmakedefine HAVE_GETOPT_H - -/* Define if you have the `getopt_long' function. */ -#cmakedefine HAVE_GETOPT_LONG - -/* We are on a Linux system */ -#cmakedefine HAVE_LINUX - -/* We are on a Mac OS X (Darwin) system */ -#cmakedefine HAVE_DARWIN - -/* Define if you have the `mallinfo' function. */ -#cmakedefine HAVE_MALLINFO - -/* Define if you have the header file. 
*/ -#cmakedefine HAVE_MEMORY_H - -/* Define if you have the header file */ -#cmakedefine HAVE_NETINET_ETHER_H - -/* Define if you have the header file. */ -#cmakedefine HAVE_NETINET_IF_ETHER_H - -/* Define if you have the header file. */ -#cmakedefine HAVE_NETINET_IP6_H - -/* Define if you have the header file. */ -#cmakedefine HAVE_NET_ETHERNET_H - -/* Define if you have the header file. */ -#cmakedefine HAVE_NET_ETHERTYPES_H - -/* have os-proto.h */ -#cmakedefine HAVE_OS_PROTO_H - -/* Define if you have the header file. */ -#cmakedefine HAVE_PCAP_INT_H - -/* line editing & history powers */ -#cmakedefine HAVE_READLINE - -/* Define if you have the `sigaction' function. */ -#cmakedefine HAVE_SIGACTION - -/* Define if you have the `sigset' function. */ -#cmakedefine HAVE_SIGSET - -/* Define if you have the `strcasestr' function. */ -#cmakedefine HAVE_STRCASESTR - -/* Define if you have the `strerror' function. */ -#cmakedefine HAVE_STRERROR - -/* Define if you have the `strsep' function. */ -#cmakedefine HAVE_STRSEP - -/* Define if you have the header file. */ -#cmakedefine HAVE_SYS_ETHERNET_H - -/* Some libpcap versions use an extra parameter (error) in pcap_compile_nopcap - */ -#cmakedefine LIBPCAP_PCAP_COMPILE_NOPCAP_HAS_ERROR_PARAMETER - -/* Include krb5.h */ -#cmakedefine NEED_KRB5_H - -/* Compatibility for Darwin */ -#cmakedefine NEED_NAMESER_COMPAT_H - -/* d2i_x509 uses const char** */ -#cmakedefine OPENSSL_D2I_X509_USES_CONST_CHAR - -/* Define as the return type of signal handlers (`int' or `void'). */ -#define RETSIGTYPE @RETSIGTYPE@ - -/* signal function return value */ -#define RETSIGVAL @RETSIGVAL@ - -/* have sin_len field in sockaddr_in */ -#cmakedefine SIN_LEN - -/* The size of `long int', as computed by sizeof. */ -#define SIZEOF_LONG_INT @SIZEOF_LONG_INT@ - -/* The size of `long long', as computed by sizeof. */ -#define SIZEOF_LONG_LONG @SIZEOF_LONG_LONG@ - -/* The size of `void *', as computed by sizeof. */ -#define SIZEOF_VOID_P @SIZEOF_VOID_P@ - -/* should we declare syslog() and openlog() */ -#cmakedefine SYSLOG_INT - -/* Define if you have */ -#cmakedefine HAVE_SYS_TIME_H - -/* Define if you can safely include both and . */ -#cmakedefine TIME_WITH_SYS_TIME - -/* GeoIP geographic lookup functionality */ -#cmakedefine USE_GEOIP - -/* Define if KRB5 is available */ -#cmakedefine USE_KRB5 - -/* Use Google's perftools */ -#cmakedefine USE_PERFTOOLS_DEBUG - -/* Analyze Mobile IPv6 traffic */ -#cmakedefine ENABLE_MOBILE_IPV6 - -/* Use libCurl. */ -#cmakedefine USE_CURL - -/* Use the DataSeries writer. */ -#cmakedefine USE_DATASERIES - -/* Use the ElasticSearch writer. 
*/ -#cmakedefine USE_ELASTICSEARCH - -/* Version number of package */ -#define VERSION "@VERSION@" - -/* whether words are stored with the most significant byte first */ -#cmakedefine WORDS_BIGENDIAN - -/* whether htonll/ntohll is defined in */ -#cmakedefine HAVE_BYTEORDER_64 - -/* ultrix can't hack const */ -#cmakedefine NEED_ULTRIX_CONST_HACK -#ifdef NEED_ULTRIX_CONST_HACK -#define const -#endif - -/* Define int32_t */ -#cmakedefine int32_t @int32_t@ - -/* use sigset() instead of signal() */ -#ifdef HAVE_SIGSET -#define signal sigset -#endif - -/* define to int if socklen_t not available */ -#cmakedefine socklen_t @socklen_t@ - -/* Define u_int16_t */ -#cmakedefine u_int16_t @u_int16_t@ - -/* Define u_int32_t */ -#cmakedefine u_int32_t @u_int32_t@ - -/* Define u_int8_t */ -#cmakedefine u_int8_t @u_int8_t@ - -/* OpenBSD's bpf.h may not declare this data link type, but it's supposed to be - used consistently for the same purpose on all platforms. */ -#cmakedefine HAVE_DLT_PPP_SERIAL -#ifndef HAVE_DLT_PPP_SERIAL -#define DLT_PPP_SERIAL @DLT_PPP_SERIAL@ -#endif - -/* IPv6 Next Header values defined by RFC 3542 */ -#cmakedefine HAVE_IPPROTO_HOPOPTS -#ifndef HAVE_IPPROTO_HOPOPTS -#define IPPROTO_HOPOPTS 0 -#endif -#cmakedefine HAVE_IPPROTO_IPV6 -#ifndef HAVE_IPPROTO_IPV6 -#define IPPROTO_IPV6 41 -#endif -#cmakedefine HAVE_IPPROTO_IPV4 -#ifndef HAVE_IPPROTO_IPV4 -#define IPPROTO_IPV4 4 -#endif -#cmakedefine HAVE_IPPROTO_ROUTING -#ifndef HAVE_IPPROTO_ROUTING -#define IPPROTO_ROUTING 43 -#endif -#cmakedefine HAVE_IPPROTO_FRAGMENT -#ifndef HAVE_IPPROTO_FRAGMENT -#define IPPROTO_FRAGMENT 44 -#endif -#cmakedefine HAVE_IPPROTO_ESP -#ifndef HAVE_IPPROTO_ESP -#define IPPROTO_ESP 50 -#endif -#cmakedefine HAVE_IPPROTO_AH -#ifndef HAVE_IPPROTO_AH -#define IPPROTO_AH 51 -#endif -#cmakedefine HAVE_IPPROTO_ICMPV6 -#ifndef HAVE_IPPROTO_ICMPV6 -#define IPPROTO_ICMPV6 58 -#endif -#cmakedefine HAVE_IPPROTO_NONE -#ifndef HAVE_IPPROTO_NONE -#define IPPROTO_NONE 59 -#endif -#cmakedefine HAVE_IPPROTO_DSTOPTS -#ifndef HAVE_IPPROTO_DSTOPTS -#define IPPROTO_DSTOPTS 60 -#endif - -/* IPv6 options structure defined by RFC 3542 */ -#cmakedefine HAVE_IP6_OPT - -/* Common IPv6 extension structure */ -#cmakedefine HAVE_IP6_EXT - -/* String with host architecture (e.g., "linux-x86_64") */ -#define HOST_ARCHITECTURE "@HOST_ARCHITECTURE@" - -/* String with extension of dynamic libraries (e.g., ".so") */ -#define DYNAMIC_PLUGIN_SUFFIX "@CMAKE_SHARED_MODULE_SUFFIX@" - -/* True if we're building outside of the main Bro source code tree. */ -#ifndef BRO_PLUGIN_INTERNAL_BUILD -#define BRO_PLUGIN_INTERNAL_BUILD @BRO_PLUGIN_INTERNAL_BUILD@ -#endif - -/* A C function that has the Bro version encoded into its name. 
*/ -#define BRO_VERSION_FUNCTION bro_version_@VERSION_C_IDENT@ -#ifdef __cplusplus -extern "C" { -#endif -extern const char* BRO_VERSION_FUNCTION(); -#ifdef __cplusplus -} -#endif - diff --git a/bro-config.in b/bro-config.in deleted file mode 100755 index 9228271394..0000000000 --- a/bro-config.in +++ /dev/null @@ -1,87 +0,0 @@ -#!/bin/sh - -version=@VERSION@ -build_type=@CMAKE_BUILD_TYPE_LOWER@ -prefix=@CMAKE_INSTALL_PREFIX@ -script_dir=@BRO_SCRIPT_INSTALL_PATH@ -site_dir=@BRO_SCRIPT_INSTALL_PATH@/site -plugin_dir=@BRO_PLUGIN_INSTALL_PATH@ -config_dir=@BRO_ETC_INSTALL_DIR@ -python_dir=@PY_MOD_INSTALL_DIR@ -cmake_dir=@CMAKE_INSTALL_PREFIX@/share/bro/cmake -include_dir=@CMAKE_INSTALL_PREFIX@/include/bro -bropath=@DEFAULT_BROPATH@ -bro_dist=@BRO_DIST@ -binpac_root=@BRO_CONFIG_BINPAC_ROOT_DIR@ -caf_root=@BRO_CONFIG_CAF_ROOT_DIR@ -broker_root=@BRO_CONFIG_BROKER_ROOT_DIR@ - -usage="\ -Usage: bro-config [--version] [--build_type] [--prefix] [--script_dir] [--site_dir] [--plugin_dir] [--config_dir] [--python_dir] [--include_dir] [--cmake_dir] [--bropath] [--bro_dist] [--binpac_root] [--caf_root] [--broker_root]" - -if [ $# -eq 0 ] ; then - echo "${usage}" 1>&2 - exit 1 -fi - -while [ $# -ne 0 ]; do - case "$1" in - -*=*) optarg=`echo "$1" | sed 's/[-_a-zA-Z0-9]*=//'` ;; - *) optarg= ;; - esac - - case $1 in - --version) - echo $version - ;; - --prefix) - echo $prefix - ;; - --build_type) - echo $build_type - ;; - --script_dir) - echo $script_dir - ;; - --site_dir) - echo $site_dir - ;; - --plugin_dir) - echo $plugin_dir - ;; - --config_dir) - echo $config_dir - ;; - --python_dir) - echo $python_dir - ;; - --cmake_dir) - echo $cmake_dir - ;; - --include_dir) - echo $include_dir - ;; - --bropath) - echo $bropath - ;; - --bro_dist) - echo $bro_dist - ;; - --binpac_root) - echo $binpac_root - ;; - --caf_root) - echo $caf_root - ;; - --broker_root) - echo $broker_root - ;; - *) - echo "${usage}" 1>&2 - exit 1 - ;; - esac - shift -done - -exit 0 diff --git a/bro-path-dev.in b/bro-path-dev.in deleted file mode 100755 index de8b0274b9..0000000000 --- a/bro-path-dev.in +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh -# After configured by CMake, this file prints the absolute path to Bro scripts -# that come with the source distributions of Bro as well as scripts that are -# generated by the BIF compiler at compile time. -# -# The intended use of this script is to make it easier to run Bro from -# the build directory, avoiding the need to install it. This could be -# done like: -# -# BROPATH=`./bro-path-dev` ./src/bro -# - -echo .:${CMAKE_SOURCE_DIR}/scripts:${CMAKE_SOURCE_DIR}/scripts/policy:${CMAKE_SOURCE_DIR}/scripts/site:${CMAKE_BINARY_DIR}/scripts diff --git a/bro-path-dev.in b/bro-path-dev.in new file mode 120000 index 0000000000..854029fbb8 --- /dev/null +++ b/bro-path-dev.in @@ -0,0 +1 @@ +zeek-path-dev.in \ No newline at end of file diff --git a/cmake b/cmake index 0c1ee634a8..58e4eebe3a 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit 0c1ee634a8f915e738da72c797a17aad9cb618dd +Subproject commit 58e4eebe3aebd0cf608e51046805a9ab1ffa6c1b diff --git a/configure b/configure index 98bfc5308d..4c45a1f70d 100755 --- a/configure +++ b/configure @@ -31,15 +31,15 @@ Usage: $0 [OPTION]... [VAR=VALUE]... 
(useful for cross-compiling) Installation Directories: - --prefix=PREFIX installation directory [/usr/local/bro] - --scriptdir=PATH root installation directory for Bro scripts - [PREFIX/share/bro] - --localstatedir=PATH when using BroControl, path to store log files + --prefix=PREFIX installation directory [/usr/local/zeek] + --scriptdir=PATH root installation directory for Zeek scripts + [PREFIX/share/zeek] + --localstatedir=PATH when using ZeekControl, path to store log files and run-time data (within log/ and spool/ subdirs) [PREFIX] - --spooldir=PATH when using BroControl, path to store run-time data + --spooldir=PATH when using ZeekControl, path to store run-time data [PREFIX/spool] - --logdir=PATH when using BroControl, path to store log file + --logdir=PATH when using ZeekControl, path to store log file [PREFIX/logs] --conf-files-dir=PATH config files installation directory [PREFIX/etc] @@ -51,14 +51,14 @@ Usage: $0 [OPTION]... [VAR=VALUE]... (automatically on when perftools is present on Linux) --enable-perftools-debug use Google's perftools for debugging --enable-jemalloc link against jemalloc - --enable-broccoli build or install the Broccoli library (deprecated) - --enable-static-broker build broker statically (ignored if --with-broker is specified) + --enable-static-broker build Broker statically (ignored if --with-broker is specified) --enable-static-binpac build binpac statically (ignored if --with-binpac is specified) - --disable-broctl don't install Broctl + --disable-zeekctl don't install ZeekControl --disable-auxtools don't build or install auxiliary tools --disable-perftools don't try to build with Google Perftools - --disable-python don't try to build python bindings for broker + --disable-python don't try to build python bindings for Broker --disable-broker-tests don't try to build Broker unit tests + --sanitizers=SANITIZERS comma-separated list of Clang sanitizers to enable Required Packages in Non-Standard Locations: --with-openssl=PATH path to OpenSSL install root @@ -66,13 +66,13 @@ Usage: $0 [OPTION]... [VAR=VALUE]... --with-pcap=PATH path to libpcap install root --with-binpac=PATH path to BinPAC executable (useful for cross-compiling) - --with-bifcl=PATH path to Bro BIF compiler executable + --with-bifcl=PATH path to Zeek BIF compiler executable (useful for cross-compiling) --with-flex=PATH path to flex executable --with-bison=PATH path to bison executable --with-python=PATH path to Python executable --with-broker=PATH path to Broker install root - (Bro uses an embedded version by default) + (Zeek uses an embedded version by default) --with-caf=PATH path to C++ Actor Framework install root (a Broker dependency that is embedded by default) @@ -106,6 +106,18 @@ Usage: $0 [OPTION]... [VAR=VALUE]... sourcedir="$( cd "$( dirname "$0" )" && pwd )" +if [ ! -e "$sourcedir/cmake/COPYING" ] && [ -d "$sourcedir/.git" ]; then + echo "\ +You seem to be missing the content of the cmake directory. + +This typically means that you performed a non-recursive git clone of +Zeek. To check out the required subdirectories, please execute: + + ( cd $sourcedir && git submodule update --recursive --init ) +" >&2; + exit 1; +fi + # Function to append a CMake cache entry definition to the # CMakeCacheEntries variable. 
# $1 is the cache entry variable name @@ -128,24 +140,24 @@ remove_cache_entry () { # set defaults builddir=build -prefix=/usr/local/bro +prefix=/usr/local/zeek CMakeCacheEntries="" append_cache_entry CMAKE_INSTALL_PREFIX PATH $prefix -append_cache_entry BRO_ROOT_DIR PATH $prefix -append_cache_entry PY_MOD_INSTALL_DIR PATH $prefix/lib/broctl -append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $prefix/share/bro -append_cache_entry BRO_ETC_INSTALL_DIR PATH $prefix/etc +append_cache_entry ZEEK_ROOT_DIR PATH $prefix +append_cache_entry PY_MOD_INSTALL_DIR PATH $prefix/lib/zeekctl +append_cache_entry ZEEK_SCRIPT_INSTALL_PATH STRING $prefix/share/zeek +append_cache_entry ZEEK_ETC_INSTALL_DIR PATH $prefix/etc append_cache_entry ENABLE_DEBUG BOOL false append_cache_entry ENABLE_PERFTOOLS BOOL false append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL false append_cache_entry ENABLE_JEMALLOC BOOL false append_cache_entry BUILD_SHARED_LIBS BOOL true -append_cache_entry INSTALL_BROCCOLI BOOL false append_cache_entry INSTALL_AUX_TOOLS BOOL true -append_cache_entry INSTALL_BROCTL BOOL true +append_cache_entry INSTALL_ZEEKCTL BOOL true append_cache_entry CPACK_SOURCE_IGNORE_FILES STRING append_cache_entry ENABLE_MOBILE_IPV6 BOOL false append_cache_entry DISABLE_PERFTOOLS BOOL false +append_cache_entry SANITIZERS STRING "" # parse arguments while [ $# -ne 0 ]; do @@ -181,25 +193,25 @@ while [ $# -ne 0 ]; do --prefix=*) prefix=$optarg append_cache_entry CMAKE_INSTALL_PREFIX PATH $optarg - append_cache_entry BRO_ROOT_DIR PATH $optarg - append_cache_entry PY_MOD_INSTALL_DIR PATH $optarg/lib/broctl + append_cache_entry ZEEK_ROOT_DIR PATH $optarg + append_cache_entry PY_MOD_INSTALL_DIR PATH $optarg/lib/zeekctl ;; --scriptdir=*) - append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $optarg + append_cache_entry ZEEK_SCRIPT_INSTALL_PATH STRING $optarg user_set_scriptdir="true" ;; --conf-files-dir=*) - append_cache_entry BRO_ETC_INSTALL_DIR PATH $optarg + append_cache_entry ZEEK_ETC_INSTALL_DIR PATH $optarg user_set_conffilesdir="true" ;; --localstatedir=*) - append_cache_entry BRO_LOCAL_STATE_DIR PATH $optarg + append_cache_entry ZEEK_LOCAL_STATE_DIR PATH $optarg ;; --spooldir=*) - append_cache_entry BRO_SPOOL_DIR PATH $optarg + append_cache_entry ZEEK_SPOOL_DIR PATH $optarg ;; --logdir=*) - append_cache_entry BRO_LOG_DIR PATH $optarg + append_cache_entry ZEEK_LOG_DIR PATH $optarg ;; --enable-coverage) append_cache_entry ENABLE_COVERAGE BOOL true @@ -218,21 +230,20 @@ while [ $# -ne 0 ]; do append_cache_entry ENABLE_PERFTOOLS BOOL true append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL true ;; + --sanitizers=*) + append_cache_entry SANITIZERS STRING $optarg + ;; --enable-jemalloc) append_cache_entry ENABLE_JEMALLOC BOOL true ;; - --enable-broccoli) - append_cache_entry DISABLE_RUBY_BINDINGS BOOL true - append_cache_entry INSTALL_BROCCOLI BOOL yes - ;; --enable-static-broker) append_cache_entry BUILD_STATIC_BROKER BOOL true ;; --enable-static-binpac) append_cache_entry BUILD_STATIC_BINPAC BOOL true ;; - --disable-broctl) - append_cache_entry INSTALL_BROCTL BOOL false + --disable-zeekctl) + append_cache_entry INSTALL_ZEEKCTL BOOL false ;; --disable-auxtools) append_cache_entry INSTALL_AUX_TOOLS BOOL false @@ -327,11 +338,11 @@ while [ $# -ne 0 ]; do done if [ "$user_set_scriptdir" != "true" ]; then - append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $prefix/share/bro + append_cache_entry ZEEK_SCRIPT_INSTALL_PATH STRING $prefix/share/zeek fi if [ "$user_set_conffilesdir" != "true" ]; then - append_cache_entry BRO_ETC_INSTALL_DIR 
PATH $prefix/etc + append_cache_entry ZEEK_ETC_INSTALL_DIR PATH $prefix/etc fi if [ -d $builddir ]; then diff --git a/doc b/doc index 5aa921f0f6..a840cea13e 160000 --- a/doc +++ b/doc @@ -1 +1 @@ -Subproject commit 5aa921f0f6ce2931e446a11f2a10cffb7f0dbc09 +Subproject commit a840cea13e7c21951079422d6ec8971fb6812b06 diff --git a/man/CMakeLists.txt b/man/CMakeLists.txt index a369ee32b3..6a8a3d1bd9 100644 --- a/man/CMakeLists.txt +++ b/man/CMakeLists.txt @@ -1,5 +1,5 @@ -install(DIRECTORY . DESTINATION ${BRO_MAN_INSTALL_PATH}/man8 FILES_MATCHING +install(DIRECTORY . DESTINATION ${ZEEK_MAN_INSTALL_PATH}/man8 FILES_MATCHING PATTERN "*.8" ) diff --git a/man/bro.8 b/man/bro.8 deleted file mode 100644 index 66d0fc4f20..0000000000 --- a/man/bro.8 +++ /dev/null @@ -1,156 +0,0 @@ -.TH BRO "8" "November 2014" "bro" "System Administration Utilities" -.SH NAME -bro \- passive network traffic analyzer -.SH SYNOPSIS -.B bro -\/\fP [\fIoptions\fR] [\fIfile\fR ...] -.SH DESCRIPTION -Bro is primarily a security monitor that inspects all traffic on a link in -depth for signs of suspicious activity. More generally, however, Bro -supports a wide range of traffic analysis tasks even outside of the -security domain, including performance measurements and helping with -trouble-shooting. - -Bro comes with built-in functionality for a range of analysis and detection -tasks, including detecting malware by interfacing to external registries, -reporting vulnerable versions of software seen on the network, identifying -popular web applications, detecting SSH brute-forcing, validating SSL -certificate chains, among others. -.SH OPTIONS -.TP -.B -policy file, or read stdin -.TP -\fB\-a\fR,\ \-\-parse\-only -exit immediately after parsing scripts -.TP -\fB\-b\fR,\ \-\-bare\-mode -don't load scripts from the base/ directory -.TP -\fB\-d\fR,\ \-\-debug\-policy -activate policy file debugging -.TP -\fB\-e\fR,\ \-\-exec -augment loaded policies by given code -.TP -\fB\-f\fR,\ \-\-filter -tcpdump filter -.TP -\fB\-g\fR,\ \-\-dump\-config -dump current config into .state dir -.TP -\fB\-h\fR,\ \-\-help|\-? 
-command line help -.TP -\fB\-i\fR,\ \-\-iface -read from given interface -.TP -\fB\-p\fR,\ \-\-prefix -add given prefix to policy file resolution -.TP -\fB\-r\fR,\ \-\-readfile -read from given tcpdump file -.TP -\fB\-s\fR,\ \-\-rulefile -read rules from given file -.TP -\fB\-t\fR,\ \-\-tracefile -activate execution tracing -.TP -\fB\-w\fR,\ \-\-writefile -write to given tcpdump file -.TP -\fB\-v\fR,\ \-\-version -print version and exit -.TP -\fB\-x\fR,\ \-\-print\-state -print contents of state file -.TP -\fB\-C\fR,\ \-\-no\-checksums -ignore checksums -.TP -\fB\-F\fR,\ \-\-force\-dns -force DNS -.TP -\fB\-I\fR,\ \-\-print\-id -print out given ID -.TP -\fB\-N\fR,\ \-\-print\-plugins -print available plugins and exit (\fB\-NN\fR for verbose) -.TP -\fB\-P\fR,\ \-\-prime\-dns -prime DNS -.TP -\fB\-Q\fR,\ \-\-time -print execution time summary to stderr -.TP -\fB\-R\fR,\ \-\-replay -replay events -.TP -\fB\-S\fR,\ \-\-debug\-rules -enable rule debugging -.TP -\fB\-T\fR,\ \-\-re\-level -set 'RE_level' for rules -.TP -\fB\-U\fR,\ \-\-status\-file -Record process status in file -.TP -\fB\-W\fR,\ \-\-watchdog -activate watchdog timer -.TP -\fB\-X\fR,\ \-\-broxygen -generate documentation based on config file -.TP -\fB\-\-pseudo\-realtime[=\fR] -enable pseudo\-realtime for performance evaluation (default 1) -.TP -\fB\-\-load\-seeds\fR -load seeds from given file -.TP -\fB\-\-save\-seeds\fR -save seeds to given file -.TP -The following option is available only when Bro is built with the \-\-enable\-debug configure option: -.TP -\fB\-B\fR,\ \-\-debug -Enable debugging output for selected streams ('-B help' for help) -.TP -The following options are available only when Bro is built with gperftools support (use the \-\-enable\-perftools and \-\-enable\-perftools\-debug configure options): -.TP -\fB\-m\fR,\ \-\-mem-leaks -show leaks -.TP -\fB\-M\fR,\ \-\-mem-profile -record heap -.SH ENVIRONMENT -.TP -.B BROPATH -file search path -.TP -.B BRO_PLUGIN_PATH -plugin search path -.TP -.B BRO_PLUGIN_ACTIVATE -plugins to always activate -.TP -.B BRO_PREFIXES -prefix list -.TP -.B BRO_DNS_FAKE -disable DNS lookups -.TP -.B BRO_SEED_FILE -file to load seeds from -.TP -.B BRO_LOG_SUFFIX -ASCII log file extension -.TP -.B BRO_PROFILER_FILE -Output file for script execution statistics -.TP -.B BRO_DISABLE_BROXYGEN -Disable Broxygen documentation support -.SH AUTHOR -.B bro -was written by The Bro Project . diff --git a/man/zeek.8 b/man/zeek.8 new file mode 100644 index 0000000000..b59b054328 --- /dev/null +++ b/man/zeek.8 @@ -0,0 +1,153 @@ +.TH ZEEK "8" "November 2014" "zeek" "System Administration Utilities" +.SH NAME +zeek \- passive network traffic analyzer +.SH SYNOPSIS +.B zeek +\/\fP [\fIoptions\fR] [\fIfile\fR ...] +.SH DESCRIPTION +Zeek is primarily a security monitor that inspects all traffic on a link in +depth for signs of suspicious activity. More generally, however, Zeek +supports a wide range of traffic analysis tasks even outside of the +security domain, including performance measurements and helping with +trouble-shooting. + +Zeek comes with built-in functionality for a range of analysis and detection +tasks, including detecting malware by interfacing to external registries, +reporting vulnerable versions of software seen on the network, identifying +popular web applications, detecting SSH brute-forcing, validating SSL +certificate chains, among others. 
+.SH OPTIONS +.TP +.B +policy file, or read stdin +.TP +\fB\-a\fR,\ \-\-parse\-only +exit immediately after parsing scripts +.TP +\fB\-b\fR,\ \-\-bare\-mode +don't load scripts from the base/ directory +.TP +\fB\-d\fR,\ \-\-debug\-policy +activate policy file debugging +.TP +\fB\-e\fR,\ \-\-exec +augment loaded policies by given code +.TP +\fB\-f\fR,\ \-\-filter +tcpdump filter +.TP +\fB\-h\fR,\ \-\-help|\-? +command line help +.TP +\fB\-i\fR,\ \-\-iface +read from given interface +.TP +\fB\-p\fR,\ \-\-prefix +add given prefix to policy file resolution +.TP +\fB\-r\fR,\ \-\-readfile +read from given tcpdump file +.TP +\fB\-s\fR,\ \-\-rulefile +read rules from given file +.TP +\fB\-t\fR,\ \-\-tracefile +activate execution tracing +.TP +\fB\-w\fR,\ \-\-writefile +write to given tcpdump file +.TP +\fB\-v\fR,\ \-\-version +print version and exit +.TP +\fB\-x\fR,\ \-\-print\-state +print contents of state file +.TP +\fB\-C\fR,\ \-\-no\-checksums +ignore checksums +.TP +\fB\-F\fR,\ \-\-force\-dns +force DNS +.TP +\fB\-I\fR,\ \-\-print\-id +print out given ID +.TP +\fB\-N\fR,\ \-\-print\-plugins +print available plugins and exit (\fB\-NN\fR for verbose) +.TP +\fB\-P\fR,\ \-\-prime\-dns +prime DNS +.TP +\fB\-Q\fR,\ \-\-time +print execution time summary to stderr +.TP +\fB\-R\fR,\ \-\-replay +replay events +.TP +\fB\-S\fR,\ \-\-debug\-rules +enable rule debugging +.TP +\fB\-T\fR,\ \-\-re\-level +set 'RE_level' for rules +.TP +\fB\-U\fR,\ \-\-status\-file +Record process status in file +.TP +\fB\-W\fR,\ \-\-watchdog +activate watchdog timer +.TP +\fB\-X\fR,\ \-\-zeekygen +generate documentation based on config file +.TP +\fB\-\-pseudo\-realtime[=\fR] +enable pseudo\-realtime for performance evaluation (default 1) +.TP +\fB\-\-load\-seeds\fR +load seeds from given file +.TP +\fB\-\-save\-seeds\fR +save seeds to given file +.TP +The following option is available only when Zeek is built with the \-\-enable\-debug configure option: +.TP +\fB\-B\fR,\ \-\-debug +Enable debugging output for selected streams ('-B help' for help) +.TP +The following options are available only when Zeek is built with gperftools support (use the \-\-enable\-perftools and \-\-enable\-perftools\-debug configure options): +.TP +\fB\-m\fR,\ \-\-mem-leaks +show leaks +.TP +\fB\-M\fR,\ \-\-mem-profile +record heap +.SH ENVIRONMENT +.TP +.B ZEEKPATH +file search path +.TP +.B ZEEK_PLUGIN_PATH +plugin search path +.TP +.B ZEEK_PLUGIN_ACTIVATE +plugins to always activate +.TP +.B ZEEK_PREFIXES +prefix list +.TP +.B ZEEK_DNS_FAKE +disable DNS lookups +.TP +.B ZEEK_SEED_FILE +file to load seeds from +.TP +.B ZEEK_LOG_SUFFIX +ASCII log file extension +.TP +.B ZEEK_PROFILER_FILE +Output file for script execution statistics +.TP +.B ZEEK_DISABLE_ZEEKYGEN +Disable Zeekygen (Broxygen) documentation support +.SH AUTHOR +.B zeek +was written by The Zeek Project . diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt index 96c682871a..266981dd9e 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -1,16 +1,35 @@ include(InstallPackageConfigFile) -install(DIRECTORY ./ DESTINATION ${BRO_SCRIPT_INSTALL_PATH} FILES_MATCHING +install(DIRECTORY ./ DESTINATION ${ZEEK_SCRIPT_INSTALL_PATH} FILES_MATCHING PATTERN "site/local*" EXCLUDE - PATTERN "test-all-policy.bro" EXCLUDE - PATTERN "*.bro" + PATTERN "test-all-policy.zeek" EXCLUDE + PATTERN "*.zeek" PATTERN "*.sig" PATTERN "*.fp" ) -# Install all local* scripts as config files since they are meant to be -# user modify-able. 
+if ( NOT BINARY_PACKAGING_MODE ) + # If the user has a local.bro file from a previous installation, prefer to + # symlink local.zeek to it to avoid breaking their custom configuration -- + # because ZeekControl will now prefer to load local.zeek rather than local.bro + # and we're about to install a default version of local.zeek. + + set(_local_bro_dst ${ZEEK_SCRIPT_INSTALL_PATH}/site/local.bro) + set(_local_zeek_dst ${ZEEK_SCRIPT_INSTALL_PATH}/site/local.zeek) + + install(CODE " + if ( \"\$ENV{DESTDIR}\" STREQUAL \"\" ) + if ( EXISTS \"${_local_bro_dst}\" AND NOT EXISTS \"${_local_zeek_dst}\" ) + message(STATUS \"WARNING: installed ${_local_zeek_dst} as symlink to ${_local_bro_dst}\") + execute_process(COMMAND \"${CMAKE_COMMAND}\" -E create_symlink + \"${_local_bro_dst}\" \"${_local_zeek_dst}\") + endif () + endif () + ") +endif () + +# Install local script as a config file since it's meant to be modified directly. InstallPackageConfigFile( - ${CMAKE_CURRENT_SOURCE_DIR}/site/local.bro - ${BRO_SCRIPT_INSTALL_PATH}/site - local.bro) + ${CMAKE_CURRENT_SOURCE_DIR}/site/local.zeek + ${ZEEK_SCRIPT_INSTALL_PATH}/site + local.zeek) diff --git a/scripts/base/files/extract/__load__.bro b/scripts/base/files/extract/__load__.zeek similarity index 100% rename from scripts/base/files/extract/__load__.bro rename to scripts/base/files/extract/__load__.zeek diff --git a/scripts/base/files/extract/main.bro b/scripts/base/files/extract/main.bro deleted file mode 100644 index b2d1907e01..0000000000 --- a/scripts/base/files/extract/main.bro +++ /dev/null @@ -1,81 +0,0 @@ -@load base/frameworks/files -@load base/utils/paths - -module FileExtract; - -export { - ## The prefix where files are extracted to. - const prefix = "./extract_files/" &redef; - - ## The default max size for extracted files (they won't exceed this - ## number of bytes). A value of zero means unlimited. - option default_limit = 0; - - redef record Files::Info += { - ## Local filename of extracted file. - extracted: string &optional &log; - - ## Set to true if the file being extracted was cut off - ## so the whole file was not logged. - extracted_cutoff: bool &optional &log; - - ## The number of bytes extracted to disk. - extracted_size: count &optional &log; - }; - - redef record Files::AnalyzerArgs += { - ## The local filename to which to write an extracted file. - ## This field is used in the core by the extraction plugin - ## to know where to write the file to. If not specified, then - ## a filename in the format "extract--" is - ## automatically assigned (using the *source* and *id* - ## fields of :bro:see:`fa_file`). - extract_filename: string &optional; - ## The maximum allowed file size in bytes of *extract_filename*. - ## Once reached, a :bro:see:`file_extraction_limit` event is - ## raised and the analyzer will be removed unless - ## :bro:see:`FileExtract::set_limit` is called to increase the - ## limit. A value of zero means "no limit". - extract_limit: count &default=default_limit; - }; - - ## Sets the maximum allowed extracted file size. - ## - ## f: A file that's being extracted. - ## - ## args: Arguments that identify a file extraction analyzer. - ## - ## n: Allowed number of bytes to be extracted. - ## - ## Returns: false if a file extraction analyzer wasn't active for - ## the file, else true. 
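Editor's aside, not part of the patch: the FileExtract knobs documented in the extract/main.* hunks here (prefix, default_limit, and the per-file extract_filename / extract_limit analyzer arguments) are normally driven from a user script. A minimal sketch, assuming the stock base scripts; the output directory, file-name prefix, and 5 MB cap are arbitrary examples:

@load base/files/extract

# Extracted files land under this directory (FileExtract::prefix is &redef).
redef FileExtract::prefix = "/var/tmp/zeek-extracted/";

event file_new(f: fa_file)
    {
    # extract_filename is optional; a name of the form
    # "extract-<ts>-<source>-<id>" is generated when it is omitted.
    # extract_limit caps this particular file at roughly 5 MB.
    Files::add_analyzer(f, Files::ANALYZER_EXTRACT,
                        [$extract_filename=cat("sample-", f$id),
                         $extract_limit=5242880]);
    }

If the cap is hit, a file_extraction_limit event fires and FileExtract::set_limit() can be called from a handler to raise it for that one file, as documented in the hunk above.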
- global set_limit: function(f: fa_file, args: Files::AnalyzerArgs, n: count): bool; -} - -function set_limit(f: fa_file, args: Files::AnalyzerArgs, n: count): bool - { - return __set_limit(f$id, args, n); - } - -function on_add(f: fa_file, args: Files::AnalyzerArgs) - { - if ( ! args?$extract_filename ) - args$extract_filename = cat("extract-", f$last_active, "-", f$source, - "-", f$id); - - f$info$extracted = args$extract_filename; - args$extract_filename = build_path_compressed(prefix, args$extract_filename); - f$info$extracted_cutoff = F; - mkdir(prefix); - } - -event file_extraction_limit(f: fa_file, args: Files::AnalyzerArgs, limit: count, len: count) &priority=10 - { - f$info$extracted_cutoff = T; - f$info$extracted_size = limit; - } - -event bro_init() &priority=10 - { - Files::register_analyzer_add_callback(Files::ANALYZER_EXTRACT, on_add); - } diff --git a/scripts/base/files/extract/main.zeek b/scripts/base/files/extract/main.zeek new file mode 100644 index 0000000000..93288c5127 --- /dev/null +++ b/scripts/base/files/extract/main.zeek @@ -0,0 +1,81 @@ +@load base/frameworks/files +@load base/utils/paths + +module FileExtract; + +export { + ## The prefix where files are extracted to. + const prefix = "./extract_files/" &redef; + + ## The default max size for extracted files (they won't exceed this + ## number of bytes). A value of zero means unlimited. + option default_limit = 0; + + redef record Files::Info += { + ## Local filename of extracted file. + extracted: string &optional &log; + + ## Set to true if the file being extracted was cut off + ## so the whole file was not logged. + extracted_cutoff: bool &optional &log; + + ## The number of bytes extracted to disk. + extracted_size: count &optional &log; + }; + + redef record Files::AnalyzerArgs += { + ## The local filename to which to write an extracted file. + ## This field is used in the core by the extraction plugin + ## to know where to write the file to. If not specified, then + ## a filename in the format "extract--" is + ## automatically assigned (using the *source* and *id* + ## fields of :zeek:see:`fa_file`). + extract_filename: string &optional; + ## The maximum allowed file size in bytes of *extract_filename*. + ## Once reached, a :zeek:see:`file_extraction_limit` event is + ## raised and the analyzer will be removed unless + ## :zeek:see:`FileExtract::set_limit` is called to increase the + ## limit. A value of zero means "no limit". + extract_limit: count &default=default_limit; + }; + + ## Sets the maximum allowed extracted file size. + ## + ## f: A file that's being extracted. + ## + ## args: Arguments that identify a file extraction analyzer. + ## + ## n: Allowed number of bytes to be extracted. + ## + ## Returns: false if a file extraction analyzer wasn't active for + ## the file, else true. + global set_limit: function(f: fa_file, args: Files::AnalyzerArgs, n: count): bool; +} + +function set_limit(f: fa_file, args: Files::AnalyzerArgs, n: count): bool + { + return __set_limit(f$id, args, n); + } + +function on_add(f: fa_file, args: Files::AnalyzerArgs) + { + if ( ! 
args?$extract_filename ) + args$extract_filename = cat("extract-", f$last_active, "-", f$source, + "-", f$id); + + f$info$extracted = args$extract_filename; + args$extract_filename = build_path_compressed(prefix, args$extract_filename); + f$info$extracted_cutoff = F; + mkdir(prefix); + } + +event file_extraction_limit(f: fa_file, args: Files::AnalyzerArgs, limit: count, len: count) &priority=10 + { + f$info$extracted_cutoff = T; + f$info$extracted_size = limit; + } + +event zeek_init() &priority=10 + { + Files::register_analyzer_add_callback(Files::ANALYZER_EXTRACT, on_add); + } diff --git a/scripts/base/files/hash/__load__.bro b/scripts/base/files/hash/__load__.zeek similarity index 100% rename from scripts/base/files/hash/__load__.bro rename to scripts/base/files/hash/__load__.zeek diff --git a/scripts/base/files/hash/main.bro b/scripts/base/files/hash/main.zeek similarity index 100% rename from scripts/base/files/hash/main.bro rename to scripts/base/files/hash/main.zeek diff --git a/scripts/base/files/pe/__load__.bro b/scripts/base/files/pe/__load__.zeek similarity index 100% rename from scripts/base/files/pe/__load__.bro rename to scripts/base/files/pe/__load__.zeek diff --git a/scripts/base/files/pe/consts.bro b/scripts/base/files/pe/consts.zeek similarity index 100% rename from scripts/base/files/pe/consts.bro rename to scripts/base/files/pe/consts.zeek diff --git a/scripts/base/files/pe/main.bro b/scripts/base/files/pe/main.bro deleted file mode 100644 index 972e8a31c8..0000000000 --- a/scripts/base/files/pe/main.bro +++ /dev/null @@ -1,137 +0,0 @@ -module PE; - -@load ./consts.bro - -export { - redef enum Log::ID += { LOG }; - - type Info: record { - ## Current timestamp. - ts: time &log; - ## File id of this portable executable file. - id: string &log; - ## The target machine that the file was compiled for. - machine: string &log &optional; - ## The time that the file was created at. - compile_ts: time &log &optional; - ## The required operating system. - os: string &log &optional; - ## The subsystem that is required to run this file. - subsystem: string &log &optional; - ## Is the file an executable, or just an object file? - is_exe: bool &log &default=T; - ## Is the file a 64-bit executable? - is_64bit: bool &log &default=T; - ## Does the file support Address Space Layout Randomization? - uses_aslr: bool &log &default=F; - ## Does the file support Data Execution Prevention? - uses_dep: bool &log &default=F; - ## Does the file enforce code integrity checks? - uses_code_integrity: bool &log &default=F; - ## Does the file use structured exception handing? - uses_seh: bool &log &default=T; - ## Does the file have an import table? - has_import_table: bool &log &optional; - ## Does the file have an export table? - has_export_table: bool &log &optional; - ## Does the file have an attribute certificate table? - has_cert_table: bool &log &optional; - ## Does the file have a debug table? - has_debug_data: bool &log &optional; - ## The names of the sections, in order. - section_names: vector of string &log &optional; - }; - - ## Event for accessing logged records. - global log_pe: event(rec: Info); - - ## A hook that gets called when we first see a PE file. 
- global set_file: hook(f: fa_file); -} - -redef record fa_file += { - pe: Info &optional; -}; - -const pe_mime_types = { "application/x-dosexec" }; - -event bro_init() &priority=5 - { - Files::register_for_mime_types(Files::ANALYZER_PE, pe_mime_types); - Log::create_stream(LOG, [$columns=Info, $ev=log_pe, $path="pe"]); - } - -hook set_file(f: fa_file) &priority=5 - { - if ( ! f?$pe ) - f$pe = [$ts=network_time(), $id=f$id]; - } - -event pe_dos_header(f: fa_file, h: PE::DOSHeader) &priority=5 - { - hook set_file(f); - } - -event pe_file_header(f: fa_file, h: PE::FileHeader) &priority=5 - { - hook set_file(f); - - f$pe$machine = machine_types[h$machine]; - f$pe$compile_ts = h$ts; - f$pe$is_exe = ( h$optional_header_size > 0 ); - - for ( c in h$characteristics ) - { - if ( file_characteristics[c] == "32BIT_MACHINE" ) - f$pe$is_64bit = F; - } - } - -event pe_optional_header(f: fa_file, h: PE::OptionalHeader) &priority=5 - { - hook set_file(f); - - # Only EXEs have optional headers - if ( ! f$pe$is_exe ) - return; - - f$pe$os = os_versions[h$os_version_major, h$os_version_minor]; - f$pe$subsystem = windows_subsystems[h$subsystem]; - - for ( c in h$dll_characteristics ) - { - if ( dll_characteristics[c] == "DYNAMIC_BASE" ) - f$pe$uses_aslr = T; - if ( dll_characteristics[c] == "FORCE_INTEGRITY" ) - f$pe$uses_code_integrity = T; - if ( dll_characteristics[c] == "NX_COMPAT" ) - f$pe$uses_dep = T; - if ( dll_characteristics[c] == "NO_SEH" ) - f$pe$uses_seh = F; - } - - f$pe$has_export_table = (|h$table_sizes| > 0 && h$table_sizes[0] > 0); - f$pe$has_import_table = (|h$table_sizes| > 1 && h$table_sizes[1] > 0); - f$pe$has_cert_table = (|h$table_sizes| > 4 && h$table_sizes[4] > 0); - f$pe$has_debug_data = (|h$table_sizes| > 6 && h$table_sizes[6] > 0); - } - -event pe_section_header(f: fa_file, h: PE::SectionHeader) &priority=5 - { - hook set_file(f); - - # Only EXEs have section headers - if ( ! f$pe$is_exe ) - return; - - if ( ! f$pe?$section_names ) - f$pe$section_names = vector(); - f$pe$section_names += h$name; - } - -event file_state_remove(f: fa_file) &priority=-5 - { - if ( f?$pe && f$pe?$machine ) - Log::write(LOG, f$pe); - } - diff --git a/scripts/base/files/pe/main.zeek b/scripts/base/files/pe/main.zeek new file mode 100644 index 0000000000..688c161177 --- /dev/null +++ b/scripts/base/files/pe/main.zeek @@ -0,0 +1,137 @@ +module PE; + +@load ./consts + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + ## Current timestamp. + ts: time &log; + ## File id of this portable executable file. + id: string &log; + ## The target machine that the file was compiled for. + machine: string &log &optional; + ## The time that the file was created at. + compile_ts: time &log &optional; + ## The required operating system. + os: string &log &optional; + ## The subsystem that is required to run this file. + subsystem: string &log &optional; + ## Is the file an executable, or just an object file? + is_exe: bool &log &default=T; + ## Is the file a 64-bit executable? + is_64bit: bool &log &default=T; + ## Does the file support Address Space Layout Randomization? + uses_aslr: bool &log &default=F; + ## Does the file support Data Execution Prevention? + uses_dep: bool &log &default=F; + ## Does the file enforce code integrity checks? + uses_code_integrity: bool &log &default=F; + ## Does the file use structured exception handing? + uses_seh: bool &log &default=T; + ## Does the file have an import table? + has_import_table: bool &log &optional; + ## Does the file have an export table? 
+ has_export_table: bool &log &optional; + ## Does the file have an attribute certificate table? + has_cert_table: bool &log &optional; + ## Does the file have a debug table? + has_debug_data: bool &log &optional; + ## The names of the sections, in order. + section_names: vector of string &log &optional; + }; + + ## Event for accessing logged records. + global log_pe: event(rec: Info); + + ## A hook that gets called when we first see a PE file. + global set_file: hook(f: fa_file); +} + +redef record fa_file += { + pe: Info &optional; +}; + +const pe_mime_types = { "application/x-dosexec" }; + +event zeek_init() &priority=5 + { + Files::register_for_mime_types(Files::ANALYZER_PE, pe_mime_types); + Log::create_stream(LOG, [$columns=Info, $ev=log_pe, $path="pe"]); + } + +hook set_file(f: fa_file) &priority=5 + { + if ( ! f?$pe ) + f$pe = [$ts=network_time(), $id=f$id]; + } + +event pe_dos_header(f: fa_file, h: PE::DOSHeader) &priority=5 + { + hook set_file(f); + } + +event pe_file_header(f: fa_file, h: PE::FileHeader) &priority=5 + { + hook set_file(f); + + f$pe$machine = machine_types[h$machine]; + f$pe$compile_ts = h$ts; + f$pe$is_exe = ( h$optional_header_size > 0 ); + + for ( c in h$characteristics ) + { + if ( file_characteristics[c] == "32BIT_MACHINE" ) + f$pe$is_64bit = F; + } + } + +event pe_optional_header(f: fa_file, h: PE::OptionalHeader) &priority=5 + { + hook set_file(f); + + # Only EXEs have optional headers + if ( ! f$pe$is_exe ) + return; + + f$pe$os = os_versions[h$os_version_major, h$os_version_minor]; + f$pe$subsystem = windows_subsystems[h$subsystem]; + + for ( c in h$dll_characteristics ) + { + if ( dll_characteristics[c] == "DYNAMIC_BASE" ) + f$pe$uses_aslr = T; + if ( dll_characteristics[c] == "FORCE_INTEGRITY" ) + f$pe$uses_code_integrity = T; + if ( dll_characteristics[c] == "NX_COMPAT" ) + f$pe$uses_dep = T; + if ( dll_characteristics[c] == "NO_SEH" ) + f$pe$uses_seh = F; + } + + f$pe$has_export_table = (|h$table_sizes| > 0 && h$table_sizes[0] > 0); + f$pe$has_import_table = (|h$table_sizes| > 1 && h$table_sizes[1] > 0); + f$pe$has_cert_table = (|h$table_sizes| > 4 && h$table_sizes[4] > 0); + f$pe$has_debug_data = (|h$table_sizes| > 6 && h$table_sizes[6] > 0); + } + +event pe_section_header(f: fa_file, h: PE::SectionHeader) &priority=5 + { + hook set_file(f); + + # Only EXEs have section headers + if ( ! f$pe$is_exe ) + return; + + if ( ! f$pe?$section_names ) + f$pe$section_names = vector(); + f$pe$section_names += h$name; + } + +event file_state_remove(f: fa_file) &priority=-5 + { + if ( f?$pe && f$pe?$machine ) + Log::write(LOG, f$pe); + } + diff --git a/scripts/base/files/unified2/main.bro b/scripts/base/files/unified2/main.bro deleted file mode 100644 index 4670ff35c1..0000000000 --- a/scripts/base/files/unified2/main.bro +++ /dev/null @@ -1,297 +0,0 @@ - -@load base/utils/dir -@load base/utils/paths - -module Unified2; - -export { - redef enum Log::ID += { LOG }; - - ## File to watch for Unified2 files. - const watch_file = "" &redef; - - ## Directory to watch for Unified2 records. - const watch_dir = "" &redef; - - ## The sid-msg.map file you would like to use for your alerts. - const sid_msg = "" &redef; - - ## The gen-msg.map file you would like to use for your alerts. - const gen_msg = "" &redef; - - ## The classification.config file you would like to use for your alerts. - const classification_config = "" &redef; - - ## Reconstructed "alert" which combines related events - ## and packets. 
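Editor's aside, not part of the patch: the pe/main.zeek hunk above defines the PE::Info record and exposes each logged entry through the PE::log_pe event, so user scripts can react to pe.log records as they are written. A minimal sketch; the particular check (32-bit executables without ASLR or DEP) is just an example policy:

@load base/files/pe

event PE::log_pe(rec: PE::Info)
    {
    # is_exe, is_64bit, uses_aslr and uses_dep all carry &default values,
    # so they are always present on a logged record.
    if ( rec$is_exe && ! rec$is_64bit && ( ! rec$uses_aslr || ! rec$uses_dep ) )
        print fmt("PE file %s lacks ASLR and/or DEP", rec$id);
    }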
- global alert: event(f: fa_file, ev: Unified2::IDSEvent, pkt: Unified2::Packet); - - type PacketID: record { - src_ip: addr; - src_p: port; - dst_ip: addr; - dst_p: port; - } &log; - - type Info: record { - ## Timestamp attached to the alert. - ts: time &log; - ## Addresses and ports for the connection. - id: PacketID &log; - ## Sensor that originated this event. - sensor_id: count &log; - ## Sig id for this generator. - signature_id: count &log; - ## A string representation of the *signature_id* field if a sid_msg.map file was loaded. - signature: string &log &optional; - ## Which generator generated the alert? - generator_id: count &log; - ## A string representation of the *generator_id* field if a gen_msg.map file was loaded. - generator: string &log &optional; - ## Sig revision for this id. - signature_revision: count &log; - ## Event classification. - classification_id: count &log; - ## Descriptive classification string. - classification: string &log &optional; - ## Event priority. - priority_id: count &log; - ## Event ID. - event_id: count &log; - ## Some of the packet data. - packet: string &log &optional; - } &log; - - ## The event for accessing logged records. - global log_unified2: event(rec: Info); -} - -# Mappings for extended information from alerts. -global classification_map: table[count] of string; -global sid_map: table[count] of string; -global gen_map: table[count] of string; - -global num_classification_map_reads = 0; -global num_sid_map_reads = 0; -global num_gen_map_reads = 0; -global watching = F; - -# For reading in config files. -type OneLine: record { - line: string; -}; - -function mappings_initialized(): bool - { - return num_classification_map_reads > 0 && - num_sid_map_reads > 0 && - num_gen_map_reads > 0; - } - -function start_watching() - { - if ( watching ) - return; - - watching = T; - - if ( watch_dir != "" ) - { - Dir::monitor(watch_dir, function(fname: string) - { - Input::add_analysis([$source=fname, - $reader=Input::READER_BINARY, - $mode=Input::STREAM, - $name=fname]); - }, 10secs); - } - - if ( watch_file != "" ) - { - Input::add_analysis([$source=watch_file, - $reader=Input::READER_BINARY, - $mode=Input::STREAM, - $name=watch_file]); - } - } - -function create_info(ev: IDSEvent): Info - { - local info = Info($ts=ev$ts, - $id=PacketID($src_ip=ev$src_ip, $src_p=ev$src_p, - $dst_ip=ev$dst_ip, $dst_p=ev$dst_p), - $sensor_id=ev$sensor_id, - $signature_id=ev$signature_id, - $generator_id=ev$generator_id, - $signature_revision=ev$signature_revision, - $classification_id=ev$classification_id, - $priority_id=ev$priority_id, - $event_id=ev$event_id); - - if ( ev$signature_id in sid_map ) - info$signature=sid_map[ev$signature_id]; - if ( ev$generator_id in gen_map ) - info$generator=gen_map[ev$generator_id]; - if ( ev$classification_id in classification_map ) - info$classification=classification_map[ev$classification_id]; - - return info; - } - -redef record fa_file += { - ## Recently received IDS events. This is primarily used - ## for tying together Unified2 events and packets. 
- u2_events: table[count] of Unified2::IDSEvent - &optional &create_expire=5sec - &expire_func=function(t: table[count] of Unified2::IDSEvent, event_id: count): interval - { - Log::write(LOG, create_info(t[event_id])); - return 0secs; - }; -}; - -event Unified2::read_sid_msg_line(desc: Input::EventDescription, tpe: Input::Event, line: string) - { - local parts = split_string_n(line, / \|\| /, F, 100); - if ( |parts| >= 2 && /^[0-9]+$/ in parts[0] ) - sid_map[to_count(parts[0])] = parts[1]; - } - -event Unified2::read_gen_msg_line(desc: Input::EventDescription, tpe: Input::Event, line: string) - { - local parts = split_string_n(line, / \|\| /, F, 3); - if ( |parts| >= 2 && /^[0-9]+$/ in parts[0] ) - gen_map[to_count(parts[0])] = parts[2]; - } - -event Unified2::read_classification_line(desc: Input::EventDescription, tpe: Input::Event, line: string) - { - local parts = split_string_n(line, /: /, F, 2); - if ( |parts| == 2 ) - { - local parts2 = split_string_n(parts[1], /,/, F, 4); - if ( |parts2| > 1 ) - classification_map[|classification_map|+1] = parts2[0]; - } - } - -event Input::end_of_data(name: string, source: string) - { - if ( name == classification_config ) - ++num_classification_map_reads; - else if ( name == sid_msg ) - ++num_sid_map_reads; - else if ( name == gen_msg ) - ++num_gen_map_reads; - else - return; - - if ( watching ) - return; - - if ( mappings_initialized() ) - start_watching(); - } - -event bro_init() &priority=5 - { - Log::create_stream(Unified2::LOG, [$columns=Info, $ev=log_unified2, $path="unified2"]); - - if ( sid_msg == "" ) - { - num_sid_map_reads = 1; - } - else - { - Input::add_event([$source=sid_msg, - $reader=Input::READER_RAW, - $mode=Input::REREAD, - $name=sid_msg, - $fields=Unified2::OneLine, - $want_record=F, - $ev=Unified2::read_sid_msg_line]); - } - - if ( gen_msg == "" ) - { - num_gen_map_reads = 1; - } - else - { - Input::add_event([$source=gen_msg, - $name=gen_msg, - $reader=Input::READER_RAW, - $mode=Input::REREAD, - $fields=Unified2::OneLine, - $want_record=F, - $ev=Unified2::read_gen_msg_line]); - } - - if ( classification_config == "" ) - { - num_classification_map_reads = 1; - } - else - { - Input::add_event([$source=classification_config, - $name=classification_config, - $reader=Input::READER_RAW, - $mode=Input::REREAD, - $fields=Unified2::OneLine, - $want_record=F, - $ev=Unified2::read_classification_line]); - } - - if ( mappings_initialized() ) - start_watching(); - } - -event file_new(f: fa_file) - { - local file_dir = ""; - local parts = split_string_all(f$source, /\/[^\/]*$/); - if ( |parts| == 3 ) - file_dir = parts[0]; - - if ( (watch_file != "" && f$source == watch_file) || - (watch_dir != "" && compress_path(watch_dir) == file_dir) ) - { - Files::add_analyzer(f, Files::ANALYZER_UNIFIED2); - f$u2_events = table(); - } - } - -event unified2_event(f: fa_file, ev: Unified2::IDSEvent) - { - f$u2_events[ev$event_id] = ev; - } - -event unified2_packet(f: fa_file, pkt: Unified2::Packet) - { - if ( f?$u2_events && pkt$event_id in f$u2_events) - { - local ev = f$u2_events[pkt$event_id]; - event Unified2::alert(f, ev, pkt); - delete f$u2_events[pkt$event_id]; - } - } - -event Unified2::alert(f: fa_file, ev: IDSEvent, pkt: Packet) - { - local info = create_info(ev); - info$packet=pkt$data; - Log::write(LOG, info); - } - -event file_state_remove(f: fa_file) - { - if ( f?$u2_events ) - { - # In case any events never had matching packets, flush - # the extras to the log. 
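Editor's aside, not part of the patch: the unified2 script deleted here relies on a reusable input-framework pattern, streaming a text file line by line through the raw reader into an event. Stripped to its essentials it looks roughly like the sketch below; the event name and the blocklist.txt path are placeholders:

type OneLine: record {
    line: string;
};

global got_line: event(desc: Input::EventDescription, tpe: Input::Event, line: string);

# With $want_record=F and a single-field record, the handler receives the
# bare string for each line, mirroring read_sid_msg_line() above.
event got_line(desc: Input::EventDescription, tpe: Input::Event, line: string)
    {
    print fmt("read: %s", line);
    }

event zeek_init()
    {
    Input::add_event([$source="blocklist.txt",
                      $reader=Input::READER_RAW,
                      $mode=Input::REREAD,
                      $name="blocklist",
                      $fields=OneLine,
                      $want_record=F,
                      $ev=got_line]);
    }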
- for ( i, ev in f$u2_events ) - { - Log::write(LOG, create_info(ev)); - } - } - } diff --git a/scripts/base/files/unified2/__load__.bro b/scripts/base/files/x509/__load__.zeek similarity index 100% rename from scripts/base/files/unified2/__load__.bro rename to scripts/base/files/x509/__load__.zeek diff --git a/scripts/base/files/x509/main.bro b/scripts/base/files/x509/main.bro deleted file mode 100644 index b6fdde5494..0000000000 --- a/scripts/base/files/x509/main.bro +++ /dev/null @@ -1,90 +0,0 @@ -@load base/frameworks/files -@load base/files/hash - -module X509; - -export { - redef enum Log::ID += { LOG }; - - ## The record type which contains the fields of the X.509 log. - type Info: record { - ## Current timestamp. - ts: time &log; - ## File id of this certificate. - id: string &log; - ## Basic information about the certificate. - certificate: X509::Certificate &log; - ## The opaque wrapping the certificate. Mainly used - ## for the verify operations. - handle: opaque of x509; - ## All extensions that were encountered in the certificate. - extensions: vector of X509::Extension &default=vector(); - ## Subject alternative name extension of the certificate. - san: X509::SubjectAlternativeName &optional &log; - ## Basic constraints extension of the certificate. - basic_constraints: X509::BasicConstraints &optional &log; - }; - - ## Event for accessing logged records. - global log_x509: event(rec: Info); -} - -event bro_init() &priority=5 - { - Log::create_stream(X509::LOG, [$columns=Info, $ev=log_x509, $path="x509"]); - - # We use MIME types internally to distinguish between user and CA certificates. - # The first certificate in a connection always gets tagged as user-cert, all - # following certificates get tagged as CA certificates. Certificates gotten via - # other means (e.g. identified from HTTP traffic when they are transfered in plain - # text) get tagged as application/pkix-cert. - Files::register_for_mime_type(Files::ANALYZER_X509, "application/x-x509-user-cert"); - Files::register_for_mime_type(Files::ANALYZER_X509, "application/x-x509-ca-cert"); - Files::register_for_mime_type(Files::ANALYZER_X509, "application/pkix-cert"); - - # Always calculate hashes. They are not necessary for base scripts - # but very useful for identification, and required for policy scripts - Files::register_for_mime_type(Files::ANALYZER_MD5, "application/x-x509-user-cert"); - Files::register_for_mime_type(Files::ANALYZER_MD5, "application/x-x509-ca-cert"); - Files::register_for_mime_type(Files::ANALYZER_MD5, "application/pkix-cert"); - Files::register_for_mime_type(Files::ANALYZER_SHA1, "application/x-x509-user-cert"); - Files::register_for_mime_type(Files::ANALYZER_SHA1, "application/x-x509-ca-cert"); - Files::register_for_mime_type(Files::ANALYZER_SHA1, "application/pkix-cert"); - } - -redef record Files::Info += { - ## Information about X509 certificates. This is used to keep - ## certificate information until all events have been received. 
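Editor's aside, not part of the patch: beyond the x509.log stream set up in the x509/main hunks here, the same x509_certificate event is available to user scripts, for instance for a simple expiry check. A minimal sketch; the 30-day threshold is arbitrary:

@load base/files/x509

event x509_certificate(f: fa_file, cert_ref: opaque of x509, cert: X509::Certificate)
    {
    # subject and not_valid_after are required fields of X509::Certificate.
    if ( cert$not_valid_after - network_time() < 30days )
        print fmt("certificate for %s expires soon (%s)",
                  cert$subject, cert$not_valid_after);
    }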
- x509: X509::Info &optional; -}; - -event x509_certificate(f: fa_file, cert_ref: opaque of x509, cert: X509::Certificate) &priority=5 - { - f$info$x509 = [$ts=f$info$ts, $id=f$id, $certificate=cert, $handle=cert_ref]; - } - -event x509_extension(f: fa_file, ext: X509::Extension) &priority=5 - { - if ( f$info?$x509 ) - f$info$x509$extensions += ext; - } - -event x509_ext_basic_constraints(f: fa_file, ext: X509::BasicConstraints) &priority=5 - { - if ( f$info?$x509 ) - f$info$x509$basic_constraints = ext; - } - -event x509_ext_subject_alternative_name(f: fa_file, ext: X509::SubjectAlternativeName) &priority=5 - { - if ( f$info?$x509 ) - f$info$x509$san = ext; - } - -event file_state_remove(f: fa_file) &priority=5 - { - if ( ! f$info?$x509 ) - return; - - Log::write(LOG, f$info$x509); - } diff --git a/scripts/base/files/x509/main.zeek b/scripts/base/files/x509/main.zeek new file mode 100644 index 0000000000..e674ae8888 --- /dev/null +++ b/scripts/base/files/x509/main.zeek @@ -0,0 +1,90 @@ +@load base/frameworks/files +@load base/files/hash + +module X509; + +export { + redef enum Log::ID += { LOG }; + + ## The record type which contains the fields of the X.509 log. + type Info: record { + ## Current timestamp. + ts: time &log; + ## File id of this certificate. + id: string &log; + ## Basic information about the certificate. + certificate: X509::Certificate &log; + ## The opaque wrapping the certificate. Mainly used + ## for the verify operations. + handle: opaque of x509; + ## All extensions that were encountered in the certificate. + extensions: vector of X509::Extension &default=vector(); + ## Subject alternative name extension of the certificate. + san: X509::SubjectAlternativeName &optional &log; + ## Basic constraints extension of the certificate. + basic_constraints: X509::BasicConstraints &optional &log; + }; + + ## Event for accessing logged records. + global log_x509: event(rec: Info); +} + +event zeek_init() &priority=5 + { + Log::create_stream(X509::LOG, [$columns=Info, $ev=log_x509, $path="x509"]); + + # We use MIME types internally to distinguish between user and CA certificates. + # The first certificate in a connection always gets tagged as user-cert, all + # following certificates get tagged as CA certificates. Certificates gotten via + # other means (e.g. identified from HTTP traffic when they are transfered in plain + # text) get tagged as application/pkix-cert. + Files::register_for_mime_type(Files::ANALYZER_X509, "application/x-x509-user-cert"); + Files::register_for_mime_type(Files::ANALYZER_X509, "application/x-x509-ca-cert"); + Files::register_for_mime_type(Files::ANALYZER_X509, "application/pkix-cert"); + + # Always calculate hashes. They are not necessary for base scripts + # but very useful for identification, and required for policy scripts + Files::register_for_mime_type(Files::ANALYZER_MD5, "application/x-x509-user-cert"); + Files::register_for_mime_type(Files::ANALYZER_MD5, "application/x-x509-ca-cert"); + Files::register_for_mime_type(Files::ANALYZER_MD5, "application/pkix-cert"); + Files::register_for_mime_type(Files::ANALYZER_SHA1, "application/x-x509-user-cert"); + Files::register_for_mime_type(Files::ANALYZER_SHA1, "application/x-x509-ca-cert"); + Files::register_for_mime_type(Files::ANALYZER_SHA1, "application/pkix-cert"); + } + +redef record Files::Info += { + ## Information about X509 certificates. This is used to keep + ## certificate information until all events have been received. 
+ x509: X509::Info &optional; +}; + +event x509_certificate(f: fa_file, cert_ref: opaque of x509, cert: X509::Certificate) &priority=5 + { + f$info$x509 = [$ts=f$info$ts, $id=f$id, $certificate=cert, $handle=cert_ref]; + } + +event x509_extension(f: fa_file, ext: X509::Extension) &priority=5 + { + if ( f$info?$x509 ) + f$info$x509$extensions += ext; + } + +event x509_ext_basic_constraints(f: fa_file, ext: X509::BasicConstraints) &priority=5 + { + if ( f$info?$x509 ) + f$info$x509$basic_constraints = ext; + } + +event x509_ext_subject_alternative_name(f: fa_file, ext: X509::SubjectAlternativeName) &priority=5 + { + if ( f$info?$x509 ) + f$info$x509$san = ext; + } + +event file_state_remove(f: fa_file) &priority=5 + { + if ( ! f$info?$x509 ) + return; + + Log::write(LOG, f$info$x509); + } diff --git a/scripts/base/frameworks/analyzer/README b/scripts/base/frameworks/analyzer/README index 2180fdd735..6f8f1f1228 100644 --- a/scripts/base/frameworks/analyzer/README +++ b/scripts/base/frameworks/analyzer/README @@ -1,3 +1,3 @@ -The analyzer framework allows to dynamically enable or disable Bro's +The analyzer framework allows to dynamically enable or disable Zeek's protocol analyzers, as well as to manage the well-known ports which automatically activate a particular analyzer for new connections. diff --git a/scripts/base/files/x509/__load__.bro b/scripts/base/frameworks/analyzer/__load__.zeek similarity index 100% rename from scripts/base/files/x509/__load__.bro rename to scripts/base/frameworks/analyzer/__load__.zeek diff --git a/scripts/base/frameworks/analyzer/main.bro b/scripts/base/frameworks/analyzer/main.bro deleted file mode 100644 index 39b0d573fd..0000000000 --- a/scripts/base/frameworks/analyzer/main.bro +++ /dev/null @@ -1,229 +0,0 @@ -##! Framework for managing Bro's protocol analyzers. -##! -##! The analyzer framework allows to dynamically enable or disable analyzers, as -##! well as to manage the well-known ports which automatically activate a -##! particular analyzer for new connections. -##! -##! Protocol analyzers are identified by unique tags of type -##! :bro:type:`Analyzer::Tag`, such as :bro:enum:`Analyzer::ANALYZER_HTTP`. -##! These tags are defined internally by -##! the analyzers themselves, and documented in their analyzer-specific -##! description along with the events that they generate. - -@load base/frameworks/packet-filter/utils - -module Analyzer; - -export { - ## If true, all available analyzers are initially disabled at startup. - ## One can then selectively enable them with - ## :bro:id:`Analyzer::enable_analyzer`. - global disable_all = F &redef; - - ## Enables an analyzer. Once enabled, the analyzer may be used for analysis - ## of future connections as decided by Bro's dynamic protocol detection. - ## - ## tag: The tag of the analyzer to enable. - ## - ## Returns: True if the analyzer was successfully enabled. - global enable_analyzer: function(tag: Analyzer::Tag) : bool; - - ## Disables an analyzer. Once disabled, the analyzer will not be used - ## further for analysis of future connections. - ## - ## tag: The tag of the analyzer to disable. - ## - ## Returns: True if the analyzer was successfully disabled. - global disable_analyzer: function(tag: Analyzer::Tag) : bool; - - ## Registers a set of well-known ports for an analyzer. If a future - ## connection on one of these ports is seen, the analyzer will be - ## automatically assigned to parsing it. The function *adds* to all ports - ## already registered, it doesn't replace them. 
- ## - ## tag: The tag of the analyzer. - ## - ## ports: The set of well-known ports to associate with the analyzer. - ## - ## Returns: True if the ports were successfully registered. - global register_for_ports: function(tag: Analyzer::Tag, ports: set[port]) : bool; - - ## Registers an individual well-known port for an analyzer. If a future - ## connection on this port is seen, the analyzer will be automatically - ## assigned to parsing it. The function *adds* to all ports already - ## registered, it doesn't replace them. - ## - ## tag: The tag of the analyzer. - ## - ## p: The well-known port to associate with the analyzer. - ## - ## Returns: True if the port was successfully registered. - global register_for_port: function(tag: Analyzer::Tag, p: port) : bool; - - ## Returns a set of all well-known ports currently registered for a - ## specific analyzer. - ## - ## tag: The tag of the analyzer. - ## - ## Returns: The set of ports. - global registered_ports: function(tag: Analyzer::Tag) : set[port]; - - ## Returns a table of all ports-to-analyzer mappings currently registered. - ## - ## Returns: A table mapping each analyzer to the set of ports - ## registered for it. - global all_registered_ports: function() : table[Analyzer::Tag] of set[port]; - - ## Translates an analyzer type to a string with the analyzer's name. - ## - ## tag: The analyzer tag. - ## - ## Returns: The analyzer name corresponding to the tag. - global name: function(tag: Analyzer::Tag) : string; - - ## Translates an analyzer's name to a tag enum value. - ## - ## name: The analyzer name. - ## - ## Returns: The analyzer tag corresponding to the name. - global get_tag: function(name: string): Analyzer::Tag; - - ## Schedules an analyzer for a future connection originating from a - ## given IP address and port. - ## - ## orig: The IP address originating a connection in the future. - ## 0.0.0.0 can be used as a wildcard to match any originator address. - ## - ## resp: The IP address responding to a connection from *orig*. - ## - ## resp_p: The destination port at *resp*. - ## - ## analyzer: The analyzer ID. - ## - ## tout: A timeout interval after which the scheduling request will be - ## discarded if the connection has not yet been seen. - ## - ## Returns: True if successful. - global schedule_analyzer: function(orig: addr, resp: addr, resp_p: port, - analyzer: Analyzer::Tag, tout: interval) : bool; - - ## Automatically creates a BPF filter for the specified protocol based - ## on the data supplied for the protocol through the - ## :bro:see:`Analyzer::register_for_ports` function. - ## - ## tag: The analyzer tag. - ## - ## Returns: BPF filter string. - global analyzer_to_bpf: function(tag: Analyzer::Tag): string; - - ## Create a BPF filter which matches all of the ports defined - ## by the various protocol analysis scripts as "registered ports" - ## for the protocol. - global get_bpf: function(): string; - - ## A set of analyzers to disable by default at startup. The default set - ## contains legacy analyzers that are no longer supported. 
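Editor's aside, not part of the patch: the Analyzer framework API documented in the main.bro/main.zeek hunks here is usually exercised from zeek_init(). A minimal sketch; the port number and analyzer tags are arbitrary examples:

@load base/frameworks/analyzer

event zeek_init()
    {
    # Also hand connections on a non-standard port to the HTTP analyzer.
    Analyzer::register_for_port(Analyzer::ANALYZER_HTTP, 8888/tcp);

    # Switch an analyzer off entirely.
    Analyzer::disable_analyzer(Analyzer::ANALYZER_IRC);

    # A BPF expression covering every registered well-known port.
    print Analyzer::get_bpf();
    }

Analyzer::schedule_analyzer() covers the dynamic case where the endpoints of an upcoming connection are known ahead of time, as with FTP data channels.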
- global disabled_analyzers: set[Analyzer::Tag] = { - ANALYZER_INTERCONN, - ANALYZER_STEPPINGSTONE, - ANALYZER_BACKDOOR, - ANALYZER_TCPSTATS, - } &redef; -} - -@load base/bif/analyzer.bif - -global ports: table[Analyzer::Tag] of set[port]; - -event bro_init() &priority=5 - { - if ( disable_all ) - __disable_all_analyzers(); - - for ( a in disabled_analyzers ) - disable_analyzer(a); - } - -function enable_analyzer(tag: Analyzer::Tag) : bool - { - return __enable_analyzer(tag); - } - -function disable_analyzer(tag: Analyzer::Tag) : bool - { - return __disable_analyzer(tag); - } - -function register_for_ports(tag: Analyzer::Tag, ports: set[port]) : bool - { - local rc = T; - - for ( p in ports ) - { - if ( ! register_for_port(tag, p) ) - rc = F; - } - - return rc; - } - -function register_for_port(tag: Analyzer::Tag, p: port) : bool - { - if ( ! __register_for_port(tag, p) ) - return F; - - if ( tag !in ports ) - ports[tag] = set(); - - add ports[tag][p]; - return T; - } - -function registered_ports(tag: Analyzer::Tag) : set[port] - { - return tag in ports ? ports[tag] : set(); - } - -function all_registered_ports(): table[Analyzer::Tag] of set[port] - { - return ports; - } - -function name(atype: Analyzer::Tag) : string - { - return __name(atype); - } - -function get_tag(name: string): Analyzer::Tag - { - return __tag(name); - } - -function schedule_analyzer(orig: addr, resp: addr, resp_p: port, - analyzer: Analyzer::Tag, tout: interval) : bool - { - return __schedule_analyzer(orig, resp, resp_p, analyzer, tout); - } - -function analyzer_to_bpf(tag: Analyzer::Tag): string - { - # Return an empty string if an undefined analyzer was given. - if ( tag !in ports ) - return ""; - - local output = ""; - for ( p in ports[tag] ) - output = PacketFilter::combine_filters(output, "or", PacketFilter::port_to_bpf(p)); - return output; - } - -function get_bpf(): string - { - local output = ""; - for ( tag in ports ) - { - output = PacketFilter::combine_filters(output, "or", analyzer_to_bpf(tag)); - } - return output; - } - diff --git a/scripts/base/frameworks/analyzer/main.zeek b/scripts/base/frameworks/analyzer/main.zeek new file mode 100644 index 0000000000..8fd986e497 --- /dev/null +++ b/scripts/base/frameworks/analyzer/main.zeek @@ -0,0 +1,229 @@ +##! Framework for managing Zeek's protocol analyzers. +##! +##! The analyzer framework allows to dynamically enable or disable analyzers, as +##! well as to manage the well-known ports which automatically activate a +##! particular analyzer for new connections. +##! +##! Protocol analyzers are identified by unique tags of type +##! :zeek:type:`Analyzer::Tag`, such as :zeek:enum:`Analyzer::ANALYZER_HTTP`. +##! These tags are defined internally by +##! the analyzers themselves, and documented in their analyzer-specific +##! description along with the events that they generate. + +@load base/frameworks/packet-filter/utils + +module Analyzer; + +export { + ## If true, all available analyzers are initially disabled at startup. + ## One can then selectively enable them with + ## :zeek:id:`Analyzer::enable_analyzer`. + global disable_all = F &redef; + + ## Enables an analyzer. Once enabled, the analyzer may be used for analysis + ## of future connections as decided by Zeek's dynamic protocol detection. + ## + ## tag: The tag of the analyzer to enable. + ## + ## Returns: True if the analyzer was successfully enabled. + global enable_analyzer: function(tag: Analyzer::Tag) : bool; + + ## Disables an analyzer. 
Once disabled, the analyzer will not be used + ## further for analysis of future connections. + ## + ## tag: The tag of the analyzer to disable. + ## + ## Returns: True if the analyzer was successfully disabled. + global disable_analyzer: function(tag: Analyzer::Tag) : bool; + + ## Registers a set of well-known ports for an analyzer. If a future + ## connection on one of these ports is seen, the analyzer will be + ## automatically assigned to parsing it. The function *adds* to all ports + ## already registered, it doesn't replace them. + ## + ## tag: The tag of the analyzer. + ## + ## ports: The set of well-known ports to associate with the analyzer. + ## + ## Returns: True if the ports were successfully registered. + global register_for_ports: function(tag: Analyzer::Tag, ports: set[port]) : bool; + + ## Registers an individual well-known port for an analyzer. If a future + ## connection on this port is seen, the analyzer will be automatically + ## assigned to parsing it. The function *adds* to all ports already + ## registered, it doesn't replace them. + ## + ## tag: The tag of the analyzer. + ## + ## p: The well-known port to associate with the analyzer. + ## + ## Returns: True if the port was successfully registered. + global register_for_port: function(tag: Analyzer::Tag, p: port) : bool; + + ## Returns a set of all well-known ports currently registered for a + ## specific analyzer. + ## + ## tag: The tag of the analyzer. + ## + ## Returns: The set of ports. + global registered_ports: function(tag: Analyzer::Tag) : set[port]; + + ## Returns a table of all ports-to-analyzer mappings currently registered. + ## + ## Returns: A table mapping each analyzer to the set of ports + ## registered for it. + global all_registered_ports: function() : table[Analyzer::Tag] of set[port]; + + ## Translates an analyzer type to a string with the analyzer's name. + ## + ## tag: The analyzer tag. + ## + ## Returns: The analyzer name corresponding to the tag. + global name: function(tag: Analyzer::Tag) : string; + + ## Translates an analyzer's name to a tag enum value. + ## + ## name: The analyzer name. + ## + ## Returns: The analyzer tag corresponding to the name. + global get_tag: function(name: string): Analyzer::Tag; + + ## Schedules an analyzer for a future connection originating from a + ## given IP address and port. + ## + ## orig: The IP address originating a connection in the future. + ## 0.0.0.0 can be used as a wildcard to match any originator address. + ## + ## resp: The IP address responding to a connection from *orig*. + ## + ## resp_p: The destination port at *resp*. + ## + ## analyzer: The analyzer ID. + ## + ## tout: A timeout interval after which the scheduling request will be + ## discarded if the connection has not yet been seen. + ## + ## Returns: True if successful. + global schedule_analyzer: function(orig: addr, resp: addr, resp_p: port, + analyzer: Analyzer::Tag, tout: interval) : bool; + + ## Automatically creates a BPF filter for the specified protocol based + ## on the data supplied for the protocol through the + ## :zeek:see:`Analyzer::register_for_ports` function. + ## + ## tag: The analyzer tag. + ## + ## Returns: BPF filter string. + global analyzer_to_bpf: function(tag: Analyzer::Tag): string; + + ## Create a BPF filter which matches all of the ports defined + ## by the various protocol analysis scripts as "registered ports" + ## for the protocol. + global get_bpf: function(): string; + + ## A set of analyzers to disable by default at startup. 
The default set + ## contains legacy analyzers that are no longer supported. + global disabled_analyzers: set[Analyzer::Tag] = { + ANALYZER_INTERCONN, + ANALYZER_STEPPINGSTONE, + ANALYZER_BACKDOOR, + ANALYZER_TCPSTATS, + } &redef; +} + +@load base/bif/analyzer.bif + +global ports: table[Analyzer::Tag] of set[port]; + +event zeek_init() &priority=5 + { + if ( disable_all ) + __disable_all_analyzers(); + + for ( a in disabled_analyzers ) + disable_analyzer(a); + } + +function enable_analyzer(tag: Analyzer::Tag) : bool + { + return __enable_analyzer(tag); + } + +function disable_analyzer(tag: Analyzer::Tag) : bool + { + return __disable_analyzer(tag); + } + +function register_for_ports(tag: Analyzer::Tag, ports: set[port]) : bool + { + local rc = T; + + for ( p in ports ) + { + if ( ! register_for_port(tag, p) ) + rc = F; + } + + return rc; + } + +function register_for_port(tag: Analyzer::Tag, p: port) : bool + { + if ( ! __register_for_port(tag, p) ) + return F; + + if ( tag !in ports ) + ports[tag] = set(); + + add ports[tag][p]; + return T; + } + +function registered_ports(tag: Analyzer::Tag) : set[port] + { + return tag in ports ? ports[tag] : set(); + } + +function all_registered_ports(): table[Analyzer::Tag] of set[port] + { + return ports; + } + +function name(atype: Analyzer::Tag) : string + { + return __name(atype); + } + +function get_tag(name: string): Analyzer::Tag + { + return __tag(name); + } + +function schedule_analyzer(orig: addr, resp: addr, resp_p: port, + analyzer: Analyzer::Tag, tout: interval) : bool + { + return __schedule_analyzer(orig, resp, resp_p, analyzer, tout); + } + +function analyzer_to_bpf(tag: Analyzer::Tag): string + { + # Return an empty string if an undefined analyzer was given. + if ( tag !in ports ) + return ""; + + local output = ""; + for ( p in ports[tag] ) + output = PacketFilter::combine_filters(output, "or", PacketFilter::port_to_bpf(p)); + return output; + } + +function get_bpf(): string + { + local output = ""; + for ( tag in ports ) + { + output = PacketFilter::combine_filters(output, "or", analyzer_to_bpf(tag)); + } + return output; + } + diff --git a/scripts/base/frameworks/broker/README b/scripts/base/frameworks/broker/README index 11c2479d90..c58b85af19 100644 --- a/scripts/base/frameworks/broker/README +++ b/scripts/base/frameworks/broker/README @@ -1,2 +1,2 @@ -The Broker communication framework facilitates connecting to remote Bro +The Broker communication framework facilitates connecting to remote Zeek instances to share state and transfer events. diff --git a/scripts/base/frameworks/broker/__load__.bro b/scripts/base/frameworks/broker/__load__.zeek similarity index 100% rename from scripts/base/frameworks/broker/__load__.bro rename to scripts/base/frameworks/broker/__load__.zeek diff --git a/scripts/base/frameworks/broker/log.bro b/scripts/base/frameworks/broker/log.bro deleted file mode 100644 index 2461cb8d54..0000000000 --- a/scripts/base/frameworks/broker/log.bro +++ /dev/null @@ -1,80 +0,0 @@ -@load ./main - -module Broker; - -export { - ## The Broker logging stream identifier. - redef enum Log::ID += { LOG }; - - ## The type of a Broker activity being logged. - type Type: enum { - ## An informational status update. - STATUS, - ## An error situation. - ERROR - }; - - ## A record type containing the column fields of the Broker log. - type Info: record { - ## The network time at which a Broker event occurred. - ts: time &log; - ## The type of the Broker event. - ty: Type &log; - ## The event being logged. 
- ev: string &log; - ## The peer (if any) with which a Broker event is - ## concerned. - peer: NetworkInfo &log &optional; - ## An optional message describing the Broker event in more detail - message: string &log &optional; - }; -} - -event bro_init() &priority=5 - { - Log::create_stream(Broker::LOG, [$columns=Info, $path="broker"]); - } - -function log_status(ev: string, endpoint: EndpointInfo, msg: string) - { - local r: Info; - - r = [$ts = network_time(), - $ev = ev, - $ty = STATUS, - $message = msg]; - - if ( endpoint?$network ) - r$peer = endpoint$network; - - Log::write(Broker::LOG, r); - } - -event Broker::peer_added(endpoint: EndpointInfo, msg: string) - { - log_status("peer-added", endpoint, msg); - } - -event Broker::peer_removed(endpoint: EndpointInfo, msg: string) - { - log_status("peer-removed", endpoint, msg); - } - -event Broker::peer_lost(endpoint: EndpointInfo, msg: string) - { - log_status("connection-terminated", endpoint, msg); - } - -event Broker::error(code: ErrorCode, msg: string) - { - local ev = cat(code); - ev = subst_string(ev, "Broker::", ""); - ev = subst_string(ev, "_", "-"); - ev = to_lower(ev); - - Log::write(Broker::LOG, [$ts = network_time(), - $ev = ev, - $ty = ERROR, - $message = msg]); - } - diff --git a/scripts/base/frameworks/broker/log.zeek b/scripts/base/frameworks/broker/log.zeek new file mode 100644 index 0000000000..bd76684b74 --- /dev/null +++ b/scripts/base/frameworks/broker/log.zeek @@ -0,0 +1,80 @@ +@load ./main + +module Broker; + +export { + ## The Broker logging stream identifier. + redef enum Log::ID += { LOG }; + + ## The type of a Broker activity being logged. + type Type: enum { + ## An informational status update. + STATUS, + ## An error situation. + ERROR + }; + + ## A record type containing the column fields of the Broker log. + type Info: record { + ## The network time at which a Broker event occurred. + ts: time &log; + ## The type of the Broker event. + ty: Type &log; + ## The event being logged. + ev: string &log; + ## The peer (if any) with which a Broker event is + ## concerned. + peer: NetworkInfo &log &optional; + ## An optional message describing the Broker event in more detail + message: string &log &optional; + }; +} + +event zeek_init() &priority=5 + { + Log::create_stream(Broker::LOG, [$columns=Info, $path="broker"]); + } + +function log_status(ev: string, endpoint: EndpointInfo, msg: string) + { + local r: Info; + + r = [$ts = network_time(), + $ev = ev, + $ty = STATUS, + $message = msg]; + + if ( endpoint?$network ) + r$peer = endpoint$network; + + Log::write(Broker::LOG, r); + } + +event Broker::peer_added(endpoint: EndpointInfo, msg: string) + { + log_status("peer-added", endpoint, msg); + } + +event Broker::peer_removed(endpoint: EndpointInfo, msg: string) + { + log_status("peer-removed", endpoint, msg); + } + +event Broker::peer_lost(endpoint: EndpointInfo, msg: string) + { + log_status("connection-terminated", endpoint, msg); + } + +event Broker::error(code: ErrorCode, msg: string) + { + local ev = cat(code); + ev = subst_string(ev, "Broker::", ""); + ev = subst_string(ev, "_", "-"); + ev = to_lower(ev); + + Log::write(Broker::LOG, [$ts = network_time(), + $ev = ev, + $ty = ERROR, + $message = msg]); + } + diff --git a/scripts/base/frameworks/broker/main.bro b/scripts/base/frameworks/broker/main.bro deleted file mode 100644 index 9be261eaf1..0000000000 --- a/scripts/base/frameworks/broker/main.bro +++ /dev/null @@ -1,439 +0,0 @@ -##! The Broker-based communication API and its various options. 
- -module Broker; - -export { - ## Default port for Broker communication. Where not specified - ## otherwise, this is the port to connect to and listen on. - const default_port = 9999/tcp &redef; - - ## Default interval to retry listening on a port if it's currently in - ## use already. Use of the BRO_DEFAULT_LISTEN_RETRY environment variable - ## (set as a number of seconds) will override this option and also - ## any values given to :bro:see:`Broker::listen`. - const default_listen_retry = 30sec &redef; - - ## Default address on which to listen. - ## - ## .. bro:see:: Broker::listen - const default_listen_address = getenv("BRO_DEFAULT_LISTEN_ADDRESS") &redef; - - ## Default interval to retry connecting to a peer if it cannot be made to - ## work initially, or if it ever becomes disconnected. Use of the - ## BRO_DEFAULT_CONNECT_RETRY environment variable (set as number of - ## seconds) will override this option and also any values given to - ## :bro:see:`Broker::peer`. - const default_connect_retry = 30sec &redef; - - ## If true, do not use SSL for network connections. By default, SSL will - ## even be used if no certificates / CAs have been configured. In that case - ## (which is the default) the communication will be encrypted, but not - ## authenticated. - const disable_ssl = F &redef; - - ## Path to a file containing concatenated trusted certificates - ## in PEM format. If set, Bro will require valid certificates for - ## all peers. - const ssl_cafile = "" &redef; - - ## Path to an OpenSSL-style directory of trusted certificates. - ## If set, Bro will require valid certificates for - ## all peers. - const ssl_capath = "" &redef; - - ## Path to a file containing a X.509 certificate for this - ## node in PEM format. If set, Bro will require valid certificates for - ## all peers. - const ssl_certificate = "" &redef; - - ## Passphrase to decrypt the private key specified by - ## :bro:see:`Broker::ssl_keyfile`. If set, Bro will require valid - ## certificates for all peers. - const ssl_passphrase = "" &redef; - - ## Path to the file containing the private key for this node's - ## certificate. If set, Bro will require valid certificates for - ## all peers. - const ssl_keyfile = "" &redef; - - ## The number of buffered messages at the Broker/CAF layer after which - ## a subscriber considers themselves congested (i.e. tune the congestion - ## control mechanisms). - const congestion_queue_size = 200 &redef; - - ## Max number of threads to use for Broker/CAF functionality. The - ## BRO_BROKER_MAX_THREADS environment variable overrides this setting. - const max_threads = 1 &redef; - - ## Interval of time for under-utilized Broker/CAF threads to sleep - ## when in "moderate" mode. - const moderate_sleep = 16 msec &redef; - - ## Interval of time for under-utilized Broker/CAF threads to sleep - ## when in "relaxed" mode. - const relaxed_sleep = 64 msec &redef; - - ## Number of work-stealing polling attempts for Broker/CAF threads - ## in "aggressive" mode. - const aggressive_polls = 5 &redef; - - ## Number of work-stealing polling attempts for Broker/CAF threads - ## in "moderate" mode. - const moderate_polls = 5 &redef; - - ## Frequency of work-stealing polling attempts for Broker/CAF threads - ## in "aggressive" mode. - const aggressive_interval = 4 &redef; - - ## Frequency of work-stealing polling attempts for Broker/CAF threads - ## in "moderate" mode. - const moderate_interval = 2 &redef; - - ## Frequency of work-stealing polling attempts for Broker/CAF threads - ## in "relaxed" mode. 
- const relaxed_interval = 1 &redef; - - ## Forward all received messages to subscribing peers. - const forward_messages = F &redef; - - ## Whether calling :bro:see:`Broker::peer` will register the Broker - ## system as an I/O source that will block the process from shutting - ## down. For example, set this to false when you are reading pcaps, - ## but also want to initaiate a Broker peering and still shutdown after - ## done reading the pcap. - option peer_counts_as_iosource = T; - - ## The default topic prefix where logs will be published. The log's stream - ## id is appended when writing to a particular stream. - const default_log_topic_prefix = "bro/logs/" &redef; - - ## The default implementation for :bro:see:`Broker::log_topic`. - function default_log_topic(id: Log::ID, path: string): string - { - return default_log_topic_prefix + cat(id); - } - - ## A function that will be called for each log entry to determine what - ## broker topic string will be used for sending it to peers. The - ## default implementation will return a value based on - ## :bro:see:`Broker::default_log_topic_prefix`. - ## - ## id: the ID associated with the log stream entry that will be sent. - ## - ## path: the path to which the log stream entry will be output. - ## - ## Returns: a string representing the broker topic to which the log - ## will be sent. - const log_topic: function(id: Log::ID, path: string): string = default_log_topic &redef; - - type ErrorCode: enum { - ## The unspecified default error code. - UNSPECIFIED = 1, - ## Version incompatibility. - PEER_INCOMPATIBLE = 2, - ## Referenced peer does not exist. - PEER_INVALID = 3, - ## Remote peer not listening. - PEER_UNAVAILABLE = 4, - ## A peering request timed out. - PEER_TIMEOUT = 5, - ## Master with given name already exists. - MASTER_EXISTS = 6, - ## Master with given name does not exist. - NO_SUCH_MASTER = 7, - ## The given data store key does not exist. - NO_SUCH_KEY = 8, - ## The store operation timed out. - REQUEST_TIMEOUT = 9, - ## The operation expected a different type than provided. - TYPE_CLASH = 10, - ## The data value cannot be used to carry out the desired operation. - INVALID_DATA = 11, - ## The storage backend failed to execute the operation. - BACKEND_FAILURE = 12, - ## The storage backend failed to execute the operation. - STALE_DATA = 13, - ## Catch-all for a CAF-level problem. - CAF_ERROR = 100 - }; - - ## The possible states of a peer endpoint. - type PeerStatus: enum { - ## The peering process is initiated. - INITIALIZING, - ## Connection establishment in process. - CONNECTING, - ## Connection established, peering pending. - CONNECTED, - ## Successfully peered. - PEERED, - ## Connection to remote peer lost. - DISCONNECTED, - ## Reconnecting to peer after a lost connection. - RECONNECTING, - }; - - type NetworkInfo: record { - ## The IP address or hostname where the endpoint listens. - address: string &log; - ## The port where the endpoint is bound to. - bound_port: port &log; - }; - - type EndpointInfo: record { - ## A unique identifier of the node. - id: string; - ## Network-level information. - network: NetworkInfo &optional; - }; - - type PeerInfo: record { - peer: EndpointInfo; - status: PeerStatus; - }; - - type PeerInfos: vector of PeerInfo; - - ## Opaque communication data. - type Data: record { - data: opaque of Broker::Data &optional; - }; - - ## Opaque communication data sequence. - type DataVector: vector of Broker::Data; - - ## Opaque event communication data. - type Event: record { - ## The name of the event. 
Not set if invalid event or arguments. - name: string &optional; - ## The arguments to the event. - args: DataVector; - }; - - ## Opaque communication data used as a convenient way to wrap key-value - ## pairs that comprise table entries. - type TableItem : record { - key: Broker::Data; - val: Broker::Data; - }; - - ## Listen for remote connections. - ## - ## a: an address string on which to accept connections, e.g. - ## "127.0.0.1". An empty string refers to INADDR_ANY. - ## - ## p: the TCP port to listen on. The value 0 means that the OS should choose - ## the next available free port. - ## - ## retry: If non-zero, retries listening in regular intervals if the port cannot be - ## acquired immediately. 0 disables retries. If the - ## BRO_DEFAULT_LISTEN_RETRY environment variable is set (as number - ## of seconds), it overrides any value given here. - ## - ## Returns: the bound port or 0/? on failure. - ## - ## .. bro:see:: Broker::status - global listen: function(a: string &default = default_listen_address, - p: port &default = default_port, - retry: interval &default = default_listen_retry): port; - ## Initiate a remote connection. - ## - ## a: an address to connect to, e.g. "localhost" or "127.0.0.1". - ## - ## p: the TCP port on which the remote side is listening. - ## - ## retry: an interval at which to retry establishing the - ## connection with the remote peer if it cannot be made initially, or - ## if it ever becomes disconnected. If the - ## BRO_DEFAULT_CONNECT_RETRY environment variable is set (as number - ## of seconds), it overrides any value given here. - ## - ## Returns: true if it's possible to try connecting with the peer and - ## it's a new peer. The actual connection may not be established - ## until a later point in time. - ## - ## .. bro:see:: Broker::status - global peer: function(a: string, p: port &default=default_port, - retry: interval &default=default_connect_retry): bool; - - ## Remove a remote connection. - ## - ## Note that this does not terminate the connection to the peer, it - ## just means that we won't exchange any further information with it - ## unless peering resumes later. - ## - ## a: the address used in previous successful call to :bro:see:`Broker::peer`. - ## - ## p: the port used in previous successful call to :bro:see:`Broker::peer`. - ## - ## Returns: true if the arguments match a previously successful call to - ## :bro:see:`Broker::peer`. - ## - ## TODO: We do not have a function yet to terminate a connection. - global unpeer: function(a: string, p: port): bool; - - ## Get a list of all peer connections. - ## - ## Returns: a list of all peer connections. - global peers: function(): vector of PeerInfo; - - ## Get a unique identifier for the local broker endpoint. - ## - ## Returns: a unique identifier for the local broker endpoint. - global node_id: function(): string; - - ## Sends all pending log messages to remote peers. This normally - ## doesn't need to be used except for test cases that are time-sensitive. - global flush_logs: function(): count; - - ## Publishes the value of an identifier to a given topic. The subscribers - ## will update their local value for that identifier on receipt. - ## - ## topic: a topic associated with the message. - ## - ## id: the identifier to publish. - ## - ## Returns: true if the message is sent. - global publish_id: function(topic: string, id: string): bool; - - ## Register interest in all peer event messages that use a certain topic - ## prefix. 
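[For illustration, a minimal peering sketch using the Broker::listen and Broker::peer calls documented above; the API is identical in the renamed main.zeek further below. The address 192.0.2.1 and port are placeholders.]

    # Illustrative sketch only: listen on the defaults and initiate one peering.
    event zeek_init()
        {
        local bound = Broker::listen();          # default_listen_address / default_port
        print "Broker listening on", bound;

        Broker::peer("192.0.2.1", 9999/tcp);     # retries per default_connect_retry
        }

    event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string)
        {
        print "peer added", endpoint$id, msg;
        }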
Note that subscriptions may not be altered immediately after - ## calling (except during :bro:see:`bro_init`). - ## - ## topic_prefix: a prefix to match against remote message topics. - ## e.g. an empty prefix matches everything and "a" matches - ## "alice" and "amy" but not "bob". - ## - ## Returns: true if it's a new event subscription and it is now registered. - global subscribe: function(topic_prefix: string): bool; - - ## Unregister interest in all peer event messages that use a topic prefix. - ## Note that subscriptions may not be altered immediately after calling - ## (except during :bro:see:`bro_init`). - ## - ## topic_prefix: a prefix previously supplied to a successful call to - ## :bro:see:`Broker::subscribe` or :bro:see:`Broker::forward`. - ## - ## Returns: true if interest in the topic prefix is no longer advertised. - global unsubscribe: function(topic_prefix: string): bool; - - ## Register a topic prefix subscription for events that should only be - ## forwarded to any subscribing peers and not raise any event handlers - ## on the receiving/forwarding node. i.e. it's the same as - ## :bro:see:`Broker::subscribe` except matching events are not raised - ## on the receiver, just forwarded. Use :bro:see:`Broker::unsubscribe` - ## with the same argument to undo this operation. - ## - ## topic_prefix: a prefix to match against remote message topics. - ## e.g. an empty prefix matches everything and "a" matches - ## "alice" and "amy" but not "bob". - ## - ## Returns: true if a new event forwarding/subscription is now registered. - global forward: function(topic_prefix: string): bool; - - ## Automatically send an event to any interested peers whenever it is - ## locally dispatched. (For example, using "event my_event(...);" in a - ## script.) - ## - ## topic: a topic string associated with the event message. - ## Peers advertise interest by registering a subscription to some - ## prefix of this topic name. - ## - ## ev: a Bro event value. - ## - ## Returns: true if automatic event sending is now enabled. - global auto_publish: function(topic: string, ev: any): bool; - - ## Stop automatically sending an event to peers upon local dispatch. - ## - ## topic: a topic originally given to :bro:see:`Broker::auto_publish`. - ## - ## ev: an event originally given to :bro:see:`Broker::auto_publish`. - ## - ## Returns: true if automatic events will not occur for the topic/event - ## pair. 
- global auto_unpublish: function(topic: string, ev: any): bool; -} - -@load base/bif/comm.bif -@load base/bif/messaging.bif - -module Broker; - -event retry_listen(a: string, p: port, retry: interval) - { - listen(a, p, retry); - } - -function listen(a: string, p: port, retry: interval): port - { - local bound = __listen(a, p); - - if ( bound == 0/tcp ) - { - local e = getenv("BRO_DEFAULT_LISTEN_RETRY"); - - if ( e != "" ) - retry = double_to_interval(to_double(e)); - - if ( retry != 0secs ) - schedule retry { retry_listen(a, p, retry) }; - } - - return bound; - } - -function peer(a: string, p: port, retry: interval): bool - { - return __peer(a, p, retry); - } - -function unpeer(a: string, p: port): bool - { - return __unpeer(a, p); - } - -function peers(): vector of PeerInfo - { - return __peers(); - } - -function node_id(): string - { - return __node_id(); - } - -function flush_logs(): count - { - return __flush_logs(); - } - -function publish_id(topic: string, id: string): bool - { - return __publish_id(topic, id); - } - -function subscribe(topic_prefix: string): bool - { - return __subscribe(topic_prefix); - } - -function forward(topic_prefix: string): bool - { - return __forward(topic_prefix); - } - -function unsubscribe(topic_prefix: string): bool - { - return __unsubscribe(topic_prefix); - } - -function auto_publish(topic: string, ev: any): bool - { - return __auto_publish(topic, ev); - } - -function auto_unpublish(topic: string, ev: any): bool - { - return __auto_unpublish(topic, ev); - } diff --git a/scripts/base/frameworks/broker/main.zeek b/scripts/base/frameworks/broker/main.zeek new file mode 100644 index 0000000000..2b43c3fd2b --- /dev/null +++ b/scripts/base/frameworks/broker/main.zeek @@ -0,0 +1,447 @@ +##! The Broker-based communication API and its various options. + +module Broker; + +export { + ## Default port for Broker communication. Where not specified + ## otherwise, this is the port to connect to and listen on. + const default_port = 9999/tcp &redef; + + ## Default interval to retry listening on a port if it's currently in + ## use already. Use of the ZEEK_DEFAULT_LISTEN_RETRY environment variable + ## (set as a number of seconds) will override this option and also + ## any values given to :zeek:see:`Broker::listen`. + const default_listen_retry = 30sec &redef; + + ## Default address on which to listen. + ## + ## .. zeek:see:: Broker::listen + const default_listen_address = getenv("ZEEK_DEFAULT_LISTEN_ADDRESS") &redef; + + ## Default interval to retry connecting to a peer if it cannot be made to + ## work initially, or if it ever becomes disconnected. Use of the + ## ZEEK_DEFAULT_CONNECT_RETRY environment variable (set as number of + ## seconds) will override this option and also any values given to + ## :zeek:see:`Broker::peer`. + const default_connect_retry = 30sec &redef; + + ## If true, do not use SSL for network connections. By default, SSL will + ## even be used if no certificates / CAs have been configured. In that case + ## (which is the default) the communication will be encrypted, but not + ## authenticated. + const disable_ssl = F &redef; + + ## Path to a file containing concatenated trusted certificates + ## in PEM format. If set, Zeek will require valid certificates for + ## all peers. + const ssl_cafile = "" &redef; + + ## Path to an OpenSSL-style directory of trusted certificates. + ## If set, Zeek will require valid certificates for + ## all peers. 
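[For illustration, one way the SSL knobs above might be set from a site policy; the certificate paths are placeholders, not files shipped with Zeek.]

    # Illustrative sketch only: require authenticated TLS between Broker peers.
    redef Broker::ssl_cafile = "/usr/local/zeek/etc/ca.pem";
    redef Broker::ssl_certificate = "/usr/local/zeek/etc/node.pem";
    redef Broker::ssl_keyfile = "/usr/local/zeek/etc/node.key";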
+ const ssl_capath = "" &redef; + + ## Path to a file containing a X.509 certificate for this + ## node in PEM format. If set, Zeek will require valid certificates for + ## all peers. + const ssl_certificate = "" &redef; + + ## Passphrase to decrypt the private key specified by + ## :zeek:see:`Broker::ssl_keyfile`. If set, Zeek will require valid + ## certificates for all peers. + const ssl_passphrase = "" &redef; + + ## Path to the file containing the private key for this node's + ## certificate. If set, Zeek will require valid certificates for + ## all peers. + const ssl_keyfile = "" &redef; + + ## The number of buffered messages at the Broker/CAF layer after which + ## a subscriber considers themselves congested (i.e. tune the congestion + ## control mechanisms). + const congestion_queue_size = 200 &redef; + + ## The max number of log entries per log stream to batch together when + ## sending log messages to a remote logger. + const log_batch_size = 400 &redef; + + ## Max time to buffer log messages before sending the current set out as a + ## batch. + const log_batch_interval = 1sec &redef; + + ## Max number of threads to use for Broker/CAF functionality. The + ## ZEEK_BROKER_MAX_THREADS environment variable overrides this setting. + const max_threads = 1 &redef; + + ## Interval of time for under-utilized Broker/CAF threads to sleep + ## when in "moderate" mode. + const moderate_sleep = 16 msec &redef; + + ## Interval of time for under-utilized Broker/CAF threads to sleep + ## when in "relaxed" mode. + const relaxed_sleep = 64 msec &redef; + + ## Number of work-stealing polling attempts for Broker/CAF threads + ## in "aggressive" mode. + const aggressive_polls = 5 &redef; + + ## Number of work-stealing polling attempts for Broker/CAF threads + ## in "moderate" mode. + const moderate_polls = 5 &redef; + + ## Frequency of work-stealing polling attempts for Broker/CAF threads + ## in "aggressive" mode. + const aggressive_interval = 4 &redef; + + ## Frequency of work-stealing polling attempts for Broker/CAF threads + ## in "moderate" mode. + const moderate_interval = 2 &redef; + + ## Frequency of work-stealing polling attempts for Broker/CAF threads + ## in "relaxed" mode. + const relaxed_interval = 1 &redef; + + ## Forward all received messages to subscribing peers. + const forward_messages = F &redef; + + ## Whether calling :zeek:see:`Broker::peer` will register the Broker + ## system as an I/O source that will block the process from shutting + ## down. For example, set this to false when you are reading pcaps, + ## but also want to initaiate a Broker peering and still shutdown after + ## done reading the pcap. + option peer_counts_as_iosource = T; + + ## The default topic prefix where logs will be published. The log's stream + ## id is appended when writing to a particular stream. + const default_log_topic_prefix = "zeek/logs/" &redef; + + ## The default implementation for :zeek:see:`Broker::log_topic`. + function default_log_topic(id: Log::ID, path: string): string + { + return default_log_topic_prefix + cat(id); + } + + ## A function that will be called for each log entry to determine what + ## broker topic string will be used for sending it to peers. The + ## default implementation will return a value based on + ## :zeek:see:`Broker::default_log_topic_prefix`. + ## + ## id: the ID associated with the log stream entry that will be sent. + ## + ## path: the path to which the log stream entry will be output. 
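[For illustration, a sketch of overriding Broker::log_topic as described above; the function name and the Conn::LOG check are assumptions made for the example.]

    # Illustrative sketch only: send conn entries to a dedicated topic and
    # everything else to the default prefix.
    function example_log_topic(id: Log::ID, path: string): string
        {
        if ( id == Conn::LOG )
            return "zeek/logs/conn-dedicated";

        return Broker::default_log_topic_prefix + cat(id);
        }

    redef Broker::log_topic = example_log_topic;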
+ ## + ## Returns: a string representing the broker topic to which the log + ## will be sent. + const log_topic: function(id: Log::ID, path: string): string = default_log_topic &redef; + + type ErrorCode: enum { + ## The unspecified default error code. + UNSPECIFIED = 1, + ## Version incompatibility. + PEER_INCOMPATIBLE = 2, + ## Referenced peer does not exist. + PEER_INVALID = 3, + ## Remote peer not listening. + PEER_UNAVAILABLE = 4, + ## A peering request timed out. + PEER_TIMEOUT = 5, + ## Master with given name already exists. + MASTER_EXISTS = 6, + ## Master with given name does not exist. + NO_SUCH_MASTER = 7, + ## The given data store key does not exist. + NO_SUCH_KEY = 8, + ## The store operation timed out. + REQUEST_TIMEOUT = 9, + ## The operation expected a different type than provided. + TYPE_CLASH = 10, + ## The data value cannot be used to carry out the desired operation. + INVALID_DATA = 11, + ## The storage backend failed to execute the operation. + BACKEND_FAILURE = 12, + ## The storage backend failed to execute the operation. + STALE_DATA = 13, + ## Catch-all for a CAF-level problem. + CAF_ERROR = 100 + }; + + ## The possible states of a peer endpoint. + type PeerStatus: enum { + ## The peering process is initiated. + INITIALIZING, + ## Connection establishment in process. + CONNECTING, + ## Connection established, peering pending. + CONNECTED, + ## Successfully peered. + PEERED, + ## Connection to remote peer lost. + DISCONNECTED, + ## Reconnecting to peer after a lost connection. + RECONNECTING, + }; + + type NetworkInfo: record { + ## The IP address or hostname where the endpoint listens. + address: string &log; + ## The port where the endpoint is bound to. + bound_port: port &log; + }; + + type EndpointInfo: record { + ## A unique identifier of the node. + id: string; + ## Network-level information. + network: NetworkInfo &optional; + }; + + type PeerInfo: record { + peer: EndpointInfo; + status: PeerStatus; + }; + + type PeerInfos: vector of PeerInfo; + + ## Opaque communication data. + type Data: record { + data: opaque of Broker::Data &optional; + }; + + ## Opaque communication data sequence. + type DataVector: vector of Broker::Data; + + ## Opaque event communication data. + type Event: record { + ## The name of the event. Not set if invalid event or arguments. + name: string &optional; + ## The arguments to the event. + args: DataVector; + }; + + ## Opaque communication data used as a convenient way to wrap key-value + ## pairs that comprise table entries. + type TableItem : record { + key: Broker::Data; + val: Broker::Data; + }; + + ## Listen for remote connections. + ## + ## a: an address string on which to accept connections, e.g. + ## "127.0.0.1". An empty string refers to INADDR_ANY. + ## + ## p: the TCP port to listen on. The value 0 means that the OS should choose + ## the next available free port. + ## + ## retry: If non-zero, retries listening in regular intervals if the port cannot be + ## acquired immediately. 0 disables retries. If the + ## ZEEK_DEFAULT_LISTEN_RETRY environment variable is set (as number + ## of seconds), it overrides any value given here. + ## + ## Returns: the bound port or 0/? on failure. + ## + ## .. zeek:see:: Broker::status + global listen: function(a: string &default = default_listen_address, + p: port &default = default_port, + retry: interval &default = default_listen_retry): port; + ## Initiate a remote connection. + ## + ## a: an address to connect to, e.g. "localhost" or "127.0.0.1". 
+ ## + ## p: the TCP port on which the remote side is listening. + ## + ## retry: an interval at which to retry establishing the + ## connection with the remote peer if it cannot be made initially, or + ## if it ever becomes disconnected. If the + ## ZEEK_DEFAULT_CONNECT_RETRY environment variable is set (as number + ## of seconds), it overrides any value given here. + ## + ## Returns: true if it's possible to try connecting with the peer and + ## it's a new peer. The actual connection may not be established + ## until a later point in time. + ## + ## .. zeek:see:: Broker::status + global peer: function(a: string, p: port &default=default_port, + retry: interval &default=default_connect_retry): bool; + + ## Remove a remote connection. + ## + ## Note that this does not terminate the connection to the peer, it + ## just means that we won't exchange any further information with it + ## unless peering resumes later. + ## + ## a: the address used in previous successful call to :zeek:see:`Broker::peer`. + ## + ## p: the port used in previous successful call to :zeek:see:`Broker::peer`. + ## + ## Returns: true if the arguments match a previously successful call to + ## :zeek:see:`Broker::peer`. + ## + ## TODO: We do not have a function yet to terminate a connection. + global unpeer: function(a: string, p: port): bool; + + ## Get a list of all peer connections. + ## + ## Returns: a list of all peer connections. + global peers: function(): vector of PeerInfo; + + ## Get a unique identifier for the local broker endpoint. + ## + ## Returns: a unique identifier for the local broker endpoint. + global node_id: function(): string; + + ## Sends all pending log messages to remote peers. This normally + ## doesn't need to be used except for test cases that are time-sensitive. + global flush_logs: function(): count; + + ## Publishes the value of an identifier to a given topic. The subscribers + ## will update their local value for that identifier on receipt. + ## + ## topic: a topic associated with the message. + ## + ## id: the identifier to publish. + ## + ## Returns: true if the message is sent. + global publish_id: function(topic: string, id: string): bool; + + ## Register interest in all peer event messages that use a certain topic + ## prefix. Note that subscriptions may not be altered immediately after + ## calling (except during :zeek:see:`zeek_init`). + ## + ## topic_prefix: a prefix to match against remote message topics. + ## e.g. an empty prefix matches everything and "a" matches + ## "alice" and "amy" but not "bob". + ## + ## Returns: true if it's a new event subscription and it is now registered. + global subscribe: function(topic_prefix: string): bool; + + ## Unregister interest in all peer event messages that use a topic prefix. + ## Note that subscriptions may not be altered immediately after calling + ## (except during :zeek:see:`zeek_init`). + ## + ## topic_prefix: a prefix previously supplied to a successful call to + ## :zeek:see:`Broker::subscribe` or :zeek:see:`Broker::forward`. + ## + ## Returns: true if interest in the topic prefix is no longer advertised. + global unsubscribe: function(topic_prefix: string): bool; + + ## Register a topic prefix subscription for events that should only be + ## forwarded to any subscribing peers and not raise any event handlers + ## on the receiving/forwarding node. i.e. it's the same as + ## :zeek:see:`Broker::subscribe` except matching events are not raised + ## on the receiver, just forwarded. 
Use :zeek:see:`Broker::unsubscribe` + ## with the same argument to undo this operation. + ## + ## topic_prefix: a prefix to match against remote message topics. + ## e.g. an empty prefix matches everything and "a" matches + ## "alice" and "amy" but not "bob". + ## + ## Returns: true if a new event forwarding/subscription is now registered. + global forward: function(topic_prefix: string): bool; + + ## Automatically send an event to any interested peers whenever it is + ## locally dispatched. (For example, using "event my_event(...);" in a + ## script.) + ## + ## topic: a topic string associated with the event message. + ## Peers advertise interest by registering a subscription to some + ## prefix of this topic name. + ## + ## ev: a Zeek event value. + ## + ## Returns: true if automatic event sending is now enabled. + global auto_publish: function(topic: string, ev: any): bool; + + ## Stop automatically sending an event to peers upon local dispatch. + ## + ## topic: a topic originally given to :zeek:see:`Broker::auto_publish`. + ## + ## ev: an event originally given to :zeek:see:`Broker::auto_publish`. + ## + ## Returns: true if automatic events will not occur for the topic/event + ## pair. + global auto_unpublish: function(topic: string, ev: any): bool; +} + +@load base/bif/comm.bif +@load base/bif/messaging.bif + +module Broker; + +event retry_listen(a: string, p: port, retry: interval) + { + listen(a, p, retry); + } + +function listen(a: string, p: port, retry: interval): port + { + local bound = __listen(a, p); + + if ( bound == 0/tcp ) + { + local e = getenv("ZEEK_DEFAULT_LISTEN_RETRY"); + + if ( e != "" ) + retry = double_to_interval(to_double(e)); + + if ( retry != 0secs ) + schedule retry { retry_listen(a, p, retry) }; + } + + return bound; + } + +function peer(a: string, p: port, retry: interval): bool + { + return __peer(a, p, retry); + } + +function unpeer(a: string, p: port): bool + { + return __unpeer(a, p); + } + +function peers(): vector of PeerInfo + { + return __peers(); + } + +function node_id(): string + { + return __node_id(); + } + +function flush_logs(): count + { + return __flush_logs(); + } + +function publish_id(topic: string, id: string): bool + { + return __publish_id(topic, id); + } + +function subscribe(topic_prefix: string): bool + { + return __subscribe(topic_prefix); + } + +function forward(topic_prefix: string): bool + { + return __forward(topic_prefix); + } + +function unsubscribe(topic_prefix: string): bool + { + return __unsubscribe(topic_prefix); + } + +function auto_publish(topic: string, ev: any): bool + { + return __auto_publish(topic, ev); + } + +function auto_unpublish(topic: string, ev: any): bool + { + return __auto_unpublish(topic, ev); + } diff --git a/scripts/base/frameworks/broker/store.bro b/scripts/base/frameworks/broker/store.bro deleted file mode 100644 index 2e216afa93..0000000000 --- a/scripts/base/frameworks/broker/store.bro +++ /dev/null @@ -1,1043 +0,0 @@ -##! The Broker-based data store API and its various options. - -@load ./main -@load base/bif/data.bif - -module Broker; - -export { - ## The default frequency at which clones will attempt to - ## reconnect/resynchronize with their master in the event that they become - ## disconnected. - const default_clone_resync_interval = 10sec &redef; - - ## The duration after which a clone that is disconnected from its master - ## will begin to treat its local cache as stale. In the stale state, - ## queries to the cache will timeout. 
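[For illustration, a minimal publish/subscribe sketch tying together Broker::subscribe and Broker::auto_publish as documented above; the event name and topic are placeholders.]

    # Illustrative sketch only: any peer subscribed to "zeek/example/" receives
    # example_ping whenever it is dispatched locally.
    global example_ping: event(n: count);

    event example_ping(n: count)
        {
        print "got ping", n;
        }

    event zeek_init()
        {
        Broker::subscribe("zeek/example/");
        Broker::auto_publish("zeek/example/ping", example_ping);

        event example_ping(1);
        }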
A negative value indicates that - ## the local cache is never treated as stale. - const default_clone_stale_interval = 5min &redef; - - ## The maximum amount of time that a disconnected clone will - ## buffer data store mutation commands. If the clone reconnects before - ## this time, it will replay all stored commands. Note that this doesn't - ## completely prevent the loss of store updates: all mutation messages - ## are fire-and-forget and not explicitly acknowledged by the master. - ## A negative/zero value indicates to never buffer commands. - const default_clone_mutation_buffer_interval = 2min &redef; - - ## Whether a data store query could be completed or not. - type QueryStatus: enum { - SUCCESS, - FAILURE, - }; - - ## The result of a data store query. - type QueryResult: record { - ## Whether the query completed or not. - status: Broker::QueryStatus; - ## The result of the query. Certain queries may use a particular - ## data type (e.g. querying store size always returns a count, but - ## a lookup may return various data types). - result: Broker::Data; - }; - - ## Enumerates the possible storage backends. - type BackendType: enum { - MEMORY, - SQLITE, - ROCKSDB, - }; - - ## Options to tune the SQLite storage backend. - type SQLiteOptions: record { - ## File system path of the database. - ## If left empty, will be derived from the name of the store, - ## and use the '.sqlite' file suffix. - path: string &default = ""; - }; - - ## Options to tune the RocksDB storage backend. - type RocksDBOptions: record { - ## File system path of the database. - ## If left empty, will be derived from the name of the store, - ## and use the '.rocksdb' file suffix. - path: string &default = ""; - }; - - ## Options to tune the particular storage backends. - type BackendOptions: record { - sqlite: SQLiteOptions &default = SQLiteOptions(); - rocksdb: RocksDBOptions &default = RocksDBOptions(); - }; - - ## Create a master data store which contains key-value pairs. - ## - ## name: a unique name for the data store. - ## - ## b: the storage backend to use. - ## - ## options: tunes how some storage backends operate. - ## - ## Returns: a handle to the data store. - global create_master: function(name: string, b: BackendType &default = MEMORY, - options: BackendOptions &default = BackendOptions()): opaque of Broker::Store; - - ## Create a clone of a master data store which may live with a remote peer. - ## A clone automatically synchronizes to the master by - ## receiving modifications and applying them locally. Direct modifications - ## are not possible, they must be sent through the master store, which then - ## automatically broadcasts the changes out to clones. But queries may be - ## made directly against the local cloned copy, which may be resolved - ## quicker than reaching out to a remote master store. - ## - ## name: the unique name which identifies the master data store. - ## - ## resync_interval: the frequency at which a clone that is disconnected from - ## its master attempts to reconnect with it. - ## - ## stale_interval: the duration after which a clone that is disconnected - ## from its master will begin to treat its local cache as - ## stale. In this state, queries to the clone will timeout. - ## A negative value indicates that the local cache is never - ## treated as stale. - ## - ## mutation_buffer_interval: the amount of time to buffer data store update - ## messages once a clone detects its master is - ## unavailable. 
If the clone reconnects before - ## this time, it will replay all buffered - ## commands. Note that this doesn't completely - ## prevent the loss of store updates: all mutation - ## messages are fire-and-forget and not explicitly - ## acknowledged by the master. A negative/zero - ## value indicates that commands never buffer. - ## - ## Returns: a handle to the data store. - global create_clone: function(name: string, - resync_interval: interval &default = default_clone_resync_interval, - stale_interval: interval &default = default_clone_stale_interval, - mutation_buffer_interval: interval &default = default_clone_mutation_buffer_interval): opaque of Broker::Store; - - ## Close a data store. - ## - ## h: a data store handle. - ## - ## Returns: true if store was valid and is now closed. The handle can no - ## longer be used for data store operations. - global close: function(h: opaque of Broker::Store): bool; - - ## Check if a store is closed or not. - ## - ## Returns: true if the store is closed. - global is_closed: function(h: opaque of Broker::Store): bool; - - ## Get the name of a store. - ## - ## Returns: the name of the store. - global store_name: function(h: opaque of Broker::Store): string; - - ## Check if a key exists in a data store. - ## - ## h: the handle of the store to query. - ## - ## k: the key to lookup. - ## - ## Returns: True if the key exists in the data store. - global exists: function(h: opaque of Broker::Store, k: any): QueryResult; - - ## Lookup the value associated with a key in a data store. - ## - ## h: the handle of the store to query. - ## - ## k: the key to lookup. - ## - ## Returns: the result of the query. - global get: function(h: opaque of Broker::Store, k: any): QueryResult; - - ## Insert a key-value pair in to the store, but only if the key does not - ## already exist. - ## - ## h: the handle of the store to modify. - ## - ## k: the key to insert. - ## - ## v: the value to insert. - ## - ## e: the expiration interval of the key-value pair. - ## - ## Returns: the result of the query which is a boolean data value that is - ## true if the insertion happened, or false if it was rejected - ## due to the key already existing. - global put_unique: function(h: opaque of Broker::Store, - k: any, v: any, e: interval &default=0sec): QueryResult; - - ## Retrieve a specific index from an existing container value. This - ## is supported for values of types set, table, and vector. - ## - ## h: the handle of the store to query. - ## - ## k: the key of the container value to lookup. - ## - ## i: the index to retrieve from the container value. - ## - ## Returns: For tables and vectors, the value at the given index, or - ## failure if the index doesn't exist. For sets, a boolean - ## indicating whether the index exists. Returns failure if the key - ## does not exist at all. - global get_index_from_value: function(h: opaque of Broker::Store, - k: any, i: any): QueryResult; - - ## Insert a key-value pair in to the store. - ## - ## h: the handle of the store to modify. - ## - ## k: the key to insert. - ## - ## v: the value to insert. - ## - ## e: the expiration interval of the key-value pair. - ## - ## Returns: false if the store handle was not valid. - global put: function(h: opaque of Broker::Store, - k: any, v: any, e: interval &default=0sec) : bool; - - ## Remove a key-value pair from the store. - ## - ## h: the handle of the store to modify. - ## - ## k: the key to remove. - ## - ## Returns: false if the store handle was not valid. 
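[For illustration, a sketch of the master-store calls documented above; the store name and key are placeholders, and the when/timeout form is the usual way to consume the asynchronous query result.]

    # Illustrative sketch only: create a master store, insert a value, look it up.
    global example_store: opaque of Broker::Store;

    event zeek_init()
        {
        example_store = Broker::create_master("example-store");
        Broker::put(example_store, "one", 1);

        when ( local res = Broker::get(example_store, "one") )
            {
            print "lookup", res$status, res$result;
            }
        timeout 5sec
            {
            print "lookup timed out";
            }
        }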
- global erase: function(h: opaque of Broker::Store, k: any) : bool; - - ## Increments an existing value by a given amount. This is supported for all - ## numerical types, as well as for timestamps. - ## - ## h: the handle of the store to modify. - ## - ## k: the key whose associated value is to be modified. The key must - ## already exist. - ## - ## a: the amount to increment the value by. - ## - ## e: the new expiration interval of the modified key. If null, the - ## current expiration time isn't changed. - ## - ## Returns: false if the store handle was not valid. - global increment: function(h: opaque of Broker::Store, k: any, - a: any &default = 1, - e: interval &default=0sec) : bool; - - ## Decrements an existing value by a given amount. This is supported for all - ## numerical types, as well as for timestamps. - ## - ## h: the handle of the store to modify. - ## - ## k: the key whose associated value is to be modified. The key must - ## already exist. - ## - ## amount: the amount to decrement the value by. - ## - ## e: the new expiration interval of the modified key. If null, the current - ## expiration time isn't changed. - ## - ## Returns: false if the store handle was not valid. - global decrement: function(h: opaque of Broker::Store, k: any, - a: any &default = 1, - e: interval &default=0sec) : bool; - - ## Extends an existing string with another. - ## - ## h: the handle of the store to modify. - ## - ## k: the key whose associated value is to be modified. The key must - ## already exist. - ## - ## s: the string to append. - ## - ## e: the new expiration interval of the modified key. If null, the - ## current expiration time isn't changed. - ## - ## Returns: false if the store handle was not valid. - global append: function(h: opaque of Broker::Store, k: any, s: string, - e: interval &default=0sec) : bool; - - ## Inserts an element into an existing set. - ## - ## h: the handle of the store to modify. - ## - ## k: the key whose associated value is to be modified. The key must - ## already exist. - ## - ## i: the index to insert into the set. - ## - ## e: the new expiration interval of the modified key. If null, the - ## current expiration time isn't changed. - ## - ## Returns: false if the store handle was not valid. - global insert_into_set: function(h: opaque of Broker::Store, - k: any, i: any, - e: interval &default=0sec) : bool; - - ## Inserts an element into an existing table. - ## - ## h: the handle of the store to modify. - ## - ## k: the key whose associated value is to be modified. The key must - ## already exist. - ## - ## i: the index to insert into the table - ## - ## v: the value to associate with the index. - ## - ## e: the new expiration interval of the modified key. If null, the - ## current expiration time isn't changed. - ## - ## Returns: false if the store handle was not valid. - global insert_into_table: function(h: opaque of Broker::Store, - k: any, i: any, v: any, - e: interval &default=0sec) : bool; - - ## Removes an element from an existing set or table. - ## - ## h: the handle of the store to modify. - ## - ## k: the key whose associated value is to be modified. The key must - ## already exist. - ## - ## i: the index to remove from the set or table. - ## - ## e: the new expiration interval of the modified key. If null, the - ## current expiration time isn't changed. - ## - ## Returns: false if the store handle was not valid. 
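[For illustration, a sketch of the in-place mutation helpers documented above (increment, insert_into_set, remove_from); the keys and values are placeholders.]

    # Illustrative sketch only: a counter and a set maintained inside a store.
    event zeek_init()
        {
        local h = Broker::create_master("example-mutations");

        Broker::put(h, "hits", 0);
        Broker::increment(h, "hits");        # +1, the default amount
        Broker::increment(h, "hits", 5);     # +5

        Broker::put(h, "tags", set("a"));
        Broker::insert_into_set(h, "tags", "b");
        Broker::remove_from(h, "tags", "a");
        }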
- global remove_from: function(h: opaque of Broker::Store, - k: any, i: any, - e: interval &default=0sec) : bool; - - ## Appends an element to an existing vector. - ## - ## h: the handle of the store to modify. - ## - ## k: the key whose associated value is to be modified. The key must - ## already exist. - ## - ## b: the value to append to the vector. - ## - ## e: the new expiration interval of the modified key. If null, the - ## current expiration time isn't changed. - ## - ## Returns: false if the store handle was not valid. - global push: function(h: opaque of Broker::Store, - k: any, v: any, - e: interval &default=0sec) : bool; - - ## Removes the last element of an existing vector. - ## - ## h: the handle of the store to modify. - ## - ## k: the key whose associated value is to be modified. The key must - ## already exist. - ## - ## e: the new expiration interval of the modified key. If null, the - ## current expiration time isn't changed. - ## - ## Returns: false if the store handle was not valid. - global pop: function(h: opaque of Broker::Store, - k: any, - e: interval &default=0sec) : bool; - - ## Returns a set with all of a store's keys. The results reflect a snapshot - ## in time that may diverge from reality soon afterwards. When acessing - ## any of the element, it may no longer actually be there. The function is - ## also expensive for large stores, as it copies the complete set. - ## - ## Returns: a set with the keys. If you expect the keys to be of - ## non-uniform type, consider using - ## :bro:see:`Broker::set_iterator` to iterate over the result. - global keys: function(h: opaque of Broker::Store): QueryResult; - - ## Deletes all of a store's content, it will be empty afterwards. - ## - ## Returns: false if the store handle was not valid. - global clear: function(h: opaque of Broker::Store) : bool; - - ########################## - # Data API # - ########################## - - ## Convert any Bro value to communication data. - ## - ## .. note:: Normally you won't need to use this function as data - ## conversion happens implicitly when passing Bro values into Broker - ## functions. - ## - ## d: any Bro value to attempt to convert (not all types are supported). - ## - ## Returns: the converted communication data. If the supplied Bro data - ## type does not support conversion to communication data, the - ## returned record's optional field will not be set. - global data: function(d: any): Broker::Data; - - ## Retrieve the type of data associated with communication data. - ## - ## d: the communication data. - ## - ## Returns: The data type associated with the communication data. - ## Note that broker represents records in the same way as - ## vectors, so there is no "record" type. - global data_type: function(d: Broker::Data): Broker::DataType; - - ## Create communication data of type "set". - global set_create: function(): Broker::Data; - - ## Remove all elements within a set. - ## - ## s: the set to clear. - ## - ## Returns: always true. - global set_clear: function(s: Broker::Data) : bool; - - ## Get the number of elements within a set. - ## - ## s: the set to query. - ## - ## Returns: the number of elements in the set. - global set_size: function(s: Broker::Data): count; - - ## Check if a set contains a particular element. - ## - ## s: the set to query. - ## - ## key: the element to check for existence. - ## - ## Returns: true if the key exists in the set. - global set_contains: function(s: Broker::Data, key: any) : bool; - - ## Insert an element into a set. 
- ## - ## s: the set to modify. - ## - ## key: the element to insert. - ## - ## Returns: true if the key was inserted, or false if it already existed. - global set_insert: function(s: Broker::Data, key: any) : bool; - - ## Remove an element from a set. - ## - ## s: the set to modify. - ## - ## key: the element to remove. - ## - ## Returns: true if the element existed in the set and is now removed. - global set_remove: function(s: Broker::Data, key: any) : bool; - - ## Create an iterator for a set. Note that this makes a copy of the set - ## internally to ensure the iterator is always valid. - ## - ## s: the set to iterate over. - ## - ## Returns: an iterator. - global set_iterator: function(s: Broker::Data): opaque of Broker::SetIterator; - - ## Check if there are no more elements to iterate over. - ## - ## it: an iterator. - ## - ## Returns: true if there are no more elements to iterator over, i.e. - ## the iterator is one-past-the-final-element. - global set_iterator_last: function(it: opaque of Broker::SetIterator) : bool; - - ## Advance an iterator. - ## - ## it: an iterator. - ## - ## Returns: true if the iterator, after advancing, still references an element - ## in the collection. False if the iterator, after advancing, is - ## one-past-the-final-element. - global set_iterator_next: function(it: opaque of Broker::SetIterator) : bool; - - ## Retrieve the data at an iterator's current position. - ## - ## it: an iterator. - ## - ## Returns: element in the collection that the iterator currently references. - global set_iterator_value: function(it: opaque of Broker::SetIterator): Broker::Data; - - ## Create communication data of type "table". - global table_create: function(): Broker::Data; - - ## Remove all elements within a table. - ## - ## t: the table to clear. - ## - ## Returns: always true. - global table_clear: function(t: Broker::Data) : bool; - - ## Get the number of elements within a table. - ## - ## t: the table to query. - ## - ## Returns: the number of elements in the table. - global table_size: function(t: Broker::Data): count; - - ## Check if a table contains a particular key. - ## - ## t: the table to query. - ## - ## key: the key to check for existence. - ## - ## Returns: true if the key exists in the table. - global table_contains: function(t: Broker::Data, key: any) : bool; - - ## Insert a key-value pair into a table. - ## - ## t: the table to modify. - ## - ## key: the key at which to insert the value. - ## - ## val: the value to insert. - ## - ## Returns: true if the key-value pair was inserted, or false if the key - ## already existed in the table. - global table_insert: function(t: Broker::Data, key: any, val: any): Broker::Data; - - ## Remove a key-value pair from a table. - ## - ## t: the table to modify. - ## - ## key: the key to remove from the table. - ## - ## Returns: the value associated with the key. If the key did not exist, then - ## the optional field of the returned record is not set. - global table_remove: function(t: Broker::Data, key: any): Broker::Data; - - ## Retrieve a value from a table. - ## - ## t: the table to query. - ## - ## key: the key to lookup. - ## - ## Returns: the value associated with the key. If the key did not exist, then - ## the optional field of the returned record is not set. - global table_lookup: function(t: Broker::Data, key: any): Broker::Data; - - ## Create an iterator for a table. Note that this makes a copy of the table - ## internally to ensure the iterator is always valid. - ## - ## t: the table to iterate over. 
- ## - ## Returns: an iterator. - global table_iterator: function(t: Broker::Data): opaque of Broker::TableIterator; - - ## Check if there are no more elements to iterate over. - ## - ## it: an iterator. - ## - ## Returns: true if there are no more elements to iterator over, i.e. - ## the iterator is one-past-the-final-element. - global table_iterator_last: function(it: opaque of Broker::TableIterator) : bool; - - ## Advance an iterator. - ## - ## it: an iterator. - ## - ## Returns: true if the iterator, after advancing, still references an element - ## in the collection. False if the iterator, after advancing, is - ## one-past-the-final-element. - global table_iterator_next: function(it: opaque of Broker::TableIterator) : bool; - - ## Retrieve the data at an iterator's current position. - ## - ## it: an iterator. - ## - ## Returns: element in the collection that the iterator currently references. - global table_iterator_value: function(it: opaque of Broker::TableIterator): Broker::TableItem; - - ## Create communication data of type "vector". - global vector_create: function(): Broker::Data; - - ## Remove all elements within a vector. - ## - ## v: the vector to clear. - ## - ## Returns: always true. - global vector_clear: function(v: Broker::Data) : bool; - - ## Get the number of elements within a vector. - ## - ## v: the vector to query. - ## - ## Returns: the number of elements in the vector. - global vector_size: function(v: Broker::Data): count; - - ## Insert an element into a vector at a particular position, possibly displacing - ## existing elements (insertion always grows the size of the vector by one). - ## - ## v: the vector to modify. - ## - ## d: the element to insert. - ## - ## idx: the index at which to insert the data. If it is greater than the - ## current size of the vector, the element is inserted at the end. - ## - ## Returns: always true. - global vector_insert: function(v: Broker::Data, idx: count, d: any) : bool; - - ## Replace an element in a vector at a particular position. - ## - ## v: the vector to modify. - ## - ## d: the element to insert. - ## - ## idx: the index to replace. - ## - ## Returns: the value that was just evicted. If the index was larger than any - ## valid index, the optional field of the returned record is not set. - global vector_replace: function(v: Broker::Data, idx: count, d: any): Broker::Data; - - ## Remove an element from a vector at a particular position. - ## - ## v: the vector to modify. - ## - ## idx: the index to remove. - ## - ## Returns: the value that was just evicted. If the index was larger than any - ## valid index, the optional field of the returned record is not set. - global vector_remove: function(v: Broker::Data, idx: count): Broker::Data; - - ## Lookup an element in a vector at a particular position. - ## - ## v: the vector to query. - ## - ## idx: the index to lookup. - ## - ## Returns: the value at the index. If the index was larger than any - ## valid index, the optional field of the returned record is not set. - global vector_lookup: function(v: Broker::Data, idx: count): Broker::Data; - - ## Create an iterator for a vector. Note that this makes a copy of the vector - ## internally to ensure the iterator is always valid. - ## - ## v: the vector to iterate over. - ## - ## Returns: an iterator. - global vector_iterator: function(v: Broker::Data): opaque of Broker::VectorIterator; - - ## Check if there are no more elements to iterate over. - ## - ## it: an iterator. 
- ## - ## Returns: true if there are no more elements to iterator over, i.e. - ## the iterator is one-past-the-final-element. - global vector_iterator_last: function(it: opaque of Broker::VectorIterator) : bool; - - ## Advance an iterator. - ## - ## it: an iterator. - ## - ## Returns: true if the iterator, after advancing, still references an element - ## in the collection. False if the iterator, after advancing, is - ## one-past-the-final-element. - global vector_iterator_next: function(it: opaque of Broker::VectorIterator) : bool; - - ## Retrieve the data at an iterator's current position. - ## - ## it: an iterator. - ## - ## Returns: element in the collection that the iterator currently references. - global vector_iterator_value: function(it: opaque of Broker::VectorIterator): Broker::Data; - - ## Create communication data of type "record". - ## - ## sz: the number of fields in the record. - ## - ## Returns: record data, with all fields uninitialized. - global record_create: function(sz: count): Broker::Data; - - ## Get the number of fields within a record. - ## - ## r: the record to query. - ## - ## Returns: the number of fields in the record. - global record_size: function(r: Broker::Data): count; - - ## Replace a field in a record at a particular position. - ## - ## r: the record to modify. - ## - ## d: the new field value to assign. - ## - ## idx: the index to replace. - ## - ## Returns: false if the index was larger than any valid index, else true. - global record_assign: function(r: Broker::Data, idx: count, d: any) : bool; - - ## Lookup a field in a record at a particular position. - ## - ## r: the record to query. - ## - ## idx: the index to lookup. - ## - ## Returns: the value at the index. The optional field of the returned record - ## may not be set if the field of the record has no value or if the - ## index was not valid. - global record_lookup: function(r: Broker::Data, idx: count): Broker::Data; - - ## Create an iterator for a record. Note that this makes a copy of the record - ## internally to ensure the iterator is always valid. - ## - ## r: the record to iterate over. - ## - ## Returns: an iterator. - global record_iterator: function(r: Broker::Data): opaque of Broker::RecordIterator; - - ## Check if there are no more elements to iterate over. - ## - ## it: an iterator. - ## - ## Returns: true if there are no more elements to iterator over, i.e. - ## the iterator is one-past-the-final-element. - global record_iterator_last: function(it: opaque of Broker::RecordIterator) : bool; - - ## Advance an iterator. - ## - ## it: an iterator. - ## - ## Returns: true if the iterator, after advancing, still references an element - ## in the collection. False if the iterator, after advancing, is - ## one-past-the-final-element. - global record_iterator_next: function(it: opaque of Broker::RecordIterator) : bool; - - ## Retrieve the data at an iterator's current position. - ## - ## it: an iterator. - ## - ## Returns: element in the collection that the iterator currently references. 
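(Editor's note: the iterator helpers documented above, which carry over unchanged into the new store.zeek further down, are normally used in a simple walk loop. A minimal sketch over an arbitrary example vector:)

event zeek_init()
    {
    # Convert a Zeek vector to communication data, then walk it.
    local d = Broker::data(vector(1, 2, 3));
    local it = Broker::vector_iterator(d);

    while ( ! Broker::vector_iterator_last(it) )
        {
        print Broker::vector_iterator_value(it);
        Broker::vector_iterator_next(it);
        }
    }
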
- global record_iterator_value: function(it: opaque of Broker::RecordIterator): Broker::Data; -} - -@load base/bif/store.bif - -module Broker; - -function create_master(name: string, b: BackendType &default = MEMORY, - options: BackendOptions &default = BackendOptions()): opaque of Broker::Store - { - return __create_master(name, b, options); - } - -function create_clone(name: string, - resync_interval: interval &default = default_clone_resync_interval, - stale_interval: interval &default = default_clone_stale_interval, - mutation_buffer_interval: interval &default = default_clone_mutation_buffer_interval): opaque of Broker::Store - { - return __create_clone(name, resync_interval, stale_interval, - mutation_buffer_interval); - } - -function close(h: opaque of Broker::Store): bool - { - return __close(h); - } - -function is_closed(h: opaque of Broker::Store): bool - { - return __is_closed(h); - } - -function store_name(h: opaque of Broker::Store): string - { - return __store_name(h); - } - -function exists(h: opaque of Broker::Store, k: any): QueryResult - { - return __exists(h, k); - } - -function get(h: opaque of Broker::Store, k: any): QueryResult - { - return __get(h, k); - } - -function put_unique(h: opaque of Broker::Store, k: any, v: any, - e: interval &default=0sec): QueryResult - { - return __put_unique(h, k, v, e); - } - -function get_index_from_value(h: opaque of Broker::Store, k: any, i: any): QueryResult - { - return __get_index_from_value(h, k, i); - } - -function keys(h: opaque of Broker::Store): QueryResult - { - return __keys(h); - } - -function put(h: opaque of Broker::Store, k: any, v: any, e: interval) : bool - { - return __put(h, k, v, e); - } - -function erase(h: opaque of Broker::Store, k: any) : bool - { - return __erase(h, k); - } - -function increment(h: opaque of Broker::Store, k: any, a: any, e: interval) : bool - { - return __increment(h, k, a, e); - } - -function decrement(h: opaque of Broker::Store, k: any, a: any, e: interval) : bool - { - return __decrement(h, k, a, e); - } - -function append(h: opaque of Broker::Store, k: any, s: string, e: interval) : bool - { - return __append(h, k, s, e); - } - -function insert_into_set(h: opaque of Broker::Store, k: any, i: any, e: interval) : bool - { - return __insert_into_set(h, k, i, e); - } - -function insert_into_table(h: opaque of Broker::Store, k: any, i: any, v: any, e: interval) : bool - { - return __insert_into_table(h, k, i, v, e); - } - -function remove_from(h: opaque of Broker::Store, k: any, i: any, e: interval) : bool - { - return __remove_from(h, k, i, e); - } - -function push(h: opaque of Broker::Store, k: any, v: any, e: interval) : bool - { - return __push(h, k, v, e); - } - -function pop(h: opaque of Broker::Store, k: any, e: interval) : bool - { - return __pop(h, k, e); - } - -function clear(h: opaque of Broker::Store) : bool - { - return __clear(h); - } - -function data_type(d: Broker::Data): Broker::DataType - { - return __data_type(d); - } - -function data(d: any): Broker::Data - { - return __data(d); - } - -function set_create(): Broker::Data - { - return __set_create(); - } - -function set_clear(s: Broker::Data) : bool - { - return __set_clear(s); - } - -function set_size(s: Broker::Data): count - { - return __set_size(s); - } - -function set_contains(s: Broker::Data, key: any) : bool - { - return __set_contains(s, key); - } - -function set_insert(s: Broker::Data, key: any) : bool - { - return __set_insert(s, key); - } - -function set_remove(s: Broker::Data, key: any) : bool - { - return 
__set_remove(s, key); - } - -function set_iterator(s: Broker::Data): opaque of Broker::SetIterator - { - return __set_iterator(s); - } - -function set_iterator_last(it: opaque of Broker::SetIterator) : bool - { - return __set_iterator_last(it); - } - -function set_iterator_next(it: opaque of Broker::SetIterator) : bool - { - return __set_iterator_next(it); - } - -function set_iterator_value(it: opaque of Broker::SetIterator): Broker::Data - { - return __set_iterator_value(it); - } - -function table_create(): Broker::Data - { - return __table_create(); - } - -function table_clear(t: Broker::Data) : bool - { - return __table_clear(t); - } - -function table_size(t: Broker::Data): count - { - return __table_size(t); - } - -function table_contains(t: Broker::Data, key: any) : bool - { - return __table_contains(t, key); - } - -function table_insert(t: Broker::Data, key: any, val: any): Broker::Data - { - return __table_insert(t, key, val); - } - -function table_remove(t: Broker::Data, key: any): Broker::Data - { - return __table_remove(t, key); - } - -function table_lookup(t: Broker::Data, key: any): Broker::Data - { - return __table_lookup(t, key); - } - -function table_iterator(t: Broker::Data): opaque of Broker::TableIterator - { - return __table_iterator(t); - } - -function table_iterator_last(it: opaque of Broker::TableIterator) : bool - { - return __table_iterator_last(it); - } - -function table_iterator_next(it: opaque of Broker::TableIterator) : bool - { - return __table_iterator_next(it); - } - -function table_iterator_value(it: opaque of Broker::TableIterator): Broker::TableItem - { - return __table_iterator_value(it); - } - -function vector_create(): Broker::Data - { - return __vector_create(); - } - -function vector_clear(v: Broker::Data) : bool - { - return __vector_clear(v); - } - -function vector_size(v: Broker::Data): count - { - return __vector_size(v); - } - -function vector_insert(v: Broker::Data, idx: count, d: any) : bool - { - return __vector_insert(v, idx, d); - } - -function vector_replace(v: Broker::Data, idx: count, d: any): Broker::Data - { - return __vector_replace(v, idx, d); - } - -function vector_remove(v: Broker::Data, idx: count): Broker::Data - { - return __vector_remove(v, idx); - } - -function vector_lookup(v: Broker::Data, idx: count): Broker::Data - { - return __vector_lookup(v, idx); - } - -function vector_iterator(v: Broker::Data): opaque of Broker::VectorIterator - { - return __vector_iterator(v); - } - -function vector_iterator_last(it: opaque of Broker::VectorIterator) : bool - { - return __vector_iterator_last(it); - } - -function vector_iterator_next(it: opaque of Broker::VectorIterator) : bool - { - return __vector_iterator_next(it); - } - -function vector_iterator_value(it: opaque of Broker::VectorIterator): Broker::Data - { - return __vector_iterator_value(it); - } - -function record_create(sz: count): Broker::Data - { - return __record_create(sz); - } - -function record_size(r: Broker::Data): count - { - return __record_size(r); - } - -function record_assign(r: Broker::Data, idx: count, d: any) : bool - { - return __record_assign(r, idx, d); - } - -function record_lookup(r: Broker::Data, idx: count): Broker::Data - { - return __record_lookup(r, idx); - } - -function record_iterator(r: Broker::Data): opaque of Broker::RecordIterator - { - return __record_iterator(r); - } - -function record_iterator_last(it: opaque of Broker::RecordIterator) : bool - { - return __record_iterator_last(it); - } - -function record_iterator_next(it: opaque of 
Broker::RecordIterator) : bool - { - return __record_iterator_next(it); - } - -function record_iterator_value(it: opaque of Broker::RecordIterator): Broker::Data - { - return __record_iterator_value(it); - } - diff --git a/scripts/base/frameworks/broker/store.zeek b/scripts/base/frameworks/broker/store.zeek new file mode 100644 index 0000000000..50559c4522 --- /dev/null +++ b/scripts/base/frameworks/broker/store.zeek @@ -0,0 +1,1043 @@ +##! The Broker-based data store API and its various options. + +@load ./main +@load base/bif/data.bif + +module Broker; + +export { + ## The default frequency at which clones will attempt to + ## reconnect/resynchronize with their master in the event that they become + ## disconnected. + const default_clone_resync_interval = 10sec &redef; + + ## The duration after which a clone that is disconnected from its master + ## will begin to treat its local cache as stale. In the stale state, + ## queries to the cache will timeout. A negative value indicates that + ## the local cache is never treated as stale. + const default_clone_stale_interval = 5min &redef; + + ## The maximum amount of time that a disconnected clone will + ## buffer data store mutation commands. If the clone reconnects before + ## this time, it will replay all stored commands. Note that this doesn't + ## completely prevent the loss of store updates: all mutation messages + ## are fire-and-forget and not explicitly acknowledged by the master. + ## A negative/zero value indicates to never buffer commands. + const default_clone_mutation_buffer_interval = 2min &redef; + + ## Whether a data store query could be completed or not. + type QueryStatus: enum { + SUCCESS, + FAILURE, + }; + + ## The result of a data store query. + type QueryResult: record { + ## Whether the query completed or not. + status: Broker::QueryStatus; + ## The result of the query. Certain queries may use a particular + ## data type (e.g. querying store size always returns a count, but + ## a lookup may return various data types). + result: Broker::Data; + }; + + ## Enumerates the possible storage backends. + type BackendType: enum { + MEMORY, + SQLITE, + ROCKSDB, + }; + + ## Options to tune the SQLite storage backend. + type SQLiteOptions: record { + ## File system path of the database. + ## If left empty, will be derived from the name of the store, + ## and use the '.sqlite' file suffix. + path: string &default = ""; + }; + + ## Options to tune the RocksDB storage backend. + type RocksDBOptions: record { + ## File system path of the database. + ## If left empty, will be derived from the name of the store, + ## and use the '.rocksdb' file suffix. + path: string &default = ""; + }; + + ## Options to tune the particular storage backends. + type BackendOptions: record { + sqlite: SQLiteOptions &default = SQLiteOptions(); + rocksdb: RocksDBOptions &default = RocksDBOptions(); + }; + + ## Create a master data store which contains key-value pairs. + ## + ## name: a unique name for the data store. + ## + ## b: the storage backend to use. + ## + ## options: tunes how some storage backends operate. + ## + ## Returns: a handle to the data store. + global create_master: function(name: string, b: BackendType &default = MEMORY, + options: BackendOptions &default = BackendOptions()): opaque of Broker::Store; + + ## Create a clone of a master data store which may live with a remote peer. + ## A clone automatically synchronizes to the master by + ## receiving modifications and applying them locally. 
Direct modifications + ## are not possible, they must be sent through the master store, which then + ## automatically broadcasts the changes out to clones. But queries may be + ## made directly against the local cloned copy, which may be resolved + ## quicker than reaching out to a remote master store. + ## + ## name: the unique name which identifies the master data store. + ## + ## resync_interval: the frequency at which a clone that is disconnected from + ## its master attempts to reconnect with it. + ## + ## stale_interval: the duration after which a clone that is disconnected + ## from its master will begin to treat its local cache as + ## stale. In this state, queries to the clone will timeout. + ## A negative value indicates that the local cache is never + ## treated as stale. + ## + ## mutation_buffer_interval: the amount of time to buffer data store update + ## messages once a clone detects its master is + ## unavailable. If the clone reconnects before + ## this time, it will replay all buffered + ## commands. Note that this doesn't completely + ## prevent the loss of store updates: all mutation + ## messages are fire-and-forget and not explicitly + ## acknowledged by the master. A negative/zero + ## value indicates that commands never buffer. + ## + ## Returns: a handle to the data store. + global create_clone: function(name: string, + resync_interval: interval &default = default_clone_resync_interval, + stale_interval: interval &default = default_clone_stale_interval, + mutation_buffer_interval: interval &default = default_clone_mutation_buffer_interval): opaque of Broker::Store; + + ## Close a data store. + ## + ## h: a data store handle. + ## + ## Returns: true if store was valid and is now closed. The handle can no + ## longer be used for data store operations. + global close: function(h: opaque of Broker::Store): bool; + + ## Check if a store is closed or not. + ## + ## Returns: true if the store is closed. + global is_closed: function(h: opaque of Broker::Store): bool; + + ## Get the name of a store. + ## + ## Returns: the name of the store. + global store_name: function(h: opaque of Broker::Store): string; + + ## Check if a key exists in a data store. + ## + ## h: the handle of the store to query. + ## + ## k: the key to lookup. + ## + ## Returns: True if the key exists in the data store. + global exists: function(h: opaque of Broker::Store, k: any): QueryResult; + + ## Lookup the value associated with a key in a data store. + ## + ## h: the handle of the store to query. + ## + ## k: the key to lookup. + ## + ## Returns: the result of the query. + global get: function(h: opaque of Broker::Store, k: any): QueryResult; + + ## Insert a key-value pair in to the store, but only if the key does not + ## already exist. + ## + ## h: the handle of the store to modify. + ## + ## k: the key to insert. + ## + ## v: the value to insert. + ## + ## e: the expiration interval of the key-value pair. + ## + ## Returns: the result of the query which is a boolean data value that is + ## true if the insertion happened, or false if it was rejected + ## due to the key already existing. + global put_unique: function(h: opaque of Broker::Store, + k: any, v: any, e: interval &default=0sec): QueryResult; + + ## Retrieve a specific index from an existing container value. This + ## is supported for values of types set, table, and vector. + ## + ## h: the handle of the store to query. + ## + ## k: the key of the container value to lookup. + ## + ## i: the index to retrieve from the container value. 
+ ## + ## Returns: For tables and vectors, the value at the given index, or + ## failure if the index doesn't exist. For sets, a boolean + ## indicating whether the index exists. Returns failure if the key + ## does not exist at all. + global get_index_from_value: function(h: opaque of Broker::Store, + k: any, i: any): QueryResult; + + ## Insert a key-value pair in to the store. + ## + ## h: the handle of the store to modify. + ## + ## k: the key to insert. + ## + ## v: the value to insert. + ## + ## e: the expiration interval of the key-value pair. + ## + ## Returns: false if the store handle was not valid. + global put: function(h: opaque of Broker::Store, + k: any, v: any, e: interval &default=0sec) : bool; + + ## Remove a key-value pair from the store. + ## + ## h: the handle of the store to modify. + ## + ## k: the key to remove. + ## + ## Returns: false if the store handle was not valid. + global erase: function(h: opaque of Broker::Store, k: any) : bool; + + ## Increments an existing value by a given amount. This is supported for all + ## numerical types, as well as for timestamps. + ## + ## h: the handle of the store to modify. + ## + ## k: the key whose associated value is to be modified. The key must + ## already exist. + ## + ## a: the amount to increment the value by. + ## + ## e: the new expiration interval of the modified key. If null, the + ## current expiration time isn't changed. + ## + ## Returns: false if the store handle was not valid. + global increment: function(h: opaque of Broker::Store, k: any, + a: any &default = 1, + e: interval &default=0sec) : bool; + + ## Decrements an existing value by a given amount. This is supported for all + ## numerical types, as well as for timestamps. + ## + ## h: the handle of the store to modify. + ## + ## k: the key whose associated value is to be modified. The key must + ## already exist. + ## + ## amount: the amount to decrement the value by. + ## + ## e: the new expiration interval of the modified key. If null, the current + ## expiration time isn't changed. + ## + ## Returns: false if the store handle was not valid. + global decrement: function(h: opaque of Broker::Store, k: any, + a: any &default = 1, + e: interval &default=0sec) : bool; + + ## Extends an existing string with another. + ## + ## h: the handle of the store to modify. + ## + ## k: the key whose associated value is to be modified. The key must + ## already exist. + ## + ## s: the string to append. + ## + ## e: the new expiration interval of the modified key. If null, the + ## current expiration time isn't changed. + ## + ## Returns: false if the store handle was not valid. + global append: function(h: opaque of Broker::Store, k: any, s: string, + e: interval &default=0sec) : bool; + + ## Inserts an element into an existing set. + ## + ## h: the handle of the store to modify. + ## + ## k: the key whose associated value is to be modified. The key must + ## already exist. + ## + ## i: the index to insert into the set. + ## + ## e: the new expiration interval of the modified key. If null, the + ## current expiration time isn't changed. + ## + ## Returns: false if the store handle was not valid. + global insert_into_set: function(h: opaque of Broker::Store, + k: any, i: any, + e: interval &default=0sec) : bool; + + ## Inserts an element into an existing table. + ## + ## h: the handle of the store to modify. + ## + ## k: the key whose associated value is to be modified. The key must + ## already exist. 
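(Editor's note, a minimal sketch of the store API documented in the new store.zeek: it creates a master store and applies a few of the modification and query calls described here. The store name "counters" and the keys are made up; note that query functions such as Broker::get are asynchronous and must be used inside a when statement.)

event zeek_init()
    {
    local h = Broker::create_master("counters");

    # Seed keys, then use the in-place modification helpers.
    Broker::put(h, "hits", 0);
    Broker::increment(h, "hits", 5);
    Broker::put(h, "motd", "hello");
    Broker::append(h, "motd", ", world");

    when ( local r = Broker::get(h, "hits") )
        {
        if ( r$status == Broker::SUCCESS )
            print "hits is now", r$result;
        }
    timeout 5sec
        {
        print "store lookup timed out";
        }
    }
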
+ ## + ## i: the index to insert into the table + ## + ## v: the value to associate with the index. + ## + ## e: the new expiration interval of the modified key. If null, the + ## current expiration time isn't changed. + ## + ## Returns: false if the store handle was not valid. + global insert_into_table: function(h: opaque of Broker::Store, + k: any, i: any, v: any, + e: interval &default=0sec) : bool; + + ## Removes an element from an existing set or table. + ## + ## h: the handle of the store to modify. + ## + ## k: the key whose associated value is to be modified. The key must + ## already exist. + ## + ## i: the index to remove from the set or table. + ## + ## e: the new expiration interval of the modified key. If null, the + ## current expiration time isn't changed. + ## + ## Returns: false if the store handle was not valid. + global remove_from: function(h: opaque of Broker::Store, + k: any, i: any, + e: interval &default=0sec) : bool; + + ## Appends an element to an existing vector. + ## + ## h: the handle of the store to modify. + ## + ## k: the key whose associated value is to be modified. The key must + ## already exist. + ## + ## b: the value to append to the vector. + ## + ## e: the new expiration interval of the modified key. If null, the + ## current expiration time isn't changed. + ## + ## Returns: false if the store handle was not valid. + global push: function(h: opaque of Broker::Store, + k: any, v: any, + e: interval &default=0sec) : bool; + + ## Removes the last element of an existing vector. + ## + ## h: the handle of the store to modify. + ## + ## k: the key whose associated value is to be modified. The key must + ## already exist. + ## + ## e: the new expiration interval of the modified key. If null, the + ## current expiration time isn't changed. + ## + ## Returns: false if the store handle was not valid. + global pop: function(h: opaque of Broker::Store, + k: any, + e: interval &default=0sec) : bool; + + ## Returns a set with all of a store's keys. The results reflect a snapshot + ## in time that may diverge from reality soon afterwards. When acessing + ## any of the element, it may no longer actually be there. The function is + ## also expensive for large stores, as it copies the complete set. + ## + ## Returns: a set with the keys. If you expect the keys to be of + ## non-uniform type, consider using + ## :zeek:see:`Broker::set_iterator` to iterate over the result. + global keys: function(h: opaque of Broker::Store): QueryResult; + + ## Deletes all of a store's content, it will be empty afterwards. + ## + ## Returns: false if the store handle was not valid. + global clear: function(h: opaque of Broker::Store) : bool; + + ########################## + # Data API # + ########################## + + ## Convert any Zeek value to communication data. + ## + ## .. note:: Normally you won't need to use this function as data + ## conversion happens implicitly when passing Zeek values into Broker + ## functions. + ## + ## d: any Zeek value to attempt to convert (not all types are supported). + ## + ## Returns: the converted communication data. If the supplied Zeek data + ## type does not support conversion to communication data, the + ## returned record's optional field will not be set. + global data: function(d: any): Broker::Data; + + ## Retrieve the type of data associated with communication data. + ## + ## d: the communication data. + ## + ## Returns: The data type associated with the communication data. 
+ ## Note that broker represents records in the same way as + ## vectors, so there is no "record" type. + global data_type: function(d: Broker::Data): Broker::DataType; + + ## Create communication data of type "set". + global set_create: function(): Broker::Data; + + ## Remove all elements within a set. + ## + ## s: the set to clear. + ## + ## Returns: always true. + global set_clear: function(s: Broker::Data) : bool; + + ## Get the number of elements within a set. + ## + ## s: the set to query. + ## + ## Returns: the number of elements in the set. + global set_size: function(s: Broker::Data): count; + + ## Check if a set contains a particular element. + ## + ## s: the set to query. + ## + ## key: the element to check for existence. + ## + ## Returns: true if the key exists in the set. + global set_contains: function(s: Broker::Data, key: any) : bool; + + ## Insert an element into a set. + ## + ## s: the set to modify. + ## + ## key: the element to insert. + ## + ## Returns: true if the key was inserted, or false if it already existed. + global set_insert: function(s: Broker::Data, key: any) : bool; + + ## Remove an element from a set. + ## + ## s: the set to modify. + ## + ## key: the element to remove. + ## + ## Returns: true if the element existed in the set and is now removed. + global set_remove: function(s: Broker::Data, key: any) : bool; + + ## Create an iterator for a set. Note that this makes a copy of the set + ## internally to ensure the iterator is always valid. + ## + ## s: the set to iterate over. + ## + ## Returns: an iterator. + global set_iterator: function(s: Broker::Data): opaque of Broker::SetIterator; + + ## Check if there are no more elements to iterate over. + ## + ## it: an iterator. + ## + ## Returns: true if there are no more elements to iterator over, i.e. + ## the iterator is one-past-the-final-element. + global set_iterator_last: function(it: opaque of Broker::SetIterator) : bool; + + ## Advance an iterator. + ## + ## it: an iterator. + ## + ## Returns: true if the iterator, after advancing, still references an element + ## in the collection. False if the iterator, after advancing, is + ## one-past-the-final-element. + global set_iterator_next: function(it: opaque of Broker::SetIterator) : bool; + + ## Retrieve the data at an iterator's current position. + ## + ## it: an iterator. + ## + ## Returns: element in the collection that the iterator currently references. + global set_iterator_value: function(it: opaque of Broker::SetIterator): Broker::Data; + + ## Create communication data of type "table". + global table_create: function(): Broker::Data; + + ## Remove all elements within a table. + ## + ## t: the table to clear. + ## + ## Returns: always true. + global table_clear: function(t: Broker::Data) : bool; + + ## Get the number of elements within a table. + ## + ## t: the table to query. + ## + ## Returns: the number of elements in the table. + global table_size: function(t: Broker::Data): count; + + ## Check if a table contains a particular key. + ## + ## t: the table to query. + ## + ## key: the key to check for existence. + ## + ## Returns: true if the key exists in the table. + global table_contains: function(t: Broker::Data, key: any) : bool; + + ## Insert a key-value pair into a table. + ## + ## t: the table to modify. + ## + ## key: the key at which to insert the value. + ## + ## val: the value to insert. + ## + ## Returns: true if the key-value pair was inserted, or false if the key + ## already existed in the table. 
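(Editor's note: as a quick illustration of the communication-data helpers described here, the following sketch builds a set by hand and inspects it; the inserted values are arbitrary examples.)

event zeek_init()
    {
    local s = Broker::set_create();
    Broker::set_insert(s, "tcp");
    Broker::set_insert(s, "udp");

    print Broker::data_type(s);            # reports the "set" data type
    print Broker::set_size(s);             # 2
    print Broker::set_contains(s, "icmp"); # F
    }
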
+ global table_insert: function(t: Broker::Data, key: any, val: any): Broker::Data; + + ## Remove a key-value pair from a table. + ## + ## t: the table to modify. + ## + ## key: the key to remove from the table. + ## + ## Returns: the value associated with the key. If the key did not exist, then + ## the optional field of the returned record is not set. + global table_remove: function(t: Broker::Data, key: any): Broker::Data; + + ## Retrieve a value from a table. + ## + ## t: the table to query. + ## + ## key: the key to lookup. + ## + ## Returns: the value associated with the key. If the key did not exist, then + ## the optional field of the returned record is not set. + global table_lookup: function(t: Broker::Data, key: any): Broker::Data; + + ## Create an iterator for a table. Note that this makes a copy of the table + ## internally to ensure the iterator is always valid. + ## + ## t: the table to iterate over. + ## + ## Returns: an iterator. + global table_iterator: function(t: Broker::Data): opaque of Broker::TableIterator; + + ## Check if there are no more elements to iterate over. + ## + ## it: an iterator. + ## + ## Returns: true if there are no more elements to iterator over, i.e. + ## the iterator is one-past-the-final-element. + global table_iterator_last: function(it: opaque of Broker::TableIterator) : bool; + + ## Advance an iterator. + ## + ## it: an iterator. + ## + ## Returns: true if the iterator, after advancing, still references an element + ## in the collection. False if the iterator, after advancing, is + ## one-past-the-final-element. + global table_iterator_next: function(it: opaque of Broker::TableIterator) : bool; + + ## Retrieve the data at an iterator's current position. + ## + ## it: an iterator. + ## + ## Returns: element in the collection that the iterator currently references. + global table_iterator_value: function(it: opaque of Broker::TableIterator): Broker::TableItem; + + ## Create communication data of type "vector". + global vector_create: function(): Broker::Data; + + ## Remove all elements within a vector. + ## + ## v: the vector to clear. + ## + ## Returns: always true. + global vector_clear: function(v: Broker::Data) : bool; + + ## Get the number of elements within a vector. + ## + ## v: the vector to query. + ## + ## Returns: the number of elements in the vector. + global vector_size: function(v: Broker::Data): count; + + ## Insert an element into a vector at a particular position, possibly displacing + ## existing elements (insertion always grows the size of the vector by one). + ## + ## v: the vector to modify. + ## + ## d: the element to insert. + ## + ## idx: the index at which to insert the data. If it is greater than the + ## current size of the vector, the element is inserted at the end. + ## + ## Returns: always true. + global vector_insert: function(v: Broker::Data, idx: count, d: any) : bool; + + ## Replace an element in a vector at a particular position. + ## + ## v: the vector to modify. + ## + ## d: the element to insert. + ## + ## idx: the index to replace. + ## + ## Returns: the value that was just evicted. If the index was larger than any + ## valid index, the optional field of the returned record is not set. + global vector_replace: function(v: Broker::Data, idx: count, d: any): Broker::Data; + + ## Remove an element from a vector at a particular position. + ## + ## v: the vector to modify. + ## + ## idx: the index to remove. + ## + ## Returns: the value that was just evicted. 
If the index was larger than any + ## valid index, the optional field of the returned record is not set. + global vector_remove: function(v: Broker::Data, idx: count): Broker::Data; + + ## Lookup an element in a vector at a particular position. + ## + ## v: the vector to query. + ## + ## idx: the index to lookup. + ## + ## Returns: the value at the index. If the index was larger than any + ## valid index, the optional field of the returned record is not set. + global vector_lookup: function(v: Broker::Data, idx: count): Broker::Data; + + ## Create an iterator for a vector. Note that this makes a copy of the vector + ## internally to ensure the iterator is always valid. + ## + ## v: the vector to iterate over. + ## + ## Returns: an iterator. + global vector_iterator: function(v: Broker::Data): opaque of Broker::VectorIterator; + + ## Check if there are no more elements to iterate over. + ## + ## it: an iterator. + ## + ## Returns: true if there are no more elements to iterator over, i.e. + ## the iterator is one-past-the-final-element. + global vector_iterator_last: function(it: opaque of Broker::VectorIterator) : bool; + + ## Advance an iterator. + ## + ## it: an iterator. + ## + ## Returns: true if the iterator, after advancing, still references an element + ## in the collection. False if the iterator, after advancing, is + ## one-past-the-final-element. + global vector_iterator_next: function(it: opaque of Broker::VectorIterator) : bool; + + ## Retrieve the data at an iterator's current position. + ## + ## it: an iterator. + ## + ## Returns: element in the collection that the iterator currently references. + global vector_iterator_value: function(it: opaque of Broker::VectorIterator): Broker::Data; + + ## Create communication data of type "record". + ## + ## sz: the number of fields in the record. + ## + ## Returns: record data, with all fields uninitialized. + global record_create: function(sz: count): Broker::Data; + + ## Get the number of fields within a record. + ## + ## r: the record to query. + ## + ## Returns: the number of fields in the record. + global record_size: function(r: Broker::Data): count; + + ## Replace a field in a record at a particular position. + ## + ## r: the record to modify. + ## + ## d: the new field value to assign. + ## + ## idx: the index to replace. + ## + ## Returns: false if the index was larger than any valid index, else true. + global record_assign: function(r: Broker::Data, idx: count, d: any) : bool; + + ## Lookup a field in a record at a particular position. + ## + ## r: the record to query. + ## + ## idx: the index to lookup. + ## + ## Returns: the value at the index. The optional field of the returned record + ## may not be set if the field of the record has no value or if the + ## index was not valid. + global record_lookup: function(r: Broker::Data, idx: count): Broker::Data; + + ## Create an iterator for a record. Note that this makes a copy of the record + ## internally to ensure the iterator is always valid. + ## + ## r: the record to iterate over. + ## + ## Returns: an iterator. + global record_iterator: function(r: Broker::Data): opaque of Broker::RecordIterator; + + ## Check if there are no more elements to iterate over. + ## + ## it: an iterator. + ## + ## Returns: true if there are no more elements to iterator over, i.e. + ## the iterator is one-past-the-final-element. + global record_iterator_last: function(it: opaque of Broker::RecordIterator) : bool; + + ## Advance an iterator. + ## + ## it: an iterator. 
+ ## + ## Returns: true if the iterator, after advancing, still references an element + ## in the collection. False if the iterator, after advancing, is + ## one-past-the-final-element. + global record_iterator_next: function(it: opaque of Broker::RecordIterator) : bool; + + ## Retrieve the data at an iterator's current position. + ## + ## it: an iterator. + ## + ## Returns: element in the collection that the iterator currently references. + global record_iterator_value: function(it: opaque of Broker::RecordIterator): Broker::Data; +} + +@load base/bif/store.bif + +module Broker; + +function create_master(name: string, b: BackendType &default = MEMORY, + options: BackendOptions &default = BackendOptions()): opaque of Broker::Store + { + return __create_master(name, b, options); + } + +function create_clone(name: string, + resync_interval: interval &default = default_clone_resync_interval, + stale_interval: interval &default = default_clone_stale_interval, + mutation_buffer_interval: interval &default = default_clone_mutation_buffer_interval): opaque of Broker::Store + { + return __create_clone(name, resync_interval, stale_interval, + mutation_buffer_interval); + } + +function close(h: opaque of Broker::Store): bool + { + return __close(h); + } + +function is_closed(h: opaque of Broker::Store): bool + { + return __is_closed(h); + } + +function store_name(h: opaque of Broker::Store): string + { + return __store_name(h); + } + +function exists(h: opaque of Broker::Store, k: any): QueryResult + { + return __exists(h, k); + } + +function get(h: opaque of Broker::Store, k: any): QueryResult + { + return __get(h, k); + } + +function put_unique(h: opaque of Broker::Store, k: any, v: any, + e: interval &default=0sec): QueryResult + { + return __put_unique(h, k, v, e); + } + +function get_index_from_value(h: opaque of Broker::Store, k: any, i: any): QueryResult + { + return __get_index_from_value(h, k, i); + } + +function keys(h: opaque of Broker::Store): QueryResult + { + return __keys(h); + } + +function put(h: opaque of Broker::Store, k: any, v: any, e: interval) : bool + { + return __put(h, k, v, e); + } + +function erase(h: opaque of Broker::Store, k: any) : bool + { + return __erase(h, k); + } + +function increment(h: opaque of Broker::Store, k: any, a: any, e: interval) : bool + { + return __increment(h, k, a, e); + } + +function decrement(h: opaque of Broker::Store, k: any, a: any, e: interval) : bool + { + return __decrement(h, k, a, e); + } + +function append(h: opaque of Broker::Store, k: any, s: string, e: interval) : bool + { + return __append(h, k, s, e); + } + +function insert_into_set(h: opaque of Broker::Store, k: any, i: any, e: interval) : bool + { + return __insert_into_set(h, k, i, e); + } + +function insert_into_table(h: opaque of Broker::Store, k: any, i: any, v: any, e: interval) : bool + { + return __insert_into_table(h, k, i, v, e); + } + +function remove_from(h: opaque of Broker::Store, k: any, i: any, e: interval) : bool + { + return __remove_from(h, k, i, e); + } + +function push(h: opaque of Broker::Store, k: any, v: any, e: interval) : bool + { + return __push(h, k, v, e); + } + +function pop(h: opaque of Broker::Store, k: any, e: interval) : bool + { + return __pop(h, k, e); + } + +function clear(h: opaque of Broker::Store) : bool + { + return __clear(h); + } + +function data_type(d: Broker::Data): Broker::DataType + { + return __data_type(d); + } + +function data(d: any): Broker::Data + { + return __data(d); + } + +function set_create(): Broker::Data + { + return 
__set_create(); + } + +function set_clear(s: Broker::Data) : bool + { + return __set_clear(s); + } + +function set_size(s: Broker::Data): count + { + return __set_size(s); + } + +function set_contains(s: Broker::Data, key: any) : bool + { + return __set_contains(s, key); + } + +function set_insert(s: Broker::Data, key: any) : bool + { + return __set_insert(s, key); + } + +function set_remove(s: Broker::Data, key: any) : bool + { + return __set_remove(s, key); + } + +function set_iterator(s: Broker::Data): opaque of Broker::SetIterator + { + return __set_iterator(s); + } + +function set_iterator_last(it: opaque of Broker::SetIterator) : bool + { + return __set_iterator_last(it); + } + +function set_iterator_next(it: opaque of Broker::SetIterator) : bool + { + return __set_iterator_next(it); + } + +function set_iterator_value(it: opaque of Broker::SetIterator): Broker::Data + { + return __set_iterator_value(it); + } + +function table_create(): Broker::Data + { + return __table_create(); + } + +function table_clear(t: Broker::Data) : bool + { + return __table_clear(t); + } + +function table_size(t: Broker::Data): count + { + return __table_size(t); + } + +function table_contains(t: Broker::Data, key: any) : bool + { + return __table_contains(t, key); + } + +function table_insert(t: Broker::Data, key: any, val: any): Broker::Data + { + return __table_insert(t, key, val); + } + +function table_remove(t: Broker::Data, key: any): Broker::Data + { + return __table_remove(t, key); + } + +function table_lookup(t: Broker::Data, key: any): Broker::Data + { + return __table_lookup(t, key); + } + +function table_iterator(t: Broker::Data): opaque of Broker::TableIterator + { + return __table_iterator(t); + } + +function table_iterator_last(it: opaque of Broker::TableIterator) : bool + { + return __table_iterator_last(it); + } + +function table_iterator_next(it: opaque of Broker::TableIterator) : bool + { + return __table_iterator_next(it); + } + +function table_iterator_value(it: opaque of Broker::TableIterator): Broker::TableItem + { + return __table_iterator_value(it); + } + +function vector_create(): Broker::Data + { + return __vector_create(); + } + +function vector_clear(v: Broker::Data) : bool + { + return __vector_clear(v); + } + +function vector_size(v: Broker::Data): count + { + return __vector_size(v); + } + +function vector_insert(v: Broker::Data, idx: count, d: any) : bool + { + return __vector_insert(v, idx, d); + } + +function vector_replace(v: Broker::Data, idx: count, d: any): Broker::Data + { + return __vector_replace(v, idx, d); + } + +function vector_remove(v: Broker::Data, idx: count): Broker::Data + { + return __vector_remove(v, idx); + } + +function vector_lookup(v: Broker::Data, idx: count): Broker::Data + { + return __vector_lookup(v, idx); + } + +function vector_iterator(v: Broker::Data): opaque of Broker::VectorIterator + { + return __vector_iterator(v); + } + +function vector_iterator_last(it: opaque of Broker::VectorIterator) : bool + { + return __vector_iterator_last(it); + } + +function vector_iterator_next(it: opaque of Broker::VectorIterator) : bool + { + return __vector_iterator_next(it); + } + +function vector_iterator_value(it: opaque of Broker::VectorIterator): Broker::Data + { + return __vector_iterator_value(it); + } + +function record_create(sz: count): Broker::Data + { + return __record_create(sz); + } + +function record_size(r: Broker::Data): count + { + return __record_size(r); + } + +function record_assign(r: Broker::Data, idx: count, d: any) : bool + { + return 
__record_assign(r, idx, d); + } + +function record_lookup(r: Broker::Data, idx: count): Broker::Data + { + return __record_lookup(r, idx); + } + +function record_iterator(r: Broker::Data): opaque of Broker::RecordIterator + { + return __record_iterator(r); + } + +function record_iterator_last(it: opaque of Broker::RecordIterator) : bool + { + return __record_iterator_last(it); + } + +function record_iterator_next(it: opaque of Broker::RecordIterator) : bool + { + return __record_iterator_next(it); + } + +function record_iterator_value(it: opaque of Broker::RecordIterator): Broker::Data + { + return __record_iterator_value(it); + } + diff --git a/scripts/base/frameworks/cluster/README b/scripts/base/frameworks/cluster/README index 1bf9907d9b..98a1cfc95b 100644 --- a/scripts/base/frameworks/cluster/README +++ b/scripts/base/frameworks/cluster/README @@ -1,2 +1,2 @@ The cluster framework provides for establishing and controlling a cluster -of Bro instances. +of Zeek instances. diff --git a/scripts/base/frameworks/cluster/__load__.bro b/scripts/base/frameworks/cluster/__load__.bro deleted file mode 100644 index 20060357a4..0000000000 --- a/scripts/base/frameworks/cluster/__load__.bro +++ /dev/null @@ -1,48 +0,0 @@ -# Load the core cluster support. -@load ./main -@load ./pools - -@if ( Cluster::is_enabled() ) - -# Give the node being started up it's peer name. -redef peer_description = Cluster::node; - -@if ( Cluster::enable_round_robin_logging ) -redef Broker::log_topic = Cluster::rr_log_topic; -@endif - -# Add a cluster prefix. -@prefixes += cluster - -# If this script isn't found anywhere, the cluster bombs out. -# Loading the cluster framework requires that a script by this name exists -# somewhere in the BROPATH. The only thing in the file should be the -# cluster definition in the :bro:id:`Cluster::nodes` variable. -@load cluster-layout - -@if ( Cluster::node in Cluster::nodes ) - -@load ./setup-connections - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) -@load ./nodes/manager -# If no logger is defined, then the manager receives logs. -@if ( Cluster::manager_is_logger ) -@load ./nodes/logger -@endif -@endif - -@if ( Cluster::local_node_type() == Cluster::LOGGER ) -@load ./nodes/logger -@endif - -@if ( Cluster::local_node_type() == Cluster::PROXY ) -@load ./nodes/proxy -@endif - -@if ( Cluster::local_node_type() == Cluster::WORKER ) -@load ./nodes/worker -@endif - -@endif -@endif diff --git a/scripts/base/frameworks/cluster/__load__.zeek b/scripts/base/frameworks/cluster/__load__.zeek new file mode 100644 index 0000000000..9effaf835a --- /dev/null +++ b/scripts/base/frameworks/cluster/__load__.zeek @@ -0,0 +1,48 @@ +# Load the core cluster support. +@load ./main +@load ./pools + +@if ( Cluster::is_enabled() ) + +# Give the node being started up it's peer name. +redef peer_description = Cluster::node; + +@if ( Cluster::enable_round_robin_logging ) +redef Broker::log_topic = Cluster::rr_log_topic; +@endif + +# Add a cluster prefix. +@prefixes += cluster + +# If this script isn't found anywhere, the cluster bombs out. +# Loading the cluster framework requires that a script by this name exists +# somewhere in the ZEEKPATH. The only thing in the file should be the +# cluster definition in the :zeek:id:`Cluster::nodes` variable. +@load cluster-layout + +@if ( Cluster::node in Cluster::nodes ) + +@load ./setup-connections + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) +@load ./nodes/manager +# If no logger is defined, then the manager receives logs. 
+@if ( Cluster::manager_is_logger ) +@load ./nodes/logger +@endif +@endif + +@if ( Cluster::local_node_type() == Cluster::LOGGER ) +@load ./nodes/logger +@endif + +@if ( Cluster::local_node_type() == Cluster::PROXY ) +@load ./nodes/proxy +@endif + +@if ( Cluster::local_node_type() == Cluster::WORKER ) +@load ./nodes/worker +@endif + +@endif +@endif diff --git a/scripts/base/frameworks/cluster/main.bro b/scripts/base/frameworks/cluster/main.bro deleted file mode 100644 index 2d492454d4..0000000000 --- a/scripts/base/frameworks/cluster/main.bro +++ /dev/null @@ -1,459 +0,0 @@ -##! A framework for establishing and controlling a cluster of Bro instances. -##! In order to use the cluster framework, a script named -##! ``cluster-layout.bro`` must exist somewhere in Bro's script search path -##! which has a cluster definition of the :bro:id:`Cluster::nodes` variable. -##! The ``CLUSTER_NODE`` environment variable or :bro:id:`Cluster::node` -##! must also be sent and the cluster framework loaded as a package like -##! ``@load base/frameworks/cluster``. - -@load base/frameworks/control -@load base/frameworks/broker - -module Cluster; - -export { - ## Whether to distribute log messages among available logging nodes. - const enable_round_robin_logging = T &redef; - - ## The topic name used for exchanging messages that are relevant to - ## logger nodes in a cluster. Used with broker-enabled cluster communication. - const logger_topic = "bro/cluster/logger" &redef; - - ## The topic name used for exchanging messages that are relevant to - ## manager nodes in a cluster. Used with broker-enabled cluster communication. - const manager_topic = "bro/cluster/manager" &redef; - - ## The topic name used for exchanging messages that are relevant to - ## proxy nodes in a cluster. Used with broker-enabled cluster communication. - const proxy_topic = "bro/cluster/proxy" &redef; - - ## The topic name used for exchanging messages that are relevant to - ## worker nodes in a cluster. Used with broker-enabled cluster communication. - const worker_topic = "bro/cluster/worker" &redef; - - ## The topic name used for exchanging messages that are relevant to - ## time machine nodes in a cluster. Used with broker-enabled cluster communication. - const time_machine_topic = "bro/cluster/time_machine" &redef; - - ## The topic prefix used for exchanging messages that are relevant to - ## a named node in a cluster. Used with broker-enabled cluster communication. - const node_topic_prefix = "bro/cluster/node/" &redef; - - ## The topic prefix used for exchanging messages that are relevant to - ## a unique node in a cluster. Used with broker-enabled cluster communication. - const nodeid_topic_prefix = "bro/cluster/nodeid/" &redef; - - ## Name of the node on which master data stores will be created if no other - ## has already been specified by the user in :bro:see:`Cluster::stores`. - ## An empty value means "use whatever name corresponds to the manager - ## node". - const default_master_node = "" &redef; - - ## The type of data store backend that will be used for all data stores if - ## no other has already been specified by the user in :bro:see:`Cluster::stores`. - const default_backend = Broker::MEMORY &redef; - - ## The type of persistent data store backend that will be used for all data - ## stores if no other has already been specified by the user in - ## :bro:see:`Cluster::stores`. This will be used when script authors call - ## :bro:see:`Cluster::create_store` with the *persistent* argument set true. 
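(Editor's note: scripts normally go through Cluster::create_store, declared just below, rather than calling Broker::create_master/create_clone directly, so that the master ends up on the intended node -- the manager by default, unless overridden via Cluster::stores. A minimal sketch with a made-up store name:)

module Example;

global host_store: Cluster::StoreInfo;

event zeek_init()
    {
    # Creates the master on the designated node and clones elsewhere;
    # also works in a standalone (non-cluster) run.
    host_store = Cluster::create_store("example/known_hosts");
    }
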
- const default_persistent_backend = Broker::SQLITE &redef; - - ## Setting a default dir will, for persistent backends that have not - ## been given an explicit file path via :bro:see:`Cluster::stores`, - ## automatically create a path within this dir that is based on the name of - ## the data store. - const default_store_dir = "" &redef; - - ## Information regarding a cluster-enabled data store. - type StoreInfo: record { - ## The name of the data store. - name: string &optional; - ## The store handle. - store: opaque of Broker::Store &optional; - ## The name of the cluster node on which the master version of the data - ## store resides. - master_node: string &default=default_master_node; - ## Whether the data store is the master version or a clone. - master: bool &default=F; - ## The type of backend used for storing data. - backend: Broker::BackendType &default=default_backend; - ## Parameters used for configuring the backend. - options: Broker::BackendOptions &default=Broker::BackendOptions(); - ## A resync/reconnect interval to pass through to - ## :bro:see:`Broker::create_clone`. - clone_resync_interval: interval &default=Broker::default_clone_resync_interval; - ## A staleness duration to pass through to - ## :bro:see:`Broker::create_clone`. - clone_stale_interval: interval &default=Broker::default_clone_stale_interval; - ## A mutation buffer interval to pass through to - ## :bro:see:`Broker::create_clone`. - clone_mutation_buffer_interval: interval &default=Broker::default_clone_mutation_buffer_interval; - }; - - ## A table of cluster-enabled data stores that have been created, indexed - ## by their name. This table will be populated automatically by - ## :bro:see:`Cluster::create_store`, but if you need to customize - ## the options related to a particular data store, you may redef this - ## table. Calls to :bro:see:`Cluster::create_store` will first check - ## the table for an entry of the same name and, if found, will use the - ## predefined options there when setting up the store. - global stores: table[string] of StoreInfo &default=StoreInfo() &redef; - - ## Sets up a cluster-enabled data store. They will also still properly - ## function for uses that are not operating a cluster. - ## - ## name: the name of the data store to create. - ## - ## persistent: whether the data store must be persistent. - ## - ## Returns: the store's information. For master stores, the store will be - ## ready to use immediately. For clones, the store field will not - ## be set until the node containing the master store has connected. - global create_store: function(name: string, persistent: bool &default=F): StoreInfo; - - ## The cluster logging stream identifier. - redef enum Log::ID += { LOG }; - - ## The record type which contains the column fields of the cluster log. - type Info: record { - ## The time at which a cluster message was generated. - ts: time; - ## The name of the node that is creating the log record. - node: string; - ## A message indicating information about the cluster's operation. - message: string; - } &log; - - ## Types of nodes that are allowed to participate in the cluster - ## configuration. - type NodeType: enum { - ## A dummy node type indicating the local node is not operating - ## within a cluster. - NONE, - ## A node type which is allowed to view/manipulate the configuration - ## of other nodes in the cluster. - CONTROL, - ## A node type responsible for log management. - LOGGER, - ## A node type responsible for policy management. 
- MANAGER, - ## A node type for relaying worker node communication and synchronizing - ## worker node state. - PROXY, - ## The node type doing all the actual traffic analysis. - WORKER, - ## A node acting as a traffic recorder using the - ## `Time Machine `_ - ## software. - TIME_MACHINE, - }; - - ## Record type to indicate a node in a cluster. - type Node: record { - ## Identifies the type of cluster node in this node's configuration. - node_type: NodeType; - ## The IP address of the cluster node. - ip: addr; - ## If the *ip* field is a non-global IPv6 address, this field - ## can specify a particular :rfc:`4007` ``zone_id``. - zone_id: string &default=""; - ## The port that this node will listen on for peer connections. - p: port; - ## Identifier for the interface a worker is sniffing. - interface: string &optional; - ## Name of the manager node this node uses. For workers and proxies. - manager: string &optional; - ## Name of a time machine node with which this node connects. - time_machine: string &optional; - ## A unique identifier assigned to the node by the broker framework. - ## This field is only set while a node is connected. - id: string &optional; - }; - - ## This function can be called at any time to determine if the cluster - ## framework is being enabled for this run. - ## - ## Returns: True if :bro:id:`Cluster::node` has been set. - global is_enabled: function(): bool; - - ## This function can be called at any time to determine what type of - ## cluster node the current Bro instance is going to be acting as. - ## If :bro:id:`Cluster::is_enabled` returns false, then - ## :bro:enum:`Cluster::NONE` is returned. - ## - ## Returns: The :bro:type:`Cluster::NodeType` the calling node acts as. - global local_node_type: function(): NodeType; - - ## This gives the value for the number of workers currently connected to, - ## and it's maintained internally by the cluster framework. It's - ## primarily intended for use by managers to find out how many workers - ## should be responding to requests. - global worker_count: count = 0; - - ## The cluster layout definition. This should be placed into a filter - ## named cluster-layout.bro somewhere in the BROPATH. It will be - ## automatically loaded if the CLUSTER_NODE environment variable is set. - ## Note that BroControl handles all of this automatically. - ## The table is typically indexed by node names/labels (e.g. "manager" - ## or "worker-1"). - const nodes: table[string] of Node = {} &redef; - - ## Indicates whether or not the manager will act as the logger and receive - ## logs. This value should be set in the cluster-layout.bro script (the - ## value should be true only if no logger is specified in Cluster::nodes). - ## Note that BroControl handles this automatically. - const manager_is_logger = T &redef; - - ## This is usually supplied on the command line for each instance - ## of the cluster that is started up. - const node = getenv("CLUSTER_NODE") &redef; - - ## Interval for retrying failed connections between cluster nodes. - ## If set, the BRO_DEFAULT_CONNECT_RETRY (given in number of seconds) - ## overrides this option. - const retry_interval = 1min &redef; - - ## When using broker-enabled cluster framework, nodes broadcast this event - ## to exchange their user-defined name along with a string that uniquely - ## identifies it for the duration of its lifetime. This string may change - ## if the node dies and has to reconnect later. 
- global hello: event(name: string, id: string); - - ## When using broker-enabled cluster framework, this event will be emitted - ## locally whenever a cluster node connects or reconnects. - global node_up: event(name: string, id: string); - - ## When using broker-enabled cluster framework, this event will be emitted - ## locally whenever a connected cluster node becomes disconnected. - global node_down: event(name: string, id: string); - - ## Write a message to the cluster logging stream. - global log: function(msg: string); - - ## Retrieve the topic associated with a specific node in the cluster. - ## - ## name: the name of the cluster node (e.g. "manager"). - ## - ## Returns: a topic string that may used to send a message exclusively to - ## a given cluster node. - global node_topic: function(name: string): string; - - ## Retrieve the topic associated with a specific node in the cluster. - ## - ## id: the id of the cluster node (from :bro:see:`Broker::EndpointInfo` - ## or :bro:see:`Broker::node_id`. - ## - ## Returns: a topic string that may used to send a message exclusively to - ## a given cluster node. - global nodeid_topic: function(id: string): string; -} - -global active_worker_ids: set[string] = set(); - -type NamedNode: record { - name: string; - node: Node; -}; - -function nodes_with_type(node_type: NodeType): vector of NamedNode - { - local rval: vector of NamedNode = vector(); - local names: vector of string = vector(); - - for ( name in Cluster::nodes ) - names += name; - - names = sort(names, strcmp); - - for ( i in names ) - { - name = names[i]; - local n = Cluster::nodes[name]; - - if ( n$node_type != node_type ) - next; - - rval += NamedNode($name=name, $node=n); - } - - return rval; - } - -function is_enabled(): bool - { - return (node != ""); - } - -function local_node_type(): NodeType - { - return is_enabled() ? nodes[node]$node_type : NONE; - } - -function node_topic(name: string): string - { - return node_topic_prefix + name; - } - -function nodeid_topic(id: string): string - { - return node_topic_prefix + id; - } - -event Cluster::hello(name: string, id: string) &priority=10 - { - if ( name !in nodes ) - { - Reporter::error(fmt("Got Cluster::hello msg from unexpected node: %s", name)); - return; - } - - local n = nodes[name]; - - if ( n?$id ) - { - if ( n$id != id ) - Reporter::error(fmt("Got Cluster::hello msg from duplicate node:%s", - name)); - } - else - event Cluster::node_up(name, id); - - n$id = id; - Cluster::log(fmt("got hello from %s (%s)", name, id)); - - if ( n$node_type == WORKER ) - { - add active_worker_ids[id]; - worker_count = |active_worker_ids|; - } - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) &priority=10 - { - if ( ! Cluster::is_enabled() ) - return; - - local e = Broker::make_event(Cluster::hello, node, Broker::node_id()); - Broker::publish(nodeid_topic(endpoint$id), e); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) &priority=10 - { - for ( node_name, n in nodes ) - { - if ( n?$id && n$id == endpoint$id ) - { - Cluster::log(fmt("node down: %s", node_name)); - delete n$id; - - if ( n$node_type == WORKER ) - { - delete active_worker_ids[endpoint$id]; - worker_count = |active_worker_ids|; - } - - event Cluster::node_down(node_name, endpoint$id); - break; - } - } - } - -event bro_init() &priority=5 - { - # If a node is given, but it's an unknown name we need to fail. 
- if ( node != "" && node !in nodes ) - { - Reporter::error(fmt("'%s' is not a valid node in the Cluster::nodes configuration", node)); - terminate(); - } - - Log::create_stream(Cluster::LOG, [$columns=Info, $path="cluster"]); - } - -function create_store(name: string, persistent: bool &default=F): Cluster::StoreInfo - { - local info = stores[name]; - info$name = name; - - if ( Cluster::default_store_dir != "" ) - { - local default_options = Broker::BackendOptions(); - local path = Cluster::default_store_dir + "/" + name; - - if ( info$options$sqlite$path == default_options$sqlite$path ) - info$options$sqlite$path = path + ".sqlite"; - - if ( info$options$rocksdb$path == default_options$rocksdb$path ) - info$options$rocksdb$path = path + ".rocksdb"; - } - - if ( persistent ) - { - switch ( info$backend ) { - case Broker::MEMORY: - info$backend = Cluster::default_persistent_backend; - break; - case Broker::SQLITE: - fallthrough; - case Broker::ROCKSDB: - # no-op: user already asked for a specific persistent backend. - break; - default: - Reporter::error(fmt("unhandled data store type: %s", info$backend)); - break; - } - } - - if ( ! Cluster::is_enabled() ) - { - if ( info?$store ) - { - Reporter::warning(fmt("duplicate cluster store creation for %s", name)); - return info; - } - - info$store = Broker::create_master(name, info$backend, info$options); - info$master = T; - stores[name] = info; - return info; - } - - if ( info$master_node == "" ) - { - local mgr_nodes = nodes_with_type(Cluster::MANAGER); - - if ( |mgr_nodes| == 0 ) - Reporter::fatal(fmt("empty master node name for cluster store " + - "'%s', but there's no manager node to default", - name)); - - info$master_node = mgr_nodes[0]$name; - } - else if ( info$master_node !in Cluster::nodes ) - Reporter::fatal(fmt("master node '%s' for cluster store '%s' does not exist", - info$master_node, name)); - - if ( Cluster::node == info$master_node ) - { - info$store = Broker::create_master(name, info$backend, info$options); - info$master = T; - stores[name] = info; - Cluster::log(fmt("created master store: %s", name)); - return info; - } - - info$master = F; - stores[name] = info; - info$store = Broker::create_clone(info$name, - info$clone_resync_interval, - info$clone_stale_interval, - info$clone_mutation_buffer_interval); - Cluster::log(fmt("created clone store: %s", info$name)); - return info; - } - -function log(msg: string) - { - Log::write(Cluster::LOG, [$ts = network_time(), $node = node, $message = msg]); - } diff --git a/scripts/base/frameworks/cluster/main.zeek b/scripts/base/frameworks/cluster/main.zeek new file mode 100644 index 0000000000..9040c663e1 --- /dev/null +++ b/scripts/base/frameworks/cluster/main.zeek @@ -0,0 +1,459 @@ +##! A framework for establishing and controlling a cluster of Zeek instances. +##! In order to use the cluster framework, a script named +##! ``cluster-layout.zeek`` must exist somewhere in Zeek's script search path +##! which has a cluster definition of the :zeek:id:`Cluster::nodes` variable. +##! The ``CLUSTER_NODE`` environment variable or :zeek:id:`Cluster::node` +##! must also be sent and the cluster framework loaded as a package like +##! ``@load base/frameworks/cluster``. + +@load base/frameworks/control +@load base/frameworks/broker + +module Cluster; + +export { + ## Whether to distribute log messages among available logging nodes. + const enable_round_robin_logging = T &redef; + + ## The topic name used for exchanging messages that are relevant to + ## logger nodes in a cluster. 
Used with broker-enabled cluster communication. + const logger_topic = "zeek/cluster/logger" &redef; + + ## The topic name used for exchanging messages that are relevant to + ## manager nodes in a cluster. Used with broker-enabled cluster communication. + const manager_topic = "zeek/cluster/manager" &redef; + + ## The topic name used for exchanging messages that are relevant to + ## proxy nodes in a cluster. Used with broker-enabled cluster communication. + const proxy_topic = "zeek/cluster/proxy" &redef; + + ## The topic name used for exchanging messages that are relevant to + ## worker nodes in a cluster. Used with broker-enabled cluster communication. + const worker_topic = "zeek/cluster/worker" &redef; + + ## The topic name used for exchanging messages that are relevant to + ## time machine nodes in a cluster. Used with broker-enabled cluster communication. + const time_machine_topic = "zeek/cluster/time_machine" &redef; + + ## The topic prefix used for exchanging messages that are relevant to + ## a named node in a cluster. Used with broker-enabled cluster communication. + const node_topic_prefix = "zeek/cluster/node/" &redef; + + ## The topic prefix used for exchanging messages that are relevant to + ## a unique node in a cluster. Used with broker-enabled cluster communication. + const nodeid_topic_prefix = "zeek/cluster/nodeid/" &redef; + + ## Name of the node on which master data stores will be created if no other + ## has already been specified by the user in :zeek:see:`Cluster::stores`. + ## An empty value means "use whatever name corresponds to the manager + ## node". + const default_master_node = "" &redef; + + ## The type of data store backend that will be used for all data stores if + ## no other has already been specified by the user in :zeek:see:`Cluster::stores`. + const default_backend = Broker::MEMORY &redef; + + ## The type of persistent data store backend that will be used for all data + ## stores if no other has already been specified by the user in + ## :zeek:see:`Cluster::stores`. This will be used when script authors call + ## :zeek:see:`Cluster::create_store` with the *persistent* argument set true. + const default_persistent_backend = Broker::SQLITE &redef; + + ## Setting a default dir will, for persistent backends that have not + ## been given an explicit file path via :zeek:see:`Cluster::stores`, + ## automatically create a path within this dir that is based on the name of + ## the data store. + const default_store_dir = "" &redef; + + ## Information regarding a cluster-enabled data store. + type StoreInfo: record { + ## The name of the data store. + name: string &optional; + ## The store handle. + store: opaque of Broker::Store &optional; + ## The name of the cluster node on which the master version of the data + ## store resides. + master_node: string &default=default_master_node; + ## Whether the data store is the master version or a clone. + master: bool &default=F; + ## The type of backend used for storing data. + backend: Broker::BackendType &default=default_backend; + ## Parameters used for configuring the backend. + options: Broker::BackendOptions &default=Broker::BackendOptions(); + ## A resync/reconnect interval to pass through to + ## :zeek:see:`Broker::create_clone`. + clone_resync_interval: interval &default=Broker::default_clone_resync_interval; + ## A staleness duration to pass through to + ## :zeek:see:`Broker::create_clone`. 
+ clone_stale_interval: interval &default=Broker::default_clone_stale_interval; + ## A mutation buffer interval to pass through to + ## :zeek:see:`Broker::create_clone`. + clone_mutation_buffer_interval: interval &default=Broker::default_clone_mutation_buffer_interval; + }; + + ## A table of cluster-enabled data stores that have been created, indexed + ## by their name. This table will be populated automatically by + ## :zeek:see:`Cluster::create_store`, but if you need to customize + ## the options related to a particular data store, you may redef this + ## table. Calls to :zeek:see:`Cluster::create_store` will first check + ## the table for an entry of the same name and, if found, will use the + ## predefined options there when setting up the store. + global stores: table[string] of StoreInfo &default=StoreInfo() &redef; + + ## Sets up a cluster-enabled data store. They will also still properly + ## function for uses that are not operating a cluster. + ## + ## name: the name of the data store to create. + ## + ## persistent: whether the data store must be persistent. + ## + ## Returns: the store's information. For master stores, the store will be + ## ready to use immediately. For clones, the store field will not + ## be set until the node containing the master store has connected. + global create_store: function(name: string, persistent: bool &default=F): StoreInfo; + + ## The cluster logging stream identifier. + redef enum Log::ID += { LOG }; + + ## The record type which contains the column fields of the cluster log. + type Info: record { + ## The time at which a cluster message was generated. + ts: time; + ## The name of the node that is creating the log record. + node: string; + ## A message indicating information about the cluster's operation. + message: string; + } &log; + + ## Types of nodes that are allowed to participate in the cluster + ## configuration. + type NodeType: enum { + ## A dummy node type indicating the local node is not operating + ## within a cluster. + NONE, + ## A node type which is allowed to view/manipulate the configuration + ## of other nodes in the cluster. + CONTROL, + ## A node type responsible for log management. + LOGGER, + ## A node type responsible for policy management. + MANAGER, + ## A node type for relaying worker node communication and synchronizing + ## worker node state. + PROXY, + ## The node type doing all the actual traffic analysis. + WORKER, + ## A node acting as a traffic recorder using the + ## `Time Machine `_ + ## software. + TIME_MACHINE, + }; + + ## Record type to indicate a node in a cluster. + type Node: record { + ## Identifies the type of cluster node in this node's configuration. + node_type: NodeType; + ## The IP address of the cluster node. + ip: addr; + ## If the *ip* field is a non-global IPv6 address, this field + ## can specify a particular :rfc:`4007` ``zone_id``. + zone_id: string &default=""; + ## The port that this node will listen on for peer connections. + p: port; + ## Identifier for the interface a worker is sniffing. + interface: string &optional; + ## Name of the manager node this node uses. For workers and proxies. + manager: string &optional; + ## Name of a time machine node with which this node connects. + time_machine: string &optional; + ## A unique identifier assigned to the node by the broker framework. + ## This field is only set while a node is connected. + id: string &optional; + }; + + ## This function can be called at any time to determine if the cluster + ## framework is being enabled for this run. 
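Since Cluster::create_store and the StoreInfo record are central to the data-store API added above, a brief usage sketch may help; the module name, store name, and the choice to key on originator addresses are invented for illustration:

    @load base/frameworks/cluster

    module StoreExample;  # hypothetical module name

    global seen_hosts: Cluster::StoreInfo;

    event zeek_init()
    	{
    	# On the designated master node this creates the store; elsewhere it attaches a clone.
    	seen_hosts = Cluster::create_store("store-example/seen-hosts");
    	}

    event connection_established(c: connection)
    	{
    	# The store handle is only set once the master store is reachable.
    	if ( seen_hosts?$store )
    		Broker::put(seen_hosts$store, cat(c$id$orig_h), network_time());
    	}
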
+	##
+	## Returns: True if :zeek:id:`Cluster::node` has been set.
+	global is_enabled: function(): bool;
+
+	## This function can be called at any time to determine what type of
+	## cluster node the current Zeek instance is going to be acting as.
+	## If :zeek:id:`Cluster::is_enabled` returns false, then
+	## :zeek:enum:`Cluster::NONE` is returned.
+	##
+	## Returns: The :zeek:type:`Cluster::NodeType` the calling node acts as.
+	global local_node_type: function(): NodeType;
+
+	## This gives the number of workers currently connected to the cluster,
+	## and it's maintained internally by the cluster framework. It's
+	## primarily intended for use by managers to find out how many workers
+	## should be responding to requests.
+	global worker_count: count = 0;
+
+	## The cluster layout definition. This should be placed into a file
+	## named cluster-layout.zeek somewhere in the ZEEKPATH. It will be
+	## automatically loaded if the CLUSTER_NODE environment variable is set.
+	## Note that ZeekControl handles all of this automatically.
+	## The table is typically indexed by node names/labels (e.g. "manager"
+	## or "worker-1").
+	const nodes: table[string] of Node = {} &redef;
+
+	## Indicates whether or not the manager will act as the logger and receive
+	## logs. This value should be set in the cluster-layout.zeek script (the
+	## value should be true only if no logger is specified in Cluster::nodes).
+	## Note that ZeekControl handles this automatically.
+	const manager_is_logger = T &redef;
+
+	## This is usually supplied on the command line for each instance
+	## of the cluster that is started up.
+	const node = getenv("CLUSTER_NODE") &redef;
+
+	## Interval for retrying failed connections between cluster nodes.
+	## If set, the ZEEK_DEFAULT_CONNECT_RETRY (given in number of seconds)
+	## environment variable overrides this option.
+	const retry_interval = 1min &redef;
+
+	## When using broker-enabled cluster framework, nodes broadcast this event
+	## to exchange their user-defined name along with a string that uniquely
+	## identifies it for the duration of its lifetime. This string may change
+	## if the node dies and has to reconnect later.
+	global hello: event(name: string, id: string);
+
+	## When using broker-enabled cluster framework, this event will be emitted
+	## locally whenever a cluster node connects or reconnects.
+	global node_up: event(name: string, id: string);
+
+	## When using broker-enabled cluster framework, this event will be emitted
+	## locally whenever a connected cluster node becomes disconnected.
+	global node_down: event(name: string, id: string);
+
+	## Write a message to the cluster logging stream.
+	global log: function(msg: string);
+
+	## Retrieve the topic associated with a specific node in the cluster.
+	##
+	## name: the name of the cluster node (e.g. "manager").
+	##
+	## Returns: a topic string that may be used to send a message exclusively to
+	## a given cluster node.
+	global node_topic: function(name: string): string;
+
+	## Retrieve the topic associated with a specific node in the cluster.
+	##
+	## id: the id of the cluster node (from :zeek:see:`Broker::EndpointInfo`
+	## or :zeek:see:`Broker::node_id`).
+	##
+	## Returns: a topic string that may be used to send a message exclusively to
+	## a given cluster node.
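As a concrete illustration of the Cluster::nodes and Cluster::manager_is_logger documentation above, a hand-written cluster-layout.zeek could look roughly like the following; the addresses, ports, and node names are invented, and ZeekControl normally generates this file:

    # cluster-layout.zeek -- illustrative only
    redef Cluster::manager_is_logger = F;  # a dedicated logger is defined below

    redef Cluster::nodes = {
    	["manager"]  = [$node_type=Cluster::MANAGER, $ip=10.0.0.1, $p=47761/tcp],
    	["logger-1"] = [$node_type=Cluster::LOGGER,  $ip=10.0.0.1, $p=47762/tcp],
    	["proxy-1"]  = [$node_type=Cluster::PROXY,   $ip=10.0.0.1, $p=47763/tcp,
    	                $manager="manager"],
    	["worker-1"] = [$node_type=Cluster::WORKER,  $ip=10.0.0.2, $p=47764/tcp,
    	                $manager="manager", $interface="eth0"]
    };

Each process is then started with the CLUSTER_NODE environment variable set to its name (e.g. "worker-1"), which is what Cluster::node picks up.
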
+ global nodeid_topic: function(id: string): string; +} + +global active_worker_ids: set[string] = set(); + +type NamedNode: record { + name: string; + node: Node; +}; + +function nodes_with_type(node_type: NodeType): vector of NamedNode + { + local rval: vector of NamedNode = vector(); + local names: vector of string = vector(); + + for ( name in Cluster::nodes ) + names += name; + + names = sort(names, strcmp); + + for ( i in names ) + { + name = names[i]; + local n = Cluster::nodes[name]; + + if ( n$node_type != node_type ) + next; + + rval += NamedNode($name=name, $node=n); + } + + return rval; + } + +function is_enabled(): bool + { + return (node != ""); + } + +function local_node_type(): NodeType + { + return is_enabled() ? nodes[node]$node_type : NONE; + } + +function node_topic(name: string): string + { + return node_topic_prefix + name; + } + +function nodeid_topic(id: string): string + { + return node_topic_prefix + id; + } + +event Cluster::hello(name: string, id: string) &priority=10 + { + if ( name !in nodes ) + { + Reporter::error(fmt("Got Cluster::hello msg from unexpected node: %s", name)); + return; + } + + local n = nodes[name]; + + if ( n?$id ) + { + if ( n$id != id ) + Reporter::error(fmt("Got Cluster::hello msg from duplicate node:%s", + name)); + } + else + event Cluster::node_up(name, id); + + n$id = id; + Cluster::log(fmt("got hello from %s (%s)", name, id)); + + if ( n$node_type == WORKER ) + { + add active_worker_ids[id]; + worker_count = |active_worker_ids|; + } + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) &priority=10 + { + if ( ! Cluster::is_enabled() ) + return; + + local e = Broker::make_event(Cluster::hello, node, Broker::node_id()); + Broker::publish(nodeid_topic(endpoint$id), e); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) &priority=10 + { + for ( node_name, n in nodes ) + { + if ( n?$id && n$id == endpoint$id ) + { + Cluster::log(fmt("node down: %s", node_name)); + delete n$id; + + if ( n$node_type == WORKER ) + { + delete active_worker_ids[endpoint$id]; + worker_count = |active_worker_ids|; + } + + event Cluster::node_down(node_name, endpoint$id); + break; + } + } + } + +event zeek_init() &priority=5 + { + # If a node is given, but it's an unknown name we need to fail. + if ( node != "" && node !in nodes ) + { + Reporter::error(fmt("'%s' is not a valid node in the Cluster::nodes configuration", node)); + terminate(); + } + + Log::create_stream(Cluster::LOG, [$columns=Info, $path="cluster"]); + } + +function create_store(name: string, persistent: bool &default=F): Cluster::StoreInfo + { + local info = stores[name]; + info$name = name; + + if ( Cluster::default_store_dir != "" ) + { + local default_options = Broker::BackendOptions(); + local path = Cluster::default_store_dir + "/" + name; + + if ( info$options$sqlite$path == default_options$sqlite$path ) + info$options$sqlite$path = path + ".sqlite"; + + if ( info$options$rocksdb$path == default_options$rocksdb$path ) + info$options$rocksdb$path = path + ".rocksdb"; + } + + if ( persistent ) + { + switch ( info$backend ) { + case Broker::MEMORY: + info$backend = Cluster::default_persistent_backend; + break; + case Broker::SQLITE: + fallthrough; + case Broker::ROCKSDB: + # no-op: user already asked for a specific persistent backend. + break; + default: + Reporter::error(fmt("unhandled data store type: %s", info$backend)); + break; + } + } + + if ( ! 
Cluster::is_enabled() ) + { + if ( info?$store ) + { + Reporter::warning(fmt("duplicate cluster store creation for %s", name)); + return info; + } + + info$store = Broker::create_master(name, info$backend, info$options); + info$master = T; + stores[name] = info; + return info; + } + + if ( info$master_node == "" ) + { + local mgr_nodes = nodes_with_type(Cluster::MANAGER); + + if ( |mgr_nodes| == 0 ) + Reporter::fatal(fmt("empty master node name for cluster store " + + "'%s', but there's no manager node to default", + name)); + + info$master_node = mgr_nodes[0]$name; + } + else if ( info$master_node !in Cluster::nodes ) + Reporter::fatal(fmt("master node '%s' for cluster store '%s' does not exist", + info$master_node, name)); + + if ( Cluster::node == info$master_node ) + { + info$store = Broker::create_master(name, info$backend, info$options); + info$master = T; + stores[name] = info; + Cluster::log(fmt("created master store: %s", name)); + return info; + } + + info$master = F; + stores[name] = info; + info$store = Broker::create_clone(info$name, + info$clone_resync_interval, + info$clone_stale_interval, + info$clone_mutation_buffer_interval); + Cluster::log(fmt("created clone store: %s", info$name)); + return info; + } + +function log(msg: string) + { + Log::write(Cluster::LOG, [$ts = network_time(), $node = node, $message = msg]); + } diff --git a/scripts/base/frameworks/cluster/nodes/logger.bro b/scripts/base/frameworks/cluster/nodes/logger.bro deleted file mode 100644 index 39dcb751df..0000000000 --- a/scripts/base/frameworks/cluster/nodes/logger.bro +++ /dev/null @@ -1,29 +0,0 @@ -##! This is the core Bro script to support the notion of a cluster logger. -##! -##! The logger is passive (other Bro instances connect to us), and once -##! connected the logger receives logs from other Bro instances. -##! This script will be automatically loaded if necessary based on the -##! type of node being started. - -##! This is where the cluster logger sets it's specific settings for other -##! frameworks and in the core. - -@prefixes += cluster-logger - -## Turn on local logging. -redef Log::enable_local_logging = T; - -## Turn off remote logging since this is the logger and should only log here. -redef Log::enable_remote_logging = F; - -## Log rotation interval. -redef Log::default_rotation_interval = 1 hrs; - -## Alarm summary mail interval. -redef Log::default_mail_alarms_interval = 24 hrs; - -## Use the cluster's archive logging script. -redef Log::default_rotation_postprocessor_cmd = "archive-log"; - -## We're processing essentially *only* remote events. -redef max_remote_events_processed = 10000; diff --git a/scripts/base/frameworks/cluster/nodes/logger.zeek b/scripts/base/frameworks/cluster/nodes/logger.zeek new file mode 100644 index 0000000000..6fb5d09208 --- /dev/null +++ b/scripts/base/frameworks/cluster/nodes/logger.zeek @@ -0,0 +1,26 @@ +##! This is the core Zeek script to support the notion of a cluster logger. +##! +##! The logger is passive (other Zeek instances connect to us), and once +##! connected the logger receives logs from other Zeek instances. +##! This script will be automatically loaded if necessary based on the +##! type of node being started. + +##! This is where the cluster logger sets it's specific settings for other +##! frameworks and in the core. + +@prefixes += cluster-logger + +## Turn on local logging. +redef Log::enable_local_logging = T; + +## Turn off remote logging since this is the logger and should only log here. 
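Tying together pieces defined just above (Cluster::log, Cluster::worker_count, and the Cluster::node_up event), a small assumed example of manager-side bookkeeping; the log message text is arbitrary:

    @load base/frameworks/cluster

    @if ( Cluster::local_node_type() == Cluster::MANAGER )
    event Cluster::node_up(name: string, id: string)
    	{
    	Cluster::log(fmt("%s is up; %d worker(s) now connected",
    	                 name, Cluster::worker_count));
    	}
    @endif
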
+redef Log::enable_remote_logging = F; + +## Log rotation interval. +redef Log::default_rotation_interval = 1 hrs; + +## Alarm summary mail interval. +redef Log::default_mail_alarms_interval = 24 hrs; + +## Use the cluster's archive logging script. +redef Log::default_rotation_postprocessor_cmd = "archive-log"; diff --git a/scripts/base/frameworks/cluster/nodes/manager.bro b/scripts/base/frameworks/cluster/nodes/manager.bro deleted file mode 100644 index e54b090522..0000000000 --- a/scripts/base/frameworks/cluster/nodes/manager.bro +++ /dev/null @@ -1,26 +0,0 @@ -##! This is the core Bro script to support the notion of a cluster manager. -##! -##! The manager is passive (the workers connect to us), and once connected -##! the manager registers for the events on the workers that are needed -##! to get the desired data from the workers. This script will be -##! automatically loaded if necessary based on the type of node being started. - -##! This is where the cluster manager sets it's specific settings for other -##! frameworks and in the core. - -@prefixes += cluster-manager - -## Don't do any local logging since the logger handles writing logs. -redef Log::enable_local_logging = F; - -## Turn on remote logging since the logger handles writing logs. -redef Log::enable_remote_logging = T; - -## Log rotation interval. -redef Log::default_rotation_interval = 24 hrs; - -## Use the cluster's delete-log script. -redef Log::default_rotation_postprocessor_cmd = "delete-log"; - -## We're processing essentially *only* remote events. -redef max_remote_events_processed = 10000; diff --git a/scripts/base/frameworks/cluster/nodes/manager.zeek b/scripts/base/frameworks/cluster/nodes/manager.zeek new file mode 100644 index 0000000000..945d952f31 --- /dev/null +++ b/scripts/base/frameworks/cluster/nodes/manager.zeek @@ -0,0 +1,23 @@ +##! This is the core Zeek script to support the notion of a cluster manager. +##! +##! The manager is passive (the workers connect to us), and once connected +##! the manager registers for the events on the workers that are needed +##! to get the desired data from the workers. This script will be +##! automatically loaded if necessary based on the type of node being started. + +##! This is where the cluster manager sets it's specific settings for other +##! frameworks and in the core. + +@prefixes += cluster-manager + +## Don't do any local logging since the logger handles writing logs. +redef Log::enable_local_logging = F; + +## Turn on remote logging since the logger handles writing logs. +redef Log::enable_remote_logging = T; + +## Log rotation interval. +redef Log::default_rotation_interval = 24 hrs; + +## Use the cluster's delete-log script. +redef Log::default_rotation_postprocessor_cmd = "delete-log"; diff --git a/scripts/base/frameworks/cluster/nodes/proxy.bro b/scripts/base/frameworks/cluster/nodes/proxy.bro deleted file mode 100644 index e38a5e9109..0000000000 --- a/scripts/base/frameworks/cluster/nodes/proxy.bro +++ /dev/null @@ -1,22 +0,0 @@ -##! Redefines the options common to all proxy nodes within a Bro cluster. -##! In particular, proxies are not meant to produce logs locally and they -##! do not forward events anywhere, they mainly synchronize state between -##! worker nodes. - -@prefixes += cluster-proxy - -## The proxy only syncs state; does not forward events. -redef forward_remote_events = F; -redef forward_remote_state_changes = T; - -## Don't do any local logging. -redef Log::enable_local_logging = F; - -## Make sure that remote logging is enabled. 
-redef Log::enable_remote_logging = T; - -redef Log::default_rotation_interval = 24hrs; - -## Use the cluster's delete-log script. -redef Log::default_rotation_postprocessor_cmd = "delete-log"; - diff --git a/scripts/base/frameworks/cluster/nodes/proxy.zeek b/scripts/base/frameworks/cluster/nodes/proxy.zeek new file mode 100644 index 0000000000..dec0d69f6b --- /dev/null +++ b/scripts/base/frameworks/cluster/nodes/proxy.zeek @@ -0,0 +1,18 @@ +##! Redefines the options common to all proxy nodes within a Zeek cluster. +##! In particular, proxies are not meant to produce logs locally and they +##! do not forward events anywhere, they mainly synchronize state between +##! worker nodes. + +@prefixes += cluster-proxy + +## Don't do any local logging. +redef Log::enable_local_logging = F; + +## Make sure that remote logging is enabled. +redef Log::enable_remote_logging = T; + +redef Log::default_rotation_interval = 24hrs; + +## Use the cluster's delete-log script. +redef Log::default_rotation_postprocessor_cmd = "delete-log"; + diff --git a/scripts/base/frameworks/cluster/nodes/worker.bro b/scripts/base/frameworks/cluster/nodes/worker.bro deleted file mode 100644 index 826df848f7..0000000000 --- a/scripts/base/frameworks/cluster/nodes/worker.bro +++ /dev/null @@ -1,24 +0,0 @@ -##! Redefines some options common to all worker nodes within a Bro cluster. -##! In particular, worker nodes do not produce logs locally, instead they -##! send them off to a logger node for processing. - -@prefixes += cluster-worker - -## Don't do any local logging. -redef Log::enable_local_logging = F; - -## Make sure that remote logging is enabled. -redef Log::enable_remote_logging = T; - -redef Log::default_rotation_interval = 24hrs; - -## Use the cluster's delete-log script. -redef Log::default_rotation_postprocessor_cmd = "delete-log"; - -@load misc/trim-trace-file -## Record all packets into trace file. -## -## Note that this only indicates that *if* we are recording packets, we want all -## of them (rather than just those the core deems sufficiently important). -## Setting this does not turn recording on. Use '-w ' for that. -redef record_all_packets = T; diff --git a/scripts/base/frameworks/cluster/nodes/worker.zeek b/scripts/base/frameworks/cluster/nodes/worker.zeek new file mode 100644 index 0000000000..02c22fb127 --- /dev/null +++ b/scripts/base/frameworks/cluster/nodes/worker.zeek @@ -0,0 +1,24 @@ +##! Redefines some options common to all worker nodes within a Zeek cluster. +##! In particular, worker nodes do not produce logs locally, instead they +##! send them off to a logger node for processing. + +@prefixes += cluster-worker + +## Don't do any local logging. +redef Log::enable_local_logging = F; + +## Make sure that remote logging is enabled. +redef Log::enable_remote_logging = T; + +redef Log::default_rotation_interval = 24hrs; + +## Use the cluster's delete-log script. +redef Log::default_rotation_postprocessor_cmd = "delete-log"; + +@load misc/trim-trace-file +## Record all packets into trace file. +## +## Note that this only indicates that *if* we are recording packets, we want all +## of them (rather than just those the core deems sufficiently important). +## Setting this does not turn recording on. Use '-w ' for that. +redef record_all_packets = T; diff --git a/scripts/base/frameworks/cluster/pools.bro b/scripts/base/frameworks/cluster/pools.bro deleted file mode 100644 index 8f4e92b922..0000000000 --- a/scripts/base/frameworks/cluster/pools.bro +++ /dev/null @@ -1,458 +0,0 @@ -##! 
Defines an interface for managing pools of cluster nodes. Pools are -##! a useful way to distribute work or data among nodes within a cluster. - -@load ./main -@load base/utils/hash_hrw - -module Cluster; - -export { - ## Store state of a cluster within the context of a work pool. - type PoolNode: record { - ## The node name (e.g. "manager"). - name: string; - ## An alias of *name* used to prevent hashing collisions when creating - ## *site_id*. - alias: string; - ## A 32-bit unique identifier for the pool node, derived from name/alias. - site_id: count; - ## Whether the node is currently alive and can receive work. - alive: bool &default=F; - }; - - ## A pool specification. - type PoolSpec: record { - ## A topic string that can be used to reach all nodes within a pool. - topic: string &default = ""; - ## The type of nodes that are contained within the pool. - node_type: Cluster::NodeType &default = Cluster::PROXY; - ## The maximum number of nodes that may belong to the pool. - ## If not set, then all available nodes will be added to the pool, - ## else the cluster framework will automatically limit the pool - ## membership according to the threshhold. - max_nodes: count &optional; - ## Whether the pool requires exclusive access to nodes. If true, - ## then *max_nodes* nodes will not be assigned to any other pool. - ## When using this flag, *max_nodes* must also be set. - exclusive: bool &default = F; - }; - - type PoolNodeTable: table[string] of PoolNode; - type RoundRobinTable: table[string] of int; - - ## A pool used for distributing data/work among a set of cluster nodes. - type Pool: record { - ## The specification of the pool that was used when registering it. - spec: PoolSpec &default = PoolSpec(); - ## Nodes in the pool, indexed by their name (e.g. "manager"). - nodes: PoolNodeTable &default = PoolNodeTable(); - ## A list of nodes in the pool in a deterministic order. - node_list: vector of PoolNode &default = vector(); - ## The Rendezvous hashing structure. - hrw_pool: HashHRW::Pool &default = HashHRW::Pool(); - ## Round-Robin table indexed by arbitrary key and storing the next - ## index of *node_list* that will be eligible to receive work (if it's - ## alive at the time of next request). - rr_key_seq: RoundRobinTable &default = RoundRobinTable(); - ## Number of pool nodes that are currently alive. - alive_count: count &default = 0; - }; - - ## The specification for :bro:see:`Cluster::proxy_pool`. - global proxy_pool_spec: PoolSpec = - PoolSpec($topic = "bro/cluster/pool/proxy", - $node_type = Cluster::PROXY) &redef; - - ## The specification for :bro:see:`Cluster::worker_pool`. - global worker_pool_spec: PoolSpec = - PoolSpec($topic = "bro/cluster/pool/worker", - $node_type = Cluster::WORKER) &redef; - - ## The specification for :bro:see:`Cluster::logger_pool`. - global logger_pool_spec: PoolSpec = - PoolSpec($topic = "bro/cluster/pool/logger", - $node_type = Cluster::LOGGER) &redef; - - ## A pool containing all the proxy nodes of a cluster. - ## The pool's node membership/availability is automatically - ## maintained by the cluster framework. - global proxy_pool: Pool; - - ## A pool containing all the worker nodes of a cluster. - ## The pool's node membership/availability is automatically - ## maintained by the cluster framework. - global worker_pool: Pool; - - ## A pool containing all the logger nodes of a cluster. - ## The pool's node membership/availability is automatically - ## maintained by the cluster framework. 
- global logger_pool: Pool; - - ## Registers and initializes a pool. - global register_pool: function(spec: PoolSpec): Pool; - - ## Retrieve the topic associated with the node mapped via Rendezvous hash - ## of an arbitrary key. - ## - ## pool: the pool of nodes to consider. - ## - ## key: data used for input to the hashing function that will uniformly - ## distribute keys among available nodes. - ## - ## Returns: a topic string associated with a cluster node that is alive - ## or an empty string if nothing is alive. - global hrw_topic: function(pool: Pool, key: any): string; - - ## Retrieve the topic associated with the node in a round-robin fashion. - ## - ## pool: the pool of nodes to consider. - ## - ## key: an arbitrary string to identify the purpose for which you're - ## requesting the topic. e.g. consider using a name-spaced key - ## like "Intel::cluster_rr_key" if you need to guarantee that - ## a group of messages get distributed in a well-defined pattern - ## without other messages being interleaved within the round-robin. - ## Usually sharing the default key is fine for load-balancing - ## purposes. - ## - ## Returns: a topic string associated with a cluster node that is alive, - ## or an empty string if nothing is alive. - global rr_topic: function(pool: Pool, key: string &default=""): string; - - ## Distributes log message topics among logger nodes via round-robin. - ## This will be automatically assigned to :bro:see:`Broker::log_topic` - ## if :bro:see:`Cluster::enable_round_robin_logging` is enabled. - ## If no logger nodes are active, then this will return the value - ## of :bro:see:`Broker::default_log_topic`. - global rr_log_topic: function(id: Log::ID, path: string): string; -} - -## Initialize a node as a member of a pool. -## -## pool: the pool to which the node will belong. -## -## name: the name of the node (e.g. "manager"). -## -## Returns: F if a node of the same name already exists in the pool, else T. -global init_pool_node: function(pool: Pool, name: string): bool; - -## Mark a pool node as alive/online/available. :bro:see:`Cluster::hrw_topic` -## will distribute keys to nodes marked as alive. -## -## pool: the pool to which the node belongs. -## -## name: the name of the node to mark. -## -## Returns: F if the node does not exist in the pool, else T. -global mark_pool_node_alive: function(pool: Pool, name: string): bool; - -## Mark a pool node as dead/offline/unavailable. :bro:see:`Cluster::hrw_topic` -## will not distribute keys to nodes marked as dead. -## -## pool: the pool to which the node belongs. -## -## name: the name of the node to mark. -## -## Returns: F if the node does not exist in the pool, else T. 
-global mark_pool_node_dead: function(pool: Pool, name: string): bool; - -global registered_pools: vector of Pool = vector(); - -function register_pool(spec: PoolSpec): Pool - { - local rval = Pool($spec = spec); - registered_pools += rval; - return rval; - } - -function hrw_topic(pool: Pool, key: any): string - { - if ( |pool$hrw_pool$sites| == 0 ) - return ""; - - local site = HashHRW::get_site(pool$hrw_pool, key); - local pn: PoolNode = site$user_data; - return node_topic_prefix + pn$name; - } - -function rr_topic(pool: Pool, key: string): string - { - if ( key !in pool$rr_key_seq ) - pool$rr_key_seq[key] = 0; - - local next_idx = pool$rr_key_seq[key]; - local start = next_idx; - local rval = ""; - - if ( next_idx >= |pool$node_list| ) - return rval; - - while ( T ) - { - local pn = pool$node_list[next_idx]; - - ++next_idx; - - if ( next_idx == |pool$node_list| ) - next_idx = 0; - - if ( pn$alive ) - { - rval = node_topic_prefix + pn$name; - break; - } - - if ( next_idx == start ) - # no nodes alive - break; - } - - pool$rr_key_seq[key] = next_idx; - return rval; - } - -function rr_log_topic(id: Log::ID, path: string): string - { - local rval = rr_topic(logger_pool, "Cluster::rr_log_topic"); - - if ( rval != "" ) - return rval; - - rval = Broker::default_log_topic(id, path); - return rval; - } - -event Cluster::node_up(name: string, id: string) &priority=10 - { - for ( i in registered_pools ) - { - local pool = registered_pools[i]; - - if ( name in pool$nodes ) - mark_pool_node_alive(pool, name); - } - } - -event Cluster::node_down(name: string, id: string) &priority=10 - { - for ( i in registered_pools ) - { - local pool = registered_pools[i]; - - if ( name in pool$nodes ) - mark_pool_node_dead(pool, name); - } - } - -function site_id_in_pool(pool: Pool, site_id: count): bool - { - for ( i, pn in pool$nodes ) - { - if ( pn$site_id == site_id ) - return T; - } - - return F; - } - -function init_pool_node(pool: Pool, name: string): bool - { - if ( name in pool$nodes ) - return F; - - local loop = T; - local c = 0; - - while ( loop ) - { - # site id collisions are unlikely, but using aliases handles it... - # alternatively could terminate and ask user to pick a new node name - # if it ends up colliding. - local alias = name + fmt(".%s", c); - local site_id = fnv1a32(alias); - - if ( site_id_in_pool(pool, site_id) ) - ++c; - else - { - local pn = PoolNode($name=name, $alias=alias, $site_id=site_id, - $alive=Cluster::node == name); - pool$nodes[name] = pn; - pool$node_list += pn; - - if ( pn$alive ) - ++pool$alive_count; - - loop = F; - } - } - - return T; - } - -function mark_pool_node_alive(pool: Pool, name: string): bool - { - if ( name !in pool$nodes ) - return F; - - local pn = pool$nodes[name]; - - if ( ! 
pn$alive ) - { - pn$alive = T; - ++pool$alive_count; - } - - HashHRW::add_site(pool$hrw_pool, HashHRW::Site($id=pn$site_id, $user_data=pn)); - return T; - } - -function mark_pool_node_dead(pool: Pool, name: string): bool - { - if ( name !in pool$nodes ) - return F; - - local pn = pool$nodes[name]; - - if ( pn$alive ) - { - pn$alive = F; - --pool$alive_count; - } - - HashHRW::rem_site(pool$hrw_pool, HashHRW::Site($id=pn$site_id, $user_data=pn)); - return T; - } - -event bro_init() - { - worker_pool = register_pool(worker_pool_spec); - proxy_pool = register_pool(proxy_pool_spec); - logger_pool = register_pool(logger_pool_spec); - } - -type PoolEligibilityTracking: record { - eligible_nodes: vector of NamedNode &default = vector(); - next_idx: count &default = 0; - excluded: count &default = 0; -}; - -global pool_eligibility: table[Cluster::NodeType] of PoolEligibilityTracking = table(); - -function pool_sorter(a: Pool, b: Pool): int - { - return strcmp(a$spec$topic, b$spec$topic); - } - -# Needs to execute before the bro_init in setup-connections -event bro_init() &priority=-5 - { - if ( ! Cluster::is_enabled() ) - return; - - # Sorting now ensures the node distribution process is stable even if - # there's a change in the order of time-of-registration between Bro runs. - sort(registered_pools, pool_sorter); - - pool_eligibility[Cluster::WORKER] = - PoolEligibilityTracking($eligible_nodes = nodes_with_type(Cluster::WORKER)); - pool_eligibility[Cluster::PROXY] = - PoolEligibilityTracking($eligible_nodes = nodes_with_type(Cluster::PROXY)); - pool_eligibility[Cluster::LOGGER] = - PoolEligibilityTracking($eligible_nodes = nodes_with_type(Cluster::LOGGER)); - - if ( manager_is_logger ) - { - local mgr = nodes_with_type(Cluster::MANAGER); - - if ( |mgr| > 0 ) - { - local eln = pool_eligibility[Cluster::LOGGER]$eligible_nodes; - eln += mgr[0]; - } - } - - local pool: Pool; - local pet: PoolEligibilityTracking; - local en: vector of NamedNode; - - for ( i in registered_pools ) - { - pool = registered_pools[i]; - - if ( pool$spec$node_type !in pool_eligibility ) - Reporter::fatal(fmt("invalid pool node type: %s", pool$spec$node_type)); - - if ( ! pool$spec$exclusive ) - next; - - if ( ! pool$spec?$max_nodes ) - Reporter::fatal("Cluster::PoolSpec 'max_nodes' field must be set when using the 'exclusive' flag"); - - pet = pool_eligibility[pool$spec$node_type]; - pet$excluded += pool$spec$max_nodes; - } - - for ( nt, pet in pool_eligibility ) - { - if ( pet$excluded > |pet$eligible_nodes| ) - Reporter::fatal(fmt("not enough %s nodes to satisfy pool exclusivity requirements: need %d nodes", nt, pet$excluded)); - } - - for ( i in registered_pools ) - { - pool = registered_pools[i]; - - if ( ! 
pool$spec$exclusive ) - next; - - pet = pool_eligibility[pool$spec$node_type]; - - local e = 0; - - while ( e < pool$spec$max_nodes ) - { - init_pool_node(pool, pet$eligible_nodes[e]$name); - ++e; - } - - local nen: vector of NamedNode = vector(); - - for ( j in pet$eligible_nodes ) - { - if ( j < e ) - next; - - nen += pet$eligible_nodes[j]; - } - - pet$eligible_nodes = nen; - } - - for ( i in registered_pools ) - { - pool = registered_pools[i]; - - if ( pool$spec$exclusive ) - next; - - pet = pool_eligibility[pool$spec$node_type]; - local nodes_to_init = |pet$eligible_nodes|; - - if ( pool$spec?$max_nodes && - pool$spec$max_nodes < |pet$eligible_nodes| ) - nodes_to_init = pool$spec$max_nodes; - - local nodes_inited = 0; - - while ( nodes_inited < nodes_to_init ) - { - init_pool_node(pool, pet$eligible_nodes[pet$next_idx]$name); - ++nodes_inited; - ++pet$next_idx; - - if ( pet$next_idx == |pet$eligible_nodes| ) - pet$next_idx = 0; - } - } - } diff --git a/scripts/base/frameworks/cluster/pools.zeek b/scripts/base/frameworks/cluster/pools.zeek new file mode 100644 index 0000000000..9c21c3188d --- /dev/null +++ b/scripts/base/frameworks/cluster/pools.zeek @@ -0,0 +1,458 @@ +##! Defines an interface for managing pools of cluster nodes. Pools are +##! a useful way to distribute work or data among nodes within a cluster. + +@load ./main +@load base/utils/hash_hrw + +module Cluster; + +export { + ## Store state of a cluster within the context of a work pool. + type PoolNode: record { + ## The node name (e.g. "manager"). + name: string; + ## An alias of *name* used to prevent hashing collisions when creating + ## *site_id*. + alias: string; + ## A 32-bit unique identifier for the pool node, derived from name/alias. + site_id: count; + ## Whether the node is currently alive and can receive work. + alive: bool &default=F; + }; + + ## A pool specification. + type PoolSpec: record { + ## A topic string that can be used to reach all nodes within a pool. + topic: string &default = ""; + ## The type of nodes that are contained within the pool. + node_type: Cluster::NodeType &default = Cluster::PROXY; + ## The maximum number of nodes that may belong to the pool. + ## If not set, then all available nodes will be added to the pool, + ## else the cluster framework will automatically limit the pool + ## membership according to the threshhold. + max_nodes: count &optional; + ## Whether the pool requires exclusive access to nodes. If true, + ## then *max_nodes* nodes will not be assigned to any other pool. + ## When using this flag, *max_nodes* must also be set. + exclusive: bool &default = F; + }; + + type PoolNodeTable: table[string] of PoolNode; + type RoundRobinTable: table[string] of int; + + ## A pool used for distributing data/work among a set of cluster nodes. + type Pool: record { + ## The specification of the pool that was used when registering it. + spec: PoolSpec &default = PoolSpec(); + ## Nodes in the pool, indexed by their name (e.g. "manager"). + nodes: PoolNodeTable &default = PoolNodeTable(); + ## A list of nodes in the pool in a deterministic order. + node_list: vector of PoolNode &default = vector(); + ## The Rendezvous hashing structure. + hrw_pool: HashHRW::Pool &default = HashHRW::Pool(); + ## Round-Robin table indexed by arbitrary key and storing the next + ## index of *node_list* that will be eligible to receive work (if it's + ## alive at the time of next request). + rr_key_seq: RoundRobinTable &default = RoundRobinTable(); + ## Number of pool nodes that are currently alive. 
+ alive_count: count &default = 0; + }; + + ## The specification for :zeek:see:`Cluster::proxy_pool`. + global proxy_pool_spec: PoolSpec = + PoolSpec($topic = "zeek/cluster/pool/proxy", + $node_type = Cluster::PROXY) &redef; + + ## The specification for :zeek:see:`Cluster::worker_pool`. + global worker_pool_spec: PoolSpec = + PoolSpec($topic = "zeek/cluster/pool/worker", + $node_type = Cluster::WORKER) &redef; + + ## The specification for :zeek:see:`Cluster::logger_pool`. + global logger_pool_spec: PoolSpec = + PoolSpec($topic = "zeek/cluster/pool/logger", + $node_type = Cluster::LOGGER) &redef; + + ## A pool containing all the proxy nodes of a cluster. + ## The pool's node membership/availability is automatically + ## maintained by the cluster framework. + global proxy_pool: Pool; + + ## A pool containing all the worker nodes of a cluster. + ## The pool's node membership/availability is automatically + ## maintained by the cluster framework. + global worker_pool: Pool; + + ## A pool containing all the logger nodes of a cluster. + ## The pool's node membership/availability is automatically + ## maintained by the cluster framework. + global logger_pool: Pool; + + ## Registers and initializes a pool. + global register_pool: function(spec: PoolSpec): Pool; + + ## Retrieve the topic associated with the node mapped via Rendezvous hash + ## of an arbitrary key. + ## + ## pool: the pool of nodes to consider. + ## + ## key: data used for input to the hashing function that will uniformly + ## distribute keys among available nodes. + ## + ## Returns: a topic string associated with a cluster node that is alive + ## or an empty string if nothing is alive. + global hrw_topic: function(pool: Pool, key: any): string; + + ## Retrieve the topic associated with the node in a round-robin fashion. + ## + ## pool: the pool of nodes to consider. + ## + ## key: an arbitrary string to identify the purpose for which you're + ## requesting the topic. e.g. consider using a name-spaced key + ## like "Intel::cluster_rr_key" if you need to guarantee that + ## a group of messages get distributed in a well-defined pattern + ## without other messages being interleaved within the round-robin. + ## Usually sharing the default key is fine for load-balancing + ## purposes. + ## + ## Returns: a topic string associated with a cluster node that is alive, + ## or an empty string if nothing is alive. + global rr_topic: function(pool: Pool, key: string &default=""): string; + + ## Distributes log message topics among logger nodes via round-robin. + ## This will be automatically assigned to :zeek:see:`Broker::log_topic` + ## if :zeek:see:`Cluster::enable_round_robin_logging` is enabled. + ## If no logger nodes are active, then this will return the value + ## of :zeek:see:`Broker::default_log_topic`. + global rr_log_topic: function(id: Log::ID, path: string): string; +} + +## Initialize a node as a member of a pool. +## +## pool: the pool to which the node will belong. +## +## name: the name of the node (e.g. "manager"). +## +## Returns: F if a node of the same name already exists in the pool, else T. +global init_pool_node: function(pool: Pool, name: string): bool; + +## Mark a pool node as alive/online/available. :zeek:see:`Cluster::hrw_topic` +## will distribute keys to nodes marked as alive. +## +## pool: the pool to which the node belongs. +## +## name: the name of the node to mark. +## +## Returns: F if the node does not exist in the pool, else T. 
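To make the pool API in this export block concrete, the following assumed snippet distributes per-host work across the proxy pool via hrw_topic; the module and event names are invented for illustration:

    @load base/frameworks/cluster

    module PoolExample;  # hypothetical

    export {
    	## Invented event that proxies in the pool would handle.
    	global count_host: event(h: addr);
    }

    event connection_established(c: connection)
    	{
    	# Rendezvous hashing keeps each originator pinned to the same live proxy.
    	local topic = Cluster::hrw_topic(Cluster::proxy_pool, c$id$orig_h);

    	if ( topic != "" )
    		Broker::publish(topic, count_host, c$id$orig_h);
    	}

rr_topic can be used the same way when work should instead cycle round-robin over whichever pool nodes are alive.
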
+global mark_pool_node_alive: function(pool: Pool, name: string): bool; + +## Mark a pool node as dead/offline/unavailable. :zeek:see:`Cluster::hrw_topic` +## will not distribute keys to nodes marked as dead. +## +## pool: the pool to which the node belongs. +## +## name: the name of the node to mark. +## +## Returns: F if the node does not exist in the pool, else T. +global mark_pool_node_dead: function(pool: Pool, name: string): bool; + +global registered_pools: vector of Pool = vector(); + +function register_pool(spec: PoolSpec): Pool + { + local rval = Pool($spec = spec); + registered_pools += rval; + return rval; + } + +function hrw_topic(pool: Pool, key: any): string + { + if ( |pool$hrw_pool$sites| == 0 ) + return ""; + + local site = HashHRW::get_site(pool$hrw_pool, key); + local pn: PoolNode = site$user_data; + return node_topic_prefix + pn$name; + } + +function rr_topic(pool: Pool, key: string): string + { + if ( key !in pool$rr_key_seq ) + pool$rr_key_seq[key] = 0; + + local next_idx = pool$rr_key_seq[key]; + local start = next_idx; + local rval = ""; + + if ( next_idx >= |pool$node_list| ) + return rval; + + while ( T ) + { + local pn = pool$node_list[next_idx]; + + ++next_idx; + + if ( next_idx == |pool$node_list| ) + next_idx = 0; + + if ( pn$alive ) + { + rval = node_topic_prefix + pn$name; + break; + } + + if ( next_idx == start ) + # no nodes alive + break; + } + + pool$rr_key_seq[key] = next_idx; + return rval; + } + +function rr_log_topic(id: Log::ID, path: string): string + { + local rval = rr_topic(logger_pool, "Cluster::rr_log_topic"); + + if ( rval != "" ) + return rval; + + rval = Broker::default_log_topic(id, path); + return rval; + } + +event Cluster::node_up(name: string, id: string) &priority=10 + { + for ( i in registered_pools ) + { + local pool = registered_pools[i]; + + if ( name in pool$nodes ) + mark_pool_node_alive(pool, name); + } + } + +event Cluster::node_down(name: string, id: string) &priority=10 + { + for ( i in registered_pools ) + { + local pool = registered_pools[i]; + + if ( name in pool$nodes ) + mark_pool_node_dead(pool, name); + } + } + +function site_id_in_pool(pool: Pool, site_id: count): bool + { + for ( i, pn in pool$nodes ) + { + if ( pn$site_id == site_id ) + return T; + } + + return F; + } + +function init_pool_node(pool: Pool, name: string): bool + { + if ( name in pool$nodes ) + return F; + + local loop = T; + local c = 0; + + while ( loop ) + { + # site id collisions are unlikely, but using aliases handles it... + # alternatively could terminate and ask user to pick a new node name + # if it ends up colliding. + local alias = name + fmt(".%s", c); + local site_id = fnv1a32(alias); + + if ( site_id_in_pool(pool, site_id) ) + ++c; + else + { + local pn = PoolNode($name=name, $alias=alias, $site_id=site_id, + $alive=Cluster::node == name); + pool$nodes[name] = pn; + pool$node_list += pn; + + if ( pn$alive ) + ++pool$alive_count; + + loop = F; + } + } + + return T; + } + +function mark_pool_node_alive(pool: Pool, name: string): bool + { + if ( name !in pool$nodes ) + return F; + + local pn = pool$nodes[name]; + + if ( ! 
pn$alive ) + { + pn$alive = T; + ++pool$alive_count; + } + + HashHRW::add_site(pool$hrw_pool, HashHRW::Site($id=pn$site_id, $user_data=pn)); + return T; + } + +function mark_pool_node_dead(pool: Pool, name: string): bool + { + if ( name !in pool$nodes ) + return F; + + local pn = pool$nodes[name]; + + if ( pn$alive ) + { + pn$alive = F; + --pool$alive_count; + } + + HashHRW::rem_site(pool$hrw_pool, HashHRW::Site($id=pn$site_id, $user_data=pn)); + return T; + } + +event zeek_init() + { + worker_pool = register_pool(worker_pool_spec); + proxy_pool = register_pool(proxy_pool_spec); + logger_pool = register_pool(logger_pool_spec); + } + +type PoolEligibilityTracking: record { + eligible_nodes: vector of NamedNode &default = vector(); + next_idx: count &default = 0; + excluded: count &default = 0; +}; + +global pool_eligibility: table[Cluster::NodeType] of PoolEligibilityTracking = table(); + +function pool_sorter(a: Pool, b: Pool): int + { + return strcmp(a$spec$topic, b$spec$topic); + } + +# Needs to execute before the zeek_init in setup-connections +event zeek_init() &priority=-5 + { + if ( ! Cluster::is_enabled() ) + return; + + # Sorting now ensures the node distribution process is stable even if + # there's a change in the order of time-of-registration between Zeek runs. + sort(registered_pools, pool_sorter); + + pool_eligibility[Cluster::WORKER] = + PoolEligibilityTracking($eligible_nodes = nodes_with_type(Cluster::WORKER)); + pool_eligibility[Cluster::PROXY] = + PoolEligibilityTracking($eligible_nodes = nodes_with_type(Cluster::PROXY)); + pool_eligibility[Cluster::LOGGER] = + PoolEligibilityTracking($eligible_nodes = nodes_with_type(Cluster::LOGGER)); + + if ( manager_is_logger ) + { + local mgr = nodes_with_type(Cluster::MANAGER); + + if ( |mgr| > 0 ) + { + local eln = pool_eligibility[Cluster::LOGGER]$eligible_nodes; + eln += mgr[0]; + } + } + + local pool: Pool; + local pet: PoolEligibilityTracking; + local en: vector of NamedNode; + + for ( i in registered_pools ) + { + pool = registered_pools[i]; + + if ( pool$spec$node_type !in pool_eligibility ) + Reporter::fatal(fmt("invalid pool node type: %s", pool$spec$node_type)); + + if ( ! pool$spec$exclusive ) + next; + + if ( ! pool$spec?$max_nodes ) + Reporter::fatal("Cluster::PoolSpec 'max_nodes' field must be set when using the 'exclusive' flag"); + + pet = pool_eligibility[pool$spec$node_type]; + pet$excluded += pool$spec$max_nodes; + } + + for ( nt, pet in pool_eligibility ) + { + if ( pet$excluded > |pet$eligible_nodes| ) + Reporter::fatal(fmt("not enough %s nodes to satisfy pool exclusivity requirements: need %d nodes", nt, pet$excluded)); + } + + for ( i in registered_pools ) + { + pool = registered_pools[i]; + + if ( ! 
pool$spec$exclusive ) + next; + + pet = pool_eligibility[pool$spec$node_type]; + + local e = 0; + + while ( e < pool$spec$max_nodes ) + { + init_pool_node(pool, pet$eligible_nodes[e]$name); + ++e; + } + + local nen: vector of NamedNode = vector(); + + for ( j in pet$eligible_nodes ) + { + if ( j < e ) + next; + + nen += pet$eligible_nodes[j]; + } + + pet$eligible_nodes = nen; + } + + for ( i in registered_pools ) + { + pool = registered_pools[i]; + + if ( pool$spec$exclusive ) + next; + + pet = pool_eligibility[pool$spec$node_type]; + local nodes_to_init = |pet$eligible_nodes|; + + if ( pool$spec?$max_nodes && + pool$spec$max_nodes < |pet$eligible_nodes| ) + nodes_to_init = pool$spec$max_nodes; + + local nodes_inited = 0; + + while ( nodes_inited < nodes_to_init ) + { + init_pool_node(pool, pet$eligible_nodes[pet$next_idx]$name); + ++nodes_inited; + ++pet$next_idx; + + if ( pet$next_idx == |pet$eligible_nodes| ) + pet$next_idx = 0; + } + } + } diff --git a/scripts/base/frameworks/cluster/setup-connections.bro b/scripts/base/frameworks/cluster/setup-connections.bro deleted file mode 100644 index a90081c639..0000000000 --- a/scripts/base/frameworks/cluster/setup-connections.bro +++ /dev/null @@ -1,126 +0,0 @@ -##! This script establishes communication among all nodes in a cluster -##! as defined by :bro:id:`Cluster::nodes`. - -@load ./main -@load ./pools -@load base/frameworks/broker - -module Cluster; - -function connect_peer(node_type: NodeType, node_name: string) - { - local nn = nodes_with_type(node_type); - - for ( i in nn ) - { - local n = nn[i]; - - if ( n$name != node_name ) - next; - - local status = Broker::peer(cat(n$node$ip), n$node$p, - Cluster::retry_interval); - Cluster::log(fmt("initiate peering with %s:%s, retry=%s, status=%s", - n$node$ip, n$node$p, Cluster::retry_interval, - status)); - } - } - -function connect_peers_with_type(node_type: NodeType) - { - local rval: vector of NamedNode = vector(); - local nn = nodes_with_type(node_type); - - for ( i in nn ) - { - local n = nn[i]; - local status = Broker::peer(cat(n$node$ip), n$node$p, - Cluster::retry_interval); - Cluster::log(fmt("initiate peering with %s:%s, retry=%s, status=%s", - n$node$ip, n$node$p, Cluster::retry_interval, - status)); - } - } - -event bro_init() &priority=-10 - { - if ( getenv("BROCTL_CHECK_CONFIG") != "" ) - return; - - local self = nodes[node]; - - for ( i in registered_pools ) - { - local pool = registered_pools[i]; - - if ( node in pool$nodes ) - Broker::subscribe(pool$spec$topic); - } - - switch ( self$node_type ) { - case NONE: - return; - case CONTROL: - break; - case LOGGER: - Broker::subscribe(Cluster::logger_topic); - Broker::subscribe(Broker::default_log_topic_prefix); - break; - case MANAGER: - Broker::subscribe(Cluster::manager_topic); - - if ( Cluster::manager_is_logger ) - Broker::subscribe(Broker::default_log_topic_prefix); - - break; - case PROXY: - Broker::subscribe(Cluster::proxy_topic); - break; - case WORKER: - Broker::subscribe(Cluster::worker_topic); - break; - case TIME_MACHINE: - Broker::subscribe(Cluster::time_machine_topic); - break; - default: - Reporter::error(fmt("Unhandled cluster node type: %s", self$node_type)); - return; - } - - Broker::subscribe(nodeid_topic(Broker::node_id())); - Broker::subscribe(node_topic(node)); - - Broker::listen(Broker::default_listen_address, - self$p, - Broker::default_listen_retry); - - Cluster::log(fmt("listening on %s:%s", Broker::default_listen_address, self$p)); - - switch ( self$node_type ) { - case MANAGER: - 
connect_peers_with_type(LOGGER); - - if ( self?$time_machine ) - connect_peer(TIME_MACHINE, self$time_machine); - - break; - case PROXY: - connect_peers_with_type(LOGGER); - - if ( self?$manager ) - connect_peer(MANAGER, self$manager); - - break; - case WORKER: - connect_peers_with_type(LOGGER); - connect_peers_with_type(PROXY); - - if ( self?$manager ) - connect_peer(MANAGER, self$manager); - - if ( self?$time_machine ) - connect_peer(TIME_MACHINE, self$time_machine); - - break; - } - } diff --git a/scripts/base/frameworks/cluster/setup-connections.zeek b/scripts/base/frameworks/cluster/setup-connections.zeek new file mode 100644 index 0000000000..9e9374c8b9 --- /dev/null +++ b/scripts/base/frameworks/cluster/setup-connections.zeek @@ -0,0 +1,126 @@ +##! This script establishes communication among all nodes in a cluster +##! as defined by :zeek:id:`Cluster::nodes`. + +@load ./main +@load ./pools +@load base/frameworks/broker + +module Cluster; + +function connect_peer(node_type: NodeType, node_name: string) + { + local nn = nodes_with_type(node_type); + + for ( i in nn ) + { + local n = nn[i]; + + if ( n$name != node_name ) + next; + + local status = Broker::peer(cat(n$node$ip), n$node$p, + Cluster::retry_interval); + Cluster::log(fmt("initiate peering with %s:%s, retry=%s, status=%s", + n$node$ip, n$node$p, Cluster::retry_interval, + status)); + } + } + +function connect_peers_with_type(node_type: NodeType) + { + local rval: vector of NamedNode = vector(); + local nn = nodes_with_type(node_type); + + for ( i in nn ) + { + local n = nn[i]; + local status = Broker::peer(cat(n$node$ip), n$node$p, + Cluster::retry_interval); + Cluster::log(fmt("initiate peering with %s:%s, retry=%s, status=%s", + n$node$ip, n$node$p, Cluster::retry_interval, + status)); + } + } + +event zeek_init() &priority=-10 + { + if ( getenv("ZEEKCTL_CHECK_CONFIG") != "" ) + return; + + local self = nodes[node]; + + for ( i in registered_pools ) + { + local pool = registered_pools[i]; + + if ( node in pool$nodes ) + Broker::subscribe(pool$spec$topic); + } + + switch ( self$node_type ) { + case NONE: + return; + case CONTROL: + break; + case LOGGER: + Broker::subscribe(Cluster::logger_topic); + Broker::subscribe(Broker::default_log_topic_prefix); + break; + case MANAGER: + Broker::subscribe(Cluster::manager_topic); + + if ( Cluster::manager_is_logger ) + Broker::subscribe(Broker::default_log_topic_prefix); + + break; + case PROXY: + Broker::subscribe(Cluster::proxy_topic); + break; + case WORKER: + Broker::subscribe(Cluster::worker_topic); + break; + case TIME_MACHINE: + Broker::subscribe(Cluster::time_machine_topic); + break; + default: + Reporter::error(fmt("Unhandled cluster node type: %s", self$node_type)); + return; + } + + Broker::subscribe(nodeid_topic(Broker::node_id())); + Broker::subscribe(node_topic(node)); + + Broker::listen(Broker::default_listen_address, + self$p, + Broker::default_listen_retry); + + Cluster::log(fmt("listening on %s:%s", Broker::default_listen_address, self$p)); + + switch ( self$node_type ) { + case MANAGER: + connect_peers_with_type(LOGGER); + + if ( self?$time_machine ) + connect_peer(TIME_MACHINE, self$time_machine); + + break; + case PROXY: + connect_peers_with_type(LOGGER); + + if ( self?$manager ) + connect_peer(MANAGER, self$manager); + + break; + case WORKER: + connect_peers_with_type(LOGGER); + connect_peers_with_type(PROXY); + + if ( self?$manager ) + connect_peer(MANAGER, self$manager); + + if ( self?$time_machine ) + connect_peer(TIME_MACHINE, self$time_machine); + + break; + 
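		# The pool subscriptions set up at the top of this handler pair with the
		# topic helpers defined in pools.zeek. A minimal sketch of addressing
		# pool members (the event name Foo::bar is hypothetical):
		#
		#     local t = Cluster::hrw_topic(Cluster::proxy_pool, "some-key");
		#     if ( t != "" )
		#         Broker::publish(t, Foo::bar, "some-key");
		#
		# hrw_topic() keeps a given key pinned to the same live node via
		# rendezvous (HRW) hashing, while rr_topic() rotates round-robin over
		# the live nodes.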
} + } diff --git a/scripts/base/frameworks/config/README b/scripts/base/frameworks/config/README index 3640d1e8c4..2869bc78bb 100644 --- a/scripts/base/frameworks/config/README +++ b/scripts/base/frameworks/config/README @@ -1,2 +1,2 @@ -The configuration framework provides a way to change the Bro configuration +The configuration framework provides a way to change the Zeek configuration in "option" values at run-time. diff --git a/scripts/base/frameworks/config/__load__.bro b/scripts/base/frameworks/config/__load__.zeek similarity index 100% rename from scripts/base/frameworks/config/__load__.bro rename to scripts/base/frameworks/config/__load__.zeek diff --git a/scripts/base/frameworks/config/input.bro b/scripts/base/frameworks/config/input.bro deleted file mode 100644 index 7c1f37567b..0000000000 --- a/scripts/base/frameworks/config/input.bro +++ /dev/null @@ -1,77 +0,0 @@ -##! File input for the configuration framework using the input framework. - -@load ./main -@load base/frameworks/cluster - -module Config; - -export { - ## Configuration files that will be read off disk. Files are reread - ## every time they are updated so updates should be atomic with "mv" - ## instead of writing the file in place. - ## - ## If the same configuration option is defined in several files with - ## different values, behavior is unspecified. - const config_files: set[string] = {} &redef; - - ## Read specified configuration file and apply values; updates to file - ## are not tracked. - global read_config: function(filename: string); -} - -global current_config: table[string] of string = table(); - -type ConfigItem: record { - option_nv: string; -}; - -type EventFields: record { - option_name: string; - option_val: string; -}; - -event config_line(description: Input::EventDescription, tpe: Input::Event, p: EventFields) - { - } - -event bro_init() &priority=5 - { - if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) - return; - - for ( fi in config_files ) - Input::add_table([$reader=Input::READER_CONFIG, - $mode=Input::REREAD, - $source=fi, - $name=cat("config-", fi), - $idx=ConfigItem, - $val=ConfigItem, - $want_record=F, - $destination=current_config]); - } - -event InputConfig::new_value(name: string, source: string, id: string, value: any) - { - if ( sub_bytes(name, 1, 15) != "config-oneshot-" && source !in config_files ) - return; - - Config::set_value(id, value, source); - } - -function read_config(filename: string) - { - # Only read the configuration on the manager. The other nodes are being fed - # from the manager. - if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) - return; - - local iname = cat("config-oneshot-", filename); - - Input::add_event([$reader=Input::READER_CONFIG, - $mode=Input::MANUAL, - $source=filename, - $name=iname, - $fields=EventFields, - $ev=config_line]); - Input::remove(iname); - } diff --git a/scripts/base/frameworks/config/input.zeek b/scripts/base/frameworks/config/input.zeek new file mode 100644 index 0000000000..9796d69f57 --- /dev/null +++ b/scripts/base/frameworks/config/input.zeek @@ -0,0 +1,77 @@ +##! File input for the configuration framework using the input framework. + +@load ./main +@load base/frameworks/cluster + +module Config; + +export { + ## Configuration files that will be read off disk. Files are reread + ## every time they are updated so updates should be atomic with "mv" + ## instead of writing the file in place. 
+ ## + ## If the same configuration option is defined in several files with + ## different values, behavior is unspecified. + const config_files: set[string] = {} &redef; + + ## Read specified configuration file and apply values; updates to file + ## are not tracked. + global read_config: function(filename: string); +} + +global current_config: table[string] of string = table(); + +type ConfigItem: record { + option_nv: string; +}; + +type EventFields: record { + option_name: string; + option_val: string; +}; + +event config_line(description: Input::EventDescription, tpe: Input::Event, p: EventFields) + { + } + +event zeek_init() &priority=5 + { + if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) + return; + + for ( fi in config_files ) + Input::add_table([$reader=Input::READER_CONFIG, + $mode=Input::REREAD, + $source=fi, + $name=cat("config-", fi), + $idx=ConfigItem, + $val=ConfigItem, + $want_record=F, + $destination=current_config]); + } + +event InputConfig::new_value(name: string, source: string, id: string, value: any) + { + if ( sub_bytes(name, 1, 15) != "config-oneshot-" && source !in config_files ) + return; + + Config::set_value(id, value, source); + } + +function read_config(filename: string) + { + # Only read the configuration on the manager. The other nodes are being fed + # from the manager. + if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) + return; + + local iname = cat("config-oneshot-", filename); + + Input::add_event([$reader=Input::READER_CONFIG, + $mode=Input::MANUAL, + $source=filename, + $name=iname, + $fields=EventFields, + $ev=config_line]); + Input::remove(iname); + } diff --git a/scripts/base/frameworks/config/main.bro b/scripts/base/frameworks/config/main.bro deleted file mode 100644 index 2f9dbfc720..0000000000 --- a/scripts/base/frameworks/config/main.bro +++ /dev/null @@ -1,170 +0,0 @@ -##! The configuration framework provides a way to change Bro options -##! (as specified by the "option" keyword) at runtime. It also logs runtime -##! changes to options to config.log. - -@load base/frameworks/cluster - -module Config; - -export { - ## The config logging stream identifier. - redef enum Log::ID += { LOG }; - - ## Represents the data in config.log. - type Info: record { - ## Timestamp at which the configuration change occured. - ts: time &log; - ## ID of the value that was changed. - id: string &log; - ## Value before the change. - old_value: string &log; - ## Value after the change. - new_value: string &log; - ## Optional location that triggered the change. - location: string &optional &log; - }; - - ## Event that can be handled to access the :bro:type:`Config::Info` - ## record as it is sent on to the logging framework. - global log_config: event(rec: Info); - - ## This function is the config framework layer around the lower-level - ## :bro:see:`Option::set` call. Config::set_value will set the configuration - ## value for all nodes in the cluster, no matter where it was called. Note - ## that :bro:see:`Option::set` does not distribute configuration changes - ## to other nodes. - ## - ## ID: The ID of the option to update. - ## - ## val: The new value of the option. - ## - ## location: Optional parameter detailing where this change originated from. - ## - ## Returns: true on success, false when an error occurs. 
- global set_value: function(ID: string, val: any, location: string &default = "" &optional): bool; -} - -@if ( Cluster::is_enabled() ) -type OptionCacheValue: record { - val: any; - location: string; -}; - -global option_cache: table[string] of OptionCacheValue; - -global Config::cluster_set_option: event(ID: string, val: any, location: string); - -function broadcast_option(ID: string, val: any, location: string) - { - # There's not currently a common topic to broadcast to as then enabling - # implicit Broker forwarding would cause a routing loop. - Broker::publish(Cluster::worker_topic, Config::cluster_set_option, - ID, val, location); - Broker::publish(Cluster::proxy_topic, Config::cluster_set_option, - ID, val, location); - Broker::publish(Cluster::logger_topic, Config::cluster_set_option, - ID, val, location); - } - -event Config::cluster_set_option(ID: string, val: any, location: string) - { -@if ( Cluster::local_node_type() == Cluster::MANAGER ) - option_cache[ID] = OptionCacheValue($val=val, $location=location); - broadcast_option(ID, val, location); -@endif - - Option::set(ID, val, location); - } - -function set_value(ID: string, val: any, location: string &default = "" &optional): bool - { - # Always copy the value to break references -- if caller mutates their - # value afterwards, we still guarantee the option has not changed. If - # one wants it to change, they need to explicitly call Option::set_value - # or Option::set with the intended value at the time of the call. - val = copy(val); - - # First try setting it locally - abort if not possible. - if ( ! Option::set(ID, val, location) ) - return F; - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) - option_cache[ID] = OptionCacheValue($val=val, $location=location); - broadcast_option(ID, val, location); -@else - Broker::publish(Cluster::manager_topic, Config::cluster_set_option, - ID, val, location); -@endif - - return T; - } -@else # Standalone implementation -function set_value(ID: string, val: any, location: string &default = "" &optional): bool - { - return Option::set(ID, val, location); - } -@endif # Cluster::is_enabled - -@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) -# Handling of new worker nodes. -event Cluster::node_up(name: string, id: string) &priority=-10 - { - # When a node connects, send it all current Option values. 
- if ( name in Cluster::nodes ) - for ( ID in option_cache ) - Broker::publish(Cluster::node_topic(name), Config::cluster_set_option, ID, option_cache[ID]$val, option_cache[ID]$location); - } -@endif - - -function format_value(value: any) : string - { - local tn = type_name(value); - local part: string_vec = vector(); - if ( /^set/ in tn ) - { - local it: set[bool] = value; - for ( sv in it ) - part += cat(sv); - return join_string_vec(part, ","); - } - else if ( /^vector/ in tn ) - { - local vit: vector of any = value; - for ( i in vit ) - part += cat(vit[i]); - return join_string_vec(part, ","); - } - else if ( tn == "string" ) - return value; - - return cat(value); - } - -function config_option_changed(ID: string, new_value: any, location: string): any - { - local log = Info($ts=network_time(), $id=ID, $old_value=format_value(lookup_ID(ID)), $new_value=format_value(new_value)); - if ( location != "" ) - log$location = location; - Log::write(LOG, log); - return new_value; - } - -event bro_init() &priority=10 - { - Log::create_stream(LOG, [$columns=Info, $ev=log_config, $path="config"]); - - # Limit logging to the manager - everyone else just feeds off it. -@if ( !Cluster::is_enabled() || Cluster::local_node_type() == Cluster::MANAGER ) - # Iterate over all existing options and add ourselves as change handlers - # with a low priority so that we can log the changes. - local gids = global_ids(); - for ( i, gid in gids ) - { - if ( ! gid$option_value ) - next; - - Option::set_change_handler(i, config_option_changed, -100); - } -@endif - } diff --git a/scripts/base/frameworks/config/main.zeek b/scripts/base/frameworks/config/main.zeek new file mode 100644 index 0000000000..3b188732b9 --- /dev/null +++ b/scripts/base/frameworks/config/main.zeek @@ -0,0 +1,170 @@ +##! The configuration framework provides a way to change Zeek options +##! (as specified by the "option" keyword) at runtime. It also logs runtime +##! changes to options to config.log. + +@load base/frameworks/cluster + +module Config; + +export { + ## The config logging stream identifier. + redef enum Log::ID += { LOG }; + + ## Represents the data in config.log. + type Info: record { + ## Timestamp at which the configuration change occured. + ts: time &log; + ## ID of the value that was changed. + id: string &log; + ## Value before the change. + old_value: string &log; + ## Value after the change. + new_value: string &log; + ## Optional location that triggered the change. + location: string &optional &log; + }; + + ## Event that can be handled to access the :zeek:type:`Config::Info` + ## record as it is sent on to the logging framework. + global log_config: event(rec: Info); + + ## This function is the config framework layer around the lower-level + ## :zeek:see:`Option::set` call. Config::set_value will set the configuration + ## value for all nodes in the cluster, no matter where it was called. Note + ## that :zeek:see:`Option::set` does not distribute configuration changes + ## to other nodes. + ## + ## ID: The ID of the option to update. + ## + ## val: The new value of the option. + ## + ## location: Optional parameter detailing where this change originated from. + ## + ## Returns: true on success, false when an error occurs. 
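	## A minimal usage sketch (the option and module names are hypothetical)::
	##
	##     # In module MySite's export block:
	##     option scan_threshold = 25;
	##
	##     # Later, from any cluster node:
	##     Config::set_value("MySite::scan_threshold", 50, "tuning");
	##
	## After the call, every node sees the new value, whereas a bare
	## :zeek:see:`Option::set` would only have changed it locally.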
+ global set_value: function(ID: string, val: any, location: string &default = "" &optional): bool; +} + +@if ( Cluster::is_enabled() ) +type OptionCacheValue: record { + val: any; + location: string; +}; + +global option_cache: table[string] of OptionCacheValue; + +global Config::cluster_set_option: event(ID: string, val: any, location: string); + +function broadcast_option(ID: string, val: any, location: string) + { + # There's not currently a common topic to broadcast to as then enabling + # implicit Broker forwarding would cause a routing loop. + Broker::publish(Cluster::worker_topic, Config::cluster_set_option, + ID, val, location); + Broker::publish(Cluster::proxy_topic, Config::cluster_set_option, + ID, val, location); + Broker::publish(Cluster::logger_topic, Config::cluster_set_option, + ID, val, location); + } + +event Config::cluster_set_option(ID: string, val: any, location: string) + { +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + option_cache[ID] = OptionCacheValue($val=val, $location=location); + broadcast_option(ID, val, location); +@endif + + Option::set(ID, val, location); + } + +function set_value(ID: string, val: any, location: string &default = "" &optional): bool + { + # Always copy the value to break references -- if caller mutates their + # value afterwards, we still guarantee the option has not changed. If + # one wants it to change, they need to explicitly call Option::set_value + # or Option::set with the intended value at the time of the call. + val = copy(val); + + # First try setting it locally - abort if not possible. + if ( ! Option::set(ID, val, location) ) + return F; + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + option_cache[ID] = OptionCacheValue($val=val, $location=location); + broadcast_option(ID, val, location); +@else + Broker::publish(Cluster::manager_topic, Config::cluster_set_option, + ID, val, location); +@endif + + return T; + } +@else # Standalone implementation +function set_value(ID: string, val: any, location: string &default = "" &optional): bool + { + return Option::set(ID, val, location); + } +@endif # Cluster::is_enabled + +@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) +# Handling of new worker nodes. +event Cluster::node_up(name: string, id: string) &priority=-10 + { + # When a node connects, send it all current Option values. 
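	# option_cache is populated only when running as the manager: both
	# set_value() and the Config::cluster_set_option handler above store the
	# most recent value per option ID. Replaying that cache here lets nodes
	# that connect (or reconnect) after a change converge on the current
	# configuration instead of their script-time defaults.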
+ if ( name in Cluster::nodes ) + for ( ID in option_cache ) + Broker::publish(Cluster::node_topic(name), Config::cluster_set_option, ID, option_cache[ID]$val, option_cache[ID]$location); + } +@endif + + +function format_value(value: any) : string + { + local tn = type_name(value); + local part: string_vec = vector(); + if ( /^set/ in tn ) + { + local it: set[bool] = value; + for ( sv in it ) + part += cat(sv); + return join_string_vec(part, ","); + } + else if ( /^vector/ in tn ) + { + local vit: vector of any = value; + for ( i in vit ) + part += cat(vit[i]); + return join_string_vec(part, ","); + } + else if ( tn == "string" ) + return value; + + return cat(value); + } + +function config_option_changed(ID: string, new_value: any, location: string): any + { + local log = Info($ts=network_time(), $id=ID, $old_value=format_value(lookup_ID(ID)), $new_value=format_value(new_value)); + if ( location != "" ) + log$location = location; + Log::write(LOG, log); + return new_value; + } + +event zeek_init() &priority=10 + { + Log::create_stream(LOG, [$columns=Info, $ev=log_config, $path="config"]); + + # Limit logging to the manager - everyone else just feeds off it. +@if ( !Cluster::is_enabled() || Cluster::local_node_type() == Cluster::MANAGER ) + # Iterate over all existing options and add ourselves as change handlers + # with a low priority so that we can log the changes. + local gids = global_ids(); + for ( i, gid in gids ) + { + if ( ! gid$option_value ) + next; + + Option::set_change_handler(i, config_option_changed, -100); + } +@endif + } diff --git a/scripts/base/frameworks/config/weird.bro b/scripts/base/frameworks/config/weird.bro deleted file mode 100644 index bc311e3029..0000000000 --- a/scripts/base/frameworks/config/weird.bro +++ /dev/null @@ -1,44 +0,0 @@ -##! This script sets up the config framework change handlers for weirds. - -@load ./main - -module Config; - -function weird_option_change_sampling_whitelist(ID: string, new_value: string_set, location: string) : string_set - { - if ( ID == "Weird::sampling_whitelist" ) - { - Reporter::set_weird_sampling_whitelist(new_value); - } - return new_value; - } - -function weird_option_change_count(ID: string, new_value: count, location: string) : count - { - if ( ID == "Weird::sampling_threshold" ) - { - Reporter::set_weird_sampling_threshold(new_value); - } - else if ( ID == "Weird::sampling_rate" ) - { - Reporter::set_weird_sampling_rate(new_value); - } - return new_value; - } - -function weird_option_change_interval(ID: string, new_value: interval, location: string) : interval - { - if ( ID == "Weird::sampling_duration" ) - { - Reporter::set_weird_sampling_duration(new_value); - } - return new_value; - } - -event bro_init() &priority=5 - { - Option::set_change_handler("Weird::sampling_whitelist", weird_option_change_sampling_whitelist, 5); - Option::set_change_handler("Weird::sampling_threshold", weird_option_change_count, 5); - Option::set_change_handler("Weird::sampling_rate", weird_option_change_count, 5); - Option::set_change_handler("Weird::sampling_duration", weird_option_change_interval, 5); - } diff --git a/scripts/base/frameworks/config/weird.zeek b/scripts/base/frameworks/config/weird.zeek new file mode 100644 index 0000000000..5e55b0b188 --- /dev/null +++ b/scripts/base/frameworks/config/weird.zeek @@ -0,0 +1,44 @@ +##! This script sets up the config framework change handlers for weirds. 
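# The handlers below forward runtime changes of the Weird options to the
# corresponding Reporter::set_weird_* functions so that they take effect in
# the core. A minimal sketch of driving them through the config framework
# (the file path is hypothetical):
#
#     redef Config::config_files += { "/etc/zeek/weird-tuning.dat" };
#
# or, from a script on any cluster node:
#
#     Config::set_value("Weird::sampling_rate", 1000);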
+ +@load ./main + +module Config; + +function weird_option_change_sampling_whitelist(ID: string, new_value: string_set, location: string) : string_set + { + if ( ID == "Weird::sampling_whitelist" ) + { + Reporter::set_weird_sampling_whitelist(new_value); + } + return new_value; + } + +function weird_option_change_count(ID: string, new_value: count, location: string) : count + { + if ( ID == "Weird::sampling_threshold" ) + { + Reporter::set_weird_sampling_threshold(new_value); + } + else if ( ID == "Weird::sampling_rate" ) + { + Reporter::set_weird_sampling_rate(new_value); + } + return new_value; + } + +function weird_option_change_interval(ID: string, new_value: interval, location: string) : interval + { + if ( ID == "Weird::sampling_duration" ) + { + Reporter::set_weird_sampling_duration(new_value); + } + return new_value; + } + +event zeek_init() &priority=5 + { + Option::set_change_handler("Weird::sampling_whitelist", weird_option_change_sampling_whitelist, 5); + Option::set_change_handler("Weird::sampling_threshold", weird_option_change_count, 5); + Option::set_change_handler("Weird::sampling_rate", weird_option_change_count, 5); + Option::set_change_handler("Weird::sampling_duration", weird_option_change_interval, 5); + } diff --git a/scripts/base/frameworks/control/README b/scripts/base/frameworks/control/README index ba6998d43c..4927653f90 100644 --- a/scripts/base/frameworks/control/README +++ b/scripts/base/frameworks/control/README @@ -1,3 +1,3 @@ The control framework provides the foundation for providing "commands" -that can be taken remotely at runtime to modify a running Bro instance +that can be taken remotely at runtime to modify a running Zeek instance or collect information from the running instance. diff --git a/scripts/base/frameworks/control/__load__.bro b/scripts/base/frameworks/control/__load__.zeek similarity index 100% rename from scripts/base/frameworks/control/__load__.bro rename to scripts/base/frameworks/control/__load__.zeek diff --git a/scripts/base/frameworks/control/main.bro b/scripts/base/frameworks/control/main.bro deleted file mode 100644 index e374806b55..0000000000 --- a/scripts/base/frameworks/control/main.bro +++ /dev/null @@ -1,80 +0,0 @@ -##! The control framework provides the foundation for providing "commands" -##! that can be taken remotely at runtime to modify a running Bro instance -##! or collect information from the running instance. - -module Control; - -export { - ## The topic prefix used for exchanging control messages via Broker. - const topic_prefix = "bro/control"; - - ## Whether the controllee should call :bro:see:`Broker::listen`. - ## In a cluster, this isn't needed since the setup process calls it. - const controllee_listen = T &redef; - - ## The address of the host that will be controlled. - const host = 0.0.0.0 &redef; - - ## The port of the host that will be controlled. - const host_port = 0/tcp &redef; - - ## If :bro:id:`Control::host` is a non-global IPv6 address and - ## requires a specific :rfc:`4007` ``zone_id``, it can be set here. - const zone_id = "" &redef; - - ## The command that is being done. It's typically set on the - ## command line. - const cmd = "" &redef; - - ## This can be used by commands that take an argument. - const arg = "" &redef; - - ## The commands that can currently be given on the command line for - ## remote control. 
- const commands: set[string] = { - "id_value", - "peer_status", - "net_stats", - "configuration_update", - "shutdown", - } &redef; - - ## Variable IDs that are to be ignored by the update process. - const ignore_ids: set[string] = { }; - - ## Event for requesting the value of an ID (a variable). - global id_value_request: event(id: string); - ## Event for returning the value of an ID after an - ## :bro:id:`Control::id_value_request` event. - global id_value_response: event(id: string, val: string); - - ## Requests the current communication status. - global peer_status_request: event(); - ## Returns the current communication status. - global peer_status_response: event(s: string); - - ## Requests the current net_stats. - global net_stats_request: event(); - ## Returns the current net_stats. - global net_stats_response: event(s: string); - - ## Inform the remote Bro instance that it's configuration may have been - ## updated. - global configuration_update_request: event(); - ## This event is a wrapper and alias for the - ## :bro:id:`Control::configuration_update_request` event. - ## This event is also a primary hooking point for the control framework. - global configuration_update: event(); - ## Message in response to a configuration update request. - global configuration_update_response: event(); - - ## Requests that the Bro instance begins shutting down. - global shutdown_request: event(); - ## Message in response to a shutdown request. - global shutdown_response: event(); -} - -event terminate_event() - { - terminate(); - } diff --git a/scripts/base/frameworks/control/main.zeek b/scripts/base/frameworks/control/main.zeek new file mode 100644 index 0000000000..20cb93c234 --- /dev/null +++ b/scripts/base/frameworks/control/main.zeek @@ -0,0 +1,80 @@ +##! The control framework provides the foundation for providing "commands" +##! that can be taken remotely at runtime to modify a running Zeek instance +##! or collect information from the running instance. + +module Control; + +export { + ## The topic prefix used for exchanging control messages via Broker. + const topic_prefix = "zeek/control"; + + ## Whether the controllee should call :zeek:see:`Broker::listen`. + ## In a cluster, this isn't needed since the setup process calls it. + const controllee_listen = T &redef; + + ## The address of the host that will be controlled. + const host = 0.0.0.0 &redef; + + ## The port of the host that will be controlled. + const host_port = 0/tcp &redef; + + ## If :zeek:id:`Control::host` is a non-global IPv6 address and + ## requires a specific :rfc:`4007` ``zone_id``, it can be set here. + const zone_id = "" &redef; + + ## The command that is being done. It's typically set on the + ## command line. + const cmd = "" &redef; + + ## This can be used by commands that take an argument. + const arg = "" &redef; + + ## The commands that can currently be given on the command line for + ## remote control. + const commands: set[string] = { + "id_value", + "peer_status", + "net_stats", + "configuration_update", + "shutdown", + } &redef; + + ## Variable IDs that are to be ignored by the update process. + const ignore_ids: set[string] = { }; + + ## Event for requesting the value of an ID (a variable). + global id_value_request: event(id: string); + ## Event for returning the value of an ID after an + ## :zeek:id:`Control::id_value_request` event. + global id_value_response: event(id: string, val: string); + + ## Requests the current communication status. 
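	# How these request/response events are typically exercised (a rough
	# sketch, not normative): one Zeek instance loads the controllee policy
	# script and listens, while a short-lived second instance is pointed at
	# it from the command line, along the lines of:
	#
	#     zeek frameworks/control/controller Control::host=127.0.0.1 \
	#         Control::host_port=47760/tcp Control::cmd=net_stats
	#
	# Site-specific commands can be allowed by extending the redef-able
	# Control::commands set above.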
+ global peer_status_request: event(); + ## Returns the current communication status. + global peer_status_response: event(s: string); + + ## Requests the current net_stats. + global net_stats_request: event(); + ## Returns the current net_stats. + global net_stats_response: event(s: string); + + ## Inform the remote Zeek instance that it's configuration may have been + ## updated. + global configuration_update_request: event(); + ## This event is a wrapper and alias for the + ## :zeek:id:`Control::configuration_update_request` event. + ## This event is also a primary hooking point for the control framework. + global configuration_update: event(); + ## Message in response to a configuration update request. + global configuration_update_response: event(); + + ## Requests that the Zeek instance begins shutting down. + global shutdown_request: event(); + ## Message in response to a shutdown request. + global shutdown_response: event(); +} + +event terminate_event() + { + terminate(); + } diff --git a/scripts/base/frameworks/analyzer/__load__.bro b/scripts/base/frameworks/dpd/__load__.zeek similarity index 100% rename from scripts/base/frameworks/analyzer/__load__.bro rename to scripts/base/frameworks/dpd/__load__.zeek diff --git a/scripts/base/frameworks/dpd/main.bro b/scripts/base/frameworks/dpd/main.bro deleted file mode 100644 index cce8b362d5..0000000000 --- a/scripts/base/frameworks/dpd/main.bro +++ /dev/null @@ -1,104 +0,0 @@ -##! Activates port-independent protocol detection and selectively disables -##! analyzers if protocol violations occur. - -module DPD; - -export { - ## Add the DPD logging stream identifier. - redef enum Log::ID += { LOG }; - - ## The record type defining the columns to log in the DPD logging stream. - type Info: record { - ## Timestamp for when protocol analysis failed. - ts: time &log; - ## Connection unique ID. - uid: string &log; - ## Connection ID containing the 4-tuple which identifies endpoints. - id: conn_id &log; - ## Transport protocol for the violation. - proto: transport_proto &log; - ## The analyzer that generated the violation. - analyzer: string &log; - ## The textual reason for the analysis failure. - failure_reason: string &log; - - ## Disabled analyzer IDs. This is only for internal tracking - ## so as to not attempt to disable analyzers multiple times. - disabled_aids: set[count]; - }; - - ## Analyzers which you don't want to throw - option ignore_violations: set[Analyzer::Tag] = set(); - - ## Ignore violations which go this many bytes into the connection. - ## Set to 0 to never ignore protocol violations. - option ignore_violations_after = 10 * 1024; -} - -redef record connection += { - dpd: Info &optional; -}; - -event bro_init() &priority=5 - { - Log::create_stream(DPD::LOG, [$columns=Info, $path="dpd"]); - } - -event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=10 - { - local analyzer = Analyzer::name(atype); - - if ( fmt("-%s",analyzer) in c$service ) - delete c$service[fmt("-%s", analyzer)]; - - add c$service[analyzer]; - } - -event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, - reason: string) &priority=10 - { - local analyzer = Analyzer::name(atype); - # If the service hasn't been confirmed yet, don't generate a log message - # for the protocol violation. 
- if ( analyzer !in c$service ) - return; - - delete c$service[analyzer]; - add c$service[fmt("-%s", analyzer)]; - - local info: Info; - info$ts=network_time(); - info$uid=c$uid; - info$id=c$id; - info$proto=get_conn_transport_proto(c$id); - info$analyzer=analyzer; - info$failure_reason=reason; - c$dpd = info; - } - -event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, reason: string) &priority=5 - { - if ( !c?$dpd || aid in c$dpd$disabled_aids ) - return; - - local size = c$orig$size + c$resp$size; - if ( ignore_violations_after > 0 && size > ignore_violations_after ) - return; - - if ( atype in ignore_violations ) - return; - - # Disable the analyzer that raised the last core-generated event. - disable_analyzer(c$id, aid, F); - add c$dpd$disabled_aids[aid]; - } - -event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, - reason: string) &priority=-5 - { - if ( c?$dpd ) - { - Log::write(DPD::LOG, c$dpd); - delete c$dpd; - } - } diff --git a/scripts/base/frameworks/dpd/main.zeek b/scripts/base/frameworks/dpd/main.zeek new file mode 100644 index 0000000000..c6a3515bc3 --- /dev/null +++ b/scripts/base/frameworks/dpd/main.zeek @@ -0,0 +1,104 @@ +##! Activates port-independent protocol detection and selectively disables +##! analyzers if protocol violations occur. + +module DPD; + +export { + ## Add the DPD logging stream identifier. + redef enum Log::ID += { LOG }; + + ## The record type defining the columns to log in the DPD logging stream. + type Info: record { + ## Timestamp for when protocol analysis failed. + ts: time &log; + ## Connection unique ID. + uid: string &log; + ## Connection ID containing the 4-tuple which identifies endpoints. + id: conn_id &log; + ## Transport protocol for the violation. + proto: transport_proto &log; + ## The analyzer that generated the violation. + analyzer: string &log; + ## The textual reason for the analysis failure. + failure_reason: string &log; + + ## Disabled analyzer IDs. This is only for internal tracking + ## so as to not attempt to disable analyzers multiple times. + disabled_aids: set[count]; + }; + + ## Analyzers which you don't want to throw + option ignore_violations: set[Analyzer::Tag] = set(); + + ## Ignore violations which go this many bytes into the connection. + ## Set to 0 to never ignore protocol violations. + option ignore_violations_after = 10 * 1024; +} + +redef record connection += { + dpd: Info &optional; +}; + +event zeek_init() &priority=5 + { + Log::create_stream(DPD::LOG, [$columns=Info, $path="dpd"]); + } + +event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=10 + { + local analyzer = Analyzer::name(atype); + + if ( fmt("-%s",analyzer) in c$service ) + delete c$service[fmt("-%s", analyzer)]; + + add c$service[analyzer]; + } + +event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, + reason: string) &priority=10 + { + local analyzer = Analyzer::name(atype); + # If the service hasn't been confirmed yet, don't generate a log message + # for the protocol violation. 
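	# Tagging convention: protocol_confirmation above adds the analyzer name
	# to c$service (clearing any earlier "-<name>" entry), and a violation
	# replaces it with "-<name>", so c$service records both which analyzers
	# were confirmed and which subsequently failed. The lower-priority
	# handlers below then decide whether to disable the analyzer, honoring
	# the ignore_violations and ignore_violations_after options, and write
	# the dpd.log entry.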
+ if ( analyzer !in c$service ) + return; + + delete c$service[analyzer]; + add c$service[fmt("-%s", analyzer)]; + + local info: Info; + info$ts=network_time(); + info$uid=c$uid; + info$id=c$id; + info$proto=get_conn_transport_proto(c$id); + info$analyzer=analyzer; + info$failure_reason=reason; + c$dpd = info; + } + +event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, reason: string) &priority=5 + { + if ( !c?$dpd || aid in c$dpd$disabled_aids ) + return; + + local size = c$orig$size + c$resp$size; + if ( ignore_violations_after > 0 && size > ignore_violations_after ) + return; + + if ( atype in ignore_violations ) + return; + + # Disable the analyzer that raised the last core-generated event. + disable_analyzer(c$id, aid, F); + add c$dpd$disabled_aids[aid]; + } + +event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, + reason: string) &priority=-5 + { + if ( c?$dpd ) + { + Log::write(DPD::LOG, c$dpd); + delete c$dpd; + } + } diff --git a/scripts/base/frameworks/files/__load__.bro b/scripts/base/frameworks/files/__load__.bro deleted file mode 100644 index 2177d81e25..0000000000 --- a/scripts/base/frameworks/files/__load__.bro +++ /dev/null @@ -1,2 +0,0 @@ -@load ./main.bro -@load ./magic diff --git a/scripts/base/frameworks/files/__load__.zeek b/scripts/base/frameworks/files/__load__.zeek new file mode 100644 index 0000000000..2da9cffc66 --- /dev/null +++ b/scripts/base/frameworks/files/__load__.zeek @@ -0,0 +1,2 @@ +@load ./main +@load ./magic diff --git a/scripts/base/frameworks/files/magic/__load__.bro b/scripts/base/frameworks/files/magic/__load__.zeek similarity index 100% rename from scripts/base/frameworks/files/magic/__load__.bro rename to scripts/base/frameworks/files/magic/__load__.zeek diff --git a/scripts/base/frameworks/files/main.bro b/scripts/base/frameworks/files/main.bro deleted file mode 100644 index d3d37b30ab..0000000000 --- a/scripts/base/frameworks/files/main.bro +++ /dev/null @@ -1,545 +0,0 @@ -##! An interface for driving the analysis of files, possibly independent of -##! any network protocol over which they're transported. - -@load base/bif/file_analysis.bif -@load base/frameworks/analyzer -@load base/frameworks/logging -@load base/utils/site - -module Files; - -export { - redef enum Log::ID += { - ## Logging stream for file analysis. - LOG - }; - - ## A structure which parameterizes a type of file analysis. - type AnalyzerArgs: record { - ## An event which will be generated for all new file contents, - ## chunk-wise. Used when *tag* (in the - ## :bro:see:`Files::add_analyzer` function) is - ## :bro:see:`Files::ANALYZER_DATA_EVENT`. - chunk_event: event(f: fa_file, data: string, off: count) &optional; - - ## An event which will be generated for all new file contents, - ## stream-wise. Used when *tag* is - ## :bro:see:`Files::ANALYZER_DATA_EVENT`. - stream_event: event(f: fa_file, data: string) &optional; - } &redef; - - ## Contains all metadata related to the analysis of a given file. - ## For the most part, fields here are derived from ones of the same name - ## in :bro:see:`fa_file`. - type Info: record { - ## The time when the file was first seen. - ts: time &log; - - ## An identifier associated with a single file. - fuid: string &log; - - ## If this file was transferred over a network - ## connection this should show the host or hosts that - ## the data sourced from. 
- tx_hosts: set[addr] &default=addr_set() &log; - - ## If this file was transferred over a network - ## connection this should show the host or hosts that - ## the data traveled to. - rx_hosts: set[addr] &default=addr_set() &log; - - ## Connection UIDs over which the file was transferred. - conn_uids: set[string] &default=string_set() &log; - - ## An identification of the source of the file data. E.g. it - ## may be a network protocol over which it was transferred, or a - ## local file path which was read, or some other input source. - source: string &log &optional; - - ## A value to represent the depth of this file in relation - ## to its source. In SMTP, it is the depth of the MIME - ## attachment on the message. In HTTP, it is the depth of the - ## request within the TCP connection. - depth: count &default=0 &log; - - ## A set of analysis types done during the file analysis. - analyzers: set[string] &default=string_set() &log; - - ## A mime type provided by the strongest file magic signature - ## match against the *bof_buffer* field of :bro:see:`fa_file`, - ## or in the cases where no buffering of the beginning of file - ## occurs, an initial guess of the mime type based on the first - ## data seen. - mime_type: string &log &optional; - - ## A filename for the file if one is available from the source - ## for the file. These will frequently come from - ## "Content-Disposition" headers in network protocols. - filename: string &log &optional; - - ## The duration the file was analyzed for. - duration: interval &log &default=0secs; - - ## If the source of this file is a network connection, this field - ## indicates if the data originated from the local network or not as - ## determined by the configured :bro:see:`Site::local_nets`. - local_orig: bool &log &optional; - - ## If the source of this file is a network connection, this field - ## indicates if the file is being sent by the originator of the - ## connection or the responder. - is_orig: bool &log &optional; - - ## Number of bytes provided to the file analysis engine for the file. - seen_bytes: count &log &default=0; - - ## Total number of bytes that are supposed to comprise the full file. - total_bytes: count &log &optional; - - ## The number of bytes in the file stream that were completely missed - ## during the process of analysis e.g. due to dropped packets. - missing_bytes: count &log &default=0; - - ## The number of bytes in the file stream that were not delivered to - ## stream file analyzers. This could be overlapping bytes or - ## bytes that couldn't be reassembled. - overflow_bytes: count &log &default=0; - - ## Whether the file analysis timed out at least once for the file. - timedout: bool &log &default=F; - - ## Identifier associated with a container file from which this one was - ## extracted as part of the file analysis. - parent_fuid: string &log &optional; - } &redef; - - ## A table that can be used to disable file analysis completely for - ## any files transferred over given network protocol analyzers. - const disable: table[Files::Tag] of bool = table() &redef; - - ## The salt concatenated to unique file handle strings generated by - ## :bro:see:`get_file_handle` before hashing them in to a file id - ## (the *id* field of :bro:see:`fa_file`). - ## Provided to help mitigate the possibility of manipulating parts of - ## network connections that factor in to the file handle in order to - ## generate two handles that would hash to the same file id. - const salt = "I recommend changing this." 
&redef; - - ## Decide if you want to automatically attached analyzers to - ## files based on the detected mime type of the file. - const analyze_by_mime_type_automatically = T &redef; - - ## The default setting for file reassembly. - option enable_reassembler = T; - - ## The default per-file reassembly buffer size. - const reassembly_buffer_size = 524288 &redef; - - ## Lookup to see if a particular file id exists and is still valid. - ## - ## fuid: the file id. - ## - ## Returns: T if the file uid is known. - global file_exists: function(fuid: string): bool; - - ## Lookup an :bro:see:`fa_file` record with the file id. - ## - ## fuid: the file id. - ## - ## Returns: the associated :bro:see:`fa_file` record. - global lookup_file: function(fuid: string): fa_file; - - ## Allows the file reassembler to be used if it's necessary because the - ## file is transferred out of order. - ## - ## f: the file. - global enable_reassembly: function(f: fa_file); - - ## Disables the file reassembler on this file. If the file is not - ## transferred out of order this will have no effect. - ## - ## f: the file. - global disable_reassembly: function(f: fa_file); - - ## Set the maximum size the reassembly buffer is allowed to grow - ## for the given file. - ## - ## f: the file. - ## - ## max: Maximum allowed size of the reassembly buffer. - global set_reassembly_buffer_size: function(f: fa_file, max: count); - - ## Sets the *timeout_interval* field of :bro:see:`fa_file`, which is - ## used to determine the length of inactivity that is allowed for a file - ## before internal state related to it is cleaned up. When used within - ## a :bro:see:`file_timeout` handler, the analysis will delay timing out - ## again for the period specified by *t*. - ## - ## f: the file. - ## - ## t: the amount of time the file can remain inactive before discarding. - ## - ## Returns: true if the timeout interval was set, or false if analysis - ## for the file isn't currently active. - global set_timeout_interval: function(f: fa_file, t: interval): bool; - - ## Adds an analyzer to the analysis of a given file. - ## - ## f: the file. - ## - ## tag: the analyzer type. - ## - ## args: any parameters the analyzer takes. - ## - ## Returns: true if the analyzer will be added, or false if analysis - ## for the file isn't currently active or the *args* - ## were invalid for the analyzer type. - global add_analyzer: function(f: fa_file, - tag: Files::Tag, - args: AnalyzerArgs &default=AnalyzerArgs()): bool; - - ## Removes an analyzer from the analysis of a given file. - ## - ## f: the file. - ## - ## tag: the analyzer type. - ## - ## args: the analyzer (type and args) to remove. - ## - ## Returns: true if the analyzer will be removed, or false if analysis - ## for the file isn't currently active. - global remove_analyzer: function(f: fa_file, - tag: Files::Tag, - args: AnalyzerArgs &default=AnalyzerArgs()): bool; - - ## Stops/ignores any further analysis of a given file. - ## - ## f: the file. - ## - ## Returns: true if analysis for the given file will be ignored for the - ## rest of its contents, or false if analysis for the file - ## isn't currently active. - global stop: function(f: fa_file): bool; - - ## Translates a file analyzer enum value to a string with the - ## analyzer's name. - ## - ## tag: The analyzer tag. - ## - ## Returns: The analyzer name corresponding to the tag. - global analyzer_name: function(tag: Files::Tag): string; - - ## Provides a text description regarding metadata of the file. 
- ## For example, with HTTP it would return a URL. - ## - ## f: The file to be described. - ## - ## Returns: a text description regarding metadata of the file. - global describe: function(f: fa_file): string; - - type ProtoRegistration: record { - ## A callback to generate a file handle on demand when - ## one is needed by the core. - get_file_handle: function(c: connection, is_orig: bool): string; - - ## A callback to "describe" a file. In the case of an HTTP - ## transfer the most obvious description would be the URL. - ## It's like an extremely compressed version of the normal log. - describe: function(f: fa_file): string - &default=function(f: fa_file): string { return ""; }; - }; - - ## Register callbacks for protocols that work with the Files framework. - ## The callbacks must uniquely identify a file and each protocol can - ## only have a single callback registered for it. - ## - ## tag: Tag for the protocol analyzer having a callback being registered. - ## - ## reg: A :bro:see:`Files::ProtoRegistration` record. - ## - ## Returns: true if the protocol being registered was not previously registered. - global register_protocol: function(tag: Analyzer::Tag, reg: ProtoRegistration): bool; - - ## Register a callback for file analyzers to use if they need to do some - ## manipulation when they are being added to a file before the core code - ## takes over. This is unlikely to be interesting for users and should - ## only be called by file analyzer authors but is *not required*. - ## - ## tag: Tag for the file analyzer. - ## - ## callback: Function to execute when the given file analyzer is being added. - global register_analyzer_add_callback: function(tag: Files::Tag, callback: function(f: fa_file, args: AnalyzerArgs)); - - ## Registers a set of MIME types for an analyzer. If a future connection on one of - ## these types is seen, the analyzer will be automatically assigned to parsing it. - ## The function *adds* to all MIME types already registered, it doesn't replace - ## them. - ## - ## tag: The tag of the analyzer. - ## - ## mts: The set of MIME types, each in the form "foo/bar" (case-insensitive). - ## - ## Returns: True if the MIME types were successfully registered. - global register_for_mime_types: function(tag: Files::Tag, mts: set[string]) : bool; - - ## Registers a MIME type for an analyzer. If a future file with this type is seen, - ## the analyzer will be automatically assigned to parsing it. The function *adds* - ## to all MIME types already registered, it doesn't replace them. - ## - ## tag: The tag of the analyzer. - ## - ## mt: The MIME type in the form "foo/bar" (case-insensitive). - ## - ## Returns: True if the MIME type was successfully registered. - global register_for_mime_type: function(tag: Files::Tag, mt: string) : bool; - - ## Returns a set of all MIME types currently registered for a specific analyzer. - ## - ## tag: The tag of the analyzer. - ## - ## Returns: The set of MIME types. - global registered_mime_types: function(tag: Files::Tag) : set[string]; - - ## Returns a table of all MIME-type-to-analyzer mappings currently registered. - ## - ## Returns: A table mapping each analyzer to the set of MIME types - ## registered for it. - global all_registered_mime_types: function() : table[Files::Tag] of set[string]; - - ## Event that can be handled to access the Info record as it is sent on - ## to the logging framework. 
- global log_files: event(rec: Info); -} - -redef record fa_file += { - info: Info &optional; -}; - -# Store the callbacks for protocol analyzers that have files. -global registered_protocols: table[Analyzer::Tag] of ProtoRegistration = table(); - -# Store the MIME type to analyzer mappings. -global mime_types: table[Files::Tag] of set[string]; -global mime_type_to_analyzers: table[string] of set[Files::Tag]; - -global analyzer_add_callbacks: table[Files::Tag] of function(f: fa_file, args: AnalyzerArgs) = table(); - -event bro_init() &priority=5 - { - Log::create_stream(Files::LOG, [$columns=Info, $ev=log_files, $path="files"]); - } - -function set_info(f: fa_file) - { - if ( ! f?$info ) - { - local tmp: Info = Info($ts=f$last_active, - $fuid=f$id); - f$info = tmp; - } - - if ( f?$parent_id ) - f$info$parent_fuid = f$parent_id; - if ( f?$source ) - f$info$source = f$source; - f$info$duration = f$last_active - f$info$ts; - f$info$seen_bytes = f$seen_bytes; - if ( f?$total_bytes ) - f$info$total_bytes = f$total_bytes; - f$info$missing_bytes = f$missing_bytes; - f$info$overflow_bytes = f$overflow_bytes; - if ( f?$is_orig ) - f$info$is_orig = f$is_orig; - } - -function file_exists(fuid: string): bool - { - return __file_exists(fuid); - } - -function lookup_file(fuid: string): fa_file - { - return __lookup_file(fuid); - } - -function set_timeout_interval(f: fa_file, t: interval): bool - { - return __set_timeout_interval(f$id, t); - } - -function enable_reassembly(f: fa_file) - { - __enable_reassembly(f$id); - } - -function disable_reassembly(f: fa_file) - { - __disable_reassembly(f$id); - } - -function set_reassembly_buffer_size(f: fa_file, max: count) - { - __set_reassembly_buffer(f$id, max); - } - -function add_analyzer(f: fa_file, tag: Files::Tag, args: AnalyzerArgs): bool - { - add f$info$analyzers[Files::analyzer_name(tag)]; - - if ( tag in analyzer_add_callbacks ) - analyzer_add_callbacks[tag](f, args); - - if ( ! __add_analyzer(f$id, tag, args) ) - { - Reporter::warning(fmt("Analyzer %s not added successfully to file %s.", tag, f$id)); - return F; - } - return T; - } - -function register_analyzer_add_callback(tag: Files::Tag, callback: function(f: fa_file, args: AnalyzerArgs)) - { - analyzer_add_callbacks[tag] = callback; - } - -function remove_analyzer(f: fa_file, tag: Files::Tag, args: AnalyzerArgs): bool - { - return __remove_analyzer(f$id, tag, args); - } - -function stop(f: fa_file): bool - { - return __stop(f$id); - } - -function analyzer_name(tag: Files::Tag): string - { - return __analyzer_name(tag); - } - -function register_protocol(tag: Analyzer::Tag, reg: ProtoRegistration): bool - { - local result = (tag !in registered_protocols); - registered_protocols[tag] = reg; - return result; - } - -function register_for_mime_types(tag: Files::Tag, mime_types: set[string]) : bool - { - local rc = T; - - for ( mt in mime_types ) - { - if ( ! register_for_mime_type(tag, mt) ) - rc = F; - } - - return rc; - } - -function register_for_mime_type(tag: Files::Tag, mt: string) : bool - { - if ( tag !in mime_types ) - { - mime_types[tag] = set(); - } - add mime_types[tag][mt]; - - if ( mt !in mime_type_to_analyzers ) - { - mime_type_to_analyzers[mt] = set(); - } - add mime_type_to_analyzers[mt][tag]; - - return T; - } - -function registered_mime_types(tag: Files::Tag) : set[string] - { - return tag in mime_types ? 
mime_types[tag] : set(); - } - -function all_registered_mime_types(): table[Files::Tag] of set[string] - { - return mime_types; - } - -function describe(f: fa_file): string - { - local tag = Analyzer::get_tag(f$source); - if ( tag !in registered_protocols ) - return ""; - - local handler = registered_protocols[tag]; - return handler$describe(f); - } - -event get_file_handle(tag: Files::Tag, c: connection, is_orig: bool) &priority=5 - { - if ( tag !in registered_protocols ) - return; - - local handler = registered_protocols[tag]; - set_file_handle(handler$get_file_handle(c, is_orig)); - } - -event file_new(f: fa_file) &priority=10 - { - set_info(f); - - if ( enable_reassembler ) - { - Files::enable_reassembly(f); - Files::set_reassembly_buffer_size(f, reassembly_buffer_size); - } - } - -event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=10 - { - set_info(f); - - add f$info$conn_uids[c$uid]; - local cid = c$id; - add f$info$tx_hosts[f$is_orig ? cid$orig_h : cid$resp_h]; - if( |Site::local_nets| > 0 ) - f$info$local_orig=Site::is_local_addr(f$is_orig ? cid$orig_h : cid$resp_h); - - add f$info$rx_hosts[f$is_orig ? cid$resp_h : cid$orig_h]; - } - -event file_sniff(f: fa_file, meta: fa_metadata) &priority=10 - { - set_info(f); - - if ( ! meta?$mime_type ) - return; - - f$info$mime_type = meta$mime_type; - - if ( analyze_by_mime_type_automatically && - meta$mime_type in mime_type_to_analyzers ) - { - local analyzers = mime_type_to_analyzers[meta$mime_type]; - for ( a in analyzers ) - { - add f$info$analyzers[Files::analyzer_name(a)]; - Files::add_analyzer(f, a); - } - } - } - -event file_timeout(f: fa_file) &priority=10 - { - set_info(f); - f$info$timedout = T; - } - -event file_state_remove(f: fa_file) &priority=10 - { - set_info(f); - } - -event file_state_remove(f: fa_file) &priority=-10 - { - Log::write(Files::LOG, f$info); - } diff --git a/scripts/base/frameworks/files/main.zeek b/scripts/base/frameworks/files/main.zeek new file mode 100644 index 0000000000..591d6724e6 --- /dev/null +++ b/scripts/base/frameworks/files/main.zeek @@ -0,0 +1,545 @@ +##! An interface for driving the analysis of files, possibly independent of +##! any network protocol over which they're transported. + +@load base/bif/file_analysis.bif +@load base/frameworks/analyzer +@load base/frameworks/logging +@load base/utils/site + +module Files; + +export { + redef enum Log::ID += { + ## Logging stream for file analysis. + LOG + }; + + ## A structure which parameterizes a type of file analysis. + type AnalyzerArgs: record { + ## An event which will be generated for all new file contents, + ## chunk-wise. Used when *tag* (in the + ## :zeek:see:`Files::add_analyzer` function) is + ## :zeek:see:`Files::ANALYZER_DATA_EVENT`. + chunk_event: event(f: fa_file, data: string, off: count) &optional; + + ## An event which will be generated for all new file contents, + ## stream-wise. Used when *tag* is + ## :zeek:see:`Files::ANALYZER_DATA_EVENT`. + stream_event: event(f: fa_file, data: string) &optional; + } &redef; + + ## Contains all metadata related to the analysis of a given file. + ## For the most part, fields here are derived from ones of the same name + ## in :zeek:see:`fa_file`. + type Info: record { + ## The time when the file was first seen. + ts: time &log; + + ## An identifier associated with a single file. + fuid: string &log; + + ## If this file was transferred over a network + ## connection this should show the host or hosts that + ## the data sourced from. 
+ tx_hosts: set[addr] &default=addr_set() &log; + + ## If this file was transferred over a network + ## connection this should show the host or hosts that + ## the data traveled to. + rx_hosts: set[addr] &default=addr_set() &log; + + ## Connection UIDs over which the file was transferred. + conn_uids: set[string] &default=string_set() &log; + + ## An identification of the source of the file data. E.g. it + ## may be a network protocol over which it was transferred, or a + ## local file path which was read, or some other input source. + source: string &log &optional; + + ## A value to represent the depth of this file in relation + ## to its source. In SMTP, it is the depth of the MIME + ## attachment on the message. In HTTP, it is the depth of the + ## request within the TCP connection. + depth: count &default=0 &log; + + ## A set of analysis types done during the file analysis. + analyzers: set[string] &default=string_set() &log; + + ## A mime type provided by the strongest file magic signature + ## match against the *bof_buffer* field of :zeek:see:`fa_file`, + ## or in the cases where no buffering of the beginning of file + ## occurs, an initial guess of the mime type based on the first + ## data seen. + mime_type: string &log &optional; + + ## A filename for the file if one is available from the source + ## for the file. These will frequently come from + ## "Content-Disposition" headers in network protocols. + filename: string &log &optional; + + ## The duration the file was analyzed for. + duration: interval &log &default=0secs; + + ## If the source of this file is a network connection, this field + ## indicates if the data originated from the local network or not as + ## determined by the configured :zeek:see:`Site::local_nets`. + local_orig: bool &log &optional; + + ## If the source of this file is a network connection, this field + ## indicates if the file is being sent by the originator of the + ## connection or the responder. + is_orig: bool &log &optional; + + ## Number of bytes provided to the file analysis engine for the file. + seen_bytes: count &log &default=0; + + ## Total number of bytes that are supposed to comprise the full file. + total_bytes: count &log &optional; + + ## The number of bytes in the file stream that were completely missed + ## during the process of analysis e.g. due to dropped packets. + missing_bytes: count &log &default=0; + + ## The number of bytes in the file stream that were not delivered to + ## stream file analyzers. This could be overlapping bytes or + ## bytes that couldn't be reassembled. + overflow_bytes: count &log &default=0; + + ## Whether the file analysis timed out at least once for the file. + timedout: bool &log &default=F; + + ## Identifier associated with a container file from which this one was + ## extracted as part of the file analysis. + parent_fuid: string &log &optional; + } &redef; + + ## A table that can be used to disable file analysis completely for + ## any files transferred over given network protocol analyzers. + const disable: table[Files::Tag] of bool = table() &redef; + + ## The salt concatenated to unique file handle strings generated by + ## :zeek:see:`get_file_handle` before hashing them in to a file id + ## (the *id* field of :zeek:see:`fa_file`). + ## Provided to help mitigate the possibility of manipulating parts of + ## network connections that factor in to the file handle in order to + ## generate two handles that would hash to the same file id. + const salt = "I recommend changing this." 
&redef; + + ## Decide if you want to automatically attached analyzers to + ## files based on the detected mime type of the file. + const analyze_by_mime_type_automatically = T &redef; + + ## The default setting for file reassembly. + option enable_reassembler = T; + + ## The default per-file reassembly buffer size. + const reassembly_buffer_size = 524288 &redef; + + ## Lookup to see if a particular file id exists and is still valid. + ## + ## fuid: the file id. + ## + ## Returns: T if the file uid is known. + global file_exists: function(fuid: string): bool; + + ## Lookup an :zeek:see:`fa_file` record with the file id. + ## + ## fuid: the file id. + ## + ## Returns: the associated :zeek:see:`fa_file` record. + global lookup_file: function(fuid: string): fa_file; + + ## Allows the file reassembler to be used if it's necessary because the + ## file is transferred out of order. + ## + ## f: the file. + global enable_reassembly: function(f: fa_file); + + ## Disables the file reassembler on this file. If the file is not + ## transferred out of order this will have no effect. + ## + ## f: the file. + global disable_reassembly: function(f: fa_file); + + ## Set the maximum size the reassembly buffer is allowed to grow + ## for the given file. + ## + ## f: the file. + ## + ## max: Maximum allowed size of the reassembly buffer. + global set_reassembly_buffer_size: function(f: fa_file, max: count); + + ## Sets the *timeout_interval* field of :zeek:see:`fa_file`, which is + ## used to determine the length of inactivity that is allowed for a file + ## before internal state related to it is cleaned up. When used within + ## a :zeek:see:`file_timeout` handler, the analysis will delay timing out + ## again for the period specified by *t*. + ## + ## f: the file. + ## + ## t: the amount of time the file can remain inactive before discarding. + ## + ## Returns: true if the timeout interval was set, or false if analysis + ## for the file isn't currently active. + global set_timeout_interval: function(f: fa_file, t: interval): bool; + + ## Adds an analyzer to the analysis of a given file. + ## + ## f: the file. + ## + ## tag: the analyzer type. + ## + ## args: any parameters the analyzer takes. + ## + ## Returns: true if the analyzer will be added, or false if analysis + ## for the file isn't currently active or the *args* + ## were invalid for the analyzer type. + global add_analyzer: function(f: fa_file, + tag: Files::Tag, + args: AnalyzerArgs &default=AnalyzerArgs()): bool; + + ## Removes an analyzer from the analysis of a given file. + ## + ## f: the file. + ## + ## tag: the analyzer type. + ## + ## args: the analyzer (type and args) to remove. + ## + ## Returns: true if the analyzer will be removed, or false if analysis + ## for the file isn't currently active. + global remove_analyzer: function(f: fa_file, + tag: Files::Tag, + args: AnalyzerArgs &default=AnalyzerArgs()): bool; + + ## Stops/ignores any further analysis of a given file. + ## + ## f: the file. + ## + ## Returns: true if analysis for the given file will be ignored for the + ## rest of its contents, or false if analysis for the file + ## isn't currently active. + global stop: function(f: fa_file): bool; + + ## Translates a file analyzer enum value to a string with the + ## analyzer's name. + ## + ## tag: The analyzer tag. + ## + ## Returns: The analyzer name corresponding to the tag. + global analyzer_name: function(tag: Files::Tag): string; + + ## Provides a text description regarding metadata of the file. 
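+	## The text comes from the *describe* callback that the file's source
+	## protocol registered via :zeek:see:`Files::register_protocol`; if no
+	## callback is registered for the source, an empty string is returned.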
+ ## For example, with HTTP it would return a URL. + ## + ## f: The file to be described. + ## + ## Returns: a text description regarding metadata of the file. + global describe: function(f: fa_file): string; + + type ProtoRegistration: record { + ## A callback to generate a file handle on demand when + ## one is needed by the core. + get_file_handle: function(c: connection, is_orig: bool): string; + + ## A callback to "describe" a file. In the case of an HTTP + ## transfer the most obvious description would be the URL. + ## It's like an extremely compressed version of the normal log. + describe: function(f: fa_file): string + &default=function(f: fa_file): string { return ""; }; + }; + + ## Register callbacks for protocols that work with the Files framework. + ## The callbacks must uniquely identify a file and each protocol can + ## only have a single callback registered for it. + ## + ## tag: Tag for the protocol analyzer having a callback being registered. + ## + ## reg: A :zeek:see:`Files::ProtoRegistration` record. + ## + ## Returns: true if the protocol being registered was not previously registered. + global register_protocol: function(tag: Analyzer::Tag, reg: ProtoRegistration): bool; + + ## Register a callback for file analyzers to use if they need to do some + ## manipulation when they are being added to a file before the core code + ## takes over. This is unlikely to be interesting for users and should + ## only be called by file analyzer authors but is *not required*. + ## + ## tag: Tag for the file analyzer. + ## + ## callback: Function to execute when the given file analyzer is being added. + global register_analyzer_add_callback: function(tag: Files::Tag, callback: function(f: fa_file, args: AnalyzerArgs)); + + ## Registers a set of MIME types for an analyzer. If a future connection on one of + ## these types is seen, the analyzer will be automatically assigned to parsing it. + ## The function *adds* to all MIME types already registered, it doesn't replace + ## them. + ## + ## tag: The tag of the analyzer. + ## + ## mts: The set of MIME types, each in the form "foo/bar" (case-insensitive). + ## + ## Returns: True if the MIME types were successfully registered. + global register_for_mime_types: function(tag: Files::Tag, mts: set[string]) : bool; + + ## Registers a MIME type for an analyzer. If a future file with this type is seen, + ## the analyzer will be automatically assigned to parsing it. The function *adds* + ## to all MIME types already registered, it doesn't replace them. + ## + ## tag: The tag of the analyzer. + ## + ## mt: The MIME type in the form "foo/bar" (case-insensitive). + ## + ## Returns: True if the MIME type was successfully registered. + global register_for_mime_type: function(tag: Files::Tag, mt: string) : bool; + + ## Returns a set of all MIME types currently registered for a specific analyzer. + ## + ## tag: The tag of the analyzer. + ## + ## Returns: The set of MIME types. + global registered_mime_types: function(tag: Files::Tag) : set[string]; + + ## Returns a table of all MIME-type-to-analyzer mappings currently registered. + ## + ## Returns: A table mapping each analyzer to the set of MIME types + ## registered for it. + global all_registered_mime_types: function() : table[Files::Tag] of set[string]; + + ## Event that can be handled to access the Info record as it is sent on + ## to the logging framework. 
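+	## A handler along these lines (an illustrative sketch, not part of
+	## this change) runs as each file's record is written to the log, e.g.
+	## to flag Windows executables::
+	##
+	##     event Files::log_files(rec: Files::Info)
+	##         {
+	##         if ( rec?$mime_type && rec$mime_type == "application/x-dosexec" )
+	##             print fmt("executable file %s, %d bytes seen", rec$fuid, rec$seen_bytes);
+	##         }
+	##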
+ global log_files: event(rec: Info); +} + +redef record fa_file += { + info: Info &optional; +}; + +# Store the callbacks for protocol analyzers that have files. +global registered_protocols: table[Analyzer::Tag] of ProtoRegistration = table(); + +# Store the MIME type to analyzer mappings. +global mime_types: table[Files::Tag] of set[string]; +global mime_type_to_analyzers: table[string] of set[Files::Tag]; + +global analyzer_add_callbacks: table[Files::Tag] of function(f: fa_file, args: AnalyzerArgs) = table(); + +event zeek_init() &priority=5 + { + Log::create_stream(Files::LOG, [$columns=Info, $ev=log_files, $path="files"]); + } + +function set_info(f: fa_file) + { + if ( ! f?$info ) + { + local tmp: Info = Info($ts=f$last_active, + $fuid=f$id); + f$info = tmp; + } + + if ( f?$parent_id ) + f$info$parent_fuid = f$parent_id; + if ( f?$source ) + f$info$source = f$source; + f$info$duration = f$last_active - f$info$ts; + f$info$seen_bytes = f$seen_bytes; + if ( f?$total_bytes ) + f$info$total_bytes = f$total_bytes; + f$info$missing_bytes = f$missing_bytes; + f$info$overflow_bytes = f$overflow_bytes; + if ( f?$is_orig ) + f$info$is_orig = f$is_orig; + } + +function file_exists(fuid: string): bool + { + return __file_exists(fuid); + } + +function lookup_file(fuid: string): fa_file + { + return __lookup_file(fuid); + } + +function set_timeout_interval(f: fa_file, t: interval): bool + { + return __set_timeout_interval(f$id, t); + } + +function enable_reassembly(f: fa_file) + { + __enable_reassembly(f$id); + } + +function disable_reassembly(f: fa_file) + { + __disable_reassembly(f$id); + } + +function set_reassembly_buffer_size(f: fa_file, max: count) + { + __set_reassembly_buffer(f$id, max); + } + +function add_analyzer(f: fa_file, tag: Files::Tag, args: AnalyzerArgs): bool + { + add f$info$analyzers[Files::analyzer_name(tag)]; + + if ( tag in analyzer_add_callbacks ) + analyzer_add_callbacks[tag](f, args); + + if ( ! __add_analyzer(f$id, tag, args) ) + { + Reporter::warning(fmt("Analyzer %s not added successfully to file %s.", tag, f$id)); + return F; + } + return T; + } + +function register_analyzer_add_callback(tag: Files::Tag, callback: function(f: fa_file, args: AnalyzerArgs)) + { + analyzer_add_callbacks[tag] = callback; + } + +function remove_analyzer(f: fa_file, tag: Files::Tag, args: AnalyzerArgs): bool + { + return __remove_analyzer(f$id, tag, args); + } + +function stop(f: fa_file): bool + { + return __stop(f$id); + } + +function analyzer_name(tag: Files::Tag): string + { + return __analyzer_name(tag); + } + +function register_protocol(tag: Analyzer::Tag, reg: ProtoRegistration): bool + { + local result = (tag !in registered_protocols); + registered_protocols[tag] = reg; + return result; + } + +function register_for_mime_types(tag: Files::Tag, mime_types: set[string]) : bool + { + local rc = T; + + for ( mt in mime_types ) + { + if ( ! register_for_mime_type(tag, mt) ) + rc = F; + } + + return rc; + } + +function register_for_mime_type(tag: Files::Tag, mt: string) : bool + { + if ( tag !in mime_types ) + { + mime_types[tag] = set(); + } + add mime_types[tag][mt]; + + if ( mt !in mime_type_to_analyzers ) + { + mime_type_to_analyzers[mt] = set(); + } + add mime_type_to_analyzers[mt][tag]; + + return T; + } + +function registered_mime_types(tag: Files::Tag) : set[string] + { + return tag in mime_types ? 
mime_types[tag] : set(); + } + +function all_registered_mime_types(): table[Files::Tag] of set[string] + { + return mime_types; + } + +function describe(f: fa_file): string + { + local tag = Analyzer::get_tag(f$source); + if ( tag !in registered_protocols ) + return ""; + + local handler = registered_protocols[tag]; + return handler$describe(f); + } + +event get_file_handle(tag: Files::Tag, c: connection, is_orig: bool) &priority=5 + { + if ( tag !in registered_protocols ) + return; + + local handler = registered_protocols[tag]; + set_file_handle(handler$get_file_handle(c, is_orig)); + } + +event file_new(f: fa_file) &priority=10 + { + set_info(f); + + if ( enable_reassembler ) + { + Files::enable_reassembly(f); + Files::set_reassembly_buffer_size(f, reassembly_buffer_size); + } + } + +event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=10 + { + set_info(f); + + add f$info$conn_uids[c$uid]; + local cid = c$id; + add f$info$tx_hosts[f$is_orig ? cid$orig_h : cid$resp_h]; + if( |Site::local_nets| > 0 ) + f$info$local_orig=Site::is_local_addr(f$is_orig ? cid$orig_h : cid$resp_h); + + add f$info$rx_hosts[f$is_orig ? cid$resp_h : cid$orig_h]; + } + +event file_sniff(f: fa_file, meta: fa_metadata) &priority=10 + { + set_info(f); + + if ( ! meta?$mime_type ) + return; + + f$info$mime_type = meta$mime_type; + + if ( analyze_by_mime_type_automatically && + meta$mime_type in mime_type_to_analyzers ) + { + local analyzers = mime_type_to_analyzers[meta$mime_type]; + for ( a in analyzers ) + { + add f$info$analyzers[Files::analyzer_name(a)]; + Files::add_analyzer(f, a); + } + } + } + +event file_timeout(f: fa_file) &priority=10 + { + set_info(f); + f$info$timedout = T; + } + +event file_state_remove(f: fa_file) &priority=10 + { + set_info(f); + } + +event file_state_remove(f: fa_file) &priority=-10 + { + Log::write(Files::LOG, f$info); + } diff --git a/scripts/base/frameworks/input/README b/scripts/base/frameworks/input/README index 6f00f5f6eb..3ce3232c19 100644 --- a/scripts/base/frameworks/input/README +++ b/scripts/base/frameworks/input/README @@ -1,2 +1,2 @@ The input framework provides a way to read previously stored data either as -an event stream or into a Bro table. +an event stream or into a Zeek table. diff --git a/scripts/base/frameworks/input/__load__.bro b/scripts/base/frameworks/input/__load__.zeek similarity index 100% rename from scripts/base/frameworks/input/__load__.bro rename to scripts/base/frameworks/input/__load__.zeek diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro deleted file mode 100644 index 0839602a7a..0000000000 --- a/scripts/base/frameworks/input/main.bro +++ /dev/null @@ -1,281 +0,0 @@ -##! The input framework provides a way to read previously stored data either -##! as an event stream or into a Bro table. - -module Input; - -export { - ## Type that describes what kind of change occurred. - type Event: enum { - ## New data has been imported. - EVENT_NEW = 0, - ## Existing data has been changed. - EVENT_CHANGED = 1, - ## Previously existing data has been removed. - EVENT_REMOVED = 2, - }; - - ## Type that defines the input stream read mode. - type Mode: enum { - ## Do not automatically reread the file after it has been read. - MANUAL = 0, - ## Reread the entire file each time a change is found. - REREAD = 1, - ## Read data from end of file each time new data is appended. - STREAM = 2 - }; - - ## The default input reader used. Defaults to `READER_ASCII`. 
- option default_reader = READER_ASCII; - - ## The default reader mode used. Defaults to `MANUAL`. - option default_mode = MANUAL; - - ## Separator between fields. - ## Please note that the separator has to be exactly one character long. - ## Individual readers can use a different value. - const separator = "\t" &redef; - - ## Separator between set elements. - ## Please note that the separator has to be exactly one character long. - ## Individual readers can use a different value. - const set_separator = "," &redef; - - ## String to use for empty fields. - ## Individual readers can use a different value. - const empty_field = "(empty)" &redef; - - ## String to use for an unset &optional field. - ## Individual readers can use a different value. - const unset_field = "-" &redef; - - ## Flag that controls if the input framework accepts records - ## that contain types that are not supported (at the moment - ## file and function). If true, the input framework will - ## warn in these cases, but continue. If false, it will - ## abort. Defaults to false (abort). - const accept_unsupported_types = F &redef; - - ## A table input stream type used to send data to a Bro table. - type TableDescription: record { - # Common definitions for tables and events - - ## String that allows the reader to find the source of the data. - ## For `READER_ASCII`, this is the filename. - source: string; - - ## Reader to use for this stream. - reader: Reader &default=default_reader; - - ## Read mode to use for this stream. - mode: Mode &default=default_mode; - - ## Name of the input stream. This is used by some functions to - ## manipulate the stream. - name: string; - - # Special definitions for tables - - ## Table which will receive the data read by the input framework. - destination: any; - - ## Record that defines the values used as the index of the table. - idx: any; - - ## Record that defines the values used as the elements of the table. - ## If this is undefined, then *destination* must be a set. - val: any &optional; - - ## Defines if the value of the table is a record (default), or a single - ## value. When this is set to false, then *val* can only contain one - ## element. - want_record: bool &default=T; - - ## The event that is raised each time a value is added to, changed in, - ## or removed from the table. The event will receive an - ## Input::TableDescription as the first argument, an Input::Event - ## enum as the second argument, the *idx* record as the third argument - ## and the value (record) as the fourth argument. - ev: any &optional; - - ## Predicate function that can decide if an insertion, update or removal - ## should really be executed. Parameters have same meaning as for the - ## event. - ## If true is returned, the update is performed. If false is returned, - ## it is skipped. - pred: function(typ: Input::Event, left: any, right: any): bool &optional; - - ## Error event that is raised when an information, warning or error - ## is raised by the input stream. If the level is error, the stream will automatically - ## be closed. - ## The event receives the Input::TableDescription as the first argument, the - ## message as the second argument and the Reporter::Level as the third argument. - ## - ## The event is raised like if it had been declared as follows: - ## error_ev: function(desc: TableDescription, message: string, level: Reporter::Level) &optional; - ## The actual declaration uses the ``any`` type because of deficiencies of the Bro type system. 
- error_ev: any &optional; - - ## A key/value table that will be passed to the reader. - ## Interpretation of the values is left to the reader, but - ## usually they will be used for configuration purposes. - config: table[string] of string &default=table(); - }; - - ## An event input stream type used to send input data to a Bro event. - type EventDescription: record { - # Common definitions for tables and events - - ## String that allows the reader to find the source. - ## For `READER_ASCII`, this is the filename. - source: string; - - ## Reader to use for this stream. - reader: Reader &default=default_reader; - - ## Read mode to use for this stream. - mode: Mode &default=default_mode; - - ## Descriptive name. Used to remove a stream at a later time. - name: string; - - # Special definitions for events - - ## Record type describing the fields to be retrieved from the input - ## source. - fields: any; - - ## If this is false, the event receives each value in *fields* as a - ## separate argument. - ## If this is set to true (default), the event receives all fields in - ## a single record value. - want_record: bool &default=T; - - ## The event that is raised each time a new line is received from the - ## reader. The event will receive an Input::EventDescription record - ## as the first argument, an Input::Event enum as the second - ## argument, and the fields (as specified in *fields*) as the following - ## arguments (this will either be a single record value containing - ## all fields, or each field value as a separate argument). - ev: any; - - ## Error event that is raised when an information, warning or error - ## is raised by the input stream. If the level is error, the stream will automatically - ## be closed. - ## The event receives the Input::EventDescription as the first argument, the - ## message as the second argument and the Reporter::Level as the third argument. - ## - ## The event is raised like it had been declared as follows: - ## error_ev: function(desc: EventDescription, message: string, level: Reporter::Level) &optional; - ## The actual declaration uses the ``any`` type because of deficiencies of the Bro type system. - error_ev: any &optional; - - ## A key/value table that will be passed to the reader. - ## Interpretation of the values is left to the reader, but - ## usually they will be used for configuration purposes. - config: table[string] of string &default=table(); - }; - - ## A file analysis input stream type used to forward input data to the - ## file analysis framework. - type AnalysisDescription: record { - ## String that allows the reader to find the source. - ## For `READER_ASCII`, this is the filename. - source: string; - - ## Reader to use for this stream. Compatible readers must be - ## able to accept a filter of a single string type (i.e. - ## they read a byte stream). - reader: Reader &default=Input::READER_BINARY; - - ## Read mode to use for this stream. - mode: Mode &default=default_mode; - - ## Descriptive name that uniquely identifies the input source. - ## Can be used to remove a stream at a later time. - ## This will also be used for the unique *source* field of - ## :bro:see:`fa_file`. Most of the time, the best choice for this - ## field will be the same value as the *source* field. - name: string; - - ## A key/value table that will be passed to the reader. - ## Interpretation of the values is left to the reader, but - ## usually they will be used for configuration purposes. 
- config: table[string] of string &default=table(); - }; - - ## Create a new table input stream from a given source. - ## - ## description: `TableDescription` record describing the source. - ## - ## Returns: true on success. - global add_table: function(description: Input::TableDescription) : bool; - - ## Create a new event input stream from a given source. - ## - ## description: `EventDescription` record describing the source. - ## - ## Returns: true on success. - global add_event: function(description: Input::EventDescription) : bool; - - ## Create a new file analysis input stream from a given source. Data read - ## from the source is automatically forwarded to the file analysis - ## framework. - ## - ## description: A record describing the source. - ## - ## Returns: true on success. - global add_analysis: function(description: Input::AnalysisDescription) : bool; - - ## Remove an input stream. - ## - ## id: string value identifying the stream to be removed. - ## - ## Returns: true on success and false if the named stream was not found. - global remove: function(id: string) : bool; - - ## Forces the current input to be checked for changes. - ## - ## id: string value identifying the stream. - ## - ## Returns: true on success and false if the named stream was not found. - global force_update: function(id: string) : bool; - - ## Event that is called when the end of a data source has been reached, - ## including after an update. - ## - ## name: Name of the input stream. - ## - ## source: String that identifies the data source (such as the filename). - global end_of_data: event(name: string, source: string); -} - -@load base/bif/input.bif - - -module Input; - -function add_table(description: Input::TableDescription) : bool - { - return __create_table_stream(description); - } - -function add_event(description: Input::EventDescription) : bool - { - return __create_event_stream(description); - } - -function add_analysis(description: Input::AnalysisDescription) : bool - { - return __create_analysis_stream(description); - } - -function remove(id: string) : bool - { - return __remove_stream(id); - } - -function force_update(id: string) : bool - { - return __force_update(id); - } - diff --git a/scripts/base/frameworks/input/main.zeek b/scripts/base/frameworks/input/main.zeek new file mode 100644 index 0000000000..973661f871 --- /dev/null +++ b/scripts/base/frameworks/input/main.zeek @@ -0,0 +1,281 @@ +##! The input framework provides a way to read previously stored data either +##! as an event stream or into a Zeek table. + +module Input; + +export { + ## Type that describes what kind of change occurred. + type Event: enum { + ## New data has been imported. + EVENT_NEW = 0, + ## Existing data has been changed. + EVENT_CHANGED = 1, + ## Previously existing data has been removed. + EVENT_REMOVED = 2, + }; + + ## Type that defines the input stream read mode. + type Mode: enum { + ## Do not automatically reread the file after it has been read. + MANUAL = 0, + ## Reread the entire file each time a change is found. + REREAD = 1, + ## Read data from end of file each time new data is appended. + STREAM = 2 + }; + + ## The default input reader used. Defaults to `READER_ASCII`. + option default_reader = READER_ASCII; + + ## The default reader mode used. Defaults to `MANUAL`. + option default_mode = MANUAL; + + ## Separator between fields. + ## Please note that the separator has to be exactly one character long. + ## Individual readers can use a different value. 
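+	## For example, to read pipe-separated input instead (illustrative
+	## only, not part of this change)::
+	##
+	##     redef Input::separator = "|";
+	##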
+ const separator = "\t" &redef; + + ## Separator between set elements. + ## Please note that the separator has to be exactly one character long. + ## Individual readers can use a different value. + const set_separator = "," &redef; + + ## String to use for empty fields. + ## Individual readers can use a different value. + const empty_field = "(empty)" &redef; + + ## String to use for an unset &optional field. + ## Individual readers can use a different value. + const unset_field = "-" &redef; + + ## Flag that controls if the input framework accepts records + ## that contain types that are not supported (at the moment + ## file and function). If true, the input framework will + ## warn in these cases, but continue. If false, it will + ## abort. Defaults to false (abort). + const accept_unsupported_types = F &redef; + + ## A table input stream type used to send data to a Zeek table. + type TableDescription: record { + # Common definitions for tables and events + + ## String that allows the reader to find the source of the data. + ## For `READER_ASCII`, this is the filename. + source: string; + + ## Reader to use for this stream. + reader: Reader &default=default_reader; + + ## Read mode to use for this stream. + mode: Mode &default=default_mode; + + ## Name of the input stream. This is used by some functions to + ## manipulate the stream. + name: string; + + # Special definitions for tables + + ## Table which will receive the data read by the input framework. + destination: any; + + ## Record that defines the values used as the index of the table. + idx: any; + + ## Record that defines the values used as the elements of the table. + ## If this is undefined, then *destination* must be a set. + val: any &optional; + + ## Defines if the value of the table is a record (default), or a single + ## value. When this is set to false, then *val* can only contain one + ## element. + want_record: bool &default=T; + + ## The event that is raised each time a value is added to, changed in, + ## or removed from the table. The event will receive an + ## Input::TableDescription as the first argument, an Input::Event + ## enum as the second argument, the *idx* record as the third argument + ## and the value (record) as the fourth argument. + ev: any &optional; + + ## Predicate function that can decide if an insertion, update or removal + ## should really be executed. Parameters have same meaning as for the + ## event. + ## If true is returned, the update is performed. If false is returned, + ## it is skipped. + pred: function(typ: Input::Event, left: any, right: any): bool &optional; + + ## Error event that is raised when an information, warning or error + ## is raised by the input stream. If the level is error, the stream will automatically + ## be closed. + ## The event receives the Input::TableDescription as the first argument, the + ## message as the second argument and the Reporter::Level as the third argument. + ## + ## The event is raised like if it had been declared as follows: + ## error_ev: function(desc: TableDescription, message: string, level: Reporter::Level) &optional; + ## The actual declaration uses the ``any`` type because of deficiencies of the Zeek type system. + error_ev: any &optional; + + ## A key/value table that will be passed to the reader. + ## Interpretation of the values is left to the reader, but + ## usually they will be used for configuration purposes. 
+ config: table[string] of string &default=table(); + }; + + ## An event input stream type used to send input data to a Zeek event. + type EventDescription: record { + # Common definitions for tables and events + + ## String that allows the reader to find the source. + ## For `READER_ASCII`, this is the filename. + source: string; + + ## Reader to use for this stream. + reader: Reader &default=default_reader; + + ## Read mode to use for this stream. + mode: Mode &default=default_mode; + + ## Descriptive name. Used to remove a stream at a later time. + name: string; + + # Special definitions for events + + ## Record type describing the fields to be retrieved from the input + ## source. + fields: any; + + ## If this is false, the event receives each value in *fields* as a + ## separate argument. + ## If this is set to true (default), the event receives all fields in + ## a single record value. + want_record: bool &default=T; + + ## The event that is raised each time a new line is received from the + ## reader. The event will receive an Input::EventDescription record + ## as the first argument, an Input::Event enum as the second + ## argument, and the fields (as specified in *fields*) as the following + ## arguments (this will either be a single record value containing + ## all fields, or each field value as a separate argument). + ev: any; + + ## Error event that is raised when an information, warning or error + ## is raised by the input stream. If the level is error, the stream will automatically + ## be closed. + ## The event receives the Input::EventDescription as the first argument, the + ## message as the second argument and the Reporter::Level as the third argument. + ## + ## The event is raised like it had been declared as follows: + ## error_ev: function(desc: EventDescription, message: string, level: Reporter::Level) &optional; + ## The actual declaration uses the ``any`` type because of deficiencies of the Zeek type system. + error_ev: any &optional; + + ## A key/value table that will be passed to the reader. + ## Interpretation of the values is left to the reader, but + ## usually they will be used for configuration purposes. + config: table[string] of string &default=table(); + }; + + ## A file analysis input stream type used to forward input data to the + ## file analysis framework. + type AnalysisDescription: record { + ## String that allows the reader to find the source. + ## For `READER_ASCII`, this is the filename. + source: string; + + ## Reader to use for this stream. Compatible readers must be + ## able to accept a filter of a single string type (i.e. + ## they read a byte stream). + reader: Reader &default=Input::READER_BINARY; + + ## Read mode to use for this stream. + mode: Mode &default=default_mode; + + ## Descriptive name that uniquely identifies the input source. + ## Can be used to remove a stream at a later time. + ## This will also be used for the unique *source* field of + ## :zeek:see:`fa_file`. Most of the time, the best choice for this + ## field will be the same value as the *source* field. + name: string; + + ## A key/value table that will be passed to the reader. + ## Interpretation of the values is left to the reader, but + ## usually they will be used for configuration purposes. + config: table[string] of string &default=table(); + }; + + ## Create a new table input stream from a given source. + ## + ## description: `TableDescription` record describing the source. + ## + ## Returns: true on success. 
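+	## A minimal sketch (the file name and record types below are
+	## hypothetical, not part of this change)::
+	##
+	##     type Idx: record { ip: addr; };
+	##     type Val: record { owner: string; };
+	##     global blocklist: table[addr] of Val = table();
+	##
+	##     event zeek_init()
+	##         {
+	##         Input::add_table([$source="blocklist.tsv", $name="blocklist",
+	##                           $idx=Idx, $val=Val, $destination=blocklist]);
+	##         }
+	##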
+ global add_table: function(description: Input::TableDescription) : bool; + + ## Create a new event input stream from a given source. + ## + ## description: `EventDescription` record describing the source. + ## + ## Returns: true on success. + global add_event: function(description: Input::EventDescription) : bool; + + ## Create a new file analysis input stream from a given source. Data read + ## from the source is automatically forwarded to the file analysis + ## framework. + ## + ## description: A record describing the source. + ## + ## Returns: true on success. + global add_analysis: function(description: Input::AnalysisDescription) : bool; + + ## Remove an input stream. + ## + ## id: string value identifying the stream to be removed. + ## + ## Returns: true on success and false if the named stream was not found. + global remove: function(id: string) : bool; + + ## Forces the current input to be checked for changes. + ## + ## id: string value identifying the stream. + ## + ## Returns: true on success and false if the named stream was not found. + global force_update: function(id: string) : bool; + + ## Event that is called when the end of a data source has been reached, + ## including after an update. + ## + ## name: Name of the input stream. + ## + ## source: String that identifies the data source (such as the filename). + global end_of_data: event(name: string, source: string); +} + +@load base/bif/input.bif + + +module Input; + +function add_table(description: Input::TableDescription) : bool + { + return __create_table_stream(description); + } + +function add_event(description: Input::EventDescription) : bool + { + return __create_event_stream(description); + } + +function add_analysis(description: Input::AnalysisDescription) : bool + { + return __create_analysis_stream(description); + } + +function remove(id: string) : bool + { + return __remove_stream(id); + } + +function force_update(id: string) : bool + { + return __force_update(id); + } + diff --git a/scripts/base/frameworks/input/readers/ascii.bro b/scripts/base/frameworks/input/readers/ascii.bro deleted file mode 100644 index c757718d53..0000000000 --- a/scripts/base/frameworks/input/readers/ascii.bro +++ /dev/null @@ -1,56 +0,0 @@ -##! Interface for the ascii input reader. -##! -##! The defaults are set to match Bro's ASCII output. - -module InputAscii; - -export { - ## Separator between fields. - ## Please note that the separator has to be exactly one character long. - const separator = Input::separator &redef; - - ## Separator between set and vector elements. - ## Please note that the separator has to be exactly one character long. - const set_separator = Input::set_separator &redef; - - ## String to use for empty fields. - const empty_field = Input::empty_field &redef; - - ## String to use for an unset &optional field. - const unset_field = Input::unset_field &redef; - - ## Fail on invalid lines. If set to false, the ascii - ## input reader will jump over invalid lines, reporting - ## warnings in reporter.log. If set to true, errors in - ## input lines will be handled as fatal errors for the - ## reader thread; reading will abort immediately and - ## an error will be logged to reporter.log. - ## Individual readers can use a different value using - ## the $config table. - ## fail_on_invalid_lines = T was the default behavior - ## until Bro 2.6. - const fail_on_invalid_lines = F &redef; - - ## Fail on file read problems. 
If set to true, the ascii - ## input reader will fail when encountering any problems - ## while reading a file different from invalid lines. - ## Examples of such problems are permission problems, or - ## missing files. - ## When set to false, these problems will be ignored. This - ## has an especially big effect for the REREAD mode, which will - ## seamlessly recover from read errors when a file is - ## only temporarily inaccessible. For MANUAL or STREAM files, - ## errors will most likely still be fatal since no automatic - ## re-reading of the file is attempted. - ## Individual readers can use a different value using - ## the $config table. - ## fail_on_file_problem = T was the default behavior - ## until Bro 2.6. - const fail_on_file_problem = F &redef; - - ## On input streams with a pathless or relative-path source filename, - ## prefix the following path. This prefix can, but need not be, absolute. - ## The default is to leave any filenames unchanged. This prefix has no - ## effect if the source already is an absolute path. - const path_prefix = "" &redef; -} diff --git a/scripts/base/frameworks/input/readers/ascii.zeek b/scripts/base/frameworks/input/readers/ascii.zeek new file mode 100644 index 0000000000..26d7b2360c --- /dev/null +++ b/scripts/base/frameworks/input/readers/ascii.zeek @@ -0,0 +1,56 @@ +##! Interface for the ascii input reader. +##! +##! The defaults are set to match Zeek's ASCII output. + +module InputAscii; + +export { + ## Separator between fields. + ## Please note that the separator has to be exactly one character long. + const separator = Input::separator &redef; + + ## Separator between set and vector elements. + ## Please note that the separator has to be exactly one character long. + const set_separator = Input::set_separator &redef; + + ## String to use for empty fields. + const empty_field = Input::empty_field &redef; + + ## String to use for an unset &optional field. + const unset_field = Input::unset_field &redef; + + ## Fail on invalid lines. If set to false, the ascii + ## input reader will jump over invalid lines, reporting + ## warnings in reporter.log. If set to true, errors in + ## input lines will be handled as fatal errors for the + ## reader thread; reading will abort immediately and + ## an error will be logged to reporter.log. + ## Individual readers can use a different value using + ## the $config table. + ## fail_on_invalid_lines = T was the default behavior + ## until Bro 2.6. + const fail_on_invalid_lines = F &redef; + + ## Fail on file read problems. If set to true, the ascii + ## input reader will fail when encountering any problems + ## while reading a file different from invalid lines. + ## Examples of such problems are permission problems, or + ## missing files. + ## When set to false, these problems will be ignored. This + ## has an especially big effect for the REREAD mode, which will + ## seamlessly recover from read errors when a file is + ## only temporarily inaccessible. For MANUAL or STREAM files, + ## errors will most likely still be fatal since no automatic + ## re-reading of the file is attempted. + ## Individual readers can use a different value using + ## the $config table. + ## fail_on_file_problem = T was the default behavior + ## until Bro 2.6. + const fail_on_file_problem = F &redef; + + ## On input streams with a pathless or relative-path source filename, + ## prefix the following path. This prefix can, but need not be, absolute. + ## The default is to leave any filenames unchanged. 
This prefix has no + ## effect if the source already is an absolute path. + const path_prefix = "" &redef; +} diff --git a/scripts/base/frameworks/input/readers/benchmark.bro b/scripts/base/frameworks/input/readers/benchmark.zeek similarity index 100% rename from scripts/base/frameworks/input/readers/benchmark.bro rename to scripts/base/frameworks/input/readers/benchmark.zeek diff --git a/scripts/base/frameworks/input/readers/binary.bro b/scripts/base/frameworks/input/readers/binary.zeek similarity index 100% rename from scripts/base/frameworks/input/readers/binary.bro rename to scripts/base/frameworks/input/readers/binary.zeek diff --git a/scripts/base/frameworks/input/readers/config.bro b/scripts/base/frameworks/input/readers/config.zeek similarity index 100% rename from scripts/base/frameworks/input/readers/config.bro rename to scripts/base/frameworks/input/readers/config.zeek diff --git a/scripts/base/frameworks/input/readers/raw.bro b/scripts/base/frameworks/input/readers/raw.zeek similarity index 100% rename from scripts/base/frameworks/input/readers/raw.bro rename to scripts/base/frameworks/input/readers/raw.zeek diff --git a/scripts/base/frameworks/input/readers/sqlite.bro b/scripts/base/frameworks/input/readers/sqlite.zeek similarity index 100% rename from scripts/base/frameworks/input/readers/sqlite.bro rename to scripts/base/frameworks/input/readers/sqlite.zeek diff --git a/scripts/base/frameworks/intel/__load__.bro b/scripts/base/frameworks/intel/__load__.zeek similarity index 100% rename from scripts/base/frameworks/intel/__load__.bro rename to scripts/base/frameworks/intel/__load__.zeek diff --git a/scripts/base/frameworks/intel/cluster.bro b/scripts/base/frameworks/intel/cluster.bro deleted file mode 100644 index b71e8c47ea..0000000000 --- a/scripts/base/frameworks/intel/cluster.bro +++ /dev/null @@ -1,97 +0,0 @@ -##! Cluster transparency support for the intelligence framework. This is mostly -##! oriented toward distributing intelligence information across clusters. - -@load ./main -@load base/frameworks/cluster - -module Intel; - -# Internal events for cluster data distribution. -global insert_item: event(item: Item); -global insert_indicator: event(item: Item); - -# If this process is not a manager process, we don't want the full metadata. -@if ( Cluster::local_node_type() != Cluster::MANAGER ) -redef have_full_data = F; -@endif - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) -event bro_init() - { - Broker::auto_publish(Cluster::worker_topic, remove_indicator); - } - -# Handling of new worker nodes. -event Cluster::node_up(name: string, id: string) - { - # When a worker connects, send it the complete minimal data store. - # It will be kept up to date after this by the insert_indicator event. - if ( name in Cluster::nodes && Cluster::nodes[name]$node_type == Cluster::WORKER ) - { - Broker::publish_id(Cluster::node_topic(name), "Intel::min_data_store"); - } - } - -# On the manager, the new_item event indicates a new indicator that -# has to be distributed. -event Intel::new_item(item: Item) &priority=5 - { - local pt = Cluster::rr_topic(Cluster::proxy_pool, "intel_insert_rr_key"); - - if ( pt == "" ) - # No proxies alive, publish to all workers ourself instead of - # relaying via a proxy. - pt = Cluster::worker_topic; - - Broker::publish(pt, Intel::insert_indicator, item); - } - -# Handling of item insertion triggered by remote node. 
-event Intel::insert_item(item: Intel::Item) &priority=5 - { - Intel::_insert(item, T); - } - -# Handling of item removal triggered by remote node. -event Intel::remove_item(item: Item, purge_indicator: bool) &priority=5 - { - remove(item, purge_indicator); - } - -# Handling of match triggered by remote node. -event Intel::match_remote(s: Seen) &priority=5 - { - if ( Intel::find(s) ) - event Intel::match(s, Intel::get_items(s)); - } -@endif - -@if ( Cluster::local_node_type() == Cluster::WORKER ) -event bro_init() - { - Broker::auto_publish(Cluster::manager_topic, match_remote); - Broker::auto_publish(Cluster::manager_topic, remove_item); - } - -# On a worker, the new_item event requires to trigger the insertion -# on the manager to update the back-end data store. -event Intel::new_item(item: Intel::Item) &priority=5 - { - Broker::publish(Cluster::manager_topic, Intel::insert_item, item); - } - -# Handling of new indicators published by the manager. -event Intel::insert_indicator(item: Intel::Item) &priority=5 - { - Intel::_insert(item, F); - } -@endif - -@if ( Cluster::local_node_type() == Cluster::PROXY ) -event Intel::insert_indicator(item: Intel::Item) &priority=5 - { - # Just forwarding from manager to workers. - Broker::publish(Cluster::worker_topic, Intel::insert_indicator, item); - } -@endif - diff --git a/scripts/base/frameworks/intel/cluster.zeek b/scripts/base/frameworks/intel/cluster.zeek new file mode 100644 index 0000000000..2d51ffb200 --- /dev/null +++ b/scripts/base/frameworks/intel/cluster.zeek @@ -0,0 +1,97 @@ +##! Cluster transparency support for the intelligence framework. This is mostly +##! oriented toward distributing intelligence information across clusters. + +@load ./main +@load base/frameworks/cluster + +module Intel; + +# Internal events for cluster data distribution. +global insert_item: event(item: Item); +global insert_indicator: event(item: Item); + +# If this process is not a manager process, we don't want the full metadata. +@if ( Cluster::local_node_type() != Cluster::MANAGER ) +redef have_full_data = F; +@endif + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) +event zeek_init() + { + Broker::auto_publish(Cluster::worker_topic, remove_indicator); + } + +# Handling of new worker nodes. +event Cluster::node_up(name: string, id: string) + { + # When a worker connects, send it the complete minimal data store. + # It will be kept up to date after this by the insert_indicator event. + if ( name in Cluster::nodes && Cluster::nodes[name]$node_type == Cluster::WORKER ) + { + Broker::publish_id(Cluster::node_topic(name), "Intel::min_data_store"); + } + } + +# On the manager, the new_item event indicates a new indicator that +# has to be distributed. +event Intel::new_item(item: Item) &priority=5 + { + local pt = Cluster::rr_topic(Cluster::proxy_pool, "intel_insert_rr_key"); + + if ( pt == "" ) + # No proxies alive, publish to all workers ourself instead of + # relaying via a proxy. + pt = Cluster::worker_topic; + + Broker::publish(pt, Intel::insert_indicator, item); + } + +# Handling of item insertion triggered by remote node. +event Intel::insert_item(item: Intel::Item) &priority=5 + { + Intel::_insert(item, T); + } + +# Handling of item removal triggered by remote node. +event Intel::remove_item(item: Item, purge_indicator: bool) &priority=5 + { + remove(item, purge_indicator); + } + +# Handling of match triggered by remote node. 
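+# Workers keep only the minimal data store, so on a hit they publish this
+# event to the manager (see the worker-side auto_publish below). The manager,
+# which holds the full metadata, re-checks the indicator and re-raises
+# Intel::match with the matching items.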
+event Intel::match_remote(s: Seen) &priority=5 + { + if ( Intel::find(s) ) + event Intel::match(s, Intel::get_items(s)); + } +@endif + +@if ( Cluster::local_node_type() == Cluster::WORKER ) +event zeek_init() + { + Broker::auto_publish(Cluster::manager_topic, match_remote); + Broker::auto_publish(Cluster::manager_topic, remove_item); + } + +# On a worker, the new_item event requires to trigger the insertion +# on the manager to update the back-end data store. +event Intel::new_item(item: Intel::Item) &priority=5 + { + Broker::publish(Cluster::manager_topic, Intel::insert_item, item); + } + +# Handling of new indicators published by the manager. +event Intel::insert_indicator(item: Intel::Item) &priority=5 + { + Intel::_insert(item, F); + } +@endif + +@if ( Cluster::local_node_type() == Cluster::PROXY ) +event Intel::insert_indicator(item: Intel::Item) &priority=5 + { + # Just forwarding from manager to workers. + Broker::publish(Cluster::worker_topic, Intel::insert_indicator, item); + } +@endif + diff --git a/scripts/base/frameworks/intel/files.bro b/scripts/base/frameworks/intel/files.zeek similarity index 100% rename from scripts/base/frameworks/intel/files.bro rename to scripts/base/frameworks/intel/files.zeek diff --git a/scripts/base/frameworks/intel/input.bro b/scripts/base/frameworks/intel/input.bro deleted file mode 100644 index aea3ac9a35..0000000000 --- a/scripts/base/frameworks/intel/input.bro +++ /dev/null @@ -1,56 +0,0 @@ -##! Input handling for the intelligence framework. This script implements the -##! import of intelligence data from files using the input framework. - -@load ./main - -module Intel; - -export { - ## Intelligence files that will be read off disk. The files are - ## reread every time they are updated so updates must be atomic - ## with "mv" instead of writing the file in place. - const read_files: set[string] = {} &redef; - - ## An optional path prefix for intel files. This prefix can, but - ## need not be, absolute. The default is to leave any filenames - ## unchanged. This prefix has no effect if a read_file entry is - ## an absolute path. This prefix gets applied _before_ entering - ## the input framework, so if the prefix is absolute, the input - ## framework won't munge it further. If it is relative, then - ## any path_prefix specified in the input framework will apply - ## additionally. - const path_prefix = "" &redef; -} - -event Intel::read_entry(desc: Input::EventDescription, tpe: Input::Event, item: Intel::Item) - { - Intel::insert(item); - } - -event bro_init() &priority=5 - { - if ( ! Cluster::is_enabled() || - Cluster::local_node_type() == Cluster::MANAGER ) - { - for ( a_file in read_files ) - { - # Handle prefixing of the source file name. Note - # that this currently always uses the ASCII reader, - # so we know we're dealing with filenames. - local source = a_file; - - # If we have a path prefix and the file doesn't - # already have an absolute path, prepend the prefix. - if ( |path_prefix| > 0 && sub_bytes(a_file, 0, 1) != "/" ) - source = cat(rstrip(path_prefix, "/"), "/", a_file); - - Input::add_event([$source=source, - $reader=Input::READER_ASCII, - $mode=Input::REREAD, - $name=cat("intel-", a_file), - $fields=Intel::Item, - $ev=Intel::read_entry]); - } - } - } - diff --git a/scripts/base/frameworks/intel/input.zeek b/scripts/base/frameworks/intel/input.zeek new file mode 100644 index 0000000000..4dfa011fad --- /dev/null +++ b/scripts/base/frameworks/intel/input.zeek @@ -0,0 +1,56 @@ +##! Input handling for the intelligence framework. 
This script implements the +##! import of intelligence data from files using the input framework. + +@load ./main + +module Intel; + +export { + ## Intelligence files that will be read off disk. The files are + ## reread every time they are updated so updates must be atomic + ## with "mv" instead of writing the file in place. + const read_files: set[string] = {} &redef; + + ## An optional path prefix for intel files. This prefix can, but + ## need not be, absolute. The default is to leave any filenames + ## unchanged. This prefix has no effect if a read_file entry is + ## an absolute path. This prefix gets applied _before_ entering + ## the input framework, so if the prefix is absolute, the input + ## framework won't munge it further. If it is relative, then + ## any path_prefix specified in the input framework will apply + ## additionally. + const path_prefix = "" &redef; +} + +event Intel::read_entry(desc: Input::EventDescription, tpe: Input::Event, item: Intel::Item) + { + Intel::insert(item); + } + +event zeek_init() &priority=5 + { + if ( ! Cluster::is_enabled() || + Cluster::local_node_type() == Cluster::MANAGER ) + { + for ( a_file in read_files ) + { + # Handle prefixing of the source file name. Note + # that this currently always uses the ASCII reader, + # so we know we're dealing with filenames. + local source = a_file; + + # If we have a path prefix and the file doesn't + # already have an absolute path, prepend the prefix. + if ( |path_prefix| > 0 && sub_bytes(a_file, 0, 1) != "/" ) + source = cat(rstrip(path_prefix, "/"), "/", a_file); + + Input::add_event([$source=source, + $reader=Input::READER_ASCII, + $mode=Input::REREAD, + $name=cat("intel-", a_file), + $fields=Intel::Item, + $ev=Intel::read_entry]); + } + } + } + diff --git a/scripts/base/frameworks/intel/main.bro b/scripts/base/frameworks/intel/main.bro deleted file mode 100644 index 4bc3b296dd..0000000000 --- a/scripts/base/frameworks/intel/main.bro +++ /dev/null @@ -1,616 +0,0 @@ -##! The intelligence framework provides a way to store and query intelligence -##! data (e.g. IP addresses, URLs and hashes). The intelligence items can be -##! associated with metadata to allow informed decisions about matching and -##! handling. - -@load base/frameworks/notice - -module Intel; - -export { - redef enum Log::ID += { LOG }; - - ## Enum type to represent various types of intelligence data. - type Type: enum { - ## An IP address. - ADDR, - ## A subnet in CIDR notation. - SUBNET, - ## A complete URL without the prefix ``"http://"``. - URL, - ## Software name. - SOFTWARE, - ## Email address. - EMAIL, - ## DNS domain name. - DOMAIN, - ## A user name. - USER_NAME, - ## Certificate SHA-1 hash. - CERT_HASH, - ## Public key MD5 hash. (SSH server host keys are a good example.) - PUBKEY_HASH, - }; - - ## Set of intelligence data types. - type TypeSet: set[Type]; - - ## Data about an :bro:type:`Intel::Item`. - type MetaData: record { - ## An arbitrary string value representing the data source. This - ## value is used as unique key to identify a metadata record in - ## the scope of a single intelligence item. - source: string; - ## A freeform description for the data. - desc: string &optional; - ## A URL for more information about the data. - url: string &optional; - }; - - ## Represents a piece of intelligence. - type Item: record { - ## The intelligence indicator. - indicator: string; - - ## The type of data that the indicator field represents. - indicator_type: Type; - - ## Metadata for the item. 
Typically represents more deeply - ## descriptive data for a piece of intelligence. - meta: MetaData; - }; - - ## Enum to represent where data came from when it was discovered. - ## The convention is to prefix the name with ``IN_``. - type Where: enum { - ## A catchall value to represent data of unknown provenance. - IN_ANYWHERE, - }; - - ## Information about a piece of "seen" data. - type Seen: record { - ## The string if the data is about a string. - indicator: string &log &optional; - - ## The type of data that the indicator represents. - indicator_type: Type &log &optional; - - ## If the indicator type was :bro:enum:`Intel::ADDR`, then this - ## field will be present. - host: addr &optional; - - ## Where the data was discovered. - where: Where &log; - - ## The name of the node where the match was discovered. - node: string &optional &log; - - ## If the data was discovered within a connection, the - ## connection record should go here to give context to the data. - conn: connection &optional; - - ## If the data was discovered within a connection, the - ## connection uid should go here to give context to the data. - ## If the *conn* field is provided, this will be automatically - ## filled out. - uid: string &optional; - }; - - ## Record used for the logging framework representing a positive - ## hit within the intelligence framework. - type Info: record { - ## Timestamp when the data was discovered. - ts: time &log; - - ## If a connection was associated with this intelligence hit, - ## this is the uid for the connection - uid: string &log &optional; - ## If a connection was associated with this intelligence hit, - ## this is the conn_id for the connection. - id: conn_id &log &optional; - - ## Where the data was seen. - seen: Seen &log; - ## Which indicator types matched. - matched: TypeSet &log; - ## Sources which supplied data that resulted in this match. - sources: set[string] &log &default=string_set(); - }; - - ## Function to insert intelligence data. If the indicator is already - ## present, the associated metadata will be added to the indicator. If - ## the indicator already contains a metadata record from the same source, - ## the existing metadata record will be updated. - global insert: function(item: Item); - - ## Function to remove intelligence data. If purge_indicator is set, the - ## given metadata is ignored and the indicator is removed completely. - global remove: function(item: Item, purge_indicator: bool &default = F); - - ## Function to declare discovery of a piece of data in order to check - ## it against known intelligence for matches. - global seen: function(s: Seen); - - ## Event to represent a match in the intelligence data from data that - ## was seen. On clusters there is no assurance as to when this event - ## will be generated so do not assume that arbitrary global state beyond - ## the given data will be available. - ## - ## This is the primary mechanism where a user may take actions based on - ## data provided by the intelligence framework. - global match: event(s: Seen, items: set[Item]); - - ## This hook can be used to influence the logging of intelligence hits - ## (e.g. by adding data to the Info record). The default information is - ## added with a priority of 5. - ## - ## info: The Info record that will be logged. - ## - ## s: Information about the data seen. - ## - ## items: The intel items that match the seen data. - ## - ## In case the hook execution is terminated using break, the match will - ## not be logged. 
- global extend_match: hook(info: Info, s: Seen, items: set[Item]); - - ## The expiration timeout for intelligence items. Once an item expires, the - ## :bro:id:`Intel::item_expired` hook is called. Reinsertion of an item - ## resets the timeout. A negative value disables expiration of intelligence - ## items. - const item_expiration = -1 min &redef; - - ## This hook can be used to handle expiration of intelligence items. - ## - ## indicator: The indicator of the expired item. - ## - ## indicator_type: The indicator type of the expired item. - ## - ## metas: The set of metadata describing the expired item. - ## - ## If all hook handlers are executed, the expiration timeout will be reset. - ## Otherwise, if one of the handlers terminates using break, the item will - ## be removed. - global item_expired: hook(indicator: string, indicator_type: Type, metas: set[MetaData]); - - ## This hook can be used to filter intelligence items that are about to be - ## inserted into the internal data store. In case the hook execution is - ## terminated using break, the item will not be (re)added to the internal - ## data store. - ## - ## item: The intel item that should be inserted. - global filter_item: hook(item: Intel::Item); - - global log_intel: event(rec: Info); -} - -# Internal handler for matches with no metadata available. -global match_remote: event(s: Seen); - -# Internal events for (cluster) data distribution. -global new_item: event(item: Item); -global remove_item: event(item: Item, purge_indicator: bool); -global remove_indicator: event(item: Item); - -# Optionally store metadata. This is used internally depending on -# if this is a cluster deployment or not. -const have_full_data = T &redef; - -# Table of metadata, indexed by source string. -type MetaDataTable: table[string] of MetaData; - -# Expiration handlers. -global expire_host_data: function(data: table[addr] of MetaDataTable, idx: addr): interval; -global expire_subnet_data: function(data: table[subnet] of MetaDataTable, idx: subnet): interval; -global expire_string_data: function(data: table[string, Type] of MetaDataTable, idx: any): interval; - -# The in memory data structure for holding intelligence. -type DataStore: record { - host_data: table[addr] of MetaDataTable &write_expire=item_expiration &expire_func=expire_host_data; - subnet_data: table[subnet] of MetaDataTable &write_expire=item_expiration &expire_func=expire_subnet_data; - string_data: table[string, Type] of MetaDataTable &write_expire=item_expiration &expire_func=expire_string_data; -}; -global data_store: DataStore &redef; - -# The in memory data structure for holding the barest matchable intelligence. -# This is primarily for workers to do the initial quick matches and store -# a minimal amount of data for the full match to happen on the manager. -type MinDataStore: record { - host_data: set[addr]; - subnet_data: set[subnet]; - string_data: set[string, Type]; -}; -global min_data_store: MinDataStore &redef; - - -event bro_init() &priority=5 - { - Log::create_stream(LOG, [$columns=Info, $ev=log_intel, $path="intel"]); - } - -# Function that abstracts expiration of different types. -function expire_item(indicator: string, indicator_type: Type, metas: set[MetaData]): interval - { - if ( hook item_expired(indicator, indicator_type, metas) ) - return item_expiration; - else - remove([$indicator=indicator, $indicator_type=indicator_type, $meta=[$source=""]], T); - return 0 sec; - } - -# Expiration handler definitions. 
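# A brief sketch of driving the expiration machinery described above from a
# site script (the 30-day window is an arbitrary example value):
#
redef Intel::item_expiration = 30 day;

hook Intel::item_expired(indicator: string, indicator_type: Intel::Type, metas: set[Intel::MetaData])
	{
	# Terminating with "break" lets the framework drop the item once the
	# interval elapses; falling through instead resets the timeout.
	break;
	}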
-function expire_host_data(data: table[addr] of MetaDataTable, idx: addr): interval - { - local meta_tbl: MetaDataTable = data[idx]; - local metas: set[MetaData]; - for ( src, md in meta_tbl ) - add metas[md]; - - return expire_item(cat(idx), ADDR, metas); - } - -function expire_subnet_data(data: table[subnet] of MetaDataTable, idx: subnet): interval - { - local meta_tbl: MetaDataTable = data[idx]; - local metas: set[MetaData]; - for ( src, md in meta_tbl ) - add metas[md]; - - return expire_item(cat(idx), SUBNET, metas); - } - -function expire_string_data(data: table[string, Type] of MetaDataTable, idx: any): interval - { - local indicator: string; - local indicator_type: Type; - [indicator, indicator_type] = idx; - - local meta_tbl: MetaDataTable = data[indicator, indicator_type]; - local metas: set[MetaData]; - for ( src, md in meta_tbl ) - add metas[md]; - - return expire_item(indicator, indicator_type, metas); - } - -# Function to check for intelligence hits. -function find(s: Seen): bool - { - if ( s?$host ) - { - if ( have_full_data ) - return ((s$host in data_store$host_data) || - (|matching_subnets(addr_to_subnet(s$host), data_store$subnet_data)| > 0)); - else - return ((s$host in min_data_store$host_data) || - (|matching_subnets(addr_to_subnet(s$host), min_data_store$subnet_data)| > 0)); - } - else - { - if ( have_full_data ) - return ([to_lower(s$indicator), s$indicator_type] in data_store$string_data); - else - return ([to_lower(s$indicator), s$indicator_type] in min_data_store$string_data); - } - } - -# Function to retrieve intelligence items while abstracting from different -# data stores for different indicator types. -function get_items(s: Seen): set[Item] - { - local return_data: set[Item]; - local mt: MetaDataTable; - - if ( ! have_full_data ) - { - Reporter::warning(fmt("Intel::get_items was called from a host (%s) that doesn't have the full data.", - peer_description)); - return return_data; - } - - if ( s?$host ) - { - # See if the host is known about and it has meta values - if ( s$host in data_store$host_data ) - { - mt = data_store$host_data[s$host]; - for ( m, md in mt ) - { - add return_data[Item($indicator=cat(s$host), $indicator_type=ADDR, $meta=md)]; - } - } - # See if the host is part of a known subnet, which has meta values - local nets: table[subnet] of MetaDataTable; - nets = filter_subnet_table(addr_to_subnet(s$host), data_store$subnet_data); - for ( n, mt in nets ) - { - for ( m, md in mt ) - { - add return_data[Item($indicator=cat(n), $indicator_type=SUBNET, $meta=md)]; - } - } - } - else - { - local lower_indicator = to_lower(s$indicator); - # See if the string is known about and it has meta values - if ( [lower_indicator, s$indicator_type] in data_store$string_data ) - { - mt = data_store$string_data[lower_indicator, s$indicator_type]; - for ( m, md in mt ) - { - add return_data[Item($indicator=s$indicator, $indicator_type=s$indicator_type, $meta=md)]; - } - } - } - - return return_data; - } - -function Intel::seen(s: Seen) - { - if ( find(s) ) - { - if ( s?$host ) - { - s$indicator = cat(s$host); - s$indicator_type = Intel::ADDR; - } - - if ( ! 
s?$node ) - { - s$node = peer_description; - } - - if ( have_full_data ) - { - local items = get_items(s); - event Intel::match(s, items); - } - else - { - event Intel::match_remote(s); - } - } - } - -event Intel::match(s: Seen, items: set[Item]) &priority=5 - { - local info = Info($ts=network_time(), $seen=s, $matched=TypeSet()); - - if ( hook extend_match(info, s, items) ) - Log::write(Intel::LOG, info); - } - -hook extend_match(info: Info, s: Seen, items: set[Item]) &priority=5 - { - # Add default information to matches. - if ( s?$conn ) - { - s$uid = s$conn$uid; - info$id = s$conn$id; - } - - if ( s?$uid ) - info$uid = s$uid; - - for ( item in items ) - { - add info$sources[item$meta$source]; - add info$matched[item$indicator_type]; - } - } - -# Function to insert metadata of an item. The function returns T -# if the given indicator is new. -function insert_meta_data(item: Item): bool - { - # Prepare the metadata entry. - local meta = item$meta; - local meta_tbl: table [string] of MetaData; - local is_new: bool = T; - - # All intelligence is case insensitive at the moment. - local lower_indicator = to_lower(item$indicator); - - switch ( item$indicator_type ) - { - case ADDR: - local host = to_addr(item$indicator); - - if ( host !in data_store$host_data ) - data_store$host_data[host] = table(); - else - { - is_new = F; - # Reset expiration timer. - data_store$host_data[host] = data_store$host_data[host]; - } - - meta_tbl = data_store$host_data[host]; - break; - case SUBNET: - local net = to_subnet(item$indicator); - - if ( !check_subnet(net, data_store$subnet_data) ) - data_store$subnet_data[net] = table(); - else - { - is_new = F; - # Reset expiration timer. - data_store$subnet_data[net] = data_store$subnet_data[net]; - } - - meta_tbl = data_store$subnet_data[net]; - break; - default: - if ( [lower_indicator, item$indicator_type] !in data_store$string_data ) - data_store$string_data[lower_indicator, item$indicator_type] = table(); - else - { - is_new = F; - # Reset expiration timer. - data_store$string_data[lower_indicator, item$indicator_type] = - data_store$string_data[lower_indicator, item$indicator_type]; - } - - meta_tbl = data_store$string_data[lower_indicator, item$indicator_type]; - break; - } - - # Insert new metadata or update if already present. - meta_tbl[meta$source] = meta; - - return is_new; - } - -# Function to encapsulate insertion logic. The first_dispatch parameter -# indicates whether the item might be new for other nodes. -function _insert(item: Item, first_dispatch: bool &default = T) - { - # Assume that the item is new by default. - local is_new: bool = T; - - # All intelligence is case insensitive at the moment. - local lower_indicator = to_lower(item$indicator); - - # Insert indicator into MinDataStore (might exist already). - switch ( item$indicator_type ) - { - case ADDR: - local host = to_addr(item$indicator); - add min_data_store$host_data[host]; - break; - case SUBNET: - local net = to_subnet(item$indicator); - add min_data_store$subnet_data[net]; - break; - default: - add min_data_store$string_data[lower_indicator, item$indicator_type]; - break; - } - - if ( have_full_data ) - { - # Insert new metadata or update if already present. - is_new = insert_meta_data(item); - } - - if ( first_dispatch && is_new ) - # Announce a (possibly) new item if this is the first dispatch and - # we know it is new or have to assume that on a worker. 
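	# (Intel::new_item is one of the internal cluster-distribution events
	# declared earlier in this file; in a cluster deployment the intel
	# cluster scripts are expected to attach handlers to it so that a newly
	# inserted indicator reaches the other nodes.)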
- event Intel::new_item(item); - } - -function insert(item: Item) - { - if ( hook filter_item(item) ) - { - # Insert possibly new item. - _insert(item, T); - } - } - -# Function to check whether an item is present. -function item_exists(item: Item): bool - { - switch ( item$indicator_type ) - { - case ADDR: - return have_full_data ? to_addr(item$indicator) in data_store$host_data : - to_addr(item$indicator) in min_data_store$host_data; - case SUBNET: - return have_full_data ? to_subnet(item$indicator) in data_store$subnet_data : - to_subnet(item$indicator) in min_data_store$subnet_data; - default: - return have_full_data ? [item$indicator, item$indicator_type] in data_store$string_data : - [item$indicator, item$indicator_type] in min_data_store$string_data; - } - } - -# Function to remove metadata of an item. The function returns T -# if there is no metadata left for the given indicator. -function remove_meta_data(item: Item): bool - { - if ( ! have_full_data ) - { - Reporter::warning(fmt("Intel::remove_meta_data was called from a host (%s) that doesn't have the full data.", - peer_description)); - return F; - } - - switch ( item$indicator_type ) - { - case ADDR: - local host = to_addr(item$indicator); - delete data_store$host_data[host][item$meta$source]; - return (|data_store$host_data[host]| == 0); - case SUBNET: - local net = to_subnet(item$indicator); - delete data_store$subnet_data[net][item$meta$source]; - return (|data_store$subnet_data[net]| == 0); - default: - delete data_store$string_data[item$indicator, item$indicator_type][item$meta$source]; - return (|data_store$string_data[item$indicator, item$indicator_type]| == 0); - } - } - -function remove(item: Item, purge_indicator: bool) - { - # Check whether the indicator is present - if ( ! item_exists(item) ) - { - Reporter::info(fmt("Tried to remove non-existing item '%s' (%s).", - item$indicator, item$indicator_type)); - return; - } - - # Delegate removal if we are on a worker - if ( !have_full_data ) - { - event Intel::remove_item(item, purge_indicator); - return; - } - - # Remove metadata from manager's data store - local no_meta_data = remove_meta_data(item); - # Remove whole indicator if necessary - if ( no_meta_data || purge_indicator ) - { - switch ( item$indicator_type ) - { - case ADDR: - local host = to_addr(item$indicator); - delete data_store$host_data[host]; - break; - case SUBNET: - local net = to_subnet(item$indicator); - delete data_store$subnet_data[net]; - break; - default: - delete data_store$string_data[item$indicator, item$indicator_type]; - break; - } - # Trigger deletion in minimal data stores - event Intel::remove_indicator(item); - } - } - -# Handling of indicator removal in minimal data stores. -event remove_indicator(item: Item) - { - switch ( item$indicator_type ) - { - case ADDR: - local host = to_addr(item$indicator); - delete min_data_store$host_data[host]; - break; - case SUBNET: - local net = to_subnet(item$indicator); - delete min_data_store$subnet_data[net]; - break; - default: - delete min_data_store$string_data[item$indicator, item$indicator_type]; - break; - } - } diff --git a/scripts/base/frameworks/intel/main.zeek b/scripts/base/frameworks/intel/main.zeek new file mode 100644 index 0000000000..380cb39eaa --- /dev/null +++ b/scripts/base/frameworks/intel/main.zeek @@ -0,0 +1,616 @@ +##! The intelligence framework provides a way to store and query intelligence +##! data (e.g. IP addresses, URLs and hashes). The intelligence items can be +##! 
associated with metadata to allow informed decisions about matching and +##! handling. + +@load base/frameworks/notice + +module Intel; + +export { + redef enum Log::ID += { LOG }; + + ## Enum type to represent various types of intelligence data. + type Type: enum { + ## An IP address. + ADDR, + ## A subnet in CIDR notation. + SUBNET, + ## A complete URL without the prefix ``"http://"``. + URL, + ## Software name. + SOFTWARE, + ## Email address. + EMAIL, + ## DNS domain name. + DOMAIN, + ## A user name. + USER_NAME, + ## Certificate SHA-1 hash. + CERT_HASH, + ## Public key MD5 hash. (SSH server host keys are a good example.) + PUBKEY_HASH, + }; + + ## Set of intelligence data types. + type TypeSet: set[Type]; + + ## Data about an :zeek:type:`Intel::Item`. + type MetaData: record { + ## An arbitrary string value representing the data source. This + ## value is used as unique key to identify a metadata record in + ## the scope of a single intelligence item. + source: string; + ## A freeform description for the data. + desc: string &optional; + ## A URL for more information about the data. + url: string &optional; + }; + + ## Represents a piece of intelligence. + type Item: record { + ## The intelligence indicator. + indicator: string; + + ## The type of data that the indicator field represents. + indicator_type: Type; + + ## Metadata for the item. Typically represents more deeply + ## descriptive data for a piece of intelligence. + meta: MetaData; + }; + + ## Enum to represent where data came from when it was discovered. + ## The convention is to prefix the name with ``IN_``. + type Where: enum { + ## A catchall value to represent data of unknown provenance. + IN_ANYWHERE, + }; + + ## Information about a piece of "seen" data. + type Seen: record { + ## The string if the data is about a string. + indicator: string &log &optional; + + ## The type of data that the indicator represents. + indicator_type: Type &log &optional; + + ## If the indicator type was :zeek:enum:`Intel::ADDR`, then this + ## field will be present. + host: addr &optional; + + ## Where the data was discovered. + where: Where &log; + + ## The name of the node where the match was discovered. + node: string &optional &log; + + ## If the data was discovered within a connection, the + ## connection record should go here to give context to the data. + conn: connection &optional; + + ## If the data was discovered within a connection, the + ## connection uid should go here to give context to the data. + ## If the *conn* field is provided, this will be automatically + ## filled out. + uid: string &optional; + }; + + ## Record used for the logging framework representing a positive + ## hit within the intelligence framework. + type Info: record { + ## Timestamp when the data was discovered. + ts: time &log; + + ## If a connection was associated with this intelligence hit, + ## this is the uid for the connection + uid: string &log &optional; + ## If a connection was associated with this intelligence hit, + ## this is the conn_id for the connection. + id: conn_id &log &optional; + + ## Where the data was seen. + seen: Seen &log; + ## Which indicator types matched. + matched: TypeSet &log; + ## Sources which supplied data that resulted in this match. + sources: set[string] &log &default=string_set(); + }; + + ## Function to insert intelligence data. If the indicator is already + ## present, the associated metadata will be added to the indicator. 
If + ## the indicator already contains a metadata record from the same source, + ## the existing metadata record will be updated. + global insert: function(item: Item); + + ## Function to remove intelligence data. If purge_indicator is set, the + ## given metadata is ignored and the indicator is removed completely. + global remove: function(item: Item, purge_indicator: bool &default = F); + + ## Function to declare discovery of a piece of data in order to check + ## it against known intelligence for matches. + global seen: function(s: Seen); + + ## Event to represent a match in the intelligence data from data that + ## was seen. On clusters there is no assurance as to when this event + ## will be generated so do not assume that arbitrary global state beyond + ## the given data will be available. + ## + ## This is the primary mechanism where a user may take actions based on + ## data provided by the intelligence framework. + global match: event(s: Seen, items: set[Item]); + + ## This hook can be used to influence the logging of intelligence hits + ## (e.g. by adding data to the Info record). The default information is + ## added with a priority of 5. + ## + ## info: The Info record that will be logged. + ## + ## s: Information about the data seen. + ## + ## items: The intel items that match the seen data. + ## + ## In case the hook execution is terminated using break, the match will + ## not be logged. + global extend_match: hook(info: Info, s: Seen, items: set[Item]); + + ## The expiration timeout for intelligence items. Once an item expires, the + ## :zeek:id:`Intel::item_expired` hook is called. Reinsertion of an item + ## resets the timeout. A negative value disables expiration of intelligence + ## items. + const item_expiration = -1 min &redef; + + ## This hook can be used to handle expiration of intelligence items. + ## + ## indicator: The indicator of the expired item. + ## + ## indicator_type: The indicator type of the expired item. + ## + ## metas: The set of metadata describing the expired item. + ## + ## If all hook handlers are executed, the expiration timeout will be reset. + ## Otherwise, if one of the handlers terminates using break, the item will + ## be removed. + global item_expired: hook(indicator: string, indicator_type: Type, metas: set[MetaData]); + + ## This hook can be used to filter intelligence items that are about to be + ## inserted into the internal data store. In case the hook execution is + ## terminated using break, the item will not be (re)added to the internal + ## data store. + ## + ## item: The intel item that should be inserted. + global filter_item: hook(item: Intel::Item); + + global log_intel: event(rec: Info); +} + +# Internal handler for matches with no metadata available. +global match_remote: event(s: Seen); + +# Internal events for (cluster) data distribution. +global new_item: event(item: Item); +global remove_item: event(item: Item, purge_indicator: bool); +global remove_indicator: event(item: Item); + +# Optionally store metadata. This is used internally depending on +# if this is a cluster deployment or not. +const have_full_data = T &redef; + +# Table of metadata, indexed by source string. +type MetaDataTable: table[string] of MetaData; + +# Expiration handlers. 
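# Before the internal machinery below, a minimal usage sketch of the exported
# interface documented above (the indicator value and source label are
# placeholders; calling Intel::seen directly from zeek_init is only for
# illustration):
#
event zeek_init()
	{
	local item = Intel::Item($indicator="198.51.100.1",
	                         $indicator_type=Intel::ADDR,
	                         $meta=Intel::MetaData($source="example-feed"));
	Intel::insert(item);

	# Report an observation of the same address; a hit against the data
	# store raises Intel::match and ends up in intel.log.
	Intel::seen(Intel::Seen($host=198.51.100.1, $where=Intel::IN_ANYWHERE));
	}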
+global expire_host_data: function(data: table[addr] of MetaDataTable, idx: addr): interval; +global expire_subnet_data: function(data: table[subnet] of MetaDataTable, idx: subnet): interval; +global expire_string_data: function(data: table[string, Type] of MetaDataTable, idx: any): interval; + +# The in memory data structure for holding intelligence. +type DataStore: record { + host_data: table[addr] of MetaDataTable &write_expire=item_expiration &expire_func=expire_host_data; + subnet_data: table[subnet] of MetaDataTable &write_expire=item_expiration &expire_func=expire_subnet_data; + string_data: table[string, Type] of MetaDataTable &write_expire=item_expiration &expire_func=expire_string_data; +}; +global data_store: DataStore &redef; + +# The in memory data structure for holding the barest matchable intelligence. +# This is primarily for workers to do the initial quick matches and store +# a minimal amount of data for the full match to happen on the manager. +type MinDataStore: record { + host_data: set[addr]; + subnet_data: set[subnet]; + string_data: set[string, Type]; +}; +global min_data_store: MinDataStore &redef; + + +event zeek_init() &priority=5 + { + Log::create_stream(LOG, [$columns=Info, $ev=log_intel, $path="intel"]); + } + +# Function that abstracts expiration of different types. +function expire_item(indicator: string, indicator_type: Type, metas: set[MetaData]): interval + { + if ( hook item_expired(indicator, indicator_type, metas) ) + return item_expiration; + else + remove([$indicator=indicator, $indicator_type=indicator_type, $meta=[$source=""]], T); + return 0 sec; + } + +# Expiration handler definitions. +function expire_host_data(data: table[addr] of MetaDataTable, idx: addr): interval + { + local meta_tbl: MetaDataTable = data[idx]; + local metas: set[MetaData]; + for ( src, md in meta_tbl ) + add metas[md]; + + return expire_item(cat(idx), ADDR, metas); + } + +function expire_subnet_data(data: table[subnet] of MetaDataTable, idx: subnet): interval + { + local meta_tbl: MetaDataTable = data[idx]; + local metas: set[MetaData]; + for ( src, md in meta_tbl ) + add metas[md]; + + return expire_item(cat(idx), SUBNET, metas); + } + +function expire_string_data(data: table[string, Type] of MetaDataTable, idx: any): interval + { + local indicator: string; + local indicator_type: Type; + [indicator, indicator_type] = idx; + + local meta_tbl: MetaDataTable = data[indicator, indicator_type]; + local metas: set[MetaData]; + for ( src, md in meta_tbl ) + add metas[md]; + + return expire_item(indicator, indicator_type, metas); + } + +# Function to check for intelligence hits. +function find(s: Seen): bool + { + if ( s?$host ) + { + if ( have_full_data ) + return ((s$host in data_store$host_data) || + (|matching_subnets(addr_to_subnet(s$host), data_store$subnet_data)| > 0)); + else + return ((s$host in min_data_store$host_data) || + (|matching_subnets(addr_to_subnet(s$host), min_data_store$subnet_data)| > 0)); + } + else + { + if ( have_full_data ) + return ([to_lower(s$indicator), s$indicator_type] in data_store$string_data); + else + return ([to_lower(s$indicator), s$indicator_type] in min_data_store$string_data); + } + } + +# Function to retrieve intelligence items while abstracting from different +# data stores for different indicator types. +function get_items(s: Seen): set[Item] + { + local return_data: set[Item]; + local mt: MetaDataTable; + + if ( ! 
have_full_data ) + { + Reporter::warning(fmt("Intel::get_items was called from a host (%s) that doesn't have the full data.", + peer_description)); + return return_data; + } + + if ( s?$host ) + { + # See if the host is known about and it has meta values + if ( s$host in data_store$host_data ) + { + mt = data_store$host_data[s$host]; + for ( m, md in mt ) + { + add return_data[Item($indicator=cat(s$host), $indicator_type=ADDR, $meta=md)]; + } + } + # See if the host is part of a known subnet, which has meta values + local nets: table[subnet] of MetaDataTable; + nets = filter_subnet_table(addr_to_subnet(s$host), data_store$subnet_data); + for ( n, mt in nets ) + { + for ( m, md in mt ) + { + add return_data[Item($indicator=cat(n), $indicator_type=SUBNET, $meta=md)]; + } + } + } + else + { + local lower_indicator = to_lower(s$indicator); + # See if the string is known about and it has meta values + if ( [lower_indicator, s$indicator_type] in data_store$string_data ) + { + mt = data_store$string_data[lower_indicator, s$indicator_type]; + for ( m, md in mt ) + { + add return_data[Item($indicator=s$indicator, $indicator_type=s$indicator_type, $meta=md)]; + } + } + } + + return return_data; + } + +function Intel::seen(s: Seen) + { + if ( find(s) ) + { + if ( s?$host ) + { + s$indicator = cat(s$host); + s$indicator_type = Intel::ADDR; + } + + if ( ! s?$node ) + { + s$node = peer_description; + } + + if ( have_full_data ) + { + local items = get_items(s); + event Intel::match(s, items); + } + else + { + event Intel::match_remote(s); + } + } + } + +event Intel::match(s: Seen, items: set[Item]) &priority=5 + { + local info = Info($ts=network_time(), $seen=s, $matched=TypeSet()); + + if ( hook extend_match(info, s, items) ) + Log::write(Intel::LOG, info); + } + +hook extend_match(info: Info, s: Seen, items: set[Item]) &priority=5 + { + # Add default information to matches. + if ( s?$conn ) + { + s$uid = s$conn$uid; + info$id = s$conn$id; + } + + if ( s?$uid ) + info$uid = s$uid; + + for ( item in items ) + { + add info$sources[item$meta$source]; + add info$matched[item$indicator_type]; + } + } + +# Function to insert metadata of an item. The function returns T +# if the given indicator is new. +function insert_meta_data(item: Item): bool + { + # Prepare the metadata entry. + local meta = item$meta; + local meta_tbl: table [string] of MetaData; + local is_new: bool = T; + + # All intelligence is case insensitive at the moment. + local lower_indicator = to_lower(item$indicator); + + switch ( item$indicator_type ) + { + case ADDR: + local host = to_addr(item$indicator); + + if ( host !in data_store$host_data ) + data_store$host_data[host] = table(); + else + { + is_new = F; + # Reset expiration timer. + data_store$host_data[host] = data_store$host_data[host]; + } + + meta_tbl = data_store$host_data[host]; + break; + case SUBNET: + local net = to_subnet(item$indicator); + + if ( !check_subnet(net, data_store$subnet_data) ) + data_store$subnet_data[net] = table(); + else + { + is_new = F; + # Reset expiration timer. + data_store$subnet_data[net] = data_store$subnet_data[net]; + } + + meta_tbl = data_store$subnet_data[net]; + break; + default: + if ( [lower_indicator, item$indicator_type] !in data_store$string_data ) + data_store$string_data[lower_indicator, item$indicator_type] = table(); + else + { + is_new = F; + # Reset expiration timer. 
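			# (The self-assignment below looks like a no-op, but it counts
			# as a write access and therefore refreshes the &write_expire
			# timer attached to the data_store tables declared above.)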
+ data_store$string_data[lower_indicator, item$indicator_type] = + data_store$string_data[lower_indicator, item$indicator_type]; + } + + meta_tbl = data_store$string_data[lower_indicator, item$indicator_type]; + break; + } + + # Insert new metadata or update if already present. + meta_tbl[meta$source] = meta; + + return is_new; + } + +# Function to encapsulate insertion logic. The first_dispatch parameter +# indicates whether the item might be new for other nodes. +function _insert(item: Item, first_dispatch: bool &default = T) + { + # Assume that the item is new by default. + local is_new: bool = T; + + # All intelligence is case insensitive at the moment. + local lower_indicator = to_lower(item$indicator); + + # Insert indicator into MinDataStore (might exist already). + switch ( item$indicator_type ) + { + case ADDR: + local host = to_addr(item$indicator); + add min_data_store$host_data[host]; + break; + case SUBNET: + local net = to_subnet(item$indicator); + add min_data_store$subnet_data[net]; + break; + default: + add min_data_store$string_data[lower_indicator, item$indicator_type]; + break; + } + + if ( have_full_data ) + { + # Insert new metadata or update if already present. + is_new = insert_meta_data(item); + } + + if ( first_dispatch && is_new ) + # Announce a (possibly) new item if this is the first dispatch and + # we know it is new or have to assume that on a worker. + event Intel::new_item(item); + } + +function insert(item: Item) + { + if ( hook filter_item(item) ) + { + # Insert possibly new item. + _insert(item, T); + } + } + +# Function to check whether an item is present. +function item_exists(item: Item): bool + { + switch ( item$indicator_type ) + { + case ADDR: + return have_full_data ? to_addr(item$indicator) in data_store$host_data : + to_addr(item$indicator) in min_data_store$host_data; + case SUBNET: + return have_full_data ? to_subnet(item$indicator) in data_store$subnet_data : + to_subnet(item$indicator) in min_data_store$subnet_data; + default: + return have_full_data ? [item$indicator, item$indicator_type] in data_store$string_data : + [item$indicator, item$indicator_type] in min_data_store$string_data; + } + } + +# Function to remove metadata of an item. The function returns T +# if there is no metadata left for the given indicator. +function remove_meta_data(item: Item): bool + { + if ( ! have_full_data ) + { + Reporter::warning(fmt("Intel::remove_meta_data was called from a host (%s) that doesn't have the full data.", + peer_description)); + return F; + } + + switch ( item$indicator_type ) + { + case ADDR: + local host = to_addr(item$indicator); + delete data_store$host_data[host][item$meta$source]; + return (|data_store$host_data[host]| == 0); + case SUBNET: + local net = to_subnet(item$indicator); + delete data_store$subnet_data[net][item$meta$source]; + return (|data_store$subnet_data[net]| == 0); + default: + delete data_store$string_data[item$indicator, item$indicator_type][item$meta$source]; + return (|data_store$string_data[item$indicator, item$indicator_type]| == 0); + } + } + +function remove(item: Item, purge_indicator: bool) + { + # Check whether the indicator is present + if ( ! 
item_exists(item) ) + { + Reporter::info(fmt("Tried to remove non-existing item '%s' (%s).", + item$indicator, item$indicator_type)); + return; + } + + # Delegate removal if we are on a worker + if ( !have_full_data ) + { + event Intel::remove_item(item, purge_indicator); + return; + } + + # Remove metadata from manager's data store + local no_meta_data = remove_meta_data(item); + # Remove whole indicator if necessary + if ( no_meta_data || purge_indicator ) + { + switch ( item$indicator_type ) + { + case ADDR: + local host = to_addr(item$indicator); + delete data_store$host_data[host]; + break; + case SUBNET: + local net = to_subnet(item$indicator); + delete data_store$subnet_data[net]; + break; + default: + delete data_store$string_data[item$indicator, item$indicator_type]; + break; + } + # Trigger deletion in minimal data stores + event Intel::remove_indicator(item); + } + } + +# Handling of indicator removal in minimal data stores. +event remove_indicator(item: Item) + { + switch ( item$indicator_type ) + { + case ADDR: + local host = to_addr(item$indicator); + delete min_data_store$host_data[host]; + break; + case SUBNET: + local net = to_subnet(item$indicator); + delete min_data_store$subnet_data[net]; + break; + default: + delete min_data_store$string_data[item$indicator, item$indicator_type]; + break; + } + } diff --git a/scripts/base/frameworks/logging/__load__.bro b/scripts/base/frameworks/logging/__load__.zeek similarity index 100% rename from scripts/base/frameworks/logging/__load__.bro rename to scripts/base/frameworks/logging/__load__.zeek diff --git a/scripts/base/frameworks/logging/main.bro b/scripts/base/frameworks/logging/main.bro deleted file mode 100644 index 798b54839e..0000000000 --- a/scripts/base/frameworks/logging/main.bro +++ /dev/null @@ -1,645 +0,0 @@ -##! The Bro logging interface. -##! -##! See :doc:`/frameworks/logging` for an introduction to Bro's -##! logging framework. - -module Log; - -export { - ## Type that defines an ID unique to each log stream. Scripts creating new - ## log streams need to redef this enum to add their own specific log ID. - ## The log ID implicitly determines the default name of the generated log - ## file. - type Log::ID: enum { - ## Dummy place-holder. - UNKNOWN - }; - - ## If true, local logging is by default enabled for all filters. - const enable_local_logging = T &redef; - - ## If true, remote logging is by default enabled for all filters. - const enable_remote_logging = T &redef; - - ## Default writer to use if a filter does not specify anything else. - const default_writer = WRITER_ASCII &redef; - - ## Default separator to use between fields. - ## Individual writers can use a different value. - const separator = "\t" &redef; - - ## Default separator to use between elements of a set. - ## Individual writers can use a different value. - const set_separator = "," &redef; - - ## Default string to use for empty fields. This should be different - ## from *unset_field* to make the output unambiguous. - ## Individual writers can use a different value. - const empty_field = "(empty)" &redef; - - ## Default string to use for an unset &optional field. - ## Individual writers can use a different value. - const unset_field = "-" &redef; - - ## Type defining the content of a logging stream. - type Stream: record { - ## A record type defining the log's columns. - columns: any; - - ## Event that will be raised once for each log entry. - ## The event receives a single same parameter, an instance of - ## type ``columns``. 
- ev: any &optional; - - ## A path that will be inherited by any filters added to the - ## stream which do not already specify their own path. - path: string &optional; - }; - - ## Builds the default path values for log filters if not otherwise - ## specified by a filter. The default implementation uses *id* - ## to derive a name. Upon adding a filter to a stream, if neither - ## ``path`` nor ``path_func`` is explicitly set by them, then - ## this function is used as the ``path_func``. - ## - ## id: The ID associated with the log stream. - ## - ## path: A suggested path value, which may be either the filter's - ## ``path`` if defined, else a previous result from the function. - ## If no ``path`` is defined for the filter, then the first call - ## to the function will contain an empty string. - ## - ## rec: An instance of the stream's ``columns`` type with its - ## fields set to the values to be logged. - ## - ## Returns: The path to be used for the filter. - global default_path_func: function(id: ID, path: string, rec: any) : string &redef; - - # Log rotation support. - - ## Information passed into rotation callback functions. - type RotationInfo: record { - writer: Writer; ##< The log writer being used. - fname: string; ##< Full name of the rotated file. - path: string; ##< Original path value. - open: time; ##< Time when opened. - close: time; ##< Time when closed. - terminating: bool; ##< True if rotation occured due to Bro shutting down. - }; - - ## Default rotation interval to use for filters that do not specify - ## an interval. Zero disables rotation. - ## - ## Note that this is overridden by the BroControl LogRotationInterval - ## option. - const default_rotation_interval = 0secs &redef; - - ## Default naming format for timestamps embedded into filenames. - ## Uses a ``strftime()`` style. - const default_rotation_date_format = "%Y-%m-%d-%H-%M-%S" &redef; - - ## Default shell command to run on rotated files. Empty for none. - const default_rotation_postprocessor_cmd = "" &redef; - - ## Specifies the default postprocessor function per writer type. - ## Entries in this table are initialized by each writer type. - const default_rotation_postprocessors: table[Writer] of function(info: RotationInfo) : bool &redef; - - ## Default alarm summary mail interval. Zero disables alarm summary - ## mails. - ## - ## Note that this is overridden by the BroControl MailAlarmsInterval - ## option. - const default_mail_alarms_interval = 0secs &redef; - - ## Default field name mapping for renaming fields in a logging framework - ## filter. This is typically used to ease integration with external - ## data storage and analysis systems. - const default_field_name_map: table[string] of string = table() &redef; - - ## Default separator for log field scopes when logs are unrolled and - ## flattened. This will be the string between field name components. - ## For example, setting this to "_" will cause the typical field - ## "id.orig_h" to turn into "id_orig_h". - const default_scope_sep = "." &redef; - - ## A prefix for extension fields which can be optionally prefixed - ## on all log lines by setting the `ext_func` field in the - ## log filter. - const Log::default_ext_prefix: string = "_" &redef; - - ## Default log extension function in the case that you would like to - ## apply the same extensions to all logs. The function *must* return - ## a record with all of the fields to be included in the log. 
The - ## default function included here does not return a value, which indicates - ## that no extensions are added. - const Log::default_ext_func: function(path: string): any = - function(path: string) { } &redef; - - ## A filter type describes how to customize logging streams. - type Filter: record { - ## Descriptive name to reference this filter. - name: string; - - ## The logging writer implementation to use. - writer: Writer &default=default_writer; - - ## Indicates whether a log entry should be recorded. - ## If not given, all entries are recorded. - ## - ## rec: An instance of the stream's ``columns`` type with its - ## fields set to the values to be logged. - ## - ## Returns: True if the entry is to be recorded. - pred: function(rec: any): bool &optional; - - ## Output path for recording entries matching this - ## filter. - ## - ## The specific interpretation of the string is up to the - ## logging writer, and may for example be the destination - ## file name. Generally, filenames are expected to be given - ## without any extensions; writers will add appropriate - ## extensions automatically. - ## - ## If this path is found to conflict with another filter's - ## for the same writer type, it is automatically corrected - ## by appending "-N", where N is the smallest integer greater - ## or equal to 2 that allows the corrected path name to not - ## conflict with another filter's. - path: string &optional; - - ## A function returning the output path for recording entries - ## matching this filter. This is similar to *path* yet allows - ## to compute the string dynamically. It is ok to return - ## different strings for separate calls, but be careful: it's - ## easy to flood the disk by returning a new string for each - ## connection. Upon adding a filter to a stream, if neither - ## ``path`` nor ``path_func`` is explicitly set by them, then - ## :bro:see:`Log::default_path_func` is used. - ## - ## id: The ID associated with the log stream. - ## - ## path: A suggested path value, which may be either the filter's - ## ``path`` if defined, else a previous result from the - ## function. If no ``path`` is defined for the filter, - ## then the first call to the function will contain an - ## empty string. - ## - ## rec: An instance of the stream's ``columns`` type with its - ## fields set to the values to be logged. - ## - ## Returns: The path to be used for the filter, which will be - ## subject to the same automatic correction rules as - ## the *path* field of :bro:type:`Log::Filter` in the - ## case of conflicts with other filters trying to use - ## the same writer/path pair. - path_func: function(id: ID, path: string, rec: any): string &optional; - - ## Subset of column names to record. If not given, all - ## columns are recorded. - include: set[string] &optional; - - ## Subset of column names to exclude from recording. If not - ## given, all columns are recorded. - exclude: set[string] &optional; - - ## If true, entries are recorded locally. - log_local: bool &default=enable_local_logging; - - ## If true, entries are passed on to remote peers. - log_remote: bool &default=enable_remote_logging; - - ## Field name map to rename fields before the fields are written - ## to the output. - field_name_map: table[string] of string &default=default_field_name_map; - - ## A string that is used for unrolling and flattening field names - ## for nested record types. - scope_sep: string &default=default_scope_sep; - - ## Default prefix for all extension fields. 
It's typically - ## prudent to set this to something that Bro's logging - ## framework can't normally write out in a field name. - ext_prefix: string &default=default_ext_prefix; - - ## Function to collect a log extension value. If not specified, - ## no log extension will be provided for the log. - ## The return value from the function *must* be a record. - ext_func: function(path: string): any &default=default_ext_func; - - ## Rotation interval. Zero disables rotation. - interv: interval &default=default_rotation_interval; - - ## Callback function to trigger for rotated files. If not set, the - ## default comes out of :bro:id:`Log::default_rotation_postprocessors`. - postprocessor: function(info: RotationInfo) : bool &optional; - - ## A key/value table that will be passed on to the writer. - ## Interpretation of the values is left to the writer, but - ## usually they will be used for configuration purposes. - config: table[string] of string &default=table(); - }; - - ## Sentinel value for indicating that a filter was not found when looked up. - const no_filter: Filter = [$name=""]; - - ## Creates a new logging stream with the default filter. - ## - ## id: The ID enum to be associated with the new logging stream. - ## - ## stream: A record defining the content that the new stream will log. - ## - ## Returns: True if a new logging stream was successfully created and - ## a default filter added to it. - ## - ## .. bro:see:: Log::add_default_filter Log::remove_default_filter - global create_stream: function(id: ID, stream: Stream) : bool; - - ## Removes a logging stream completely, stopping all the threads. - ## - ## id: The ID associated with the logging stream. - ## - ## Returns: True if the stream was successfully removed. - ## - ## .. bro:see:: Log::create_stream - global remove_stream: function(id: ID) : bool; - - ## Enables a previously disabled logging stream. Disabled streams - ## will not be written to until they are enabled again. New streams - ## are enabled by default. - ## - ## id: The ID associated with the logging stream to enable. - ## - ## Returns: True if the stream is re-enabled or was not previously disabled. - ## - ## .. bro:see:: Log::disable_stream - global enable_stream: function(id: ID) : bool; - - ## Disables a currently enabled logging stream. Disabled streams - ## will not be written to until they are enabled again. New streams - ## are enabled by default. - ## - ## id: The ID associated with the logging stream to disable. - ## - ## Returns: True if the stream is now disabled or was already disabled. - ## - ## .. bro:see:: Log::enable_stream - global disable_stream: function(id: ID) : bool; - - ## Adds a custom filter to an existing logging stream. If a filter - ## with a matching ``name`` field already exists for the stream, it - ## is removed when the new filter is successfully added. - ## - ## id: The ID associated with the logging stream to filter. - ## - ## filter: A record describing the desired logging parameters. - ## - ## Returns: True if the filter was successfully added, false if - ## the filter was not added or the *filter* argument was not - ## the correct type. - ## - ## .. bro:see:: Log::remove_filter Log::add_default_filter - ## Log::remove_default_filter Log::get_filter Log::get_filter_names - global add_filter: function(id: ID, filter: Filter) : bool; - - ## Removes a filter from an existing logging stream. - ## - ## id: The ID associated with the logging stream from which to - ## remove a filter. 
- ## - ## name: A string to match against the ``name`` field of a - ## :bro:type:`Log::Filter` for identification purposes. - ## - ## Returns: True if the logging stream's filter was removed or - ## if no filter associated with *name* was found. - ## - ## .. bro:see:: Log::remove_filter Log::add_default_filter - ## Log::remove_default_filter Log::get_filter Log::get_filter_names - global remove_filter: function(id: ID, name: string) : bool; - - ## Gets the names of all filters associated with an existing - ## logging stream. - ## - ## id: The ID of a logging stream from which to obtain the list - ## of filter names. - ## - ## Returns: The set of filter names associated with the stream. - ## - ## ..bro:see:: Log::remove_filter Log::add_default_filter - ## Log::remove_default_filter Log::get_filter - global get_filter_names: function(id: ID) : set[string]; - - ## Gets a filter associated with an existing logging stream. - ## - ## id: The ID associated with a logging stream from which to - ## obtain one of its filters. - ## - ## name: A string to match against the ``name`` field of a - ## :bro:type:`Log::Filter` for identification purposes. - ## - ## Returns: A filter attached to the logging stream *id* matching - ## *name* or, if no matches are found returns the - ## :bro:id:`Log::no_filter` sentinel value. - ## - ## .. bro:see:: Log::add_filter Log::remove_filter Log::add_default_filter - ## Log::remove_default_filter Log::get_filter_names - global get_filter: function(id: ID, name: string) : Filter; - - ## Writes a new log line/entry to a logging stream. - ## - ## id: The ID associated with a logging stream to be written to. - ## - ## columns: A record value describing the values of each field/column - ## to write to the log stream. - ## - ## Returns: True if the stream was found and no error occurred in writing - ## to it or if the stream was disabled and nothing was written. - ## False if the stream was not found, or the *columns* - ## argument did not match what the stream was initially defined - ## to handle, or one of the stream's filters has an invalid - ## ``path_func``. - ## - ## .. bro:see:: Log::enable_stream Log::disable_stream - global write: function(id: ID, columns: any) : bool; - - ## Sets the buffering status for all the writers of a given logging stream. - ## A given writer implementation may or may not support buffering and if - ## it doesn't then toggling buffering with this function has no effect. - ## - ## id: The ID associated with a logging stream for which to - ## enable/disable buffering. - ## - ## buffered: Whether to enable or disable log buffering. - ## - ## Returns: True if buffering status was set, false if the logging stream - ## does not exist. - ## - ## .. bro:see:: Log::flush - global set_buf: function(id: ID, buffered: bool): bool; - - ## Flushes any currently buffered output for all the writers of a given - ## logging stream. - ## - ## id: The ID associated with a logging stream for which to flush buffered - ## data. - ## - ## Returns: True if all writers of a log stream were signalled to flush - ## buffered data or if the logging stream is disabled, - ## false if the logging stream does not exist. - ## - ## .. bro:see:: Log::set_buf Log::enable_stream Log::disable_stream - global flush: function(id: ID): bool; - - ## Adds a default :bro:type:`Log::Filter` record with ``name`` field - ## set as "default" to a given logging stream. - ## - ## id: The ID associated with a logging stream for which to add a default - ## filter. 
- ## - ## Returns: The status of a call to :bro:id:`Log::add_filter` using a - ## default :bro:type:`Log::Filter` argument with ``name`` field - ## set to "default". - ## - ## .. bro:see:: Log::add_filter Log::remove_filter - ## Log::remove_default_filter - global add_default_filter: function(id: ID) : bool; - - ## Removes the :bro:type:`Log::Filter` with ``name`` field equal to - ## "default". - ## - ## id: The ID associated with a logging stream from which to remove the - ## default filter. - ## - ## Returns: The status of a call to :bro:id:`Log::remove_filter` using - ## "default" as the argument. - ## - ## .. bro:see:: Log::add_filter Log::remove_filter Log::add_default_filter - global remove_default_filter: function(id: ID) : bool; - - ## Runs a command given by :bro:id:`Log::default_rotation_postprocessor_cmd` - ## on a rotated file. Meant to be called from postprocessor functions - ## that are added to :bro:id:`Log::default_rotation_postprocessors`. - ## - ## info: A record holding meta-information about the log being rotated. - ## - ## npath: The new path of the file (after already being rotated/processed - ## by writer-specific postprocessor as defined in - ## :bro:id:`Log::default_rotation_postprocessors`). - ## - ## Returns: True when :bro:id:`Log::default_rotation_postprocessor_cmd` - ## is empty or the system command given by it has been invoked - ## to postprocess a rotated log file. - ## - ## .. bro:see:: Log::default_rotation_date_format - ## Log::default_rotation_postprocessor_cmd - ## Log::default_rotation_postprocessors - global run_rotation_postprocessor_cmd: function(info: RotationInfo, npath: string) : bool; - - ## The streams which are currently active and not disabled. - ## This table is not meant to be modified by users! Only use it for - ## examining which streams are active. - global active_streams: table[ID] of Stream = table(); -} - -global all_streams: table[ID] of Stream = table(); - -global stream_filters: table[ID] of set[string] = table(); - -# We keep a script-level copy of all filters so that we can manipulate them. -global filters: table[ID, string] of Filter; - -@load base/bif/logging.bif # Needs Filter and Stream defined. - -module Log; - -# Used internally by the log manager. -function __default_rotation_postprocessor(info: RotationInfo) : bool - { - if ( info$writer in default_rotation_postprocessors ) - return default_rotation_postprocessors[info$writer](info); - else - # Return T by default so that postprocessor-less writers don't shutdown. - return T; - } - -function default_path_func(id: ID, path: string, rec: any) : string - { - # The suggested path value is a previous result of this function - # or a filter path explicitly set by the user, so continue using it. 
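	# (For instance, a stream ID like PacketFilter::LOG comes out of the
	# logic below as "packet_filter"; the writer then appends its own file
	# extension, e.g. ".log" for the ASCII writer.)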
- if ( path != "" ) - return path; - - local id_str = fmt("%s", id); - - local parts = split_string1(id_str, /::/); - if ( |parts| == 2 ) - { - # Example: Notice::LOG -> "notice" - if ( parts[1] == "LOG" ) - { - local module_parts = split_string_n(parts[0], /[^A-Z][A-Z][a-z]*/, T, 4); - local output = ""; - if ( 0 in module_parts ) - output = module_parts[0]; - if ( 1 in module_parts && module_parts[1] != "" ) - output = cat(output, sub_bytes(module_parts[1],1,1), "_", sub_bytes(module_parts[1], 2, |module_parts[1]|)); - if ( 2 in module_parts && module_parts[2] != "" ) - output = cat(output, "_", module_parts[2]); - if ( 3 in module_parts && module_parts[3] != "" ) - output = cat(output, sub_bytes(module_parts[3],1,1), "_", sub_bytes(module_parts[3], 2, |module_parts[3]|)); - return to_lower(output); - } - - # Example: Notice::POLICY_LOG -> "notice_policy" - if ( /_LOG$/ in parts[1] ) - parts[1] = sub(parts[1], /_LOG$/, ""); - - return cat(to_lower(parts[0]),"_",to_lower(parts[1])); - } - else - return to_lower(id_str); - } - -# Run post-processor on file. If there isn't any postprocessor defined, -# we move the file to a nicer name. -function run_rotation_postprocessor_cmd(info: RotationInfo, npath: string) : bool - { - local pp_cmd = default_rotation_postprocessor_cmd; - - if ( pp_cmd == "" ) - return T; - - # Turn, e.g., Log::WRITER_ASCII into "ascii". - local writer = subst_string(to_lower(fmt("%s", info$writer)), "log::writer_", ""); - - # The date format is hard-coded here to provide a standardized - # script interface. - system(fmt("%s %s %s %s %s %d %s", - pp_cmd, npath, info$path, - strftime("%y-%m-%d_%H.%M.%S", info$open), - strftime("%y-%m-%d_%H.%M.%S", info$close), - info$terminating, writer)); - - return T; - } - -function create_stream(id: ID, stream: Stream) : bool - { - if ( ! __create_stream(id, stream) ) - return F; - - active_streams[id] = stream; - all_streams[id] = stream; - - return add_default_filter(id); - } - -function remove_stream(id: ID) : bool - { - delete active_streams[id]; - delete all_streams[id]; - - if ( id in stream_filters ) - { - for ( i in stream_filters[id] ) - delete filters[id, i]; - - delete stream_filters[id]; - } - return __remove_stream(id); - } - -function disable_stream(id: ID) : bool - { - delete active_streams[id]; - return __disable_stream(id); - } - -function enable_stream(id: ID) : bool - { - if ( ! __enable_stream(id) ) - return F; - - if ( id in all_streams ) - active_streams[id] = all_streams[id]; - } - -# convenience function to add a filter name to stream_filters -function add_stream_filters(id: ID, name: string) - { - if ( id in stream_filters ) - add stream_filters[id][name]; - else - stream_filters[id] = set(name); - } - -function add_filter(id: ID, filter: Filter) : bool - { - local stream = all_streams[id]; - - if ( stream?$path && ! filter?$path ) - filter$path = stream$path; - - if ( ! filter?$path && ! 
filter?$path_func ) - filter$path_func = default_path_func; - - local res = __add_filter(id, filter); - if ( res ) - { - add_stream_filters(id, filter$name); - filters[id, filter$name] = filter; - } - return res; - } - -function remove_filter(id: ID, name: string) : bool - { - if ( id in stream_filters ) - delete stream_filters[id][name]; - - delete filters[id, name]; - - return __remove_filter(id, name); - } - -function get_filter(id: ID, name: string) : Filter - { - if ( [id, name] in filters ) - return filters[id, name]; - - return no_filter; - } - -function get_filter_names(id: ID) : set[string] - { - if ( id in stream_filters ) - return stream_filters[id]; - else - return set(); - } - -function write(id: ID, columns: any) : bool - { - return __write(id, columns); - } - -function set_buf(id: ID, buffered: bool): bool - { - return __set_buf(id, buffered); - } - -function flush(id: ID): bool - { - return __flush(id); - } - -function add_default_filter(id: ID) : bool - { - return add_filter(id, [$name="default"]); - } - -function remove_default_filter(id: ID) : bool - { - return remove_filter(id, "default"); - } diff --git a/scripts/base/frameworks/logging/main.zeek b/scripts/base/frameworks/logging/main.zeek new file mode 100644 index 0000000000..3a83808369 --- /dev/null +++ b/scripts/base/frameworks/logging/main.zeek @@ -0,0 +1,645 @@ +##! The Zeek logging interface. +##! +##! See :doc:`/frameworks/logging` for an introduction to Zeek's +##! logging framework. + +module Log; + +export { + ## Type that defines an ID unique to each log stream. Scripts creating new + ## log streams need to redef this enum to add their own specific log ID. + ## The log ID implicitly determines the default name of the generated log + ## file. + type Log::ID: enum { + ## Dummy place-holder. + UNKNOWN + }; + + ## If true, local logging is by default enabled for all filters. + const enable_local_logging = T &redef; + + ## If true, remote logging is by default enabled for all filters. + const enable_remote_logging = T &redef; + + ## Default writer to use if a filter does not specify anything else. + const default_writer = WRITER_ASCII &redef; + + ## Default separator to use between fields. + ## Individual writers can use a different value. + const separator = "\t" &redef; + + ## Default separator to use between elements of a set. + ## Individual writers can use a different value. + const set_separator = "," &redef; + + ## Default string to use for empty fields. This should be different + ## from *unset_field* to make the output unambiguous. + ## Individual writers can use a different value. + const empty_field = "(empty)" &redef; + + ## Default string to use for an unset &optional field. + ## Individual writers can use a different value. + const unset_field = "-" &redef; + + ## Type defining the content of a logging stream. + type Stream: record { + ## A record type defining the log's columns. + columns: any; + + ## Event that will be raised once for each log entry. + ## The event receives a single same parameter, an instance of + ## type ``columns``. + ev: any &optional; + + ## A path that will be inherited by any filters added to the + ## stream which do not already specify their own path. + path: string &optional; + }; + + ## Builds the default path values for log filters if not otherwise + ## specified by a filter. The default implementation uses *id* + ## to derive a name. 
Upon adding a filter to a stream, if neither + ## ``path`` nor ``path_func`` is explicitly set by them, then + ## this function is used as the ``path_func``. + ## + ## id: The ID associated with the log stream. + ## + ## path: A suggested path value, which may be either the filter's + ## ``path`` if defined, else a previous result from the function. + ## If no ``path`` is defined for the filter, then the first call + ## to the function will contain an empty string. + ## + ## rec: An instance of the stream's ``columns`` type with its + ## fields set to the values to be logged. + ## + ## Returns: The path to be used for the filter. + global default_path_func: function(id: ID, path: string, rec: any) : string &redef; + + # Log rotation support. + + ## Information passed into rotation callback functions. + type RotationInfo: record { + writer: Writer; ##< The log writer being used. + fname: string; ##< Full name of the rotated file. + path: string; ##< Original path value. + open: time; ##< Time when opened. + close: time; ##< Time when closed. + terminating: bool; ##< True if rotation occured due to Zeek shutting down. + }; + + ## Default rotation interval to use for filters that do not specify + ## an interval. Zero disables rotation. + ## + ## Note that this is overridden by the ZeekControl LogRotationInterval + ## option. + const default_rotation_interval = 0secs &redef; + + ## Default naming format for timestamps embedded into filenames. + ## Uses a ``strftime()`` style. + const default_rotation_date_format = "%Y-%m-%d-%H-%M-%S" &redef; + + ## Default shell command to run on rotated files. Empty for none. + const default_rotation_postprocessor_cmd = "" &redef; + + ## Specifies the default postprocessor function per writer type. + ## Entries in this table are initialized by each writer type. + const default_rotation_postprocessors: table[Writer] of function(info: RotationInfo) : bool &redef; + + ## Default alarm summary mail interval. Zero disables alarm summary + ## mails. + ## + ## Note that this is overridden by the ZeekControl MailAlarmsInterval + ## option. + const default_mail_alarms_interval = 0secs &redef; + + ## Default field name mapping for renaming fields in a logging framework + ## filter. This is typically used to ease integration with external + ## data storage and analysis systems. + const default_field_name_map: table[string] of string = table() &redef; + + ## Default separator for log field scopes when logs are unrolled and + ## flattened. This will be the string between field name components. + ## For example, setting this to "_" will cause the typical field + ## "id.orig_h" to turn into "id_orig_h". + const default_scope_sep = "." &redef; + + ## A prefix for extension fields which can be optionally prefixed + ## on all log lines by setting the `ext_func` field in the + ## log filter. + const Log::default_ext_prefix: string = "_" &redef; + + ## Default log extension function in the case that you would like to + ## apply the same extensions to all logs. The function *must* return + ## a record with all of the fields to be included in the log. The + ## default function included here does not return a value, which indicates + ## that no extensions are added. + const Log::default_ext_func: function(path: string): any = + function(path: string) { } &redef; + + ## A filter type describes how to customize logging streams. + type Filter: record { + ## Descriptive name to reference this filter. + name: string; + + ## The logging writer implementation to use. 
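To make the extension hooks above concrete, here is a hedged sketch of a global log extension in the style of the Zeek documentation; the ``Extension`` record and function name are illustrative, and with the default ``ext_prefix`` the added columns show up as ``_write_ts``, ``_stream`` and ``_system_name``:

type Extension: record {
	write_ts:    time   &log;
	stream:      string &log;
	system_name: string &log;
};

function add_log_extension(path: string): Extension
	{
	return Extension($write_ts    = network_time(),
	                 $stream      = path,
	                 $system_name = peer_description);
	}

redef Log::default_ext_func = add_log_extension;

# Field renames work similarly, e.g. for downstream consumers that dislike dots:
redef Log::default_field_name_map += { ["id.orig_h"] = "src", ["id.resp_h"] = "dst" };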
+ writer: Writer &default=default_writer; + + ## Indicates whether a log entry should be recorded. + ## If not given, all entries are recorded. + ## + ## rec: An instance of the stream's ``columns`` type with its + ## fields set to the values to be logged. + ## + ## Returns: True if the entry is to be recorded. + pred: function(rec: any): bool &optional; + + ## Output path for recording entries matching this + ## filter. + ## + ## The specific interpretation of the string is up to the + ## logging writer, and may for example be the destination + ## file name. Generally, filenames are expected to be given + ## without any extensions; writers will add appropriate + ## extensions automatically. + ## + ## If this path is found to conflict with another filter's + ## for the same writer type, it is automatically corrected + ## by appending "-N", where N is the smallest integer greater + ## or equal to 2 that allows the corrected path name to not + ## conflict with another filter's. + path: string &optional; + + ## A function returning the output path for recording entries + ## matching this filter. This is similar to *path* yet allows + ## to compute the string dynamically. It is ok to return + ## different strings for separate calls, but be careful: it's + ## easy to flood the disk by returning a new string for each + ## connection. Upon adding a filter to a stream, if neither + ## ``path`` nor ``path_func`` is explicitly set by them, then + ## :zeek:see:`Log::default_path_func` is used. + ## + ## id: The ID associated with the log stream. + ## + ## path: A suggested path value, which may be either the filter's + ## ``path`` if defined, else a previous result from the + ## function. If no ``path`` is defined for the filter, + ## then the first call to the function will contain an + ## empty string. + ## + ## rec: An instance of the stream's ``columns`` type with its + ## fields set to the values to be logged. + ## + ## Returns: The path to be used for the filter, which will be + ## subject to the same automatic correction rules as + ## the *path* field of :zeek:type:`Log::Filter` in the + ## case of conflicts with other filters trying to use + ## the same writer/path pair. + path_func: function(id: ID, path: string, rec: any): string &optional; + + ## Subset of column names to record. If not given, all + ## columns are recorded. + include: set[string] &optional; + + ## Subset of column names to exclude from recording. If not + ## given, all columns are recorded. + exclude: set[string] &optional; + + ## If true, entries are recorded locally. + log_local: bool &default=enable_local_logging; + + ## If true, entries are passed on to remote peers. + log_remote: bool &default=enable_remote_logging; + + ## Field name map to rename fields before the fields are written + ## to the output. + field_name_map: table[string] of string &default=default_field_name_map; + + ## A string that is used for unrolling and flattening field names + ## for nested record types. + scope_sep: string &default=default_scope_sep; + + ## Default prefix for all extension fields. It's typically + ## prudent to set this to something that Zeek's logging + ## framework can't normally write out in a field name. + ext_prefix: string &default=default_ext_prefix; + + ## Function to collect a log extension value. If not specified, + ## no log extension will be provided for the log. + ## The return value from the function *must* be a record. + ext_func: function(path: string): any &default=default_ext_func; + + ## Rotation interval. 
Zero disables rotation. + interv: interval &default=default_rotation_interval; + + ## Callback function to trigger for rotated files. If not set, the + ## default comes out of :zeek:id:`Log::default_rotation_postprocessors`. + postprocessor: function(info: RotationInfo) : bool &optional; + + ## A key/value table that will be passed on to the writer. + ## Interpretation of the values is left to the writer, but + ## usually they will be used for configuration purposes. + config: table[string] of string &default=table(); + }; + + ## Sentinel value for indicating that a filter was not found when looked up. + const no_filter: Filter = [$name=""]; + + ## Creates a new logging stream with the default filter. + ## + ## id: The ID enum to be associated with the new logging stream. + ## + ## stream: A record defining the content that the new stream will log. + ## + ## Returns: True if a new logging stream was successfully created and + ## a default filter added to it. + ## + ## .. zeek:see:: Log::add_default_filter Log::remove_default_filter + global create_stream: function(id: ID, stream: Stream) : bool; + + ## Removes a logging stream completely, stopping all the threads. + ## + ## id: The ID associated with the logging stream. + ## + ## Returns: True if the stream was successfully removed. + ## + ## .. zeek:see:: Log::create_stream + global remove_stream: function(id: ID) : bool; + + ## Enables a previously disabled logging stream. Disabled streams + ## will not be written to until they are enabled again. New streams + ## are enabled by default. + ## + ## id: The ID associated with the logging stream to enable. + ## + ## Returns: True if the stream is re-enabled or was not previously disabled. + ## + ## .. zeek:see:: Log::disable_stream + global enable_stream: function(id: ID) : bool; + + ## Disables a currently enabled logging stream. Disabled streams + ## will not be written to until they are enabled again. New streams + ## are enabled by default. + ## + ## id: The ID associated with the logging stream to disable. + ## + ## Returns: True if the stream is now disabled or was already disabled. + ## + ## .. zeek:see:: Log::enable_stream + global disable_stream: function(id: ID) : bool; + + ## Adds a custom filter to an existing logging stream. If a filter + ## with a matching ``name`` field already exists for the stream, it + ## is removed when the new filter is successfully added. + ## + ## id: The ID associated with the logging stream to filter. + ## + ## filter: A record describing the desired logging parameters. + ## + ## Returns: True if the filter was successfully added, false if + ## the filter was not added or the *filter* argument was not + ## the correct type. + ## + ## .. zeek:see:: Log::remove_filter Log::add_default_filter + ## Log::remove_default_filter Log::get_filter Log::get_filter_names + global add_filter: function(id: ID, filter: Filter) : bool; + + ## Removes a filter from an existing logging stream. + ## + ## id: The ID associated with the logging stream from which to + ## remove a filter. + ## + ## name: A string to match against the ``name`` field of a + ## :zeek:type:`Log::Filter` for identification purposes. + ## + ## Returns: True if the logging stream's filter was removed or + ## if no filter associated with *name* was found. + ## + ## .. 
zeek:see:: Log::remove_filter Log::add_default_filter + ## Log::remove_default_filter Log::get_filter Log::get_filter_names + global remove_filter: function(id: ID, name: string) : bool; + + ## Gets the names of all filters associated with an existing + ## logging stream. + ## + ## id: The ID of a logging stream from which to obtain the list + ## of filter names. + ## + ## Returns: The set of filter names associated with the stream. + ## + ## ..zeek:see:: Log::remove_filter Log::add_default_filter + ## Log::remove_default_filter Log::get_filter + global get_filter_names: function(id: ID) : set[string]; + + ## Gets a filter associated with an existing logging stream. + ## + ## id: The ID associated with a logging stream from which to + ## obtain one of its filters. + ## + ## name: A string to match against the ``name`` field of a + ## :zeek:type:`Log::Filter` for identification purposes. + ## + ## Returns: A filter attached to the logging stream *id* matching + ## *name* or, if no matches are found returns the + ## :zeek:id:`Log::no_filter` sentinel value. + ## + ## .. zeek:see:: Log::add_filter Log::remove_filter Log::add_default_filter + ## Log::remove_default_filter Log::get_filter_names + global get_filter: function(id: ID, name: string) : Filter; + + ## Writes a new log line/entry to a logging stream. + ## + ## id: The ID associated with a logging stream to be written to. + ## + ## columns: A record value describing the values of each field/column + ## to write to the log stream. + ## + ## Returns: True if the stream was found and no error occurred in writing + ## to it or if the stream was disabled and nothing was written. + ## False if the stream was not found, or the *columns* + ## argument did not match what the stream was initially defined + ## to handle, or one of the stream's filters has an invalid + ## ``path_func``. + ## + ## .. zeek:see:: Log::enable_stream Log::disable_stream + global write: function(id: ID, columns: any) : bool; + + ## Sets the buffering status for all the writers of a given logging stream. + ## A given writer implementation may or may not support buffering and if + ## it doesn't then toggling buffering with this function has no effect. + ## + ## id: The ID associated with a logging stream for which to + ## enable/disable buffering. + ## + ## buffered: Whether to enable or disable log buffering. + ## + ## Returns: True if buffering status was set, false if the logging stream + ## does not exist. + ## + ## .. zeek:see:: Log::flush + global set_buf: function(id: ID, buffered: bool): bool; + + ## Flushes any currently buffered output for all the writers of a given + ## logging stream. + ## + ## id: The ID associated with a logging stream for which to flush buffered + ## data. + ## + ## Returns: True if all writers of a log stream were signalled to flush + ## buffered data or if the logging stream is disabled, + ## false if the logging stream does not exist. + ## + ## .. zeek:see:: Log::set_buf Log::enable_stream Log::disable_stream + global flush: function(id: ID): bool; + + ## Adds a default :zeek:type:`Log::Filter` record with ``name`` field + ## set as "default" to a given logging stream. + ## + ## id: The ID associated with a logging stream for which to add a default + ## filter. + ## + ## Returns: The status of a call to :zeek:id:`Log::add_filter` using a + ## default :zeek:type:`Log::Filter` argument with ``name`` field + ## set to "default". + ## + ## .. 
zeek:see:: Log::add_filter Log::remove_filter + ## Log::remove_default_filter + global add_default_filter: function(id: ID) : bool; + + ## Removes the :zeek:type:`Log::Filter` with ``name`` field equal to + ## "default". + ## + ## id: The ID associated with a logging stream from which to remove the + ## default filter. + ## + ## Returns: The status of a call to :zeek:id:`Log::remove_filter` using + ## "default" as the argument. + ## + ## .. zeek:see:: Log::add_filter Log::remove_filter Log::add_default_filter + global remove_default_filter: function(id: ID) : bool; + + ## Runs a command given by :zeek:id:`Log::default_rotation_postprocessor_cmd` + ## on a rotated file. Meant to be called from postprocessor functions + ## that are added to :zeek:id:`Log::default_rotation_postprocessors`. + ## + ## info: A record holding meta-information about the log being rotated. + ## + ## npath: The new path of the file (after already being rotated/processed + ## by writer-specific postprocessor as defined in + ## :zeek:id:`Log::default_rotation_postprocessors`). + ## + ## Returns: True when :zeek:id:`Log::default_rotation_postprocessor_cmd` + ## is empty or the system command given by it has been invoked + ## to postprocess a rotated log file. + ## + ## .. zeek:see:: Log::default_rotation_date_format + ## Log::default_rotation_postprocessor_cmd + ## Log::default_rotation_postprocessors + global run_rotation_postprocessor_cmd: function(info: RotationInfo, npath: string) : bool; + + ## The streams which are currently active and not disabled. + ## This table is not meant to be modified by users! Only use it for + ## examining which streams are active. + global active_streams: table[ID] of Stream = table(); +} + +global all_streams: table[ID] of Stream = table(); + +global stream_filters: table[ID] of set[string] = table(); + +# We keep a script-level copy of all filters so that we can manipulate them. +global filters: table[ID, string] of Filter; + +@load base/bif/logging.bif # Needs Filter and Stream defined. + +module Log; + +# Used internally by the log manager. +function __default_rotation_postprocessor(info: RotationInfo) : bool + { + if ( info$writer in default_rotation_postprocessors ) + return default_rotation_postprocessors[info$writer](info); + else + # Return T by default so that postprocessor-less writers don't shutdown. + return T; + } + +function default_path_func(id: ID, path: string, rec: any) : string + { + # The suggested path value is a previous result of this function + # or a filter path explicitly set by the user, so continue using it. 
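``default_path_func`` only kicks in when a filter sets neither ``path`` nor ``path_func``; a filter can instead supply its own naming logic. A hedged sketch, borrowing the stock Conn log and the Site module (the filter and function names are made up):

function split_conn_path(id: Log::ID, path: string, rec: Conn::Info): string
	{
	# Send locally-originated connections to conn-local.log, the rest to conn-remote.log.
	return Site::is_local_addr(rec$id$orig_h) ? "conn-local" : "conn-remote";
	}

event zeek_init()
	{
	Log::add_filter(Conn::LOG, [$name="conn-split", $path_func=split_conn_path]);
	}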
+ if ( path != "" ) + return path; + + local id_str = fmt("%s", id); + + local parts = split_string1(id_str, /::/); + if ( |parts| == 2 ) + { + # Example: Notice::LOG -> "notice" + if ( parts[1] == "LOG" ) + { + local module_parts = split_string_n(parts[0], /[^A-Z][A-Z][a-z]*/, T, 4); + local output = ""; + if ( 0 in module_parts ) + output = module_parts[0]; + if ( 1 in module_parts && module_parts[1] != "" ) + output = cat(output, sub_bytes(module_parts[1],1,1), "_", sub_bytes(module_parts[1], 2, |module_parts[1]|)); + if ( 2 in module_parts && module_parts[2] != "" ) + output = cat(output, "_", module_parts[2]); + if ( 3 in module_parts && module_parts[3] != "" ) + output = cat(output, sub_bytes(module_parts[3],1,1), "_", sub_bytes(module_parts[3], 2, |module_parts[3]|)); + return to_lower(output); + } + + # Example: Notice::POLICY_LOG -> "notice_policy" + if ( /_LOG$/ in parts[1] ) + parts[1] = sub(parts[1], /_LOG$/, ""); + + return cat(to_lower(parts[0]),"_",to_lower(parts[1])); + } + else + return to_lower(id_str); + } + +# Run post-processor on file. If there isn't any postprocessor defined, +# we move the file to a nicer name. +function run_rotation_postprocessor_cmd(info: RotationInfo, npath: string) : bool + { + local pp_cmd = default_rotation_postprocessor_cmd; + + if ( pp_cmd == "" ) + return T; + + # Turn, e.g., Log::WRITER_ASCII into "ascii". + local writer = subst_string(to_lower(fmt("%s", info$writer)), "log::writer_", ""); + + # The date format is hard-coded here to provide a standardized + # script interface. + system(fmt("%s %s %s %s %s %d %s", + pp_cmd, npath, info$path, + strftime("%y-%m-%d_%H.%M.%S", info$open), + strftime("%y-%m-%d_%H.%M.%S", info$close), + info$terminating, writer)); + + return T; + } + +function create_stream(id: ID, stream: Stream) : bool + { + if ( ! __create_stream(id, stream) ) + return F; + + active_streams[id] = stream; + all_streams[id] = stream; + + return add_default_filter(id); + } + +function remove_stream(id: ID) : bool + { + delete active_streams[id]; + delete all_streams[id]; + + if ( id in stream_filters ) + { + for ( i in stream_filters[id] ) + delete filters[id, i]; + + delete stream_filters[id]; + } + return __remove_stream(id); + } + +function disable_stream(id: ID) : bool + { + delete active_streams[id]; + return __disable_stream(id); + } + +function enable_stream(id: ID) : bool + { + if ( ! __enable_stream(id) ) + return F; + + if ( id in all_streams ) + active_streams[id] = all_streams[id]; + } + +# convenience function to add a filter name to stream_filters +function add_stream_filters(id: ID, name: string) + { + if ( id in stream_filters ) + add stream_filters[id][name]; + else + stream_filters[id] = set(name); + } + +function add_filter(id: ID, filter: Filter) : bool + { + local stream = all_streams[id]; + + if ( stream?$path && ! filter?$path ) + filter$path = stream$path; + + if ( ! filter?$path && ! 
filter?$path_func ) + filter$path_func = default_path_func; + + local res = __add_filter(id, filter); + if ( res ) + { + add_stream_filters(id, filter$name); + filters[id, filter$name] = filter; + } + return res; + } + +function remove_filter(id: ID, name: string) : bool + { + if ( id in stream_filters ) + delete stream_filters[id][name]; + + delete filters[id, name]; + + return __remove_filter(id, name); + } + +function get_filter(id: ID, name: string) : Filter + { + if ( [id, name] in filters ) + return filters[id, name]; + + return no_filter; + } + +function get_filter_names(id: ID) : set[string] + { + if ( id in stream_filters ) + return stream_filters[id]; + else + return set(); + } + +function write(id: ID, columns: any) : bool + { + return __write(id, columns); + } + +function set_buf(id: ID, buffered: bool): bool + { + return __set_buf(id, buffered); + } + +function flush(id: ID): bool + { + return __flush(id); + } + +function add_default_filter(id: ID) : bool + { + return add_filter(id, [$name="default"]); + } + +function remove_default_filter(id: ID) : bool + { + return remove_filter(id, "default"); + } diff --git a/scripts/base/frameworks/logging/postprocessors/__load__.bro b/scripts/base/frameworks/logging/postprocessors/__load__.zeek similarity index 100% rename from scripts/base/frameworks/logging/postprocessors/__load__.bro rename to scripts/base/frameworks/logging/postprocessors/__load__.zeek diff --git a/scripts/base/frameworks/logging/postprocessors/scp.bro b/scripts/base/frameworks/logging/postprocessors/scp.bro deleted file mode 100644 index d63520abe6..0000000000 --- a/scripts/base/frameworks/logging/postprocessors/scp.bro +++ /dev/null @@ -1,72 +0,0 @@ -##! This script defines a postprocessing function that can be applied -##! to a logging filter in order to automatically SCP (secure copy) -##! a log stream (or a subset of it) to a remote host at configurable -##! rotation time intervals. Generally, to use this functionality -##! you must handle the :bro:id:`bro_init` event and do the following -##! in your handler: -##! -##! 1) Create a new :bro:type:`Log::Filter` record that defines a name/path, -##! rotation interval, and set the ``postprocessor`` to -##! :bro:id:`Log::scp_postprocessor`. -##! 2) Add the filter to a logging stream using :bro:id:`Log::add_filter`. -##! 3) Add a table entry to :bro:id:`Log::scp_destinations` for the filter's -##! writer/path pair which defines a set of :bro:type:`Log::SCPDestination` -##! records. - -module Log; - -export { - ## Secure-copies the rotated log to all the remote hosts - ## defined in :bro:id:`Log::scp_destinations` and then deletes - ## the local copy of the rotated log. It's not active when - ## reading from trace files. - ## - ## info: A record holding meta-information about the log file to be - ## postprocessed. - ## - ## Returns: True if secure-copy system command was initiated or - ## if no destination was configured for the log as described - ## by *info*. - global scp_postprocessor: function(info: Log::RotationInfo): bool; - - ## A container that describes the remote destination for the SCP command - ## argument as ``user@host:path``. - type SCPDestination: record { - ## The remote user to log in as. A trust mechanism should be - ## pre-established. - user: string; - ## The remote host to which to transfer logs. - host: string; - ## The path/directory on the remote host to send logs. 
- path: string; - }; - - ## A table indexed by a particular log writer and filter path, that yields - ## a set of remote destinations. The :bro:id:`Log::scp_postprocessor` - ## function queries this table upon log rotation and performs a secure - ## copy of the rotated log to each destination in the set. This - ## table can be modified at run-time. - global scp_destinations: table[Writer, string] of set[SCPDestination]; - - ## Default naming format for timestamps embedded into log filenames - ## that use the SCP rotator. - const scp_rotation_date_format = "%Y-%m-%d-%H-%M-%S" &redef; -} - -function scp_postprocessor(info: Log::RotationInfo): bool - { - if ( reading_traces() || [info$writer, info$path] !in scp_destinations ) - return T; - - local command = ""; - for ( d in scp_destinations[info$writer, info$path] ) - { - local dst = fmt("%s/%s.%s.log", d$path, info$path, - strftime(Log::scp_rotation_date_format, info$open)); - command += fmt("scp %s %s@%s:%s;", info$fname, d$user, d$host, dst); - } - - command += fmt("/bin/rm %s", info$fname); - system(command); - return T; - } diff --git a/scripts/base/frameworks/logging/postprocessors/scp.zeek b/scripts/base/frameworks/logging/postprocessors/scp.zeek new file mode 100644 index 0000000000..22adc29e47 --- /dev/null +++ b/scripts/base/frameworks/logging/postprocessors/scp.zeek @@ -0,0 +1,72 @@ +##! This script defines a postprocessing function that can be applied +##! to a logging filter in order to automatically SCP (secure copy) +##! a log stream (or a subset of it) to a remote host at configurable +##! rotation time intervals. Generally, to use this functionality +##! you must handle the :zeek:id:`zeek_init` event and do the following +##! in your handler: +##! +##! 1) Create a new :zeek:type:`Log::Filter` record that defines a name/path, +##! rotation interval, and set the ``postprocessor`` to +##! :zeek:id:`Log::scp_postprocessor`. +##! 2) Add the filter to a logging stream using :zeek:id:`Log::add_filter`. +##! 3) Add a table entry to :zeek:id:`Log::scp_destinations` for the filter's +##! writer/path pair which defines a set of :zeek:type:`Log::SCPDestination` +##! records. + +module Log; + +export { + ## Secure-copies the rotated log to all the remote hosts + ## defined in :zeek:id:`Log::scp_destinations` and then deletes + ## the local copy of the rotated log. It's not active when + ## reading from trace files. + ## + ## info: A record holding meta-information about the log file to be + ## postprocessed. + ## + ## Returns: True if secure-copy system command was initiated or + ## if no destination was configured for the log as described + ## by *info*. + global scp_postprocessor: function(info: Log::RotationInfo): bool; + + ## A container that describes the remote destination for the SCP command + ## argument as ``user@host:path``. + type SCPDestination: record { + ## The remote user to log in as. A trust mechanism should be + ## pre-established. + user: string; + ## The remote host to which to transfer logs. + host: string; + ## The path/directory on the remote host to send logs. + path: string; + }; + + ## A table indexed by a particular log writer and filter path, that yields + ## a set of remote destinations. The :zeek:id:`Log::scp_postprocessor` + ## function queries this table upon log rotation and performs a secure + ## copy of the rotated log to each destination in the set. This + ## table can be modified at run-time. 
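Putting the three steps from the header above together, a hedged sketch of shipping a filtered copy of conn.log via SCP every hour; the filter name, path, user, host and directory are placeholders:

event zeek_init()
	{
	local f: Log::Filter = [$name="scp-offload",
	                        $path="conn-scp",
	                        $interv=1hr,
	                        $postprocessor=Log::scp_postprocessor];
	Log::add_filter(Conn::LOG, f);

	# The writer/path pair must match the filter above.
	Log::scp_destinations[Log::WRITER_ASCII, "conn-scp"] =
		set(Log::SCPDestination($user="archiver", $host="logs.example.com", $path="/data/zeek"));
	}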
+ global scp_destinations: table[Writer, string] of set[SCPDestination]; + + ## Default naming format for timestamps embedded into log filenames + ## that use the SCP rotator. + const scp_rotation_date_format = "%Y-%m-%d-%H-%M-%S" &redef; +} + +function scp_postprocessor(info: Log::RotationInfo): bool + { + if ( reading_traces() || [info$writer, info$path] !in scp_destinations ) + return T; + + local command = ""; + for ( d in scp_destinations[info$writer, info$path] ) + { + local dst = fmt("%s/%s.%s.log", d$path, info$path, + strftime(Log::scp_rotation_date_format, info$open)); + command += fmt("scp %s %s@%s:%s;", info$fname, d$user, d$host, dst); + } + + command += fmt("/bin/rm %s", info$fname); + system(command); + return T; + } diff --git a/scripts/base/frameworks/logging/postprocessors/sftp.bro b/scripts/base/frameworks/logging/postprocessors/sftp.bro deleted file mode 100644 index 8c77899864..0000000000 --- a/scripts/base/frameworks/logging/postprocessors/sftp.bro +++ /dev/null @@ -1,75 +0,0 @@ -##! This script defines a postprocessing function that can be applied -##! to a logging filter in order to automatically SFTP -##! a log stream (or a subset of it) to a remote host at configurable -##! rotation time intervals. Generally, to use this functionality -##! you must handle the :bro:id:`bro_init` event and do the following -##! in your handler: -##! -##! 1) Create a new :bro:type:`Log::Filter` record that defines a name/path, -##! rotation interval, and set the ``postprocessor`` to -##! :bro:id:`Log::sftp_postprocessor`. -##! 2) Add the filter to a logging stream using :bro:id:`Log::add_filter`. -##! 3) Add a table entry to :bro:id:`Log::sftp_destinations` for the filter's -##! writer/path pair which defines a set of :bro:type:`Log::SFTPDestination` -##! records. - -module Log; - -export { - ## Securely transfers the rotated log to all the remote hosts - ## defined in :bro:id:`Log::sftp_destinations` and then deletes - ## the local copy of the rotated log. It's not active when - ## reading from trace files. - ## - ## info: A record holding meta-information about the log file to be - ## postprocessed. - ## - ## Returns: True if sftp system command was initiated or - ## if no destination was configured for the log as described - ## by *info*. - global sftp_postprocessor: function(info: Log::RotationInfo): bool; - - ## A container that describes the remote destination for the SFTP command, - ## comprised of the username, host, and path at which to upload the file. - type SFTPDestination: record { - ## The remote user to log in as. A trust mechanism should be - ## pre-established. - user: string; - ## The remote host to which to transfer logs. - host: string; - ## The port to connect to. Defaults to 22 - host_port: count &default=22; - ## The path/directory on the remote host to send logs. - path: string; - }; - - ## A table indexed by a particular log writer and filter path, that yields - ## a set of remote destinations. The :bro:id:`Log::sftp_postprocessor` - ## function queries this table upon log rotation and performs a secure - ## transfer of the rotated log to each destination in the set. This - ## table can be modified at run-time. - global sftp_destinations: table[Writer, string] of set[SFTPDestination]; - - ## Default naming format for timestamps embedded into log filenames - ## that use the SFTP rotator. 
- const sftp_rotation_date_format = "%Y-%m-%d-%H-%M-%S" &redef; -} - -function sftp_postprocessor(info: Log::RotationInfo): bool - { - if ( reading_traces() || [info$writer, info$path] !in sftp_destinations ) - return T; - - local command = ""; - for ( d in sftp_destinations[info$writer, info$path] ) - { - local dst = fmt("%s/%s.%s.log", d$path, info$path, - strftime(Log::sftp_rotation_date_format, info$open)); - command += fmt("echo put %s %s | sftp -P %d -b - %s@%s;", info$fname, dst, - d$host_port, d$user, d$host); - } - - command += fmt("/bin/rm %s", info$fname); - system(command); - return T; - } diff --git a/scripts/base/frameworks/logging/postprocessors/sftp.zeek b/scripts/base/frameworks/logging/postprocessors/sftp.zeek new file mode 100644 index 0000000000..75ab438809 --- /dev/null +++ b/scripts/base/frameworks/logging/postprocessors/sftp.zeek @@ -0,0 +1,75 @@ +##! This script defines a postprocessing function that can be applied +##! to a logging filter in order to automatically SFTP +##! a log stream (or a subset of it) to a remote host at configurable +##! rotation time intervals. Generally, to use this functionality +##! you must handle the :zeek:id:`zeek_init` event and do the following +##! in your handler: +##! +##! 1) Create a new :zeek:type:`Log::Filter` record that defines a name/path, +##! rotation interval, and set the ``postprocessor`` to +##! :zeek:id:`Log::sftp_postprocessor`. +##! 2) Add the filter to a logging stream using :zeek:id:`Log::add_filter`. +##! 3) Add a table entry to :zeek:id:`Log::sftp_destinations` for the filter's +##! writer/path pair which defines a set of :zeek:type:`Log::SFTPDestination` +##! records. + +module Log; + +export { + ## Securely transfers the rotated log to all the remote hosts + ## defined in :zeek:id:`Log::sftp_destinations` and then deletes + ## the local copy of the rotated log. It's not active when + ## reading from trace files. + ## + ## info: A record holding meta-information about the log file to be + ## postprocessed. + ## + ## Returns: True if sftp system command was initiated or + ## if no destination was configured for the log as described + ## by *info*. + global sftp_postprocessor: function(info: Log::RotationInfo): bool; + + ## A container that describes the remote destination for the SFTP command, + ## comprised of the username, host, and path at which to upload the file. + type SFTPDestination: record { + ## The remote user to log in as. A trust mechanism should be + ## pre-established. + user: string; + ## The remote host to which to transfer logs. + host: string; + ## The port to connect to. Defaults to 22 + host_port: count &default=22; + ## The path/directory on the remote host to send logs. + path: string; + }; + + ## A table indexed by a particular log writer and filter path, that yields + ## a set of remote destinations. The :zeek:id:`Log::sftp_postprocessor` + ## function queries this table upon log rotation and performs a secure + ## transfer of the rotated log to each destination in the set. This + ## table can be modified at run-time. + global sftp_destinations: table[Writer, string] of set[SFTPDestination]; + + ## Default naming format for timestamps embedded into log filenames + ## that use the SFTP rotator. 
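The SFTP variant is wired up the same way as the SCP example earlier; only the destination record differs (it additionally carries a port). Names below are placeholders:

event zeek_init()
	{
	Log::add_filter(Notice::LOG, [$name="sftp-offload", $path="notice-sftp",
	                              $interv=24hrs, $postprocessor=Log::sftp_postprocessor]);

	Log::sftp_destinations[Log::WRITER_ASCII, "notice-sftp"] =
		set(Log::SFTPDestination($user="archiver", $host="logs.example.com",
		                         $host_port=2222, $path="/data/zeek"));
	}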
+ const sftp_rotation_date_format = "%Y-%m-%d-%H-%M-%S" &redef; +} + +function sftp_postprocessor(info: Log::RotationInfo): bool + { + if ( reading_traces() || [info$writer, info$path] !in sftp_destinations ) + return T; + + local command = ""; + for ( d in sftp_destinations[info$writer, info$path] ) + { + local dst = fmt("%s/%s.%s.log", d$path, info$path, + strftime(Log::sftp_rotation_date_format, info$open)); + command += fmt("echo put %s %s | sftp -P %d -b - %s@%s;", info$fname, dst, + d$host_port, d$user, d$host); + } + + command += fmt("/bin/rm %s", info$fname); + system(command); + return T; + } diff --git a/scripts/base/frameworks/logging/writers/ascii.bro b/scripts/base/frameworks/logging/writers/ascii.bro deleted file mode 100644 index 8cab6fa0ff..0000000000 --- a/scripts/base/frameworks/logging/writers/ascii.bro +++ /dev/null @@ -1,99 +0,0 @@ -##! Interface for the ASCII log writer. Redefinable options are available -##! to tweak the output format of ASCII logs. -##! -##! The ASCII writer currently supports one writer-specific per-filter config -##! option: setting ``tsv`` to the string ``T`` turns the output into -##! "tab-separated-value" mode where only a single header row with the column -##! names is printed out as meta information, with no "# fields" prepended; no -##! other meta data gets included in that mode. Example filter using this:: -##! -##! local f: Log::Filter = [$name = "my-filter", -##! $writer = Log::WRITER_ASCII, -##! $config = table(["tsv"] = "T")]; -##! - -module LogAscii; - -export { - ## If true, output everything to stdout rather than - ## into files. This is primarily for debugging purposes. - ## - ## This option is also available as a per-filter ``$config`` option. - const output_to_stdout = F &redef; - - ## If true, the default will be to write logs in a JSON format. - ## - ## This option is also available as a per-filter ``$config`` option. - const use_json = F &redef; - - ## Define the gzip level to compress the logs. If 0, then no gzip - ## compression is performed. Enabling compression also changes - ## the log file name extension to include ".gz". - ## - ## This option is also available as a per-filter ``$config`` option. - const gzip_level = 0 &redef; - - ## Format of timestamps when writing out JSON. By default, the JSON - ## formatter will use double values for timestamps which represent the - ## number of seconds from the UNIX epoch. - ## - ## This option is also available as a per-filter ``$config`` option. - const json_timestamps: JSON::TimestampFormat = JSON::TS_EPOCH &redef; - - ## If true, include lines with log meta information such as column names - ## with types, the values of ASCII logging options that are in use, and - ## the time when the file was opened and closed (the latter at the end). - ## - ## If writing in JSON format, this is implicitly disabled. - const include_meta = T &redef; - - ## Prefix for lines with meta information. - ## - ## This option is also available as a per-filter ``$config`` option. - const meta_prefix = "#" &redef; - - ## Separator between fields. - ## - ## This option is also available as a per-filter ``$config`` option. - const separator = Log::separator &redef; - - ## Separator between set elements. - ## - ## This option is also available as a per-filter ``$config`` option. - const set_separator = Log::set_separator &redef; - - ## String to use for empty fields. This should be different from - ## *unset_field* to make the output unambiguous. 
- ## - ## This option is also available as a per-filter ``$config`` option. - const empty_field = Log::empty_field &redef; - - ## String to use for an unset &optional field. - ## - ## This option is also available as a per-filter ``$config`` option. - const unset_field = Log::unset_field &redef; -} - -# Default function to postprocess a rotated ASCII log file. It moves the rotated -# file to a new name that includes a timestamp with the opening time, and then -# runs the writer's default postprocessor command on it. -function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool - { - # If the filename has a ".gz" extension, then keep it. - local gz = info$fname[-3:] == ".gz" ? ".gz" : ""; - local bls = getenv("BRO_LOG_SUFFIX"); - - if ( bls == "" ) - bls = "log"; - - # Move file to name including both opening and closing time. - local dst = fmt("%s.%s.%s%s", info$path, - strftime(Log::default_rotation_date_format, info$open), bls, gz); - - system(fmt("/bin/mv %s %s", info$fname, dst)); - - # Run default postprocessor. - return Log::run_rotation_postprocessor_cmd(info, dst); - } - -redef Log::default_rotation_postprocessors += { [Log::WRITER_ASCII] = default_rotation_postprocessor_func }; diff --git a/scripts/base/frameworks/logging/writers/ascii.zeek b/scripts/base/frameworks/logging/writers/ascii.zeek new file mode 100644 index 0000000000..a32ce552e3 --- /dev/null +++ b/scripts/base/frameworks/logging/writers/ascii.zeek @@ -0,0 +1,99 @@ +##! Interface for the ASCII log writer. Redefinable options are available +##! to tweak the output format of ASCII logs. +##! +##! The ASCII writer currently supports one writer-specific per-filter config +##! option: setting ``tsv`` to the string ``T`` turns the output into +##! "tab-separated-value" mode where only a single header row with the column +##! names is printed out as meta information, with no "# fields" prepended; no +##! other meta data gets included in that mode. Example filter using this:: +##! +##! local f: Log::Filter = [$name = "my-filter", +##! $writer = Log::WRITER_ASCII, +##! $config = table(["tsv"] = "T")]; +##! + +module LogAscii; + +export { + ## If true, output everything to stdout rather than + ## into files. This is primarily for debugging purposes. + ## + ## This option is also available as a per-filter ``$config`` option. + const output_to_stdout = F &redef; + + ## If true, the default will be to write logs in a JSON format. + ## + ## This option is also available as a per-filter ``$config`` option. + const use_json = F &redef; + + ## Define the gzip level to compress the logs. If 0, then no gzip + ## compression is performed. Enabling compression also changes + ## the log file name extension to include ".gz". + ## + ## This option is also available as a per-filter ``$config`` option. + const gzip_level = 0 &redef; + + ## Format of timestamps when writing out JSON. By default, the JSON + ## formatter will use double values for timestamps which represent the + ## number of seconds from the UNIX epoch. + ## + ## This option is also available as a per-filter ``$config`` option. + const json_timestamps: JSON::TimestampFormat = JSON::TS_EPOCH &redef; + + ## If true, include lines with log meta information such as column names + ## with types, the values of ASCII logging options that are in use, and + ## the time when the file was opened and closed (the latter at the end). + ## + ## If writing in JSON format, this is implicitly disabled. + const include_meta = T &redef; + + ## Prefix for lines with meta information. 
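For completeness, a sketch of driving these options: the first two redefs switch every ASCII log on the node to compressed JSON, while the per-filter form (assuming the config keys mirror the option names, as with ``tsv`` above) updates only one stream by re-adding its default filter:

redef LogAscii::use_json = T;
redef LogAscii::gzip_level = 5;

event zeek_init()
	{
	local f = Log::get_filter(Conn::LOG, "default");
	f$config = table(["use_json"] = "T");
	Log::add_filter(Conn::LOG, f);   # re-adding under the same name replaces the filter
	}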
+ ## + ## This option is also available as a per-filter ``$config`` option. + const meta_prefix = "#" &redef; + + ## Separator between fields. + ## + ## This option is also available as a per-filter ``$config`` option. + const separator = Log::separator &redef; + + ## Separator between set elements. + ## + ## This option is also available as a per-filter ``$config`` option. + const set_separator = Log::set_separator &redef; + + ## String to use for empty fields. This should be different from + ## *unset_field* to make the output unambiguous. + ## + ## This option is also available as a per-filter ``$config`` option. + const empty_field = Log::empty_field &redef; + + ## String to use for an unset &optional field. + ## + ## This option is also available as a per-filter ``$config`` option. + const unset_field = Log::unset_field &redef; +} + +# Default function to postprocess a rotated ASCII log file. It moves the rotated +# file to a new name that includes a timestamp with the opening time, and then +# runs the writer's default postprocessor command on it. +function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool + { + # If the filename has a ".gz" extension, then keep it. + local gz = info$fname[-3:] == ".gz" ? ".gz" : ""; + local bls = getenv("ZEEK_LOG_SUFFIX"); + + if ( bls == "" ) + bls = "log"; + + # Move file to name including both opening and closing time. + local dst = fmt("%s.%s.%s%s", info$path, + strftime(Log::default_rotation_date_format, info$open), bls, gz); + + system(fmt("/bin/mv %s %s", info$fname, dst)); + + # Run default postprocessor. + return Log::run_rotation_postprocessor_cmd(info, dst); + } + +redef Log::default_rotation_postprocessors += { [Log::WRITER_ASCII] = default_rotation_postprocessor_func }; diff --git a/scripts/base/frameworks/logging/writers/none.bro b/scripts/base/frameworks/logging/writers/none.zeek similarity index 100% rename from scripts/base/frameworks/logging/writers/none.bro rename to scripts/base/frameworks/logging/writers/none.zeek diff --git a/scripts/base/frameworks/logging/writers/sqlite.bro b/scripts/base/frameworks/logging/writers/sqlite.zeek similarity index 100% rename from scripts/base/frameworks/logging/writers/sqlite.bro rename to scripts/base/frameworks/logging/writers/sqlite.zeek diff --git a/scripts/base/frameworks/netcontrol/README b/scripts/base/frameworks/netcontrol/README index a8635da300..0702c6b036 100644 --- a/scripts/base/frameworks/netcontrol/README +++ b/scripts/base/frameworks/netcontrol/README @@ -1,3 +1,3 @@ -The NetControl framework provides a way for Bro to interact with networking +The NetControl framework provides a way for Zeek to interact with networking hard- and software, e.g. for dropping and shunting IP addresses/connections, etc. diff --git a/scripts/base/frameworks/netcontrol/__load__.bro b/scripts/base/frameworks/netcontrol/__load__.bro deleted file mode 100644 index a8e391f7c8..0000000000 --- a/scripts/base/frameworks/netcontrol/__load__.bro +++ /dev/null @@ -1,15 +0,0 @@ -@load ./types -@load ./main -@load ./plugins -@load ./drop -@load ./shunt -@load ./catch-and-release - -# The cluster framework must be loaded first. 
-@load base/frameworks/cluster - -@if ( Cluster::is_enabled() ) -@load ./cluster -@else -@load ./non-cluster -@endif diff --git a/scripts/base/frameworks/netcontrol/__load__.zeek b/scripts/base/frameworks/netcontrol/__load__.zeek new file mode 100644 index 0000000000..c18ad6a026 --- /dev/null +++ b/scripts/base/frameworks/netcontrol/__load__.zeek @@ -0,0 +1,14 @@ +@load ./types +@load ./main +@load ./plugins +@load ./drop +@load ./shunt + +# The cluster framework must be loaded first. +@load base/frameworks/cluster + +@if ( Cluster::is_enabled() ) +@load ./cluster +@else +@load ./non-cluster +@endif diff --git a/scripts/base/frameworks/netcontrol/catch-and-release.bro b/scripts/base/frameworks/netcontrol/catch-and-release.bro deleted file mode 100644 index 79de7d9662..0000000000 --- a/scripts/base/frameworks/netcontrol/catch-and-release.bro +++ /dev/null @@ -1,535 +0,0 @@ -##! Implementation of catch-and-release functionality for NetControl. - -module NetControl; - -@load base/frameworks/cluster -@load ./main -@load ./drop - -export { - - redef enum Log::ID += { CATCH_RELEASE }; - - ## This record is used for storing information about current blocks that are - ## part of catch and release. - type BlockInfo: record { - ## Absolute time indicating until when a block is inserted using NetControl. - block_until: time &optional; - ## Absolute time indicating until when an IP address is watched to reblock it. - watch_until: time; - ## Number of times an IP address was reblocked. - num_reblocked: count &default=0; - ## Number indicating at which catch and release interval we currently are. - current_interval: count; - ## ID of the inserted block, if any. - current_block_id: string; - ## User specified string. - location: string &optional; - }; - - ## The enum that contains the different kinds of messages that are logged by - ## catch and release. - type CatchReleaseActions: enum { - ## Log lines marked with info are purely informational; no action was taken. - INFO, - ## A rule for the specified IP address already existed in NetControl (outside - ## of catch-and-release). Catch and release did not add a new rule, but is now - ## watching the IP address and will add a new rule after the current rule expires. - ADDED, - ## A drop was requested by catch and release. - DROP, - ## An address was successfully blocked by catch and release. - DROPPED, - ## An address was unblocked after the timeout expired. - UNBLOCK, - ## An address was forgotten because it did not reappear within the `watch_until` interval. - FORGOTTEN, - ## A watched IP address was seen again; catch and release will re-block it. - SEEN_AGAIN - }; - - ## The record type that is used for representing and logging - type CatchReleaseInfo: record { - ## The absolute time indicating when the action for this log-line occured. - ts: time &log; - ## The rule id that this log line refers to. - rule_id: string &log &optional; - ## The IP address that this line refers to. - ip: addr &log; - ## The action that was taken in this log-line. - action: CatchReleaseActions &log; - ## The current block_interaval (for how long the address is blocked). - block_interval: interval &log &optional; - ## The current watch_interval (for how long the address will be watched and re-block if it reappears). - watch_interval: interval &log &optional; - ## The absolute time until which the address is blocked. - blocked_until: time &log &optional; - ## The absolute time until which the address will be monitored. 
- watched_until: time &log &optional; - ## Number of times that this address was blocked in the current cycle. - num_blocked: count &log &optional; - ## The user specified location string. - location: string &log &optional; - ## Additional informational string by the catch and release framework about this log-line. - message: string &log &optional; - }; - - ## Stops all packets involving an IP address from being forwarded. This function - ## uses catch-and-release functionality, where the IP address is only dropped for - ## a short amount of time that is incremented steadily when the IP is encountered - ## again. - ## - ## In cluster mode, this function works on workers as well as the manager. On managers, - ## the returned :bro:see:`NetControl::BlockInfo` record will not contain the block ID, - ## which will be assigned on the manager. - ## - ## a: The address to be dropped. - ## - ## t: How long to drop it, with 0 being indefinitely. - ## - ## location: An optional string describing where the drop was triggered. - ## - ## Returns: The :bro:see:`NetControl::BlockInfo` record containing information about - ## the inserted block. - global drop_address_catch_release: function(a: addr, location: string &default="") : BlockInfo; - - ## Removes an address from being watched with catch and release. Returns true if the - ## address was found and removed; returns false if it was unknown to catch and release. - ## - ## If the address is currently blocked, and the block was inserted by catch and release, - ## the block is removed. - ## - ## a: The address to be unblocked. - ## - ## reason: A reason for the unblock. - ## - ## Returns: True if the address was unblocked. - global unblock_address_catch_release: function(a: addr, reason: string &default="") : bool; - - ## This function can be called to notify the catch and release script that activity by - ## an IP address was seen. If the respective IP address is currently monitored by catch and - ## release and not blocked, the block will be reinstated. See the documentation of watch_new_connection - ## which events the catch and release functionality usually monitors for activity. - ## - ## a: The address that was seen and should be re-dropped if it is being watched. - global catch_release_seen: function(a: addr); - - ## Get the :bro:see:`NetControl::BlockInfo` record for an address currently blocked by catch and release. - ## If the address is unknown to catch and release, the watch_until time will be set to 0. - ## - ## In cluster mode, this function works on the manager and workers. On workers, the data will - ## lag slightly behind the manager; if you add a block, it will not be instantly available via - ## this function. - ## - ## a: The address to get information about. - ## - ## Returns: The :bro:see:`NetControl::BlockInfo` record containing information about - ## the inserted block. - global get_catch_release_info: function(a: addr) : BlockInfo; - - ## Event is raised when catch and release cases management of an IP address because no - ## activity was seen within the watch_until period. - ## - ## a: The address that is no longer being managed. - ## - ## bi: The :bro:see:`NetControl::BlockInfo` record containing information about the block. 
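Typical use of the API documented above is a one-line drop plus, optionally, a redef of the escalation schedule; the trigger condition below is purely hypothetical:

@load base/frameworks/netcontrol

# Shorter first block, longer re-blocks (replaces the default 10min/1hr/24hr/7day ladder).
redef NetControl::catch_release_intervals = vector(5min, 30min, 6hrs, 3days);

event connection_established(c: connection)
	{
	if ( c$id$resp_p == 23/tcp )   # hypothetical policy: telnet contact gets dropped
		NetControl::drop_address_catch_release(c$id$orig_h, "telnet contact");
	}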
- global catch_release_forgotten: event(a: addr, bi: BlockInfo); - - ## If true, catch_release_seen is called on the connection originator in new_connection, - ## connection_established, partial_connection, connection_attempt, connection_rejected, - ## connection_reset and connection_pending - const watch_connections = T &redef; - - ## If true, catch and release warns if packets of an IP address are still seen after it - ## should have been blocked. - option catch_release_warn_blocked_ip_encountered = F; - - ## Time intervals for which subsequent drops of the same IP take - ## effect. - const catch_release_intervals: vector of interval = vector(10min, 1hr, 24hrs, 7days) &redef; - - ## Event that can be handled to access the :bro:type:`NetControl::CatchReleaseInfo` - ## record as it is sent on to the logging framework. - global log_netcontrol_catch_release: event(rec: CatchReleaseInfo); - - # Cluster events for catch and release - global catch_release_block_new: event(a: addr, b: BlockInfo); - global catch_release_block_delete: event(a: addr); - global catch_release_add: event(a: addr, location: string); - global catch_release_delete: event(a: addr, reason: string); - global catch_release_encountered: event(a: addr); -} - -# Set that is used to only send seen notifications to the master every ~30 seconds. -global catch_release_recently_notified: set[addr] &create_expire=30secs; - -event bro_init() &priority=5 - { - Log::create_stream(NetControl::CATCH_RELEASE, [$columns=CatchReleaseInfo, $ev=log_netcontrol_catch_release, $path="netcontrol_catch_release"]); - } - -function get_watch_interval(current_interval: count): interval - { - if ( (current_interval + 1) in catch_release_intervals ) - return catch_release_intervals[current_interval+1]; - else - return catch_release_intervals[current_interval]; - } - -function populate_log_record(ip: addr, bi: BlockInfo, action: CatchReleaseActions): CatchReleaseInfo - { - local log = CatchReleaseInfo($ts=network_time(), $ip=ip, $action=action, - $block_interval=catch_release_intervals[bi$current_interval], - $watch_interval=get_watch_interval(bi$current_interval), - $watched_until=bi$watch_until, - $num_blocked=bi$num_reblocked+1 - ); - - if ( bi?$block_until ) - log$blocked_until = bi$block_until; - - if ( bi?$current_block_id && bi$current_block_id != "" ) - log$rule_id = bi$current_block_id; - - if ( bi?$location ) - log$location = bi$location; - - return log; - } - -function per_block_interval(t: table[addr] of BlockInfo, idx: addr): interval - { - local remaining_time = t[idx]$watch_until - network_time(); - if ( remaining_time < 0secs ) - remaining_time = 0secs; - -@if ( ! Cluster::is_enabled() || ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) ) - if ( remaining_time == 0secs ) - { - local log = populate_log_record(idx, t[idx], FORGOTTEN); - Log::write(CATCH_RELEASE, log); - - event NetControl::catch_release_forgotten(idx, t[idx]); - } -@endif - - return remaining_time; - } - -# This is the internally maintained table containing all the addresses that are currently being -# watched to see if they will re-surface. After the time is reached, monitoring of that specific -# IP will stop. 
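The expiry trick used by ``blocks`` above, entries created with an immediate &create_expire whose &expire_func keeps returning the remaining watch time, can be reduced to a small generic sketch (names hypothetical):

function time_left(t: table[addr] of time, idx: addr): interval
	{
	local remaining = t[idx] - network_time();
	return remaining < 0secs ? 0secs : remaining;
	}

# Entries nominally expire right away, but time_left() keeps pushing each one
# out until its own deadline has passed.
global deadlines: table[addr] of time = {}
	&create_expire=0secs
	&expire_func=time_left;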
-global blocks: table[addr] of BlockInfo = {} - &create_expire=0secs - &expire_func=per_block_interval; - - -@if ( Cluster::is_enabled() ) - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) -event bro_init() - { - Broker::auto_publish(Cluster::worker_topic, NetControl::catch_release_block_new); - Broker::auto_publish(Cluster::worker_topic, NetControl::catch_release_block_delete); - } -@else -event bro_init() - { - Broker::auto_publish(Cluster::manager_topic, NetControl::catch_release_add); - Broker::auto_publish(Cluster::manager_topic, NetControl::catch_release_delete); - Broker::auto_publish(Cluster::manager_topic, NetControl::catch_release_encountered); - } -@endif - -@endif - -function cr_check_rule(r: Rule): bool - { - if ( r$ty == DROP && r$entity$ty == ADDRESS ) - { - local ip = r$entity$ip; - if ( ( is_v4_subnet(ip) && subnet_width(ip) == 32 ) || ( is_v6_subnet(ip) && subnet_width(ip) == 128 ) ) - { - if ( subnet_to_addr(ip) in blocks ) - return T; - } - } - - return F; - } - -@if ( ! Cluster::is_enabled() || ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) ) - -event rule_added(r: Rule, p: PluginState, msg: string &default="") - { - if ( !cr_check_rule(r) ) - return; - - local ip = subnet_to_addr(r$entity$ip); - local bi = blocks[ip]; - - local log = populate_log_record(ip, bi, DROPPED); - if ( msg != "" ) - log$message = msg; - Log::write(CATCH_RELEASE, log); - } - - -event rule_timeout(r: Rule, i: FlowInfo, p: PluginState) - { - if ( !cr_check_rule(r) ) - return; - - local ip = subnet_to_addr(r$entity$ip); - local bi = blocks[ip]; - - local log = populate_log_record(ip, bi, UNBLOCK); - if ( bi?$block_until ) - { - local difference: interval = network_time() - bi$block_until; - if ( interval_to_double(difference) > 60 || interval_to_double(difference) < -60 ) - log$message = fmt("Difference between network_time and block time excessive: %f", difference); - } - - Log::write(CATCH_RELEASE, log); - } - -@endif - -@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) -event catch_release_add(a: addr, location: string) - { - drop_address_catch_release(a, location); - } - -event catch_release_delete(a: addr, reason: string) - { - unblock_address_catch_release(a, reason); - } - -event catch_release_encountered(a: addr) - { - catch_release_seen(a); - } -@endif - -@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) -event catch_release_block_new(a: addr, b: BlockInfo) - { - blocks[a] = b; - } - -event catch_release_block_delete(a: addr) - { - if ( a in blocks ) - delete blocks[a]; - } -@endif - -@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) -@endif - -function get_catch_release_info(a: addr): BlockInfo - { - if ( a in blocks ) - return blocks[a]; - - return BlockInfo($watch_until=double_to_time(0), $current_interval=0, $current_block_id=""); - } - -function drop_address_catch_release(a: addr, location: string &default=""): BlockInfo - { - local bi: BlockInfo; - local log: CatchReleaseInfo; - - if ( a in blocks ) - { - log = populate_log_record(a, blocks[a], INFO); - log$message = "Already blocked using catch-and-release - ignoring duplicate"; - Log::write(CATCH_RELEASE, log); - - return blocks[a]; - } - - local e = Entity($ty=ADDRESS, $ip=addr_to_subnet(a)); - if ( [e,DROP] in rule_entities ) - { - local r = rule_entities[e,DROP]; - - bi = BlockInfo($watch_until=network_time()+catch_release_intervals[1], $current_interval=0, $current_block_id=r$id); - if ( location != 
"" ) - bi$location = location; -@if ( ! Cluster::is_enabled() || ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) ) - log = populate_log_record(a, bi, ADDED); - log$message = "Address already blocked outside of catch-and-release. Catch and release will monitor and only actively block if it appears in network traffic."; - Log::write(CATCH_RELEASE, log); - blocks[a] = bi; - event NetControl::catch_release_block_new(a, bi); -@endif -@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) - event NetControl::catch_release_add(a, location); -@endif - return bi; - } - - local block_interval = catch_release_intervals[0]; - -@if ( ! Cluster::is_enabled() || ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) ) - local ret = drop_address(a, block_interval, location); - - if ( ret != "" ) - { - bi = BlockInfo($watch_until=network_time()+catch_release_intervals[1], $block_until=network_time()+block_interval, $current_interval=0, $current_block_id=ret); - if ( location != "" ) - bi$location = location; - blocks[a] = bi; - event NetControl::catch_release_block_new(a, bi); - blocks[a] = bi; - log = populate_log_record(a, bi, DROP); - Log::write(CATCH_RELEASE, log); - return bi; - } - Reporter::error(fmt("Catch and release could not add block for %s; failing.", a)); - return BlockInfo($watch_until=double_to_time(0), $current_interval=0, $current_block_id=""); -@endif - -@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) - bi = BlockInfo($watch_until=network_time()+catch_release_intervals[1], $block_until=network_time()+block_interval, $current_interval=0, $current_block_id=""); - event NetControl::catch_release_add(a, location); - return bi; -@endif - - } - -function unblock_address_catch_release(a: addr, reason: string &default=""): bool - { - if ( a !in blocks ) - return F; - -@if ( ! Cluster::is_enabled() || ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) ) - local bi = blocks[a]; - local log = populate_log_record(a, bi, UNBLOCK); - if ( reason != "" ) - log$message = reason; - Log::write(CATCH_RELEASE, log); - delete blocks[a]; - if ( bi?$block_until && bi$block_until > network_time() && bi$current_block_id != "" ) - remove_rule(bi$current_block_id, reason); -@endif -@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) - event NetControl::catch_release_block_delete(a); -@endif -@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) - event NetControl::catch_release_delete(a, reason); -@endif - - return T; - } - -function catch_release_seen(a: addr) - { - if ( a in blocks ) - { -@if ( ! Cluster::is_enabled() || ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) ) - local bi = blocks[a]; - local log: CatchReleaseInfo; - local e = Entity($ty=ADDRESS, $ip=addr_to_subnet(a)); - - if ( [e,DROP] in rule_entities ) - { - if ( catch_release_warn_blocked_ip_encountered == F ) - return; - - # This should be blocked - block has not been applied yet by hardware? Ignore for the moment... - log = populate_log_record(a, bi, INFO); - log$action = INFO; - log$message = "Block seen while in rule_entities. No action taken."; - Log::write(CATCH_RELEASE, log); - return; - } - - # ok, this one returned again while still in the backoff period. 
- - local try = bi$current_interval; - if ( (try+1) in catch_release_intervals ) - ++try; - - bi$current_interval = try; - if ( (try+1) in catch_release_intervals ) - bi$watch_until = network_time() + catch_release_intervals[try+1]; - else - bi$watch_until = network_time() + catch_release_intervals[try]; - - bi$block_until = network_time() + catch_release_intervals[try]; - ++bi$num_reblocked; - - local block_interval = catch_release_intervals[try]; - local location = ""; - if ( bi?$location ) - location = bi$location; - local drop = drop_address(a, block_interval, fmt("Re-drop by catch-and-release: %s", location)); - bi$current_block_id = drop; - - blocks[a] = bi; - - log = populate_log_record(a, bi, SEEN_AGAIN); - Log::write(CATCH_RELEASE, log); -@endif -@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) - event NetControl::catch_release_block_new(a, bi); -@endif -@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) - if ( a in catch_release_recently_notified ) - return; - - event NetControl::catch_release_encountered(a); - add catch_release_recently_notified[a]; -@endif - - return; - } - - return; - } - -event new_connection(c: connection) - { - if ( watch_connections ) - catch_release_seen(c$id$orig_h); - } - -event connection_established(c: connection) - { - if ( watch_connections ) - catch_release_seen(c$id$orig_h); - } - -event partial_connection(c: connection) - { - if ( watch_connections ) - catch_release_seen(c$id$orig_h); - } - -event connection_attempt(c: connection) - { - if ( watch_connections ) - catch_release_seen(c$id$orig_h); - } - -event connection_rejected(c: connection) - { - if ( watch_connections ) - catch_release_seen(c$id$orig_h); - } - -event connection_reset(c: connection) - { - if ( watch_connections ) - catch_release_seen(c$id$orig_h); - } - -event connection_pending(c: connection) - { - if ( watch_connections ) - catch_release_seen(c$id$orig_h); - } diff --git a/scripts/base/frameworks/netcontrol/cluster.bro b/scripts/base/frameworks/netcontrol/cluster.bro deleted file mode 100644 index d70ab6d1c1..0000000000 --- a/scripts/base/frameworks/netcontrol/cluster.bro +++ /dev/null @@ -1,175 +0,0 @@ -##! Cluster support for the NetControl framework. - -@load ./main -@load base/frameworks/cluster - -module NetControl; - -export { - ## This is the event used to transport add_rule calls to the manager. - global cluster_netcontrol_add_rule: event(r: Rule); - - ## This is the event used to transport remove_rule calls to the manager. - global cluster_netcontrol_remove_rule: event(id: string, reason: string); - - ## This is the event used to transport delete_rule calls to the manager. 
- global cluster_netcontrol_delete_rule: event(id: string, reason: string); -} - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) -event bro_init() - { - Broker::auto_publish(Cluster::worker_topic, NetControl::rule_added); - Broker::auto_publish(Cluster::worker_topic, NetControl::rule_removed); - Broker::auto_publish(Cluster::worker_topic, NetControl::rule_timeout); - Broker::auto_publish(Cluster::worker_topic, NetControl::rule_error); - Broker::auto_publish(Cluster::worker_topic, NetControl::rule_exists); - Broker::auto_publish(Cluster::worker_topic, NetControl::rule_new); - Broker::auto_publish(Cluster::worker_topic, NetControl::rule_destroyed); - } -@else -event bro_init() - { - Broker::auto_publish(Cluster::manager_topic, NetControl::cluster_netcontrol_add_rule); - Broker::auto_publish(Cluster::manager_topic, NetControl::cluster_netcontrol_remove_rule); - Broker::auto_publish(Cluster::manager_topic, NetControl::cluster_netcontrol_delete_rule); - } -@endif - -function activate(p: PluginState, priority: int) - { - # We only run the activate function on the manager. - if ( Cluster::local_node_type() != Cluster::MANAGER ) - return; - - activate_impl(p, priority); - } - -global local_rule_count: count = 1; - -function add_rule(r: Rule) : string - { - if ( Cluster::local_node_type() == Cluster::MANAGER ) - return add_rule_impl(r); - else - { - # We sync rule entities accross the cluster, so we - # actually can test if the rule already exists. If yes, - # refuse insertion already at the node. - - if ( [r$entity, r$ty] in rule_entities ) - { - log_rule_no_plugin(r, FAILED, "discarded duplicate insertion"); - return ""; - } - - if ( r$id == "" ) - r$id = cat(Cluster::node, ":", ++local_rule_count); - - event NetControl::cluster_netcontrol_add_rule(r); - return r$id; - } - } - -function delete_rule(id: string, reason: string &default="") : bool - { - if ( Cluster::local_node_type() == Cluster::MANAGER ) - return delete_rule_impl(id, reason); - else - { - event NetControl::cluster_netcontrol_delete_rule(id, reason); - return T; # well, we can't know here. So - just hope... - } - } - -function remove_rule(id: string, reason: string &default="") : bool - { - if ( Cluster::local_node_type() == Cluster::MANAGER ) - return remove_rule_impl(id, reason); - else - { - event NetControl::cluster_netcontrol_remove_rule(id, reason); - return T; # well, we can't know here. So - just hope... - } - } - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) -event NetControl::cluster_netcontrol_delete_rule(id: string, reason: string) - { - delete_rule_impl(id, reason); - } - -event NetControl::cluster_netcontrol_add_rule(r: Rule) - { - add_rule_impl(r); - } - -event NetControl::cluster_netcontrol_remove_rule(id: string, reason: string) - { - remove_rule_impl(id, reason); - } - -event rule_expire(r: Rule, p: PluginState) &priority=-5 - { - rule_expire_impl(r, p); - } - -event rule_exists(r: Rule, p: PluginState, msg: string &default="") &priority=5 - { - rule_added_impl(r, p, T, msg); - - if ( r?$expire && r$expire > 0secs && ! p$plugin$can_expire ) - schedule r$expire { rule_expire(r, p) }; - } - -event rule_added(r: Rule, p: PluginState, msg: string &default="") &priority=5 - { - rule_added_impl(r, p, F, msg); - - if ( r?$expire && r$expire > 0secs && ! 
p$plugin$can_expire ) - schedule r$expire { rule_expire(r, p) }; - } - -event rule_removed(r: Rule, p: PluginState, msg: string &default="") &priority=-5 - { - rule_removed_impl(r, p, msg); - } - -event rule_timeout(r: Rule, i: FlowInfo, p: PluginState) &priority=-5 - { - rule_timeout_impl(r, i, p); - } - -event rule_error(r: Rule, p: PluginState, msg: string &default="") &priority=-5 - { - rule_error_impl(r, p, msg); - } -@endif - -# Workers use the events to keep track in their local state tables -@if ( Cluster::local_node_type() != Cluster::MANAGER ) - -event rule_new(r: Rule) &priority=5 - { - if ( r$id in rules ) - return; - - rules[r$id] = r; - rule_entities[r$entity, r$ty] = r; - - add_subnet_entry(r); - } - -event rule_destroyed(r: Rule) &priority=5 - { - if ( r$id !in rules ) - return; - - remove_subnet_entry(r); - if ( [r$entity, r$ty] in rule_entities ) - delete rule_entities[r$entity, r$ty]; - - delete rules[r$id]; - } - -@endif diff --git a/scripts/base/frameworks/netcontrol/cluster.zeek b/scripts/base/frameworks/netcontrol/cluster.zeek new file mode 100644 index 0000000000..3fbd4cd6a1 --- /dev/null +++ b/scripts/base/frameworks/netcontrol/cluster.zeek @@ -0,0 +1,175 @@ +##! Cluster support for the NetControl framework. + +@load ./main +@load base/frameworks/cluster + +module NetControl; + +export { + ## This is the event used to transport add_rule calls to the manager. + global cluster_netcontrol_add_rule: event(r: Rule); + + ## This is the event used to transport remove_rule calls to the manager. + global cluster_netcontrol_remove_rule: event(id: string, reason: string); + + ## This is the event used to transport delete_rule calls to the manager. + global cluster_netcontrol_delete_rule: event(id: string, reason: string); +} + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) +event zeek_init() + { + Broker::auto_publish(Cluster::worker_topic, NetControl::rule_added); + Broker::auto_publish(Cluster::worker_topic, NetControl::rule_removed); + Broker::auto_publish(Cluster::worker_topic, NetControl::rule_timeout); + Broker::auto_publish(Cluster::worker_topic, NetControl::rule_error); + Broker::auto_publish(Cluster::worker_topic, NetControl::rule_exists); + Broker::auto_publish(Cluster::worker_topic, NetControl::rule_new); + Broker::auto_publish(Cluster::worker_topic, NetControl::rule_destroyed); + } +@else +event zeek_init() + { + Broker::auto_publish(Cluster::manager_topic, NetControl::cluster_netcontrol_add_rule); + Broker::auto_publish(Cluster::manager_topic, NetControl::cluster_netcontrol_remove_rule); + Broker::auto_publish(Cluster::manager_topic, NetControl::cluster_netcontrol_delete_rule); + } +@endif + +function activate(p: PluginState, priority: int) + { + # We only run the activate function on the manager. + if ( Cluster::local_node_type() != Cluster::MANAGER ) + return; + + activate_impl(p, priority); + } + +global local_rule_count: count = 1; + +function add_rule(r: Rule) : string + { + if ( Cluster::local_node_type() == Cluster::MANAGER ) + return add_rule_impl(r); + else + { + # We sync rule entities accross the cluster, so we + # actually can test if the rule already exists. If yes, + # refuse insertion already at the node. 
+ + if ( [r$entity, r$ty] in rule_entities ) + { + log_rule_no_plugin(r, FAILED, "discarded duplicate insertion"); + return ""; + } + + if ( r$id == "" ) + r$id = cat(Cluster::node, ":", ++local_rule_count); + + event NetControl::cluster_netcontrol_add_rule(r); + return r$id; + } + } + +function delete_rule(id: string, reason: string &default="") : bool + { + if ( Cluster::local_node_type() == Cluster::MANAGER ) + return delete_rule_impl(id, reason); + else + { + event NetControl::cluster_netcontrol_delete_rule(id, reason); + return T; # well, we can't know here. So - just hope... + } + } + +function remove_rule(id: string, reason: string &default="") : bool + { + if ( Cluster::local_node_type() == Cluster::MANAGER ) + return remove_rule_impl(id, reason); + else + { + event NetControl::cluster_netcontrol_remove_rule(id, reason); + return T; # well, we can't know here. So - just hope... + } + } + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) +event NetControl::cluster_netcontrol_delete_rule(id: string, reason: string) + { + delete_rule_impl(id, reason); + } + +event NetControl::cluster_netcontrol_add_rule(r: Rule) + { + add_rule_impl(r); + } + +event NetControl::cluster_netcontrol_remove_rule(id: string, reason: string) + { + remove_rule_impl(id, reason); + } + +event rule_expire(r: Rule, p: PluginState) &priority=-5 + { + rule_expire_impl(r, p); + } + +event rule_exists(r: Rule, p: PluginState, msg: string &default="") &priority=5 + { + rule_added_impl(r, p, T, msg); + + if ( r?$expire && r$expire > 0secs && ! p$plugin$can_expire ) + schedule r$expire { rule_expire(r, p) }; + } + +event rule_added(r: Rule, p: PluginState, msg: string &default="") &priority=5 + { + rule_added_impl(r, p, F, msg); + + if ( r?$expire && r$expire > 0secs && ! p$plugin$can_expire ) + schedule r$expire { rule_expire(r, p) }; + } + +event rule_removed(r: Rule, p: PluginState, msg: string &default="") &priority=-5 + { + rule_removed_impl(r, p, msg); + } + +event rule_timeout(r: Rule, i: FlowInfo, p: PluginState) &priority=-5 + { + rule_timeout_impl(r, i, p); + } + +event rule_error(r: Rule, p: PluginState, msg: string &default="") &priority=-5 + { + rule_error_impl(r, p, msg); + } +@endif + +# Workers use the events to keep track in their local state tables +@if ( Cluster::local_node_type() != Cluster::MANAGER ) + +event rule_new(r: Rule) &priority=5 + { + if ( r$id in rules ) + return; + + rules[r$id] = r; + rule_entities[r$entity, r$ty] = r; + + add_subnet_entry(r); + } + +event rule_destroyed(r: Rule) &priority=5 + { + if ( r$id !in rules ) + return; + + remove_subnet_entry(r); + if ( [r$entity, r$ty] in rule_entities ) + delete rule_entities[r$entity, r$ty]; + + delete rules[r$id]; + } + +@endif diff --git a/scripts/base/frameworks/netcontrol/drop.bro b/scripts/base/frameworks/netcontrol/drop.bro deleted file mode 100644 index 8b31996057..0000000000 --- a/scripts/base/frameworks/netcontrol/drop.bro +++ /dev/null @@ -1,110 +0,0 @@ -##! Implementation of the drop functionality for NetControl. - -module NetControl; - -@load ./main - -export { - redef enum Log::ID += { DROP }; - - ## Stops all packets involving an IP address from being forwarded. - ## - ## a: The address to be dropped. - ## - ## t: How long to drop it, with 0 being indefinitely. - ## - ## location: An optional string describing where the drop was triggered. - ## - ## Returns: The id of the inserted rule on success and zero on failure. 
- global drop_address: function(a: addr, t: interval, location: string &default="") : string; - - ## Stops all packets involving a connection address from being forwarded. - ## - ## c: The connection to be dropped. - ## - ## t: How long to drop it, with 0 being indefinitely. - ## - ## location: An optional string describing where the drop was triggered. - ## - ## Returns: The id of the inserted rule on success and zero on failure. - global drop_connection: function(c: conn_id, t: interval, location: string &default="") : string; - - type DropInfo: record { - ## Time at which the recorded activity occurred. - ts: time &log; - ## ID of the rule; unique during each Bro run. - rule_id: string &log; - orig_h: addr &log; ##< The originator's IP address. - orig_p: port &log &optional; ##< The originator's port number. - resp_h: addr &log &optional; ##< The responder's IP address. - resp_p: port &log &optional; ##< The responder's port number. - ## Expiry time of the shunt. - expire: interval &log; - ## Location where the underlying action was triggered. - location: string &log &optional; - }; - - ## Hook that allows the modification of rules passed to drop_* before they - ## are passed on. If one of the hooks uses break, the rule is ignored. - ## - ## r: The rule to be added. - global NetControl::drop_rule_policy: hook(r: Rule); - - ## Event that can be handled to access the :bro:type:`NetControl::ShuntInfo` - ## record as it is sent on to the logging framework. - global log_netcontrol_drop: event(rec: DropInfo); -} - -event bro_init() &priority=5 - { - Log::create_stream(NetControl::DROP, [$columns=DropInfo, $ev=log_netcontrol_drop, $path="netcontrol_drop"]); - } - -function drop_connection(c: conn_id, t: interval, location: string &default="") : string - { - local e: Entity = [$ty=CONNECTION, $conn=c]; - local r: Rule = [$ty=DROP, $target=FORWARD, $entity=e, $expire=t, $location=location]; - - if ( ! hook NetControl::drop_rule_policy(r) ) - return ""; - - local id = add_rule(r); - - # Error should already be logged - if ( id == "" ) - return id; - - local log = DropInfo($ts=network_time(), $rule_id=id, $orig_h=c$orig_h, $orig_p=c$orig_p, $resp_h=c$resp_h, $resp_p=c$resp_p, $expire=t); - - if ( location != "" ) - log$location=location; - - Log::write(DROP, log); - - return id; - } - -function drop_address(a: addr, t: interval, location: string &default="") : string - { - local e: Entity = [$ty=ADDRESS, $ip=addr_to_subnet(a)]; - local r: Rule = [$ty=DROP, $target=FORWARD, $entity=e, $expire=t, $location=location]; - - if ( ! hook NetControl::drop_rule_policy(r) ) - return ""; - - local id = add_rule(r); - - # Error should already be logged - if ( id == "" ) - return id; - - local log = DropInfo($ts=network_time(), $rule_id=id, $orig_h=a, $expire=t); - - if ( location != "" ) - log$location=location; - - Log::write(DROP, log); - - return id; - } - diff --git a/scripts/base/frameworks/netcontrol/drop.zeek b/scripts/base/frameworks/netcontrol/drop.zeek new file mode 100644 index 0000000000..d5feb8d83a --- /dev/null +++ b/scripts/base/frameworks/netcontrol/drop.zeek @@ -0,0 +1,110 @@ +##! Implementation of the drop functionality for NetControl. + +@load ./main + +module NetControl; + +export { + redef enum Log::ID += { DROP }; + + ## Stops all packets involving an IP address from being forwarded. + ## + ## a: The address to be dropped. + ## + ## t: How long to drop it, with 0 being indefinitely. + ## + ## location: An optional string describing where the drop was triggered. 
+ ## + ## Returns: The id of the inserted rule on success and zero on failure. + global drop_address: function(a: addr, t: interval, location: string &default="") : string; + + ## Stops all packets involving a connection address from being forwarded. + ## + ## c: The connection to be dropped. + ## + ## t: How long to drop it, with 0 being indefinitely. + ## + ## location: An optional string describing where the drop was triggered. + ## + ## Returns: The id of the inserted rule on success and zero on failure. + global drop_connection: function(c: conn_id, t: interval, location: string &default="") : string; + + type DropInfo: record { + ## Time at which the recorded activity occurred. + ts: time &log; + ## ID of the rule; unique during each Zeek run. + rule_id: string &log; + orig_h: addr &log; ##< The originator's IP address. + orig_p: port &log &optional; ##< The originator's port number. + resp_h: addr &log &optional; ##< The responder's IP address. + resp_p: port &log &optional; ##< The responder's port number. + ## Expiry time of the shunt. + expire: interval &log; + ## Location where the underlying action was triggered. + location: string &log &optional; + }; + + ## Hook that allows the modification of rules passed to drop_* before they + ## are passed on. If one of the hooks uses break, the rule is ignored. + ## + ## r: The rule to be added. + global NetControl::drop_rule_policy: hook(r: Rule); + + ## Event that can be handled to access the :zeek:type:`NetControl::ShuntInfo` + ## record as it is sent on to the logging framework. + global log_netcontrol_drop: event(rec: DropInfo); +} + +event zeek_init() &priority=5 + { + Log::create_stream(NetControl::DROP, [$columns=DropInfo, $ev=log_netcontrol_drop, $path="netcontrol_drop"]); + } + +function drop_connection(c: conn_id, t: interval, location: string &default="") : string + { + local e: Entity = [$ty=CONNECTION, $conn=c]; + local r: Rule = [$ty=DROP, $target=FORWARD, $entity=e, $expire=t, $location=location]; + + if ( ! hook NetControl::drop_rule_policy(r) ) + return ""; + + local id = add_rule(r); + + # Error should already be logged + if ( id == "" ) + return id; + + local log = DropInfo($ts=network_time(), $rule_id=id, $orig_h=c$orig_h, $orig_p=c$orig_p, $resp_h=c$resp_h, $resp_p=c$resp_p, $expire=t); + + if ( location != "" ) + log$location=location; + + Log::write(DROP, log); + + return id; + } + +function drop_address(a: addr, t: interval, location: string &default="") : string + { + local e: Entity = [$ty=ADDRESS, $ip=addr_to_subnet(a)]; + local r: Rule = [$ty=DROP, $target=FORWARD, $entity=e, $expire=t, $location=location]; + + if ( ! hook NetControl::drop_rule_policy(r) ) + return ""; + + local id = add_rule(r); + + # Error should already be logged + if ( id == "" ) + return id; + + local log = DropInfo($ts=network_time(), $rule_id=id, $orig_h=a, $expire=t); + + if ( location != "" ) + log$location=location; + + Log::write(DROP, log); + + return id; + } + diff --git a/scripts/base/frameworks/netcontrol/main.bro b/scripts/base/frameworks/netcontrol/main.bro deleted file mode 100644 index a9418508af..0000000000 --- a/scripts/base/frameworks/netcontrol/main.bro +++ /dev/null @@ -1,1060 +0,0 @@ -##! Bro's NetControl framework. -##! -##! This plugin-based framework allows to control the traffic that Bro monitors -##! as well as, if having access to the forwarding path, the traffic the network -##! forwards. By default, the framework lets everything through, to both Bro -##! itself as well as on the network. 
Scripts can then add rules to impose -##! restrictions on entities, such as specific connections or IP addresses. -##! -##! This framework has two APIs: a high-level and low-level. The high-level API -##! provides convenience functions for a set of common operations. The -##! low-level API provides full flexibility. - -module NetControl; - -@load ./plugin -@load ./types - -export { - ## The framework's logging stream identifier. - redef enum Log::ID += { LOG }; - - # ### - # ### Generic functions and events. - # ### - - ## Activates a plugin. - ## - ## p: The plugin to activate. - ## - ## priority: The higher the priority, the earlier this plugin will be checked - ## whether it supports an operation, relative to other plugins. - global activate: function(p: PluginState, priority: int); - - ## Event that is used to initialize plugins. Place all plugin initialization - ## related functionality in this event. - global NetControl::init: event(); - - ## Event that is raised once all plugins activated in ``NetControl::init`` - ## have finished their initialization. - global NetControl::init_done: event(); - - # ### - # ### High-level API. - # ### - - # ### Note - other high level primitives are in catch-and-release.bro, shunt.bro and - # ### drop.bro - - ## Allows all traffic involving a specific IP address to be forwarded. - ## - ## a: The address to be whitelisted. - ## - ## t: How long to whitelist it, with 0 being indefinitely. - ## - ## location: An optional string describing whitelist was triddered. - ## - ## Returns: The id of the inserted rule on success and zero on failure. - global whitelist_address: function(a: addr, t: interval, location: string &default="") : string; - - ## Allows all traffic involving a specific IP subnet to be forwarded. - ## - ## s: The subnet to be whitelisted. - ## - ## t: How long to whitelist it, with 0 being indefinitely. - ## - ## location: An optional string describing whitelist was triddered. - ## - ## Returns: The id of the inserted rule on success and zero on failure. - global whitelist_subnet: function(s: subnet, t: interval, location: string &default="") : string; - - ## Redirects a uni-directional flow to another port. - ## - ## f: The flow to redirect. - ## - ## out_port: Port to redirect the flow to. - ## - ## t: How long to leave the redirect in place, with 0 being indefinitely. - ## - ## location: An optional string describing where the redirect was triggered. - ## - ## Returns: The id of the inserted rule on success and zero on failure. - global redirect_flow: function(f: flow_id, out_port: count, t: interval, location: string &default="") : string; - - ## Quarantines a host. This requires a special quarantine server, which runs a HTTP server explaining - ## the quarantine and a DNS server which resolves all requests to the quarantine server. DNS queries - ## from the host to the network DNS server will be rewritten and will be sent to the quarantine server - ## instead. Only http communication infected to quarantinehost is allowed. All other network communication - ## is blocked. - ## - ## infected: the host to quarantine. - ## - ## dns: the network dns server. - ## - ## quarantine: the quarantine server running a dns and a web server. - ## - ## t: how long to leave the quarantine in place. - ## - ## Returns: Vector of inserted rules on success, empty list on failure. 
- global quarantine_host: function(infected: addr, dns: addr, quarantine: addr, t: interval, location: string &default="") : vector of string; - - ## Flushes all state by calling :bro:see:`NetControl::remove_rule` on all currently active rules. - global clear: function(); - - # ### - # ### Low-level API. - # ### - - ###### Manipulation of rules. - - ## Installs a rule. - ## - ## r: The rule to install. - ## - ## Returns: If successful, returns an ID string unique to the rule that can - ## later be used to refer to it. If unsuccessful, returns an empty - ## string. The ID is also assigned to ``r$id``. Note that - ## "successful" means "a plugin knew how to handle the rule", it - ## doesn't necessarily mean that it was indeed successfully put in - ## place, because that might happen asynchronously and thus fail - ## only later. - global add_rule: function(r: Rule) : string; - - ## Removes a rule. - ## - ## id: The rule to remove, specified as the ID returned by :bro:see:`NetControl::add_rule`. - ## - ## reason: Optional string argument giving information on why the rule was removed. - ## - ## Returns: True if successful, the relevant plugin indicated that it knew - ## how to handle the removal. Note that again "success" means the - ## plugin accepted the removal. It might still fail to put it - ## into effect, as that might happen asynchronously and thus go - ## wrong at that point. - global remove_rule: function(id: string, reason: string &default="") : bool; - - ## Deletes a rule without removing it from the backends to which it has been - ## added before. This means that no messages will be sent to the switches to which - ## the rule has been added; if it is not removed from them by a separate mechanism, - ## it will stay installed and not be removed later. - ## - ## id: The rule to delete, specified as the ID returned by :bro:see:`NetControl::add_rule`. - ## - ## reason: Optional string argument giving information on why the rule was deleted. - ## - ## Returns: True if removal is successful, or sent to manager. - ## False if the rule could not be found. - global delete_rule: function(id: string, reason: string &default="") : bool; - - ## Searches all rules affecting a certain IP address. - ## - ## This function works on both the manager and workers of a cluster. Note that on - ## the worker, the internal rule variables (starting with _) will not reflect the - ## current state. - ## - ## ip: The ip address to search for. - ## - ## Returns: vector of all rules affecting the IP address. - global find_rules_addr: function(ip: addr) : vector of Rule; - - ## Searches all rules affecting a certain subnet. - ## - ## A rule affects a subnet, if it covers the whole subnet. Note especially that - ## this function will not reveal all rules that are covered by a subnet. - ## - ## For example, a search for 192.168.17.0/8 will reveal a rule that exists for - ## 192.168.0.0/16, since this rule affects the subnet. However, it will not reveal - ## a more specific rule for 192.168.17.1/32, which does not directy affect the whole - ## subnet. - ## - ## This function works on both the manager and workers of a cluster. Note that on - ## the worker, the internal rule variables (starting with _) will not reflect the - ## current state. - ## - ## sn: The subnet to search for. - ## - ## Returns: vector of all rules affecting the subnet. - global find_rules_subnet: function(sn: subnet) : vector of Rule; - - ###### Asynchronous feedback on rules. - - ## Confirms that a rule was put in place by a plugin. 
- ## - ## r: The rule now in place. - ## - ## p: The state for the plugin that put it into place. - ## - ## msg: An optional informational message by the plugin. - global rule_added: event(r: Rule, p: PluginState, msg: string &default=""); - - ## Signals that a rule that was supposed to be put in place was already - ## existing at the specified plugin. Rules that already have been existing - ## continue to be tracked like normal, but no timeout calls will be sent - ## to the specified plugins. Removal of the rule from the hardware can - ## still be forced by manually issuing a remove_rule call. - ## - ## r: The rule that was already in place. - ## - ## p: The plugin that reported that the rule already was in place. - ## - ## msg: An optional informational message by the plugin. - global rule_exists: event(r: Rule, p: PluginState, msg: string &default=""); - - ## Reports that a plugin reports a rule was removed due to a - ## remove_rule function call. - ## - ## r: The rule now removed. - ## - ## p: The state for the plugin that had the rule in place and now - ## removed it. - ## - ## msg: An optional informational message by the plugin. - global rule_removed: event(r: Rule, p: PluginState, msg: string &default=""); - - ## Reports that a rule was removed from a plugin due to a timeout. - ## - ## r: The rule now removed. - ## - ## i: Additional flow information, if supported by the protocol. - ## - ## p: The state for the plugin that had the rule in place and now - ## removed it. - ## - ## msg: An optional informational message by the plugin. - global rule_timeout: event(r: Rule, i: FlowInfo, p: PluginState); - - ## Reports an error when operating on a rule. - ## - ## r: The rule that encountered an error. - ## - ## p: The state for the plugin that reported the error. - ## - ## msg: An optional informational message by the plugin. - global rule_error: event(r: Rule, p: PluginState, msg: string &default=""); - - ## This event is raised when a new rule is created by the NetControl framework - ## due to a call to add_rule. From this moment, until the rule_destroyed event - ## is raised, the rule is tracked internally by the NetControl framework. - ## - ## Note that this event does not mean that a rule was successfully added by - ## any backend; it just means that the rule has been accepted and addition - ## to the specified backend is queued. To get information when rules are actually - ## installed by the hardware, use the rule_added, rule_exists, rule_removed, rule_timeout - ## and rule_error events. - global rule_new: event(r: Rule); - - ## This event is raised when a rule is deleted from the NetControl framework, - ## because it is no longer in use. This can be caused by the fact that a rule - ## was removed by all plugins to which it was added, by the fact that it timed out - ## or due to rule errors. - ## - ## To get the cause of a rule remove, catch the rule_removed, rule_timeout and - ## rule_error events. - global rule_destroyed: event(r: Rule); - - ## Hook that allows the modification of rules passed to add_rule before they - ## are passed on to the plugins. If one of the hooks uses break, the rule is - ## ignored and not passed on to any plugin. - ## - ## r: The rule to be added. - global NetControl::rule_policy: hook(r: Rule); - - ##### Plugin functions - - ## Function called by plugins once they finished their activation. After all - ## plugins defined in bro_init finished to activate, rules will start to be sent - ## to the plugins. 
Rules that scripts try to set before the backends are ready - ## will be discarded. - global plugin_activated: function(p: PluginState); - - ## Type of an entry in the NetControl log. - type InfoCategory: enum { - ## A log entry reflecting a framework message. - MESSAGE, - ## A log entry reflecting a framework message. - ERROR, - ## A log entry about a rule. - RULE - }; - - ## State of an entry in the NetControl log. - type InfoState: enum { - REQUESTED, ##< The request to add/remove a rule was sent to the respective backend. - SUCCEEDED, ##< A rule was successfully added by a backend. - EXISTS, ##< A backend reported that a rule was already existing. - FAILED, ##< A rule addition failed. - REMOVED, ##< A rule was successfully removed by a backend. - TIMEOUT, ##< A rule timeout was triggered by the NetControl framework or a backend. - }; - - ## The record type defining the column fields of the NetControl log. - type Info: record { - ## Time at which the recorded activity occurred. - ts: time &log; - ## ID of the rule; unique during each Bro run. - rule_id: string &log &optional; - ## Type of the log entry. - category: InfoCategory &log &optional; - ## The command the log entry is about. - cmd: string &log &optional; - ## State the log entry reflects. - state: InfoState &log &optional; - ## String describing an action the entry is about. - action: string &log &optional; - ## The target type of the action. - target: TargetType &log &optional; - ## Type of the entity the log entry is about. - entity_type: string &log &optional; - ## String describing the entity the log entry is about. - entity: string &log &optional; - ## String describing the optional modification of the entry (e.h. redirect) - mod: string &log &optional; - ## String with an additional message. - msg: string &log &optional; - ## Number describing the priority of the log entry. - priority: int &log &optional; - ## Expiry time of the log entry. - expire: interval &log &optional; - ## Location where the underlying action was triggered. - location: string &log &optional; - ## Plugin triggering the log entry. - plugin: string &log &optional; - }; - - ## Event that can be handled to access the :bro:type:`NetControl::Info` - ## record as it is sent on to the logging framework. - global log_netcontrol: event(rec: Info); -} - -redef record Rule += { - ## Internally set to the plugins handling the rule. - _plugin_ids: set[count] &default=count_set(); - ## Internally set to the plugins on which the rule is currently active. - _active_plugin_ids: set[count] &default=count_set(); - ## Internally set to plugins where the rule should not be removed upon timeout. - _no_expire_plugins: set[count] &default=count_set(); - ## Track if the rule was added successfully by all responsible plugins. - _added: bool &default=F; -}; - -# Variable tracking the state of plugin activation. Once all plugins that -# have been added in bro_init are activated, this will switch to T and -# the event NetControl::init_done will be raised. -global plugins_active: bool = F; - -# Set to true at the end of bro_init (with very low priority). -# Used to track when plugin activation could potentially be finished -global bro_init_done: bool = F; - -# The counters that are used to generate the rule and plugin IDs -global rule_counter: count = 1; -global plugin_counter: count = 1; - -# List of the currently active plugins -global plugins: vector of PluginState; -global plugin_ids: table[count] of PluginState; - -# These tables hold information about rules. 
-global rules: table[string] of Rule; # Rules indexed by id and cid - -# All rules that apply to a certain subnet/IP address. -global rules_by_subnets: table[subnet] of set[string]; - -# Rules pertaining to a specific entity. -# There always only can be one rule of each type for one entity. -global rule_entities: table[Entity, RuleType] of Rule; - -event bro_init() &priority=5 - { - Log::create_stream(NetControl::LOG, [$columns=Info, $ev=log_netcontrol, $path="netcontrol"]); - } - -function entity_to_info(info: Info, e: Entity) - { - info$entity_type = fmt("%s", e$ty); - - switch ( e$ty ) { - case ADDRESS: - info$entity = fmt("%s", e$ip); - break; - - case CONNECTION: - info$entity = fmt("%s/%d<->%s/%d", - e$conn$orig_h, e$conn$orig_p, - e$conn$resp_h, e$conn$resp_p); - break; - - case FLOW: - local ffrom_ip = "*"; - local ffrom_port = "*"; - local fto_ip = "*"; - local fto_port = "*"; - local ffrom_mac = "*"; - local fto_mac = "*"; - if ( e$flow?$src_h ) - ffrom_ip = cat(e$flow$src_h); - if ( e$flow?$src_p ) - ffrom_port = fmt("%d", e$flow$src_p); - if ( e$flow?$dst_h ) - fto_ip = cat(e$flow$dst_h); - if ( e$flow?$dst_p ) - fto_port = fmt("%d", e$flow$dst_p); - info$entity = fmt("%s/%s->%s/%s", - ffrom_ip, ffrom_port, - fto_ip, fto_port); - if ( e$flow?$src_m || e$flow?$dst_m ) - { - if ( e$flow?$src_m ) - ffrom_mac = e$flow$src_m; - if ( e$flow?$dst_m ) - fto_mac = e$flow$dst_m; - - info$entity = fmt("%s (%s->%s)", info$entity, ffrom_mac, fto_mac); - } - break; - - case MAC: - info$entity = e$mac; - break; - - default: - info$entity = ""; - break; - } - } - -function rule_to_info(info: Info, r: Rule) - { - info$action = fmt("%s", r$ty); - info$target = r$target; - info$rule_id = r$id; - info$expire = r$expire; - info$priority = r$priority; - - if ( r?$location && r$location != "" ) - info$location = r$location; - - if ( r$ty == REDIRECT ) - info$mod = fmt("-> %d", r$out_port); - - if ( r$ty == MODIFY ) - { - local mfrom_ip = "_"; - local mfrom_port = "_"; - local mto_ip = "_"; - local mto_port = "_"; - local mfrom_mac = "_"; - local mto_mac = "_"; - if ( r$mod?$src_h ) - mfrom_ip = cat(r$mod$src_h); - if ( r$mod?$src_p ) - mfrom_port = fmt("%d", r$mod$src_p); - if ( r$mod?$dst_h ) - mto_ip = cat(r$mod$dst_h); - if ( r$mod?$dst_p ) - mto_port = fmt("%d", r$mod$dst_p); - - if ( r$mod?$src_m ) - mfrom_mac = r$mod$src_m; - if ( r$mod?$dst_m ) - mto_mac = r$mod$dst_m; - - info$mod = fmt("Src: %s/%s (%s) Dst: %s/%s (%s)", - mfrom_ip, mfrom_port, mfrom_mac, mto_ip, mto_port, mto_mac); - - if ( r$mod?$redirect_port ) - info$mod = fmt("%s -> %d", info$mod, r$mod$redirect_port); - - } - - entity_to_info(info, r$entity); - } - -function log_msg(msg: string, p: PluginState) - { - Log::write(LOG, [$ts=network_time(), $category=MESSAGE, $msg=msg, $plugin=p$plugin$name(p)]); - } - -function log_error(msg: string, p: PluginState) - { - Log::write(LOG, [$ts=network_time(), $category=ERROR, $msg=msg, $plugin=p$plugin$name(p)]); - } - -function log_msg_no_plugin(msg: string) - { - Log::write(LOG, [$ts=network_time(), $category=MESSAGE, $msg=msg]); - } - -function log_rule(r: Rule, cmd: string, state: InfoState, p: PluginState, msg: string &default="") - { - local info: Info = [$ts=network_time()]; - info$category = RULE; - info$cmd = cmd; - info$state = state; - info$plugin = p$plugin$name(p); - if ( msg != "" ) - info$msg = msg; - - rule_to_info(info, r); - - Log::write(LOG, info); - } - -function log_rule_error(r: Rule, msg: string, p: PluginState) - { - local info: Info = [$ts=network_time(), 
$category=ERROR, $msg=msg, $plugin=p$plugin$name(p)]; - rule_to_info(info, r); - Log::write(LOG, info); - } - -function log_rule_no_plugin(r: Rule, state: InfoState, msg: string) - { - local info: Info = [$ts=network_time()]; - info$category = RULE; - info$state = state; - info$msg = msg; - - rule_to_info(info, r); - - Log::write(LOG, info); - } - -function whitelist_address(a: addr, t: interval, location: string &default="") : string - { - local e: Entity = [$ty=ADDRESS, $ip=addr_to_subnet(a)]; - local r: Rule = [$ty=WHITELIST, $priority=whitelist_priority, $target=FORWARD, $entity=e, $expire=t, $location=location]; - - return add_rule(r); - } - -function whitelist_subnet(s: subnet, t: interval, location: string &default="") : string - { - local e: Entity = [$ty=ADDRESS, $ip=s]; - local r: Rule = [$ty=WHITELIST, $priority=whitelist_priority, $target=FORWARD, $entity=e, $expire=t, $location=location]; - - return add_rule(r); - } - - -function redirect_flow(f: flow_id, out_port: count, t: interval, location: string &default="") : string - { - local flow = NetControl::Flow( - $src_h=addr_to_subnet(f$src_h), - $src_p=f$src_p, - $dst_h=addr_to_subnet(f$dst_h), - $dst_p=f$dst_p - ); - local e: Entity = [$ty=FLOW, $flow=flow]; - local r: Rule = [$ty=REDIRECT, $target=FORWARD, $entity=e, $expire=t, $location=location, $out_port=out_port]; - - return add_rule(r); - } - -function quarantine_host(infected: addr, dns: addr, quarantine: addr, t: interval, location: string &default="") : vector of string - { - local orules: vector of string = vector(); - local edrop: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected))]; - local rdrop: Rule = [$ty=DROP, $target=FORWARD, $entity=edrop, $expire=t, $location=location]; - orules += add_rule(rdrop); - - local todnse: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected), $dst_h=addr_to_subnet(dns), $dst_p=53/udp)]; - local todnsr = Rule($ty=MODIFY, $target=FORWARD, $entity=todnse, $expire=t, $location=location, $mod=FlowMod($dst_h=quarantine), $priority=+5); - orules += add_rule(todnsr); - - local fromdnse: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(dns), $src_p=53/udp, $dst_h=addr_to_subnet(infected))]; - local fromdnsr = Rule($ty=MODIFY, $target=FORWARD, $entity=fromdnse, $expire=t, $location=location, $mod=FlowMod($src_h=dns), $priority=+5); - orules += add_rule(fromdnsr); - - local wle: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected), $dst_h=addr_to_subnet(quarantine), $dst_p=80/tcp)]; - local wlr = Rule($ty=WHITELIST, $target=FORWARD, $entity=wle, $expire=t, $location=location, $priority=+5); - orules += add_rule(wlr); - - return orules; - } - -function check_plugins() - { - if ( plugins_active ) - return; - - local all_active = T; - for ( i in plugins ) - { - local p = plugins[i]; - if ( p$_activated == F ) - all_active = F; - } - - if ( all_active ) - { - plugins_active = T; - - # Skip log message if there are no plugins - if ( |plugins| > 0 ) - log_msg_no_plugin("plugin initialization done"); - - event NetControl::init_done(); - } - } - -function plugin_activated(p: PluginState) - { - local id = p$_id; - if ( id !in plugin_ids ) - { - log_error("unknown plugin activated", p); - return; - } - - # Suppress duplicate activation - if ( plugin_ids[id]$_activated == T ) - return; - - plugin_ids[id]$_activated = T; - log_msg("activation finished", p); - - if ( bro_init_done ) - check_plugins(); - } - -event bro_init() &priority=-5 - { - event NetControl::init(); - } - -event NetControl::init() &priority=-20 - { 
- bro_init_done = T; - - check_plugins(); - - if ( plugins_active == F ) - log_msg_no_plugin("waiting for plugins to initialize"); - } - -# Low-level functions that only runs on the manager (or standalone) Bro node. - -function activate_impl(p: PluginState, priority: int) - { - p$_priority = priority; - plugins += p; - sort(plugins, function(p1: PluginState, p2: PluginState) : int { return p2$_priority - p1$_priority; }); - - plugin_ids[plugin_counter] = p; - p$_id = plugin_counter; - ++plugin_counter; - - # perform one-time initialization - if ( p$plugin?$init ) - { - log_msg(fmt("activating plugin with priority %d", priority), p); - p$plugin$init(p); - } - else - { - # no initialization necessary, mark plugin as active right away - plugin_activated(p); - } - - } - -function add_one_subnet_entry(s: subnet, r: Rule) - { - if ( ! check_subnet(s, rules_by_subnets) ) - rules_by_subnets[s] = set(r$id); - else - add rules_by_subnets[s][r$id]; - } - -function add_subnet_entry(rule: Rule) - { - local e = rule$entity; - if ( e$ty == ADDRESS ) - { - add_one_subnet_entry(e$ip, rule); - } - else if ( e$ty == CONNECTION ) - { - add_one_subnet_entry(addr_to_subnet(e$conn$orig_h), rule); - add_one_subnet_entry(addr_to_subnet(e$conn$resp_h), rule); - } - else if ( e$ty == FLOW ) - { - if ( e$flow?$src_h ) - add_one_subnet_entry(e$flow$src_h, rule); - if ( e$flow?$dst_h ) - add_one_subnet_entry(e$flow$dst_h, rule); - } - } - -function remove_one_subnet_entry(s: subnet, r: Rule) - { - if ( ! check_subnet(s, rules_by_subnets) ) - return; - - if ( r$id !in rules_by_subnets[s] ) - return; - - delete rules_by_subnets[s][r$id]; - if ( |rules_by_subnets[s]| == 0 ) - delete rules_by_subnets[s]; - } - -function remove_subnet_entry(rule: Rule) - { - local e = rule$entity; - if ( e$ty == ADDRESS ) - { - remove_one_subnet_entry(e$ip, rule); - } - else if ( e$ty == CONNECTION ) - { - remove_one_subnet_entry(addr_to_subnet(e$conn$orig_h), rule); - remove_one_subnet_entry(addr_to_subnet(e$conn$resp_h), rule); - } - else if ( e$ty == FLOW ) - { - if ( e$flow?$src_h ) - remove_one_subnet_entry(e$flow$src_h, rule); - if ( e$flow?$dst_h ) - remove_one_subnet_entry(e$flow$dst_h, rule); - } - } - -function find_rules_subnet(sn: subnet) : vector of Rule - { - local ret: vector of Rule = vector(); - - local matches = matching_subnets(sn, rules_by_subnets); - - for ( m in matches ) - { - local sn_entry = matches[m]; - local rule_ids = rules_by_subnets[sn_entry]; - for ( rule_id in rules_by_subnets[sn_entry] ) - { - if ( rule_id in rules ) - ret += rules[rule_id]; - else - Reporter::error("find_rules_subnet - internal data structure error, missing rule"); - } - } - - return ret; - } - -function find_rules_addr(ip: addr) : vector of Rule - { - return find_rules_subnet(addr_to_subnet(ip)); - } - -function add_rule_impl(rule: Rule) : string - { - if ( ! plugins_active ) - { - log_rule_no_plugin(rule, FAILED, "plugins not initialized yet"); - return ""; - } - - rule$cid = ++rule_counter; # numeric id that can be used by plugins for their rules. - - if ( ! rule?$id || rule$id == "" ) - rule$id = cat(rule$cid); - - if ( ! 
hook NetControl::rule_policy(rule) ) - return ""; - - if ( [rule$entity, rule$ty] in rule_entities ) - { - log_rule_no_plugin(rule, FAILED, "discarded duplicate insertion"); - return ""; - } - - local accepted = F; - local priority: int = +0; - - for ( i in plugins ) - { - local p = plugins[i]; - - if ( p$_activated == F ) - next; - - # in this case, rule was accepted by earlier plugin and this plugin has a lower - # priority. Abort and do not send there... - if ( accepted == T && p$_priority != priority ) - break; - - if ( p$plugin$add_rule(p, rule) ) - { - accepted = T; - priority = p$_priority; - log_rule(rule, "ADD", REQUESTED, p); - - add rule$_plugin_ids[p$_id]; - } - } - - if ( accepted ) - { - rules[rule$id] = rule; - rule_entities[rule$entity, rule$ty] = rule; - - add_subnet_entry(rule); - - event NetControl::rule_new(rule); - - return rule$id; - } - - log_rule_no_plugin(rule, FAILED, "not supported"); - return ""; - } - -function rule_cleanup(r: Rule) - { - if ( |r$_active_plugin_ids| > 0 ) - return; - - remove_subnet_entry(r); - - delete rule_entities[r$entity, r$ty]; - delete rules[r$id]; - - event NetControl::rule_destroyed(r); - } - -function delete_rule_impl(id: string, reason: string): bool - { - if ( id !in rules ) - { - Reporter::error(fmt("Rule %s does not exist in NetControl::delete_rule", id)); - return F; - } - - local rule = rules[id]; - - rule$_active_plugin_ids = set(); - - rule_cleanup(rule); - if ( reason != "" ) - log_rule_no_plugin(rule, REMOVED, fmt("delete_rule: %s", reason)); - else - log_rule_no_plugin(rule, REMOVED, "delete_rule"); - - return T; - } - -function remove_rule_plugin(r: Rule, p: PluginState, reason: string &default=""): bool - { - local success = T; - - if ( ! p$plugin$remove_rule(p, r, reason) ) - { - # still continue and send to other plugins - if ( reason != "" ) - log_rule_error(r, fmt("remove failed (original reason: %s)", reason), p); - else - log_rule_error(r, "remove failed", p); - success = F; - } - else - { - log_rule(r, "REMOVE", REQUESTED, p, reason); - } - - return success; - } - -function remove_rule_impl(id: string, reason: string) : bool - { - if ( id !in rules ) - { - Reporter::error(fmt("Rule %s does not exist in NetControl::remove_rule", id)); - return F; - } - - local r = rules[id]; - - local success = T; - for ( plugin_id in r$_active_plugin_ids ) - { - local p = plugin_ids[plugin_id]; - success = remove_rule_plugin(r, p, reason); - } - - return success; - } - -function rule_expire_impl(r: Rule, p: PluginState) &priority=-5 - { - # do not emit timeout events on shutdown - if ( bro_is_terminating() ) - return; - - if ( r$id !in rules ) - # Removed already. - return; - - local rule = rules[r$id]; - - if ( p$_id in rule$_no_expire_plugins ) - { - # in this case - don't log anything, just remove the plugin from the rule - # and cleaup - delete rule$_active_plugin_ids[p$_id]; - delete rule$_no_expire_plugins[p$_id]; - rule_cleanup(rule); - } - else - event NetControl::rule_timeout(r, FlowInfo(), p); # timeout implementation will handle the removal - } - -function rule_added_impl(r: Rule, p: PluginState, exists: bool, msg: string &default="") - { - if ( r$id !in rules ) - { - log_rule_error(r, "Addition of unknown rule", p); - return; - } - - # use our version to prevent operating on copies. - local rule = rules[r$id]; - if ( p$_id !in rule$_plugin_ids ) - { - log_rule_error(rule, "Rule added to non-responsible plugin", p); - return; - } - - # The rule was already existing on the backend. 
Mark this so we don't timeout - # it on this backend. - if ( exists ) - { - add rule$_no_expire_plugins[p$_id]; - log_rule(r, "ADD", EXISTS, p, msg); - } - else - log_rule(r, "ADD", SUCCEEDED, p, msg); - - add rule$_active_plugin_ids[p$_id]; - if ( |rule$_plugin_ids| == |rule$_active_plugin_ids| ) - { - # rule was completely added. - rule$_added = T; - } - } - -function rule_removed_impl(r: Rule, p: PluginState, msg: string &default="") - { - if ( r$id !in rules ) - { - log_rule_error(r, "Removal of non-existing rule", p); - return; - } - - # use our version to prevent operating on copies. - local rule = rules[r$id]; - - if ( p$_id !in rule$_plugin_ids ) - { - log_rule_error(r, "Removed from non-assigned plugin", p); - return; - } - - if ( p$_id in rule$_active_plugin_ids ) - { - delete rule$_active_plugin_ids[p$_id]; - } - - log_rule(rule, "REMOVE", SUCCEEDED, p, msg); - rule_cleanup(rule); - } - -function rule_timeout_impl(r: Rule, i: FlowInfo, p: PluginState) - { - if ( r$id !in rules ) - { - log_rule_error(r, "Timeout of non-existing rule", p); - return; - } - - local rule = rules[r$id]; - - local msg = ""; - if ( i?$packet_count ) - msg = fmt("Packets: %d", i$packet_count); - if ( i?$byte_count ) - { - if ( msg != "" ) - msg = msg + " "; - msg = fmt("%sBytes: %s", msg, i$byte_count); - } - - log_rule(rule, "EXPIRE", TIMEOUT, p, msg); - - if ( ! p$plugin$can_expire ) - { - # in this case, we actually have to delete the rule and the timeout - # call just originated locally - remove_rule_plugin(rule, p); - return; - } - - if ( p$_id !in rule$_plugin_ids ) - { - log_rule_error(r, "Timeout from non-assigned plugin", p); - return; - } - - if ( p$_id in rule$_active_plugin_ids ) - { - delete rule$_active_plugin_ids[p$_id]; - } - - rule_cleanup(rule); - } - -function rule_error_impl(r: Rule, p: PluginState, msg: string &default="") - { - if ( r$id !in rules ) - { - log_rule_error(r, "Error of non-existing rule", p); - return; - } - - local rule = rules[r$id]; - - log_rule_error(rule, msg, p); - - # Remove the plugin both from active and all plugins of the rule. If there - # are no plugins left afterwards - delete it - if ( p$_id !in rule$_plugin_ids ) - { - log_rule_error(r, "Error from non-assigned plugin", p); - return; - } - - if ( p$_id in rule$_active_plugin_ids ) - { - # error during removal. Let's pretend it worked. - delete rule$_plugin_ids[p$_id]; - delete rule$_active_plugin_ids[p$_id]; - rule_cleanup(rule); - } - else - { - # error during insertion. Meh. If we are the only plugin, remove the rule again. - # Otherwhise - keep it, minus us. - delete rule$_plugin_ids[p$_id]; - if ( |rule$_plugin_ids| == 0 ) - { - rule_cleanup(rule); - } - } - } - -function clear() - { - for ( id in rules ) - remove_rule(id); - } diff --git a/scripts/base/frameworks/netcontrol/main.zeek b/scripts/base/frameworks/netcontrol/main.zeek new file mode 100644 index 0000000000..f22d1eb06c --- /dev/null +++ b/scripts/base/frameworks/netcontrol/main.zeek @@ -0,0 +1,1060 @@ +##! Zeek's NetControl framework. +##! +##! This plugin-based framework allows to control the traffic that Zeek monitors +##! as well as, if having access to the forwarding path, the traffic the network +##! forwards. By default, the framework lets everything through, to both Zeek +##! itself as well as on the network. Scripts can then add rules to impose +##! restrictions on entities, such as specific connections or IP addresses. +##! +##! This framework has two APIs: a high-level and low-level. The high-level API +##! 
provides convenience functions for a set of common operations. The +##! low-level API provides full flexibility. + +@load ./plugin +@load ./types + +module NetControl; + +export { + ## The framework's logging stream identifier. + redef enum Log::ID += { LOG }; + + # ### + # ### Generic functions and events. + # ### + + ## Activates a plugin. + ## + ## p: The plugin to activate. + ## + ## priority: The higher the priority, the earlier this plugin will be checked + ## whether it supports an operation, relative to other plugins. + global activate: function(p: PluginState, priority: int); + + ## Event that is used to initialize plugins. Place all plugin initialization + ## related functionality in this event. + global NetControl::init: event(); + + ## Event that is raised once all plugins activated in ``NetControl::init`` + ## have finished their initialization. + global NetControl::init_done: event(); + + # ### + # ### High-level API. + # ### + + # ### Note - other high level primitives are in catch-and-release.zeek, + # ### shunt.zeek and drop.zeek + + ## Allows all traffic involving a specific IP address to be forwarded. + ## + ## a: The address to be whitelisted. + ## + ## t: How long to whitelist it, with 0 being indefinitely. + ## + ## location: An optional string describing whitelist was triddered. + ## + ## Returns: The id of the inserted rule on success and zero on failure. + global whitelist_address: function(a: addr, t: interval, location: string &default="") : string; + + ## Allows all traffic involving a specific IP subnet to be forwarded. + ## + ## s: The subnet to be whitelisted. + ## + ## t: How long to whitelist it, with 0 being indefinitely. + ## + ## location: An optional string describing whitelist was triddered. + ## + ## Returns: The id of the inserted rule on success and zero on failure. + global whitelist_subnet: function(s: subnet, t: interval, location: string &default="") : string; + + ## Redirects a uni-directional flow to another port. + ## + ## f: The flow to redirect. + ## + ## out_port: Port to redirect the flow to. + ## + ## t: How long to leave the redirect in place, with 0 being indefinitely. + ## + ## location: An optional string describing where the redirect was triggered. + ## + ## Returns: The id of the inserted rule on success and zero on failure. + global redirect_flow: function(f: flow_id, out_port: count, t: interval, location: string &default="") : string; + + ## Quarantines a host. This requires a special quarantine server, which runs a HTTP server explaining + ## the quarantine and a DNS server which resolves all requests to the quarantine server. DNS queries + ## from the host to the network DNS server will be rewritten and will be sent to the quarantine server + ## instead. Only http communication infected to quarantinehost is allowed. All other network communication + ## is blocked. + ## + ## infected: the host to quarantine. + ## + ## dns: the network dns server. + ## + ## quarantine: the quarantine server running a dns and a web server. + ## + ## t: how long to leave the quarantine in place. + ## + ## Returns: Vector of inserted rules on success, empty list on failure. + global quarantine_host: function(infected: addr, dns: addr, quarantine: addr, t: interval, location: string &default="") : vector of string; + + ## Flushes all state by calling :zeek:see:`NetControl::remove_rule` on all currently active rules. + global clear: function(); + + # ### + # ### Low-level API. + # ### + + ###### Manipulation of rules. + + ## Installs a rule. 
+ ## + ## r: The rule to install. + ## + ## Returns: If successful, returns an ID string unique to the rule that can + ## later be used to refer to it. If unsuccessful, returns an empty + ## string. The ID is also assigned to ``r$id``. Note that + ## "successful" means "a plugin knew how to handle the rule", it + ## doesn't necessarily mean that it was indeed successfully put in + ## place, because that might happen asynchronously and thus fail + ## only later. + global add_rule: function(r: Rule) : string; + + ## Removes a rule. + ## + ## id: The rule to remove, specified as the ID returned by :zeek:see:`NetControl::add_rule`. + ## + ## reason: Optional string argument giving information on why the rule was removed. + ## + ## Returns: True if successful, the relevant plugin indicated that it knew + ## how to handle the removal. Note that again "success" means the + ## plugin accepted the removal. It might still fail to put it + ## into effect, as that might happen asynchronously and thus go + ## wrong at that point. + global remove_rule: function(id: string, reason: string &default="") : bool; + + ## Deletes a rule without removing it from the backends to which it has been + ## added before. This means that no messages will be sent to the switches to which + ## the rule has been added; if it is not removed from them by a separate mechanism, + ## it will stay installed and not be removed later. + ## + ## id: The rule to delete, specified as the ID returned by :zeek:see:`NetControl::add_rule`. + ## + ## reason: Optional string argument giving information on why the rule was deleted. + ## + ## Returns: True if removal is successful, or sent to manager. + ## False if the rule could not be found. + global delete_rule: function(id: string, reason: string &default="") : bool; + + ## Searches all rules affecting a certain IP address. + ## + ## This function works on both the manager and workers of a cluster. Note that on + ## the worker, the internal rule variables (starting with _) will not reflect the + ## current state. + ## + ## ip: The ip address to search for. + ## + ## Returns: vector of all rules affecting the IP address. + global find_rules_addr: function(ip: addr) : vector of Rule; + + ## Searches all rules affecting a certain subnet. + ## + ## A rule affects a subnet, if it covers the whole subnet. Note especially that + ## this function will not reveal all rules that are covered by a subnet. + ## + ## For example, a search for 192.168.17.0/8 will reveal a rule that exists for + ## 192.168.0.0/16, since this rule affects the subnet. However, it will not reveal + ## a more specific rule for 192.168.17.1/32, which does not directy affect the whole + ## subnet. + ## + ## This function works on both the manager and workers of a cluster. Note that on + ## the worker, the internal rule variables (starting with _) will not reflect the + ## current state. + ## + ## sn: The subnet to search for. + ## + ## Returns: vector of all rules affecting the subnet. + global find_rules_subnet: function(sn: subnet) : vector of Rule; + + ###### Asynchronous feedback on rules. + + ## Confirms that a rule was put in place by a plugin. + ## + ## r: The rule now in place. + ## + ## p: The state for the plugin that put it into place. + ## + ## msg: An optional informational message by the plugin. + global rule_added: event(r: Rule, p: PluginState, msg: string &default=""); + + ## Signals that a rule that was supposed to be put in place was already + ## existing at the specified plugin. 
+
+	###### Asynchronous feedback on rules.
+
+	## Confirms that a rule was put in place by a plugin.
+	##
+	## r: The rule now in place.
+	##
+	## p: The state for the plugin that put it into place.
+	##
+	## msg: An optional informational message by the plugin.
+	global rule_added: event(r: Rule, p: PluginState, msg: string &default="");
+
+	## Signals that a rule that was supposed to be put in place already
+	## existed at the specified plugin. Rules that already existed
+	## continue to be tracked as normal, but no timeout calls will be sent
+	## to the specified plugins. Removal of the rule from the hardware can
+	## still be forced by manually issuing a remove_rule call.
+	##
+	## r: The rule that was already in place.
+	##
+	## p: The plugin that reported that the rule already was in place.
+	##
+	## msg: An optional informational message by the plugin.
+	global rule_exists: event(r: Rule, p: PluginState, msg: string &default="");
+
+	## Reports that a rule was removed by a plugin due to a
+	## remove_rule function call.
+	##
+	## r: The rule now removed.
+	##
+	## p: The state for the plugin that had the rule in place and now
+	## removed it.
+	##
+	## msg: An optional informational message by the plugin.
+	global rule_removed: event(r: Rule, p: PluginState, msg: string &default="");
+
+	## Reports that a rule was removed from a plugin due to a timeout.
+	##
+	## r: The rule now removed.
+	##
+	## i: Additional flow information, if supported by the protocol.
+	##
+	## p: The state for the plugin that had the rule in place and now
+	## removed it.
+	global rule_timeout: event(r: Rule, i: FlowInfo, p: PluginState);
+
+	## Reports an error when operating on a rule.
+	##
+	## r: The rule that encountered an error.
+	##
+	## p: The state for the plugin that reported the error.
+	##
+	## msg: An optional informational message by the plugin.
+	global rule_error: event(r: Rule, p: PluginState, msg: string &default="");
+
+	## This event is raised when a new rule is created by the NetControl framework
+	## due to a call to add_rule. From this moment, until the rule_destroyed event
+	## is raised, the rule is tracked internally by the NetControl framework.
+	##
+	## Note that this event does not mean that a rule was successfully added by
+	## any backend; it just means that the rule has been accepted and addition
+	## to the specified backend is queued. To be notified when rules are actually
+	## installed by the hardware, use the rule_added, rule_exists, rule_removed, rule_timeout
+	## and rule_error events.
+	global rule_new: event(r: Rule);
+
+	## This event is raised when a rule is deleted from the NetControl framework,
+	## because it is no longer in use. This can happen because the rule was removed
+	## by all plugins to which it was added, because it timed out, or due to rule errors.
+	##
+	## To determine the cause of a rule removal, catch the rule_removed, rule_timeout and
+	## rule_error events.
+	global rule_destroyed: event(r: Rule);
+
+	## Hook that allows the modification of rules passed to add_rule before they
+	## are passed on to the plugins. If one of the hooks uses break, the rule is
+	## ignored and not passed on to any plugin.
+	##
+	## r: The rule to be added.
+	global NetControl::rule_policy: hook(r: Rule);
+
+	##### Plugin functions
+
+	## Function called by plugins once they finished their activation. After all
+	## plugins defined in zeek_init have finished activating, rules will start to be sent
+	## to the plugins. Rules that scripts try to set before the backends are ready
+	## will be discarded.
+	global plugin_activated: function(p: PluginState);
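Two brief consumer-side sketches, not part of the patch itself: a rule_policy hook that vetoes rules touching a critical host, and a handler for the asynchronous rule_error feedback declared above. The address is a placeholder.

# Veto any rule whose entity is a single critical address.
hook NetControl::rule_policy(r: NetControl::Rule)
	{
	if ( r$entity$ty == NetControl::ADDRESS && r$entity?$ip &&
	     203.0.113.1 in r$entity$ip )
		break;
	}

# React to failures reported back by the plugins.
event NetControl::rule_error(r: NetControl::Rule, p: NetControl::PluginState, msg: string)
	{
	Reporter::warning(fmt("NetControl rule %s failed: %s", r$id, msg));
	}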
+
+	## Type of an entry in the NetControl log.
+	type InfoCategory: enum {
+		## A log entry reflecting a framework message.
+		MESSAGE,
+		## A log entry reflecting a framework error.
+		ERROR,
+		## A log entry about a rule.
+		RULE
+	};
+
+	## State of an entry in the NetControl log.
+	type InfoState: enum {
+		REQUESTED, ##< The request to add/remove a rule was sent to the respective backend.
+		SUCCEEDED, ##< A rule was successfully added by a backend.
+		EXISTS, ##< A backend reported that a rule already existed.
+		FAILED, ##< A rule addition failed.
+		REMOVED, ##< A rule was successfully removed by a backend.
+		TIMEOUT, ##< A rule timeout was triggered by the NetControl framework or a backend.
+	};
+
+	## The record type defining the column fields of the NetControl log.
+	type Info: record {
+		## Time at which the recorded activity occurred.
+		ts: time &log;
+		## ID of the rule; unique during each Zeek run.
+		rule_id: string &log &optional;
+		## Type of the log entry.
+		category: InfoCategory &log &optional;
+		## The command the log entry is about.
+		cmd: string &log &optional;
+		## State the log entry reflects.
+		state: InfoState &log &optional;
+		## String describing an action the entry is about.
+		action: string &log &optional;
+		## The target type of the action.
+		target: TargetType &log &optional;
+		## Type of the entity the log entry is about.
+		entity_type: string &log &optional;
+		## String describing the entity the log entry is about.
+		entity: string &log &optional;
+		## String describing the optional modification of the entry (e.g. redirect).
+		mod: string &log &optional;
+		## String with an additional message.
+		msg: string &log &optional;
+		## Number describing the priority of the log entry.
+		priority: int &log &optional;
+		## Expiry time of the log entry.
+		expire: interval &log &optional;
+		## Location where the underlying action was triggered.
+		location: string &log &optional;
+		## Plugin triggering the log entry.
+		plugin: string &log &optional;
+	};
+
+	## Event that can be handled to access the :zeek:type:`NetControl::Info`
+	## record as it is sent on to the logging framework.
+	global log_netcontrol: event(rec: Info);
+}
+
+redef record Rule += {
+	## Internally set to the plugins handling the rule.
+	_plugin_ids: set[count] &default=count_set();
+	## Internally set to the plugins on which the rule is currently active.
+	_active_plugin_ids: set[count] &default=count_set();
+	## Internally set to plugins where the rule should not be removed upon timeout.
+	_no_expire_plugins: set[count] &default=count_set();
+	## Track if the rule was added successfully by all responsible plugins.
+	_added: bool &default=F;
+};
+
+# Variable tracking the state of plugin activation. Once all plugins that
+# have been added in zeek_init are activated, this will switch to T and
+# the event NetControl::init_done will be raised.
+global plugins_active: bool = F;
+
+# Set to true at the end of zeek_init (with very low priority).
+# Used to track when plugin activation could potentially be finished.
+global zeek_init_done: bool = F;
+
+# The counters that are used to generate the rule and plugin IDs.
+global rule_counter: count = 1;
+global plugin_counter: count = 1;
+
+# List of the currently active plugins.
+global plugins: vector of PluginState;
+global plugin_ids: table[count] of PluginState;
+
+# These tables hold information about rules.
+global rules: table[string] of Rule; # Rules indexed by id and cid
+
+# All rules that apply to a certain subnet/IP address.
+global rules_by_subnets: table[subnet] of set[string];
+
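A consumer-side sketch, not part of the patch itself: the log_netcontrol event declared above can be handled to react to entries as they are written to netcontrol.log. NetControl::FAILED and the field names come from the InfoState enum and Info record above.

event NetControl::log_netcontrol(rec: NetControl::Info)
	{
	if ( rec?$state && rec$state == NetControl::FAILED )
		Reporter::warning(fmt("NetControl operation failed: %s",
		                      rec?$msg ? rec$msg : "no message"));
	}

+# Rules pertaining to a specific entity.
+# There can only ever be one rule of each type for one entity.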
+global rule_entities: table[Entity, RuleType] of Rule; + +event zeek_init() &priority=5 + { + Log::create_stream(NetControl::LOG, [$columns=Info, $ev=log_netcontrol, $path="netcontrol"]); + } + +function entity_to_info(info: Info, e: Entity) + { + info$entity_type = fmt("%s", e$ty); + + switch ( e$ty ) { + case ADDRESS: + info$entity = fmt("%s", e$ip); + break; + + case CONNECTION: + info$entity = fmt("%s/%d<->%s/%d", + e$conn$orig_h, e$conn$orig_p, + e$conn$resp_h, e$conn$resp_p); + break; + + case FLOW: + local ffrom_ip = "*"; + local ffrom_port = "*"; + local fto_ip = "*"; + local fto_port = "*"; + local ffrom_mac = "*"; + local fto_mac = "*"; + if ( e$flow?$src_h ) + ffrom_ip = cat(e$flow$src_h); + if ( e$flow?$src_p ) + ffrom_port = fmt("%d", e$flow$src_p); + if ( e$flow?$dst_h ) + fto_ip = cat(e$flow$dst_h); + if ( e$flow?$dst_p ) + fto_port = fmt("%d", e$flow$dst_p); + info$entity = fmt("%s/%s->%s/%s", + ffrom_ip, ffrom_port, + fto_ip, fto_port); + if ( e$flow?$src_m || e$flow?$dst_m ) + { + if ( e$flow?$src_m ) + ffrom_mac = e$flow$src_m; + if ( e$flow?$dst_m ) + fto_mac = e$flow$dst_m; + + info$entity = fmt("%s (%s->%s)", info$entity, ffrom_mac, fto_mac); + } + break; + + case MAC: + info$entity = e$mac; + break; + + default: + info$entity = ""; + break; + } + } + +function rule_to_info(info: Info, r: Rule) + { + info$action = fmt("%s", r$ty); + info$target = r$target; + info$rule_id = r$id; + info$expire = r$expire; + info$priority = r$priority; + + if ( r?$location && r$location != "" ) + info$location = r$location; + + if ( r$ty == REDIRECT ) + info$mod = fmt("-> %d", r$out_port); + + if ( r$ty == MODIFY ) + { + local mfrom_ip = "_"; + local mfrom_port = "_"; + local mto_ip = "_"; + local mto_port = "_"; + local mfrom_mac = "_"; + local mto_mac = "_"; + if ( r$mod?$src_h ) + mfrom_ip = cat(r$mod$src_h); + if ( r$mod?$src_p ) + mfrom_port = fmt("%d", r$mod$src_p); + if ( r$mod?$dst_h ) + mto_ip = cat(r$mod$dst_h); + if ( r$mod?$dst_p ) + mto_port = fmt("%d", r$mod$dst_p); + + if ( r$mod?$src_m ) + mfrom_mac = r$mod$src_m; + if ( r$mod?$dst_m ) + mto_mac = r$mod$dst_m; + + info$mod = fmt("Src: %s/%s (%s) Dst: %s/%s (%s)", + mfrom_ip, mfrom_port, mfrom_mac, mto_ip, mto_port, mto_mac); + + if ( r$mod?$redirect_port ) + info$mod = fmt("%s -> %d", info$mod, r$mod$redirect_port); + + } + + entity_to_info(info, r$entity); + } + +function log_msg(msg: string, p: PluginState) + { + Log::write(LOG, [$ts=network_time(), $category=MESSAGE, $msg=msg, $plugin=p$plugin$name(p)]); + } + +function log_error(msg: string, p: PluginState) + { + Log::write(LOG, [$ts=network_time(), $category=ERROR, $msg=msg, $plugin=p$plugin$name(p)]); + } + +function log_msg_no_plugin(msg: string) + { + Log::write(LOG, [$ts=network_time(), $category=MESSAGE, $msg=msg]); + } + +function log_rule(r: Rule, cmd: string, state: InfoState, p: PluginState, msg: string &default="") + { + local info: Info = [$ts=network_time()]; + info$category = RULE; + info$cmd = cmd; + info$state = state; + info$plugin = p$plugin$name(p); + if ( msg != "" ) + info$msg = msg; + + rule_to_info(info, r); + + Log::write(LOG, info); + } + +function log_rule_error(r: Rule, msg: string, p: PluginState) + { + local info: Info = [$ts=network_time(), $category=ERROR, $msg=msg, $plugin=p$plugin$name(p)]; + rule_to_info(info, r); + Log::write(LOG, info); + } + +function log_rule_no_plugin(r: Rule, state: InfoState, msg: string) + { + local info: Info = [$ts=network_time()]; + info$category = RULE; + info$state = state; + info$msg = msg; + + 
rule_to_info(info, r); + + Log::write(LOG, info); + } + +function whitelist_address(a: addr, t: interval, location: string &default="") : string + { + local e: Entity = [$ty=ADDRESS, $ip=addr_to_subnet(a)]; + local r: Rule = [$ty=WHITELIST, $priority=whitelist_priority, $target=FORWARD, $entity=e, $expire=t, $location=location]; + + return add_rule(r); + } + +function whitelist_subnet(s: subnet, t: interval, location: string &default="") : string + { + local e: Entity = [$ty=ADDRESS, $ip=s]; + local r: Rule = [$ty=WHITELIST, $priority=whitelist_priority, $target=FORWARD, $entity=e, $expire=t, $location=location]; + + return add_rule(r); + } + + +function redirect_flow(f: flow_id, out_port: count, t: interval, location: string &default="") : string + { + local flow = NetControl::Flow( + $src_h=addr_to_subnet(f$src_h), + $src_p=f$src_p, + $dst_h=addr_to_subnet(f$dst_h), + $dst_p=f$dst_p + ); + local e: Entity = [$ty=FLOW, $flow=flow]; + local r: Rule = [$ty=REDIRECT, $target=FORWARD, $entity=e, $expire=t, $location=location, $out_port=out_port]; + + return add_rule(r); + } + +function quarantine_host(infected: addr, dns: addr, quarantine: addr, t: interval, location: string &default="") : vector of string + { + local orules: vector of string = vector(); + local edrop: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected))]; + local rdrop: Rule = [$ty=DROP, $target=FORWARD, $entity=edrop, $expire=t, $location=location]; + orules += add_rule(rdrop); + + local todnse: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected), $dst_h=addr_to_subnet(dns), $dst_p=53/udp)]; + local todnsr = Rule($ty=MODIFY, $target=FORWARD, $entity=todnse, $expire=t, $location=location, $mod=FlowMod($dst_h=quarantine), $priority=+5); + orules += add_rule(todnsr); + + local fromdnse: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(dns), $src_p=53/udp, $dst_h=addr_to_subnet(infected))]; + local fromdnsr = Rule($ty=MODIFY, $target=FORWARD, $entity=fromdnse, $expire=t, $location=location, $mod=FlowMod($src_h=dns), $priority=+5); + orules += add_rule(fromdnsr); + + local wle: Entity = [$ty=FLOW, $flow=Flow($src_h=addr_to_subnet(infected), $dst_h=addr_to_subnet(quarantine), $dst_p=80/tcp)]; + local wlr = Rule($ty=WHITELIST, $target=FORWARD, $entity=wle, $expire=t, $location=location, $priority=+5); + orules += add_rule(wlr); + + return orules; + } + +function check_plugins() + { + if ( plugins_active ) + return; + + local all_active = T; + for ( i in plugins ) + { + local p = plugins[i]; + if ( p$_activated == F ) + all_active = F; + } + + if ( all_active ) + { + plugins_active = T; + + # Skip log message if there are no plugins + if ( |plugins| > 0 ) + log_msg_no_plugin("plugin initialization done"); + + event NetControl::init_done(); + } + } + +function plugin_activated(p: PluginState) + { + local id = p$_id; + if ( id !in plugin_ids ) + { + log_error("unknown plugin activated", p); + return; + } + + # Suppress duplicate activation + if ( plugin_ids[id]$_activated == T ) + return; + + plugin_ids[id]$_activated = T; + log_msg("activation finished", p); + + if ( zeek_init_done ) + check_plugins(); + } + +event zeek_init() &priority=-5 + { + event NetControl::init(); + } + +event NetControl::init() &priority=-20 + { + zeek_init_done = T; + + check_plugins(); + + if ( plugins_active == F ) + log_msg_no_plugin("waiting for plugins to initialize"); + } + +# Low-level functions that only runs on the manager (or standalone) Zeek node. 
+ +function activate_impl(p: PluginState, priority: int) + { + p$_priority = priority; + plugins += p; + sort(plugins, function(p1: PluginState, p2: PluginState) : int { return p2$_priority - p1$_priority; }); + + plugin_ids[plugin_counter] = p; + p$_id = plugin_counter; + ++plugin_counter; + + # perform one-time initialization + if ( p$plugin?$init ) + { + log_msg(fmt("activating plugin with priority %d", priority), p); + p$plugin$init(p); + } + else + { + # no initialization necessary, mark plugin as active right away + plugin_activated(p); + } + + } + +function add_one_subnet_entry(s: subnet, r: Rule) + { + if ( ! check_subnet(s, rules_by_subnets) ) + rules_by_subnets[s] = set(r$id); + else + add rules_by_subnets[s][r$id]; + } + +function add_subnet_entry(rule: Rule) + { + local e = rule$entity; + if ( e$ty == ADDRESS ) + { + add_one_subnet_entry(e$ip, rule); + } + else if ( e$ty == CONNECTION ) + { + add_one_subnet_entry(addr_to_subnet(e$conn$orig_h), rule); + add_one_subnet_entry(addr_to_subnet(e$conn$resp_h), rule); + } + else if ( e$ty == FLOW ) + { + if ( e$flow?$src_h ) + add_one_subnet_entry(e$flow$src_h, rule); + if ( e$flow?$dst_h ) + add_one_subnet_entry(e$flow$dst_h, rule); + } + } + +function remove_one_subnet_entry(s: subnet, r: Rule) + { + if ( ! check_subnet(s, rules_by_subnets) ) + return; + + if ( r$id !in rules_by_subnets[s] ) + return; + + delete rules_by_subnets[s][r$id]; + if ( |rules_by_subnets[s]| == 0 ) + delete rules_by_subnets[s]; + } + +function remove_subnet_entry(rule: Rule) + { + local e = rule$entity; + if ( e$ty == ADDRESS ) + { + remove_one_subnet_entry(e$ip, rule); + } + else if ( e$ty == CONNECTION ) + { + remove_one_subnet_entry(addr_to_subnet(e$conn$orig_h), rule); + remove_one_subnet_entry(addr_to_subnet(e$conn$resp_h), rule); + } + else if ( e$ty == FLOW ) + { + if ( e$flow?$src_h ) + remove_one_subnet_entry(e$flow$src_h, rule); + if ( e$flow?$dst_h ) + remove_one_subnet_entry(e$flow$dst_h, rule); + } + } + +function find_rules_subnet(sn: subnet) : vector of Rule + { + local ret: vector of Rule = vector(); + + local matches = matching_subnets(sn, rules_by_subnets); + + for ( m in matches ) + { + local sn_entry = matches[m]; + local rule_ids = rules_by_subnets[sn_entry]; + for ( rule_id in rules_by_subnets[sn_entry] ) + { + if ( rule_id in rules ) + ret += rules[rule_id]; + else + Reporter::error("find_rules_subnet - internal data structure error, missing rule"); + } + } + + return ret; + } + +function find_rules_addr(ip: addr) : vector of Rule + { + return find_rules_subnet(addr_to_subnet(ip)); + } + +function add_rule_impl(rule: Rule) : string + { + if ( ! plugins_active ) + { + log_rule_no_plugin(rule, FAILED, "plugins not initialized yet"); + return ""; + } + + rule$cid = ++rule_counter; # numeric id that can be used by plugins for their rules. + + if ( ! rule?$id || rule$id == "" ) + rule$id = cat(rule$cid); + + if ( ! hook NetControl::rule_policy(rule) ) + return ""; + + if ( [rule$entity, rule$ty] in rule_entities ) + { + log_rule_no_plugin(rule, FAILED, "discarded duplicate insertion"); + return ""; + } + + local accepted = F; + local priority: int = +0; + + for ( i in plugins ) + { + local p = plugins[i]; + + if ( p$_activated == F ) + next; + + # in this case, rule was accepted by earlier plugin and this plugin has a lower + # priority. Abort and do not send there... 
+ if ( accepted == T && p$_priority != priority ) + break; + + if ( p$plugin$add_rule(p, rule) ) + { + accepted = T; + priority = p$_priority; + log_rule(rule, "ADD", REQUESTED, p); + + add rule$_plugin_ids[p$_id]; + } + } + + if ( accepted ) + { + rules[rule$id] = rule; + rule_entities[rule$entity, rule$ty] = rule; + + add_subnet_entry(rule); + + event NetControl::rule_new(rule); + + return rule$id; + } + + log_rule_no_plugin(rule, FAILED, "not supported"); + return ""; + } + +function rule_cleanup(r: Rule) + { + if ( |r$_active_plugin_ids| > 0 ) + return; + + remove_subnet_entry(r); + + delete rule_entities[r$entity, r$ty]; + delete rules[r$id]; + + event NetControl::rule_destroyed(r); + } + +function delete_rule_impl(id: string, reason: string): bool + { + if ( id !in rules ) + { + Reporter::error(fmt("Rule %s does not exist in NetControl::delete_rule", id)); + return F; + } + + local rule = rules[id]; + + rule$_active_plugin_ids = set(); + + rule_cleanup(rule); + if ( reason != "" ) + log_rule_no_plugin(rule, REMOVED, fmt("delete_rule: %s", reason)); + else + log_rule_no_plugin(rule, REMOVED, "delete_rule"); + + return T; + } + +function remove_rule_plugin(r: Rule, p: PluginState, reason: string &default=""): bool + { + local success = T; + + if ( ! p$plugin$remove_rule(p, r, reason) ) + { + # still continue and send to other plugins + if ( reason != "" ) + log_rule_error(r, fmt("remove failed (original reason: %s)", reason), p); + else + log_rule_error(r, "remove failed", p); + success = F; + } + else + { + log_rule(r, "REMOVE", REQUESTED, p, reason); + } + + return success; + } + +function remove_rule_impl(id: string, reason: string) : bool + { + if ( id !in rules ) + { + Reporter::error(fmt("Rule %s does not exist in NetControl::remove_rule", id)); + return F; + } + + local r = rules[id]; + + local success = T; + for ( plugin_id in r$_active_plugin_ids ) + { + local p = plugin_ids[plugin_id]; + success = remove_rule_plugin(r, p, reason); + } + + return success; + } + +function rule_expire_impl(r: Rule, p: PluginState) &priority=-5 + { + # do not emit timeout events on shutdown + if ( zeek_is_terminating() ) + return; + + if ( r$id !in rules ) + # Removed already. + return; + + local rule = rules[r$id]; + + if ( p$_id in rule$_no_expire_plugins ) + { + # in this case - don't log anything, just remove the plugin from the rule + # and cleaup + delete rule$_active_plugin_ids[p$_id]; + delete rule$_no_expire_plugins[p$_id]; + rule_cleanup(rule); + } + else + event NetControl::rule_timeout(r, FlowInfo(), p); # timeout implementation will handle the removal + } + +function rule_added_impl(r: Rule, p: PluginState, exists: bool, msg: string &default="") + { + if ( r$id !in rules ) + { + log_rule_error(r, "Addition of unknown rule", p); + return; + } + + # use our version to prevent operating on copies. + local rule = rules[r$id]; + if ( p$_id !in rule$_plugin_ids ) + { + log_rule_error(rule, "Rule added to non-responsible plugin", p); + return; + } + + # The rule was already existing on the backend. Mark this so we don't timeout + # it on this backend. + if ( exists ) + { + add rule$_no_expire_plugins[p$_id]; + log_rule(r, "ADD", EXISTS, p, msg); + } + else + log_rule(r, "ADD", SUCCEEDED, p, msg); + + add rule$_active_plugin_ids[p$_id]; + if ( |rule$_plugin_ids| == |rule$_active_plugin_ids| ) + { + # rule was completely added. 
+ rule$_added = T; + } + } + +function rule_removed_impl(r: Rule, p: PluginState, msg: string &default="") + { + if ( r$id !in rules ) + { + log_rule_error(r, "Removal of non-existing rule", p); + return; + } + + # use our version to prevent operating on copies. + local rule = rules[r$id]; + + if ( p$_id !in rule$_plugin_ids ) + { + log_rule_error(r, "Removed from non-assigned plugin", p); + return; + } + + if ( p$_id in rule$_active_plugin_ids ) + { + delete rule$_active_plugin_ids[p$_id]; + } + + log_rule(rule, "REMOVE", SUCCEEDED, p, msg); + rule_cleanup(rule); + } + +function rule_timeout_impl(r: Rule, i: FlowInfo, p: PluginState) + { + if ( r$id !in rules ) + { + log_rule_error(r, "Timeout of non-existing rule", p); + return; + } + + local rule = rules[r$id]; + + local msg = ""; + if ( i?$packet_count ) + msg = fmt("Packets: %d", i$packet_count); + if ( i?$byte_count ) + { + if ( msg != "" ) + msg = msg + " "; + msg = fmt("%sBytes: %s", msg, i$byte_count); + } + + log_rule(rule, "EXPIRE", TIMEOUT, p, msg); + + if ( ! p$plugin$can_expire ) + { + # in this case, we actually have to delete the rule and the timeout + # call just originated locally + remove_rule_plugin(rule, p); + return; + } + + if ( p$_id !in rule$_plugin_ids ) + { + log_rule_error(r, "Timeout from non-assigned plugin", p); + return; + } + + if ( p$_id in rule$_active_plugin_ids ) + { + delete rule$_active_plugin_ids[p$_id]; + } + + rule_cleanup(rule); + } + +function rule_error_impl(r: Rule, p: PluginState, msg: string &default="") + { + if ( r$id !in rules ) + { + log_rule_error(r, "Error of non-existing rule", p); + return; + } + + local rule = rules[r$id]; + + log_rule_error(rule, msg, p); + + # Remove the plugin both from active and all plugins of the rule. If there + # are no plugins left afterwards - delete it + if ( p$_id !in rule$_plugin_ids ) + { + log_rule_error(r, "Error from non-assigned plugin", p); + return; + } + + if ( p$_id in rule$_active_plugin_ids ) + { + # error during removal. Let's pretend it worked. + delete rule$_plugin_ids[p$_id]; + delete rule$_active_plugin_ids[p$_id]; + rule_cleanup(rule); + } + else + { + # error during insertion. Meh. If we are the only plugin, remove the rule again. + # Otherwhise - keep it, minus us. + delete rule$_plugin_ids[p$_id]; + if ( |rule$_plugin_ids| == 0 ) + { + rule_cleanup(rule); + } + } + } + +function clear() + { + for ( id in rules ) + remove_rule(id); + } diff --git a/scripts/base/frameworks/netcontrol/non-cluster.bro b/scripts/base/frameworks/netcontrol/non-cluster.bro deleted file mode 100644 index ff300f2492..0000000000 --- a/scripts/base/frameworks/netcontrol/non-cluster.bro +++ /dev/null @@ -1,60 +0,0 @@ -module NetControl; - -@load ./main - -function activate(p: PluginState, priority: int) - { - activate_impl(p, priority); - } - -function add_rule(r: Rule) : string - { - return add_rule_impl(r); - } - -function delete_rule(id: string, reason: string &default="") : bool - { - return delete_rule_impl(id, reason); - } - -function remove_rule(id: string, reason: string &default="") : bool - { - return remove_rule_impl(id, reason); - } - -event rule_expire(r: Rule, p: PluginState) &priority=-5 - { - rule_expire_impl(r, p); - } - -event rule_exists(r: Rule, p: PluginState, msg: string &default="") &priority=5 - { - rule_added_impl(r, p, T, msg); - - if ( r?$expire && r$expire > 0secs && ! 
p$plugin$can_expire ) - schedule r$expire { rule_expire(r, p) }; - } - -event rule_added(r: Rule, p: PluginState, msg: string &default="") &priority=5 - { - rule_added_impl(r, p, F, msg); - - if ( r?$expire && r$expire > 0secs && ! p$plugin$can_expire ) - schedule r$expire { rule_expire(r, p) }; - } - -event rule_removed(r: Rule, p: PluginState, msg: string &default="") &priority=-5 - { - rule_removed_impl(r, p, msg); - } - -event rule_timeout(r: Rule, i: FlowInfo, p: PluginState) &priority=-5 - { - rule_timeout_impl(r, i, p); - } - -event rule_error(r: Rule, p: PluginState, msg: string &default="") &priority=-5 - { - rule_error_impl(r, p, msg); - } - diff --git a/scripts/base/frameworks/netcontrol/non-cluster.zeek b/scripts/base/frameworks/netcontrol/non-cluster.zeek new file mode 100644 index 0000000000..e1363fd883 --- /dev/null +++ b/scripts/base/frameworks/netcontrol/non-cluster.zeek @@ -0,0 +1,61 @@ + +@load ./main + +module NetControl; + +function activate(p: PluginState, priority: int) + { + activate_impl(p, priority); + } + +function add_rule(r: Rule) : string + { + return add_rule_impl(r); + } + +function delete_rule(id: string, reason: string &default="") : bool + { + return delete_rule_impl(id, reason); + } + +function remove_rule(id: string, reason: string &default="") : bool + { + return remove_rule_impl(id, reason); + } + +event rule_expire(r: Rule, p: PluginState) &priority=-5 + { + rule_expire_impl(r, p); + } + +event rule_exists(r: Rule, p: PluginState, msg: string &default="") &priority=5 + { + rule_added_impl(r, p, T, msg); + + if ( r?$expire && r$expire > 0secs && ! p$plugin$can_expire ) + schedule r$expire { rule_expire(r, p) }; + } + +event rule_added(r: Rule, p: PluginState, msg: string &default="") &priority=5 + { + rule_added_impl(r, p, F, msg); + + if ( r?$expire && r$expire > 0secs && ! p$plugin$can_expire ) + schedule r$expire { rule_expire(r, p) }; + } + +event rule_removed(r: Rule, p: PluginState, msg: string &default="") &priority=-5 + { + rule_removed_impl(r, p, msg); + } + +event rule_timeout(r: Rule, i: FlowInfo, p: PluginState) &priority=-5 + { + rule_timeout_impl(r, i, p); + } + +event rule_error(r: Rule, p: PluginState, msg: string &default="") &priority=-5 + { + rule_error_impl(r, p, msg); + } + diff --git a/scripts/base/frameworks/netcontrol/plugin.bro b/scripts/base/frameworks/netcontrol/plugin.bro deleted file mode 100644 index 2b432e7597..0000000000 --- a/scripts/base/frameworks/netcontrol/plugin.bro +++ /dev/null @@ -1,85 +0,0 @@ -##! This file defines the plugin interface for NetControl. - -module NetControl; - -@load ./types - -export { - ## This record keeps the per instance state of a plugin. - ## - ## Individual plugins commonly extend this record to suit their needs. - type PluginState: record { - ## Table for a plugin to store custom, instance-specific state. - config: table[string] of string &default=table(); - - ## Unique plugin identifier -- used for backlookup of plugins from Rules. Set internally. - _id: count &optional; - - ## Set internally. - _priority: int &default=+0; - - ## Set internally. Signifies if the plugin has returned that it has activated successfully. - _activated: bool &default=F; - }; - - ## Definition of a plugin. - ## - ## Generally a plugin needs to implement only what it can support. By - ## returning failure, it indicates that it can't support something and - ## the framework will then try another plugin, if available; or inform the - ## that the operation failed. 
If a function isn't implemented by a plugin, - ## that's considered an implicit failure to support the operation. - ## - ## If plugin accepts a rule operation, it *must* generate one of the reporting - ## events ``rule_{added,remove,error}`` to signal if it indeed worked out; - ## this is separate from accepting the operation because often a plugin - ## will only know later (i.e., asynchronously) if that was an error for - ## something it thought it could handle. - type Plugin: record { - ## Returns a descriptive name of the plugin instance, suitable for use in logging - ## messages. Note that this function is not optional. - name: function(state: PluginState) : string; - - ## If true, plugin can expire rules itself. If false, the NetControl - ## framework will manage rule expiration. - can_expire: bool; - - ## One-time initialization function called when plugin gets registered, and - ## before any other methods are called. - ## - ## If this function is provided, NetControl assumes that the plugin has to - ## perform, potentially lengthy, initialization before the plugin will become - ## active. In this case, the plugin has to call ``NetControl::plugin_activated``, - ## once initialization finishes. - init: function(state: PluginState) &optional; - - ## One-time finalization function called when a plugin is shutdown; no further - ## functions will be called afterwords. - done: function(state: PluginState) &optional; - - ## Implements the add_rule() operation. If the plugin accepts the rule, - ## it returns true, false otherwise. The rule will already have its - ## ``id`` field set, which the plugin may use for identification - ## purposes. - add_rule: function(state: PluginState, r: Rule) : bool &optional; - - ## Implements the remove_rule() operation. This will only be called for - ## rules that the plugin has previously accepted with add_rule(). The - ## ``id`` field will match that of the add_rule() call. Generally, - ## a plugin that accepts an add_rule() should also accept the - ## remove_rule(). - remove_rule: function(state: PluginState, r: Rule, reason: string) : bool &optional; - }; - - ## Table for a plugin to store instance-specific configuration information. - ## - ## Note, it would be nicer to pass the Plugin instance to all the below, instead - ## of this state table. However Bro's type resolver has trouble with refering to a - ## record type from inside itself. - redef record PluginState += { - ## The plugin that the state belongs to. (Defined separately - ## because of cyclic type dependency.) - plugin: Plugin &optional; - }; - -} diff --git a/scripts/base/frameworks/netcontrol/plugin.zeek b/scripts/base/frameworks/netcontrol/plugin.zeek new file mode 100644 index 0000000000..36d5a76173 --- /dev/null +++ b/scripts/base/frameworks/netcontrol/plugin.zeek @@ -0,0 +1,85 @@ +##! This file defines the plugin interface for NetControl. + +@load ./types + +module NetControl; + +export { + ## This record keeps the per instance state of a plugin. + ## + ## Individual plugins commonly extend this record to suit their needs. + type PluginState: record { + ## Table for a plugin to store custom, instance-specific state. + config: table[string] of string &default=table(); + + ## Unique plugin identifier -- used for backlookup of plugins from Rules. Set internally. + _id: count &optional; + + ## Set internally. + _priority: int &default=+0; + + ## Set internally. Signifies if the plugin has returned that it has activated successfully. 
+ _activated: bool &default=F; + }; + + ## Definition of a plugin. + ## + ## Generally a plugin needs to implement only what it can support. By + ## returning failure, it indicates that it can't support something and + ## the framework will then try another plugin, if available; or inform the + ## that the operation failed. If a function isn't implemented by a plugin, + ## that's considered an implicit failure to support the operation. + ## + ## If plugin accepts a rule operation, it *must* generate one of the reporting + ## events ``rule_{added,remove,error}`` to signal if it indeed worked out; + ## this is separate from accepting the operation because often a plugin + ## will only know later (i.e., asynchronously) if that was an error for + ## something it thought it could handle. + type Plugin: record { + ## Returns a descriptive name of the plugin instance, suitable for use in logging + ## messages. Note that this function is not optional. + name: function(state: PluginState) : string; + + ## If true, plugin can expire rules itself. If false, the NetControl + ## framework will manage rule expiration. + can_expire: bool; + + ## One-time initialization function called when plugin gets registered, and + ## before any other methods are called. + ## + ## If this function is provided, NetControl assumes that the plugin has to + ## perform, potentially lengthy, initialization before the plugin will become + ## active. In this case, the plugin has to call ``NetControl::plugin_activated``, + ## once initialization finishes. + init: function(state: PluginState) &optional; + + ## One-time finalization function called when a plugin is shutdown; no further + ## functions will be called afterwords. + done: function(state: PluginState) &optional; + + ## Implements the add_rule() operation. If the plugin accepts the rule, + ## it returns true, false otherwise. The rule will already have its + ## ``id`` field set, which the plugin may use for identification + ## purposes. + add_rule: function(state: PluginState, r: Rule) : bool &optional; + + ## Implements the remove_rule() operation. This will only be called for + ## rules that the plugin has previously accepted with add_rule(). The + ## ``id`` field will match that of the add_rule() call. Generally, + ## a plugin that accepts an add_rule() should also accept the + ## remove_rule(). + remove_rule: function(state: PluginState, r: Rule, reason: string) : bool &optional; + }; + + ## Table for a plugin to store instance-specific configuration information. + ## + ## Note, it would be nicer to pass the Plugin instance to all the below, instead + ## of this state table. However Zeek's type resolver has trouble with refering to a + ## record type from inside itself. + redef record PluginState += { + ## The plugin that the state belongs to. (Defined separately + ## because of cyclic type dependency.) + plugin: Plugin &optional; + }; + +} diff --git a/scripts/base/frameworks/netcontrol/plugins/__load__.bro b/scripts/base/frameworks/netcontrol/plugins/__load__.zeek similarity index 100% rename from scripts/base/frameworks/netcontrol/plugins/__load__.bro rename to scripts/base/frameworks/netcontrol/plugins/__load__.zeek diff --git a/scripts/base/frameworks/netcontrol/plugins/acld.bro b/scripts/base/frameworks/netcontrol/plugins/acld.bro deleted file mode 100644 index 99a9166ce9..0000000000 --- a/scripts/base/frameworks/netcontrol/plugins/acld.bro +++ /dev/null @@ -1,316 +0,0 @@ -##! Acld plugin for the netcontrol framework. 
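A purely illustrative sketch, not part of the patch itself: the Plugin record defined in plugin.zeek above is the entire surface a custom backend has to implement. All names here are hypothetical, the plugin only prints what it would do and confirms rules immediately, whereas real backends such as the acld and broker plugins below report back asynchronously.

function example_name(state: NetControl::PluginState) : string
	{
	return "example-print-only";
	}

function example_add_rule(state: NetControl::PluginState, r: NetControl::Rule) : bool
	{
	print fmt("example plugin would add rule %s", r$id);
	# A plugin must report back via rule_added/rule_exists/rule_error.
	event NetControl::rule_added(r, state, "");
	return T;
	}

function example_remove_rule(state: NetControl::PluginState, r: NetControl::Rule, reason: string) : bool
	{
	print fmt("example plugin would remove rule %s (%s)", r$id, reason);
	event NetControl::rule_removed(r, state, "");
	return T;
	}

global example_plugin = NetControl::Plugin(
	$name = example_name,
	$can_expire = F,
	$add_rule = example_add_rule,
	$remove_rule = example_remove_rule
	);

event NetControl::init()
	{
	NetControl::activate(NetControl::PluginState($plugin=example_plugin), 0);
	}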
- -module NetControl; - -@load ../main -@load ../plugin -@load base/frameworks/broker - -export { - type AclRule : record { - command: string; - cookie: count; - arg: string; - comment: string &optional; - }; - - type AcldConfig: record { - ## The acld topic to send events to. - acld_topic: string; - ## Broker host to connect to. - acld_host: addr; - ## Broker port to connect to. - acld_port: port; - ## Do we accept rules for the monitor path? Default false. - monitor: bool &default=F; - ## Do we accept rules for the forward path? Default true. - forward: bool &default=T; - - ## Predicate that is called on rule insertion or removal. - ## - ## p: Current plugin state. - ## - ## r: The rule to be inserted or removed. - ## - ## Returns: T if the rule can be handled by the current backend, F otherwise. - check_pred: function(p: PluginState, r: Rule): bool &optional; - }; - - ## Instantiates the acld plugin. - global create_acld: function(config: AcldConfig) : PluginState; - - redef record PluginState += { - acld_config: AcldConfig &optional; - ## The ID of this acld instance - for the mapping to PluginStates. - acld_id: count &optional; - }; - - ## Hook that is called after a rule is converted to an acld rule. - ## The hook may modify the rule before it is sent to acld. - ## Setting the acld command to F will cause the rule to be rejected - ## by the plugin. - ## - ## p: Current plugin state. - ## - ## r: The rule to be inserted or removed. - ## - ## ar: The acld rule to be inserted or removed. - global NetControl::acld_rule_policy: hook(p: PluginState, r: Rule, ar: AclRule); - - ## Events that are sent from us to Broker. - global acld_add_rule: event(id: count, r: Rule, ar: AclRule); - global acld_remove_rule: event(id: count, r: Rule, ar: AclRule); - - ## Events that are sent from Broker to us. 
- global acld_rule_added: event(id: count, r: Rule, msg: string); - global acld_rule_removed: event(id: count, r: Rule, msg: string); - global acld_rule_exists: event(id: count, r: Rule, msg: string); - global acld_rule_error: event(id: count, r: Rule, msg: string); -} - -global netcontrol_acld_peers: table[port, string] of PluginState; -global netcontrol_acld_topics: set[string] = set(); -global netcontrol_acld_id: table[count] of PluginState = table(); -global netcontrol_acld_current_id: count = 0; - -const acld_add_to_remove: table[string] of string = { - ["drop"] = "restore", - ["addwhitelist"] = "remwhitelist", - ["blockhosthost"] = "restorehosthost", - ["droptcpport"] = "restoretcpport", - ["dropudpport"] = "restoreudpport", - ["droptcpdsthostport"] ="restoretcpdsthostport", - ["dropudpdsthostport"] ="restoreudpdsthostport", - ["permittcpdsthostport"] ="unpermittcpdsthostport", - ["permitudpdsthostport"] ="unpermitudpdsthostport", - ["nullzero"] ="nonullzero" -}; - -event NetControl::acld_rule_added(id: count, r: Rule, msg: string) - { - if ( id !in netcontrol_acld_id ) - { - Reporter::error(fmt("NetControl acld plugin with id %d not found, aborting", id)); - return; - } - - local p = netcontrol_acld_id[id]; - - event NetControl::rule_added(r, p, msg); - } - -event NetControl::acld_rule_exists(id: count, r: Rule, msg: string) - { - if ( id !in netcontrol_acld_id ) - { - Reporter::error(fmt("NetControl acld plugin with id %d not found, aborting", id)); - return; - } - - local p = netcontrol_acld_id[id]; - - event NetControl::rule_exists(r, p, msg); - } - -event NetControl::acld_rule_removed(id: count, r: Rule, msg: string) - { - if ( id !in netcontrol_acld_id ) - { - Reporter::error(fmt("NetControl acld plugin with id %d not found, aborting", id)); - return; - } - - local p = netcontrol_acld_id[id]; - - event NetControl::rule_removed(r, p, msg); - } - -event NetControl::acld_rule_error(id: count, r: Rule, msg: string) - { - if ( id !in netcontrol_acld_id ) - { - Reporter::error(fmt("NetControl acld plugin with id %d not found, aborting", id)); - return; - } - - local p = netcontrol_acld_id[id]; - - event NetControl::rule_error(r, p, msg); - } - -function acld_name(p: PluginState) : string - { - return fmt("Acld-%s", p$acld_config$acld_topic); - } - -# check that subnet specifies an addr -function check_sn(sn: subnet) : bool - { - if ( is_v4_subnet(sn) && subnet_width(sn) == 32 ) - return T; - if ( is_v6_subnet(sn) && subnet_width(sn) == 128 ) - return T; - - Reporter::error(fmt("Acld: rule_to_acl_rule was given a subnet that does not specify a distinct address where needed - %s", sn)); - return F; - } - -function rule_to_acl_rule(p: PluginState, r: Rule) : AclRule - { - local e = r$entity; - - local command: string = ""; - local arg: string = ""; - - if ( e$ty == ADDRESS ) - { - if ( r$ty == DROP ) - command = "drop"; - else if ( r$ty == WHITELIST ) - command = "addwhitelist"; - arg = cat(e$ip); - } - else if ( e$ty == FLOW ) - { - local f = e$flow; - if ( ( ! f?$src_h ) && ( ! f?$src_p ) && f?$dst_h && f?$dst_p && ( ! f?$src_m ) && ( ! 
f?$dst_m ) ) - { - if ( !check_sn(f$dst_h) ) - command = ""; # invalid addr, do nothing - else if ( is_tcp_port(f$dst_p) && r$ty == DROP ) - command = "droptcpdsthostport"; - else if ( is_tcp_port(f$dst_p) && r$ty == WHITELIST ) - command = "permittcpdsthostport"; - else if ( is_udp_port(f$dst_p) && r$ty == DROP) - command = "dropucpdsthostport"; - else if ( is_udp_port(f$dst_p) && r$ty == WHITELIST) - command = "permitucpdsthostport"; - - arg = fmt("%s %d", subnet_to_addr(f$dst_h), f$dst_p); - } - else if ( f?$src_h && ( ! f?$src_p ) && f?$dst_h && ( ! f?$dst_p ) && ( ! f?$src_m ) && ( ! f?$dst_m ) ) - { - if ( !check_sn(f$src_h) || !check_sn(f$dst_h) ) - command = ""; - else if ( r$ty == DROP ) - command = "blockhosthost"; - arg = fmt("%s %s", subnet_to_addr(f$src_h), subnet_to_addr(f$dst_h)); - } - else if ( ( ! f?$src_h ) && ( ! f?$src_p ) && ( ! f?$dst_h ) && f?$dst_p && ( ! f?$src_m ) && ( ! f?$dst_m ) ) - { - if ( is_tcp_port(f$dst_p) && r$ty == DROP ) - command = "droptcpport"; - else if ( is_udp_port(f$dst_p) && r$ty == DROP ) - command = "dropudpport"; - arg = fmt("%d", f$dst_p); - } - } - - local ar = AclRule($command=command, $cookie=r$cid, $arg=arg); - if ( r?$location ) - ar$comment = r$location; - - hook NetControl::acld_rule_policy(p, r, ar); - - return ar; - } - -function acld_check_rule(p: PluginState, r: Rule) : bool - { - local c = p$acld_config; - - if ( p$acld_config?$check_pred ) - return p$acld_config$check_pred(p, r); - - if ( r$target == MONITOR && c$monitor ) - return T; - - if ( r$target == FORWARD && c$forward ) - return T; - - return F; - } - -function acld_add_rule_fun(p: PluginState, r: Rule) : bool - { - if ( ! acld_check_rule(p, r) ) - return F; - - local ar = rule_to_acl_rule(p, r); - - if ( ar$command == "" ) - return F; - - Broker::publish(p$acld_config$acld_topic, acld_add_rule, p$acld_id, r, ar); - return T; - } - -function acld_remove_rule_fun(p: PluginState, r: Rule, reason: string) : bool - { - if ( ! acld_check_rule(p, r) ) - return F; - - local ar = rule_to_acl_rule(p, r); - if ( ar$command in acld_add_to_remove ) - ar$command = acld_add_to_remove[ar$command]; - else - return F; - - if ( reason != "" ) - { - if ( ar?$comment ) - ar$comment = fmt("%s (%s)", reason, ar$comment); - else - ar$comment = reason; - } - - Broker::publish(p$acld_config$acld_topic, acld_remove_rule, p$acld_id, r, ar); - return T; - } - -function acld_init(p: PluginState) - { - Broker::subscribe(p$acld_config$acld_topic); - Broker::peer(cat(p$acld_config$acld_host), p$acld_config$acld_port); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - local peer_address = cat(endpoint$network$address); - local peer_port = endpoint$network$bound_port; - if ( [peer_port, peer_address] !in netcontrol_acld_peers ) - # ok, this one was none of ours... - return; - - local p = netcontrol_acld_peers[peer_port, peer_address]; - plugin_activated(p); - } - -global acld_plugin = Plugin( - $name=acld_name, - $can_expire = F, - $add_rule = acld_add_rule_fun, - $remove_rule = acld_remove_rule_fun, - $init = acld_init - ); - -function create_acld(config: AcldConfig) : PluginState - { - if ( config$acld_topic in netcontrol_acld_topics ) - Reporter::warning(fmt("Topic %s was added to NetControl acld plugin twice. 
Possible duplication of commands", config$acld_topic)); - else - add netcontrol_acld_topics[config$acld_topic]; - - local host = cat(config$acld_host); - local p: PluginState = [$acld_config=config, $plugin=acld_plugin, $acld_id=netcontrol_acld_current_id]; - - if ( [config$acld_port, host] in netcontrol_acld_peers ) - Reporter::warning(fmt("Peer %s:%s was added to NetControl acld plugin twice.", host, config$acld_port)); - else - netcontrol_acld_peers[config$acld_port, host] = p; - - netcontrol_acld_id[netcontrol_acld_current_id] = p; - ++netcontrol_acld_current_id; - - return p; - } diff --git a/scripts/base/frameworks/netcontrol/plugins/acld.zeek b/scripts/base/frameworks/netcontrol/plugins/acld.zeek new file mode 100644 index 0000000000..b985fefc51 --- /dev/null +++ b/scripts/base/frameworks/netcontrol/plugins/acld.zeek @@ -0,0 +1,316 @@ +##! Acld plugin for the netcontrol framework. + +@load ../main +@load ../plugin +@load base/frameworks/broker + +module NetControl; + +export { + type AclRule : record { + command: string; + cookie: count; + arg: string; + comment: string &optional; + }; + + type AcldConfig: record { + ## The acld topic to send events to. + acld_topic: string; + ## Broker host to connect to. + acld_host: addr; + ## Broker port to connect to. + acld_port: port; + ## Do we accept rules for the monitor path? Default false. + monitor: bool &default=F; + ## Do we accept rules for the forward path? Default true. + forward: bool &default=T; + + ## Predicate that is called on rule insertion or removal. + ## + ## p: Current plugin state. + ## + ## r: The rule to be inserted or removed. + ## + ## Returns: T if the rule can be handled by the current backend, F otherwise. + check_pred: function(p: PluginState, r: Rule): bool &optional; + }; + + ## Instantiates the acld plugin. + global create_acld: function(config: AcldConfig) : PluginState; + + redef record PluginState += { + acld_config: AcldConfig &optional; + ## The ID of this acld instance - for the mapping to PluginStates. + acld_id: count &optional; + }; + + ## Hook that is called after a rule is converted to an acld rule. + ## The hook may modify the rule before it is sent to acld. + ## Setting the acld command to F will cause the rule to be rejected + ## by the plugin. + ## + ## p: Current plugin state. + ## + ## r: The rule to be inserted or removed. + ## + ## ar: The acld rule to be inserted or removed. + global NetControl::acld_rule_policy: hook(p: PluginState, r: Rule, ar: AclRule); + + ## Events that are sent from us to Broker. + global acld_add_rule: event(id: count, r: Rule, ar: AclRule); + global acld_remove_rule: event(id: count, r: Rule, ar: AclRule); + + ## Events that are sent from Broker to us. 
+ global acld_rule_added: event(id: count, r: Rule, msg: string); + global acld_rule_removed: event(id: count, r: Rule, msg: string); + global acld_rule_exists: event(id: count, r: Rule, msg: string); + global acld_rule_error: event(id: count, r: Rule, msg: string); +} + +global netcontrol_acld_peers: table[port, string] of PluginState; +global netcontrol_acld_topics: set[string] = set(); +global netcontrol_acld_id: table[count] of PluginState = table(); +global netcontrol_acld_current_id: count = 0; + +const acld_add_to_remove: table[string] of string = { + ["drop"] = "restore", + ["addwhitelist"] = "remwhitelist", + ["blockhosthost"] = "restorehosthost", + ["droptcpport"] = "restoretcpport", + ["dropudpport"] = "restoreudpport", + ["droptcpdsthostport"] ="restoretcpdsthostport", + ["dropudpdsthostport"] ="restoreudpdsthostport", + ["permittcpdsthostport"] ="unpermittcpdsthostport", + ["permitudpdsthostport"] ="unpermitudpdsthostport", + ["nullzero"] ="nonullzero" +}; + +event NetControl::acld_rule_added(id: count, r: Rule, msg: string) + { + if ( id !in netcontrol_acld_id ) + { + Reporter::error(fmt("NetControl acld plugin with id %d not found, aborting", id)); + return; + } + + local p = netcontrol_acld_id[id]; + + event NetControl::rule_added(r, p, msg); + } + +event NetControl::acld_rule_exists(id: count, r: Rule, msg: string) + { + if ( id !in netcontrol_acld_id ) + { + Reporter::error(fmt("NetControl acld plugin with id %d not found, aborting", id)); + return; + } + + local p = netcontrol_acld_id[id]; + + event NetControl::rule_exists(r, p, msg); + } + +event NetControl::acld_rule_removed(id: count, r: Rule, msg: string) + { + if ( id !in netcontrol_acld_id ) + { + Reporter::error(fmt("NetControl acld plugin with id %d not found, aborting", id)); + return; + } + + local p = netcontrol_acld_id[id]; + + event NetControl::rule_removed(r, p, msg); + } + +event NetControl::acld_rule_error(id: count, r: Rule, msg: string) + { + if ( id !in netcontrol_acld_id ) + { + Reporter::error(fmt("NetControl acld plugin with id %d not found, aborting", id)); + return; + } + + local p = netcontrol_acld_id[id]; + + event NetControl::rule_error(r, p, msg); + } + +function acld_name(p: PluginState) : string + { + return fmt("Acld-%s", p$acld_config$acld_topic); + } + +# check that subnet specifies an addr +function check_sn(sn: subnet) : bool + { + if ( is_v4_subnet(sn) && subnet_width(sn) == 32 ) + return T; + if ( is_v6_subnet(sn) && subnet_width(sn) == 128 ) + return T; + + Reporter::error(fmt("Acld: rule_to_acl_rule was given a subnet that does not specify a distinct address where needed - %s", sn)); + return F; + } + +function rule_to_acl_rule(p: PluginState, r: Rule) : AclRule + { + local e = r$entity; + + local command: string = ""; + local arg: string = ""; + + if ( e$ty == ADDRESS ) + { + if ( r$ty == DROP ) + command = "drop"; + else if ( r$ty == WHITELIST ) + command = "addwhitelist"; + arg = cat(e$ip); + } + else if ( e$ty == FLOW ) + { + local f = e$flow; + if ( ( ! f?$src_h ) && ( ! f?$src_p ) && f?$dst_h && f?$dst_p && ( ! f?$src_m ) && ( ! 
f?$dst_m ) ) + { + if ( !check_sn(f$dst_h) ) + command = ""; # invalid addr, do nothing + else if ( is_tcp_port(f$dst_p) && r$ty == DROP ) + command = "droptcpdsthostport"; + else if ( is_tcp_port(f$dst_p) && r$ty == WHITELIST ) + command = "permittcpdsthostport"; + else if ( is_udp_port(f$dst_p) && r$ty == DROP) + command = "dropucpdsthostport"; + else if ( is_udp_port(f$dst_p) && r$ty == WHITELIST) + command = "permitucpdsthostport"; + + arg = fmt("%s %d", subnet_to_addr(f$dst_h), f$dst_p); + } + else if ( f?$src_h && ( ! f?$src_p ) && f?$dst_h && ( ! f?$dst_p ) && ( ! f?$src_m ) && ( ! f?$dst_m ) ) + { + if ( !check_sn(f$src_h) || !check_sn(f$dst_h) ) + command = ""; + else if ( r$ty == DROP ) + command = "blockhosthost"; + arg = fmt("%s %s", subnet_to_addr(f$src_h), subnet_to_addr(f$dst_h)); + } + else if ( ( ! f?$src_h ) && ( ! f?$src_p ) && ( ! f?$dst_h ) && f?$dst_p && ( ! f?$src_m ) && ( ! f?$dst_m ) ) + { + if ( is_tcp_port(f$dst_p) && r$ty == DROP ) + command = "droptcpport"; + else if ( is_udp_port(f$dst_p) && r$ty == DROP ) + command = "dropudpport"; + arg = fmt("%d", f$dst_p); + } + } + + local ar = AclRule($command=command, $cookie=r$cid, $arg=arg); + if ( r?$location ) + ar$comment = r$location; + + hook NetControl::acld_rule_policy(p, r, ar); + + return ar; + } + +function acld_check_rule(p: PluginState, r: Rule) : bool + { + local c = p$acld_config; + + if ( p$acld_config?$check_pred ) + return p$acld_config$check_pred(p, r); + + if ( r$target == MONITOR && c$monitor ) + return T; + + if ( r$target == FORWARD && c$forward ) + return T; + + return F; + } + +function acld_add_rule_fun(p: PluginState, r: Rule) : bool + { + if ( ! acld_check_rule(p, r) ) + return F; + + local ar = rule_to_acl_rule(p, r); + + if ( ar$command == "" ) + return F; + + Broker::publish(p$acld_config$acld_topic, acld_add_rule, p$acld_id, r, ar); + return T; + } + +function acld_remove_rule_fun(p: PluginState, r: Rule, reason: string) : bool + { + if ( ! acld_check_rule(p, r) ) + return F; + + local ar = rule_to_acl_rule(p, r); + if ( ar$command in acld_add_to_remove ) + ar$command = acld_add_to_remove[ar$command]; + else + return F; + + if ( reason != "" ) + { + if ( ar?$comment ) + ar$comment = fmt("%s (%s)", reason, ar$comment); + else + ar$comment = reason; + } + + Broker::publish(p$acld_config$acld_topic, acld_remove_rule, p$acld_id, r, ar); + return T; + } + +function acld_init(p: PluginState) + { + Broker::subscribe(p$acld_config$acld_topic); + Broker::peer(cat(p$acld_config$acld_host), p$acld_config$acld_port); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + local peer_address = cat(endpoint$network$address); + local peer_port = endpoint$network$bound_port; + if ( [peer_port, peer_address] !in netcontrol_acld_peers ) + # ok, this one was none of ours... + return; + + local p = netcontrol_acld_peers[peer_port, peer_address]; + plugin_activated(p); + } + +global acld_plugin = Plugin( + $name=acld_name, + $can_expire = F, + $add_rule = acld_add_rule_fun, + $remove_rule = acld_remove_rule_fun, + $init = acld_init + ); + +function create_acld(config: AcldConfig) : PluginState + { + if ( config$acld_topic in netcontrol_acld_topics ) + Reporter::warning(fmt("Topic %s was added to NetControl acld plugin twice. 
Possible duplication of commands", config$acld_topic)); + else + add netcontrol_acld_topics[config$acld_topic]; + + local host = cat(config$acld_host); + local p: PluginState = [$acld_config=config, $plugin=acld_plugin, $acld_id=netcontrol_acld_current_id]; + + if ( [config$acld_port, host] in netcontrol_acld_peers ) + Reporter::warning(fmt("Peer %s:%s was added to NetControl acld plugin twice.", host, config$acld_port)); + else + netcontrol_acld_peers[config$acld_port, host] = p; + + netcontrol_acld_id[netcontrol_acld_current_id] = p; + ++netcontrol_acld_current_id; + + return p; + } diff --git a/scripts/base/frameworks/netcontrol/plugins/broker.bro b/scripts/base/frameworks/netcontrol/plugins/broker.bro deleted file mode 100644 index 4bfb231c94..0000000000 --- a/scripts/base/frameworks/netcontrol/plugins/broker.bro +++ /dev/null @@ -1,220 +0,0 @@ -##! Broker plugin for the NetControl framework. Sends the raw data structures -##! used in NetControl on to Broker to allow for easy handling, e.g., of -##! command-line scripts. - -module NetControl; - -@load ../main -@load ../plugin -@load base/frameworks/broker - -export { - ## This record specifies the configuration that is passed to :bro:see:`NetControl::create_broker`. - type BrokerConfig: record { - ## The broker topic to send events to. - topic: string &optional; - ## Broker host to connect to. - host: addr &optional; - ## Broker port to connect to. - bport: port &optional; - - ## Do we accept rules for the monitor path? Default true. - monitor: bool &default=T; - ## Do we accept rules for the forward path? Default true. - forward: bool &default=T; - - ## Predicate that is called on rule insertion or removal. - ## - ## p: Current plugin state. - ## - ## r: The rule to be inserted or removed. - ## - ## Returns: T if the rule can be handled by the current backend, F otherwise. - check_pred: function(p: PluginState, r: Rule): bool &optional; - }; - - ## Instantiates the broker plugin. - global create_broker: function(config: BrokerConfig, can_expire: bool) : PluginState; - - redef record PluginState += { - ## OpenFlow controller for NetControl Broker plugin. - broker_config: BrokerConfig &optional; - ## The ID of this broker instance - for the mapping to PluginStates. 
- broker_id: count &optional; - }; - - global broker_add_rule: event(id: count, r: Rule); - global broker_remove_rule: event(id: count, r: Rule, reason: string); - - global broker_rule_added: event(id: count, r: Rule, msg: string); - global broker_rule_removed: event(id: count, r: Rule, msg: string); - global broker_rule_exists: event(id: count, r: Rule, msg: string); - global broker_rule_error: event(id: count, r: Rule, msg: string); - global broker_rule_timeout: event(id: count, r: Rule, i: FlowInfo); -} - -global netcontrol_broker_peers: table[port, string] of PluginState; -global netcontrol_broker_topics: set[string] = set(); -global netcontrol_broker_id: table[count] of PluginState = table(); -global netcontrol_broker_current_id: count = 0; - -event NetControl::broker_rule_added(id: count, r: Rule, msg: string) - { - if ( id !in netcontrol_broker_id ) - { - Reporter::error(fmt("NetControl broker plugin with id %d not found, aborting", id)); - return; - } - - local p = netcontrol_broker_id[id]; - - event NetControl::rule_added(r, p, msg); - } - -event NetControl::broker_rule_exists(id: count, r: Rule, msg: string) - { - if ( id !in netcontrol_broker_id ) - { - Reporter::error(fmt("NetControl broker plugin with id %d not found, aborting", id)); - return; - } - - local p = netcontrol_broker_id[id]; - - event NetControl::rule_exists(r, p, msg); - } - -event NetControl::broker_rule_removed(id: count, r: Rule, msg: string) - { - if ( id !in netcontrol_broker_id ) - { - Reporter::error(fmt("NetControl broker plugin with id %d not found, aborting", id)); - return; - } - - local p = netcontrol_broker_id[id]; - - event NetControl::rule_removed(r, p, msg); - } - -event NetControl::broker_rule_error(id: count, r: Rule, msg: string) - { - if ( id !in netcontrol_broker_id ) - { - Reporter::error(fmt("NetControl broker plugin with id %d not found, aborting", id)); - return; - } - - local p = netcontrol_broker_id[id]; - - event NetControl::rule_error(r, p, msg); - } - -event NetControl::broker_rule_timeout(id: count, r: Rule, i: FlowInfo) - { - if ( id !in netcontrol_broker_id ) - { - Reporter::error(fmt("NetControl broker plugin with id %d not found, aborting", id)); - return; - } - - local p = netcontrol_broker_id[id]; - - event NetControl::rule_timeout(r, i, p); - } - -function broker_name(p: PluginState) : string - { - return fmt("Broker-%s", p$broker_config$topic); - } - -function broker_check_rule(p: PluginState, r: Rule) : bool - { - local c = p$broker_config; - - if ( p$broker_config?$check_pred ) - return p$broker_config$check_pred(p, r); - - if ( r$target == MONITOR && c$monitor ) - return T; - - if ( r$target == FORWARD && c$forward ) - return T; - - return F; - } - -function broker_add_rule_fun(p: PluginState, r: Rule) : bool - { - if ( ! broker_check_rule(p, r) ) - return F; - - Broker::publish(p$broker_config$topic, Broker::make_event(broker_add_rule, p$broker_id, r)); - return T; - } - -function broker_remove_rule_fun(p: PluginState, r: Rule, reason: string) : bool - { - if ( ! 
broker_check_rule(p, r) ) - return F; - - Broker::publish(p$broker_config$topic, Broker::make_event(broker_remove_rule, p$broker_id, r, reason)); - return T; - } - -function broker_init(p: PluginState) - { - Broker::subscribe(p$broker_config$topic); - Broker::peer(cat(p$broker_config$host), p$broker_config$bport); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - local peer_address = cat(endpoint$network$address); - local peer_port = endpoint$network$bound_port; - if ( [peer_port, peer_address] !in netcontrol_broker_peers ) - return; - - local p = netcontrol_broker_peers[peer_port, peer_address]; - plugin_activated(p); - } - -global broker_plugin = Plugin( - $name=broker_name, - $can_expire = F, - $add_rule = broker_add_rule_fun, - $remove_rule = broker_remove_rule_fun, - $init = broker_init - ); - -global broker_plugin_can_expire = Plugin( - $name=broker_name, - $can_expire = T, - $add_rule = broker_add_rule_fun, - $remove_rule = broker_remove_rule_fun, - $init = broker_init - ); - -function create_broker(config: BrokerConfig, can_expire: bool) : PluginState - { - if ( config$topic in netcontrol_broker_topics ) - Reporter::warning(fmt("Topic %s was added to NetControl broker plugin twice. Possible duplication of commands", config$topic)); - else - add netcontrol_broker_topics[config$topic]; - - local plugin = broker_plugin; - if ( can_expire ) - plugin = broker_plugin_can_expire; - - local p = PluginState($plugin=plugin, $broker_id=netcontrol_broker_current_id, $broker_config=config); - - if ( [config$bport, cat(config$host)] in netcontrol_broker_peers ) - Reporter::warning(fmt("Peer %s:%s was added to NetControl broker plugin twice.", config$host, config$bport)); - else - netcontrol_broker_peers[config$bport, cat(config$host)] = p; - - netcontrol_broker_id[netcontrol_broker_current_id] = p; - ++netcontrol_broker_current_id; - - return p; - } diff --git a/scripts/base/frameworks/netcontrol/plugins/broker.zeek b/scripts/base/frameworks/netcontrol/plugins/broker.zeek new file mode 100644 index 0000000000..92384cc183 --- /dev/null +++ b/scripts/base/frameworks/netcontrol/plugins/broker.zeek @@ -0,0 +1,220 @@ +##! Broker plugin for the NetControl framework. Sends the raw data structures +##! used in NetControl on to Broker to allow for easy handling, e.g., of +##! command-line scripts. + +@load ../main +@load ../plugin +@load base/frameworks/broker + +module NetControl; + +export { + ## This record specifies the configuration that is passed to :zeek:see:`NetControl::create_broker`. + type BrokerConfig: record { + ## The broker topic to send events to. + topic: string &optional; + ## Broker host to connect to. + host: addr &optional; + ## Broker port to connect to. + bport: port &optional; + + ## Do we accept rules for the monitor path? Default true. + monitor: bool &default=T; + ## Do we accept rules for the forward path? Default true. + forward: bool &default=T; + + ## Predicate that is called on rule insertion or removal. + ## + ## p: Current plugin state. + ## + ## r: The rule to be inserted or removed. + ## + ## Returns: T if the rule can be handled by the current backend, F otherwise. + check_pred: function(p: PluginState, r: Rule): bool &optional; + }; + + ## Instantiates the broker plugin. + global create_broker: function(config: BrokerConfig, can_expire: bool) : PluginState; + + redef record PluginState += { + ## OpenFlow controller for NetControl Broker plugin. 
+ broker_config: BrokerConfig &optional; + ## The ID of this broker instance - for the mapping to PluginStates. + broker_id: count &optional; + }; + + global broker_add_rule: event(id: count, r: Rule); + global broker_remove_rule: event(id: count, r: Rule, reason: string); + + global broker_rule_added: event(id: count, r: Rule, msg: string); + global broker_rule_removed: event(id: count, r: Rule, msg: string); + global broker_rule_exists: event(id: count, r: Rule, msg: string); + global broker_rule_error: event(id: count, r: Rule, msg: string); + global broker_rule_timeout: event(id: count, r: Rule, i: FlowInfo); +} + +global netcontrol_broker_peers: table[port, string] of PluginState; +global netcontrol_broker_topics: set[string] = set(); +global netcontrol_broker_id: table[count] of PluginState = table(); +global netcontrol_broker_current_id: count = 0; + +event NetControl::broker_rule_added(id: count, r: Rule, msg: string) + { + if ( id !in netcontrol_broker_id ) + { + Reporter::error(fmt("NetControl broker plugin with id %d not found, aborting", id)); + return; + } + + local p = netcontrol_broker_id[id]; + + event NetControl::rule_added(r, p, msg); + } + +event NetControl::broker_rule_exists(id: count, r: Rule, msg: string) + { + if ( id !in netcontrol_broker_id ) + { + Reporter::error(fmt("NetControl broker plugin with id %d not found, aborting", id)); + return; + } + + local p = netcontrol_broker_id[id]; + + event NetControl::rule_exists(r, p, msg); + } + +event NetControl::broker_rule_removed(id: count, r: Rule, msg: string) + { + if ( id !in netcontrol_broker_id ) + { + Reporter::error(fmt("NetControl broker plugin with id %d not found, aborting", id)); + return; + } + + local p = netcontrol_broker_id[id]; + + event NetControl::rule_removed(r, p, msg); + } + +event NetControl::broker_rule_error(id: count, r: Rule, msg: string) + { + if ( id !in netcontrol_broker_id ) + { + Reporter::error(fmt("NetControl broker plugin with id %d not found, aborting", id)); + return; + } + + local p = netcontrol_broker_id[id]; + + event NetControl::rule_error(r, p, msg); + } + +event NetControl::broker_rule_timeout(id: count, r: Rule, i: FlowInfo) + { + if ( id !in netcontrol_broker_id ) + { + Reporter::error(fmt("NetControl broker plugin with id %d not found, aborting", id)); + return; + } + + local p = netcontrol_broker_id[id]; + + event NetControl::rule_timeout(r, i, p); + } + +function broker_name(p: PluginState) : string + { + return fmt("Broker-%s", p$broker_config$topic); + } + +function broker_check_rule(p: PluginState, r: Rule) : bool + { + local c = p$broker_config; + + if ( p$broker_config?$check_pred ) + return p$broker_config$check_pred(p, r); + + if ( r$target == MONITOR && c$monitor ) + return T; + + if ( r$target == FORWARD && c$forward ) + return T; + + return F; + } + +function broker_add_rule_fun(p: PluginState, r: Rule) : bool + { + if ( ! broker_check_rule(p, r) ) + return F; + + Broker::publish(p$broker_config$topic, Broker::make_event(broker_add_rule, p$broker_id, r)); + return T; + } + +function broker_remove_rule_fun(p: PluginState, r: Rule, reason: string) : bool + { + if ( ! 
broker_check_rule(p, r) ) + return F; + + Broker::publish(p$broker_config$topic, Broker::make_event(broker_remove_rule, p$broker_id, r, reason)); + return T; + } + +function broker_init(p: PluginState) + { + Broker::subscribe(p$broker_config$topic); + Broker::peer(cat(p$broker_config$host), p$broker_config$bport); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + local peer_address = cat(endpoint$network$address); + local peer_port = endpoint$network$bound_port; + if ( [peer_port, peer_address] !in netcontrol_broker_peers ) + return; + + local p = netcontrol_broker_peers[peer_port, peer_address]; + plugin_activated(p); + } + +global broker_plugin = Plugin( + $name=broker_name, + $can_expire = F, + $add_rule = broker_add_rule_fun, + $remove_rule = broker_remove_rule_fun, + $init = broker_init + ); + +global broker_plugin_can_expire = Plugin( + $name=broker_name, + $can_expire = T, + $add_rule = broker_add_rule_fun, + $remove_rule = broker_remove_rule_fun, + $init = broker_init + ); + +function create_broker(config: BrokerConfig, can_expire: bool) : PluginState + { + if ( config$topic in netcontrol_broker_topics ) + Reporter::warning(fmt("Topic %s was added to NetControl broker plugin twice. Possible duplication of commands", config$topic)); + else + add netcontrol_broker_topics[config$topic]; + + local plugin = broker_plugin; + if ( can_expire ) + plugin = broker_plugin_can_expire; + + local p = PluginState($plugin=plugin, $broker_id=netcontrol_broker_current_id, $broker_config=config); + + if ( [config$bport, cat(config$host)] in netcontrol_broker_peers ) + Reporter::warning(fmt("Peer %s:%s was added to NetControl broker plugin twice.", config$host, config$bport)); + else + netcontrol_broker_peers[config$bport, cat(config$host)] = p; + + netcontrol_broker_id[netcontrol_broker_current_id] = p; + ++netcontrol_broker_current_id; + + return p; + } diff --git a/scripts/base/frameworks/netcontrol/plugins/debug.bro b/scripts/base/frameworks/netcontrol/plugins/debug.zeek similarity index 100% rename from scripts/base/frameworks/netcontrol/plugins/debug.bro rename to scripts/base/frameworks/netcontrol/plugins/debug.zeek diff --git a/scripts/base/frameworks/netcontrol/plugins/openflow.bro b/scripts/base/frameworks/netcontrol/plugins/openflow.bro deleted file mode 100644 index f1403a70a8..0000000000 --- a/scripts/base/frameworks/netcontrol/plugins/openflow.bro +++ /dev/null @@ -1,454 +0,0 @@ -##! OpenFlow plugin for the NetControl framework. - -@load ../main -@load ../plugin -@load base/frameworks/openflow - -module NetControl; - -export { - ## This record specifies the configuration that is passed to :bro:see:`NetControl::create_openflow`. - type OfConfig: record { - monitor: bool &default=T; ##< Accept rules that target the monitor path. - forward: bool &default=T; ##< Accept rules that target the forward path. - idle_timeout: count &default=0; ##< Default OpenFlow idle timeout. - table_id: count &optional; ##< Default OpenFlow table ID. - priority_offset: int &default=+0; ##< Add this to all rule priorities. Can be useful if you want the openflow priorities be offset from the netcontrol priorities without having to write a filter function. - - ## Predicate that is called on rule insertion or removal. - ## - ## p: Current plugin state. - ## - ## r: The rule to be inserted or removed. - ## - ## Returns: T if the rule can be handled by the current backend, F otherwise. 
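# Illustrative usage sketch, not part of the patch: instantiating the Broker
# backend defined above, with a check_pred of the shape the surrounding
# docstrings describe (the Broker and OpenFlow configs share this predicate
# signature). Topic, host, and port are placeholders; NetControl::init() and
# NetControl::activate() come from the framework's main.zeek.
function example_only_address_drops(p: NetControl::PluginState, r: NetControl::Rule): bool
	{
	# Only hand address drops to this backend; everything else falls through.
	return r$ty == NetControl::DROP && r$entity$ty == NetControl::ADDRESS;
	}

event NetControl::init()
	{
	local bc = NetControl::BrokerConfig($topic="zeek/netcontrol-example",
	    $host=127.0.0.1, $bport=9977/tcp, $check_pred=example_only_address_drops);
	NetControl::activate(NetControl::create_broker(bc, F), 0);
	}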
- check_pred: function(p: PluginState, r: Rule): bool &optional; - - ## This predicate is called each time an OpenFlow match record is created. - ## The predicate can modify the match structure before it is sent on to the - ## device. - ## - ## p: Current plugin state. - ## - ## r: The rule to be inserted or removed. - ## - ## m: The openflow match structures that were generated for this rules. - ## - ## Returns: The modified OpenFlow match structures that will be used in place of the structures passed in m. - match_pred: function(p: PluginState, e: Entity, m: vector of OpenFlow::ofp_match): vector of OpenFlow::ofp_match &optional; - - ## This predicate is called before a FlowMod message is sent to the OpenFlow - ## device. It can modify the FlowMod message before it is passed on. - ## - ## p: Current plugin state. - ## - ## r: The rule to be inserted or removed. - ## - ## m: The OpenFlow FlowMod message. - ## - ## Returns: The modified FlowMod message that is used in lieu of m. - flow_mod_pred: function(p: PluginState, r: Rule, m: OpenFlow::ofp_flow_mod): OpenFlow::ofp_flow_mod &optional; - }; - - redef record PluginState += { - ## OpenFlow controller for NetControl OpenFlow plugin. - of_controller: OpenFlow::Controller &optional; - ## OpenFlow configuration record that is passed on initialization. - of_config: OfConfig &optional; - }; - - type OfTable: record { - p: PluginState; - r: Rule; - c: count &default=0; # how many replies did we see so far? needed for ids where we have multiple rules... - packet_count: count &default=0; - byte_count: count &default=0; - duration_sec: double &default=0.0; - }; - - ## The time interval after which an openflow message is considered to be timed out - ## and we delete it from our internal tracking. - const openflow_message_timeout = 20secs &redef; - - ## The time interval after we consider a flow timed out. This should be fairly high (or - ## even disabled) if you expect a lot of long flows. However, one also will have state - ## buildup for quite a while if keeping this around... - const openflow_flow_timeout = 24hrs &redef; - - ## Instantiates an openflow plugin for the NetControl framework. 
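# Illustrative usage sketch, not part of the patch: one way to instantiate the
# OpenFlow backend, assuming the stock logging controller OpenFlow::log_new();
# a real deployment would pass its own OpenFlow::Controller. The datapath id
# and idle timeout are placeholders.
event NetControl::init()
	{
	local controller = OpenFlow::log_new(42);
	local of_config = NetControl::OfConfig($monitor=T, $forward=T, $idle_timeout=30);
	NetControl::activate(NetControl::create_openflow(controller, of_config), 0);
	}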
- global create_openflow: function(controller: OpenFlow::Controller, config: OfConfig &default=[]) : PluginState; -} - -global of_messages: table[count, OpenFlow::ofp_flow_mod_command] of OfTable &create_expire=openflow_message_timeout - &expire_func=function(t: table[count, OpenFlow::ofp_flow_mod_command] of OfTable, idx: any): interval - { - local rid: count; - local command: OpenFlow::ofp_flow_mod_command; - [rid, command] = idx; - - local p = t[rid, command]$p; - local r = t[rid, command]$r; - event NetControl::rule_error(r, p, "Timeout during rule insertion/removal"); - return 0secs; - }; - -global of_flows: table[count] of OfTable &create_expire=openflow_flow_timeout; -global of_instances: table[string] of PluginState; - -function openflow_name(p: PluginState) : string - { - return fmt("Openflow-%s", p$of_controller$describe(p$of_controller$state)); - } - -function openflow_check_rule(p: PluginState, r: Rule) : bool - { - local c = p$of_config; - - if ( p$of_config?$check_pred ) - return p$of_config$check_pred(p, r); - - if ( r$target == MONITOR && c$monitor ) - return T; - - if ( r$target == FORWARD && c$forward ) - return T; - - return F; - } - -function openflow_match_pred(p: PluginState, e: Entity, m: vector of OpenFlow::ofp_match) : vector of OpenFlow::ofp_match - { - if ( p$of_config?$match_pred ) - return p$of_config$match_pred(p, e, m); - - return m; - } - -function openflow_flow_mod_pred(p: PluginState, r: Rule, m: OpenFlow::ofp_flow_mod): OpenFlow::ofp_flow_mod - { - if ( p$of_config?$flow_mod_pred ) - return p$of_config$flow_mod_pred(p, r, m); - - return m; - } - -function determine_dl_type(s: subnet): count - { - local pdl = OpenFlow::ETH_IPv4; - if ( is_v6_subnet(s) ) - pdl = OpenFlow::ETH_IPv6; - - return pdl; - } - -function determine_proto(p: port): count - { - local proto = OpenFlow::IP_TCP; - if ( is_udp_port(p) ) - proto = OpenFlow::IP_UDP; - else if ( is_icmp_port(p) ) - proto = OpenFlow::IP_ICMP; - - return proto; - } - -function entity_to_match(p: PluginState, e: Entity): vector of OpenFlow::ofp_match - { - local v : vector of OpenFlow::ofp_match = vector(); - - if ( e$ty == CONNECTION ) - { - v += OpenFlow::match_conn(e$conn); # forward and... 
- v += OpenFlow::match_conn(e$conn, T); # reverse - return openflow_match_pred(p, e, v); - } - - if ( e$ty == MAC ) - { - v += OpenFlow::ofp_match( - $dl_src=e$mac - ); - v += OpenFlow::ofp_match( - $dl_dst=e$mac - ); - - return openflow_match_pred(p, e, v); - } - - local dl_type = OpenFlow::ETH_IPv4; - - if ( e$ty == ADDRESS ) - { - if ( is_v6_subnet(e$ip) ) - dl_type = OpenFlow::ETH_IPv6; - - v += OpenFlow::ofp_match( - $dl_type=dl_type, - $nw_src=e$ip - ); - - v += OpenFlow::ofp_match( - $dl_type=dl_type, - $nw_dst=e$ip - ); - - return openflow_match_pred(p, e, v); - } - - local proto = OpenFlow::IP_TCP; - - if ( e$ty == FLOW ) - { - local m = OpenFlow::ofp_match(); - local f = e$flow; - - if ( f?$src_m ) - m$dl_src=f$src_m; - if ( f?$dst_m ) - m$dl_dst=f$dst_m; - - if ( f?$src_h ) - { - m$dl_type = determine_dl_type(f$src_h); - m$nw_src = f$src_h; - } - - if ( f?$dst_h ) - { - m$dl_type = determine_dl_type(f$dst_h); - m$nw_dst = f$dst_h; - } - - if ( f?$src_p ) - { - m$nw_proto = determine_proto(f$src_p); - m$tp_src = port_to_count(f$src_p); - } - - if ( f?$dst_p ) - { - m$nw_proto = determine_proto(f$dst_p); - m$tp_dst = port_to_count(f$dst_p); - } - - v += m; - - return openflow_match_pred(p, e, v); - } - - Reporter::error(fmt("Entity type %s not supported for openflow yet", cat(e$ty))); - return openflow_match_pred(p, e, v); - } - -function openflow_rule_to_flow_mod(p: PluginState, r: Rule) : OpenFlow::ofp_flow_mod - { - local c = p$of_config; - - local flow_mod = OpenFlow::ofp_flow_mod( - $cookie=OpenFlow::generate_cookie(r$cid*2), # leave one space for the cases in which we need two rules. - $command=OpenFlow::OFPFC_ADD, - $idle_timeout=c$idle_timeout, - $priority=int_to_count(r$priority + c$priority_offset), - $flags=OpenFlow::OFPFF_SEND_FLOW_REM # please notify us when flows are removed - ); - - if ( r?$expire ) - flow_mod$hard_timeout = double_to_count(interval_to_double(r$expire)); - if ( c?$table_id ) - flow_mod$table_id = c$table_id; - - if ( r$ty == DROP ) - { - # default, nothing to do. We simply do not add an output port to the rule... - } - else if ( r$ty == WHITELIST ) - { - # at the moment our interpretation of whitelist is to hand this off to the switches L2/L3 routing. - flow_mod$actions$out_ports = vector(OpenFlow::OFPP_NORMAL); - } - else if ( r$ty == MODIFY ) - { - # if no ports are given, just assume normal pipeline... - flow_mod$actions$out_ports = vector(OpenFlow::OFPP_NORMAL); - - local mod = r$mod; - if ( mod?$redirect_port ) - flow_mod$actions$out_ports = vector(mod$redirect_port); - - if ( mod?$src_h ) - flow_mod$actions$nw_src = mod$src_h; - if ( mod?$dst_h ) - flow_mod$actions$nw_dst = mod$dst_h; - if ( mod?$src_m ) - flow_mod$actions$dl_src = mod$src_m; - if ( mod?$dst_m ) - flow_mod$actions$dl_dst = mod$dst_m; - if ( mod?$src_p ) - flow_mod$actions$tp_src = mod$src_p; - if ( mod?$dst_p ) - flow_mod$actions$tp_dst = mod$dst_p; - } - else if ( r$ty == REDIRECT ) - { - # redirect to port c - flow_mod$actions$out_ports = vector(r$out_port); - } - else - { - Reporter::error(fmt("Rule type %s not supported for openflow yet", cat(r$ty))); - } - - return openflow_flow_mod_pred(p, r, flow_mod); - } - -function openflow_add_rule(p: PluginState, r: Rule) : bool - { - if ( ! 
openflow_check_rule(p, r) ) - return F; - - local flow_mod = openflow_rule_to_flow_mod(p, r); - local matches = entity_to_match(p, r$entity); - - for ( i in matches ) - { - if ( OpenFlow::flow_mod(p$of_controller, matches[i], flow_mod) ) - { - of_messages[r$cid, flow_mod$command] = OfTable($p=p, $r=r); - flow_mod = copy(flow_mod); - ++flow_mod$cookie; - } - else - event NetControl::rule_error(r, p, "Error while executing OpenFlow::flow_mod"); - } - - return T; - } - -function openflow_remove_rule(p: PluginState, r: Rule, reason: string) : bool - { - if ( ! openflow_check_rule(p, r) ) - return F; - - local flow_mod: OpenFlow::ofp_flow_mod = [ - $cookie=OpenFlow::generate_cookie(r$cid*2), - $command=OpenFlow::OFPFC_DELETE - ]; - - if ( OpenFlow::flow_mod(p$of_controller, [], flow_mod) ) - of_messages[r$cid, flow_mod$command] = OfTable($p=p, $r=r); - else - { - event NetControl::rule_error(r, p, "Error while executing OpenFlow::flow_mod"); - return F; - } - - # if this was an address or mac match, we also need to remove the reverse - if ( r$entity$ty == ADDRESS || r$entity$ty == MAC ) - { - local flow_mod_2 = copy(flow_mod); - ++flow_mod_2$cookie; - OpenFlow::flow_mod(p$of_controller, [], flow_mod_2); - } - - return T; - } - -event OpenFlow::flow_mod_success(name: string, match: OpenFlow::ofp_match, flow_mod: OpenFlow::ofp_flow_mod, msg: string) &priority=3 - { - local id = OpenFlow::get_cookie_uid(flow_mod$cookie)/2; - if ( [id, flow_mod$command] !in of_messages ) - return; - - local r = of_messages[id,flow_mod$command]$r; - local p = of_messages[id,flow_mod$command]$p; - local c = of_messages[id,flow_mod$command]$c; - - if ( r$entity$ty == ADDRESS || r$entity$ty == MAC ) - { - ++of_messages[id,flow_mod$command]$c; - if ( of_messages[id,flow_mod$command]$c < 2 ) - return; # will do stuff once the second part arrives... - } - - delete of_messages[id,flow_mod$command]; - - if ( p$of_controller$supports_flow_removed ) - of_flows[id] = OfTable($p=p, $r=r); - - if ( flow_mod$command == OpenFlow::OFPFC_ADD ) - event NetControl::rule_added(r, p, msg); - else if ( flow_mod$command == OpenFlow::OFPFC_DELETE || flow_mod$command == OpenFlow::OFPFC_DELETE_STRICT ) - event NetControl::rule_removed(r, p, msg); - } - -event OpenFlow::flow_mod_failure(name: string, match: OpenFlow::ofp_match, flow_mod: OpenFlow::ofp_flow_mod, msg: string) &priority=3 - { - local id = OpenFlow::get_cookie_uid(flow_mod$cookie)/2; - if ( [id, flow_mod$command] !in of_messages ) - return; - - local r = of_messages[id,flow_mod$command]$r; - local p = of_messages[id,flow_mod$command]$p; - delete of_messages[id,flow_mod$command]; - - event NetControl::rule_error(r, p, msg); - } - -event OpenFlow::flow_removed(name: string, match: OpenFlow::ofp_match, cookie: count, priority: count, reason: count, duration_sec: count, idle_timeout: count, packet_count: count, byte_count: count) - { - local id = OpenFlow::get_cookie_uid(cookie)/2; - if ( id !in of_flows ) - return; - - local rec = of_flows[id]; - local r = rec$r; - local p = rec$p; - - if ( r$entity$ty == ADDRESS || r$entity$ty == MAC ) - { - ++of_flows[id]$c; - if ( of_flows[id]$c < 2 ) - return; # will do stuff once the second part arrives... 
- else - event NetControl::rule_timeout(r, FlowInfo($duration=double_to_interval((rec$duration_sec+duration_sec)/2), $packet_count=packet_count+rec$packet_count, $byte_count=byte_count+rec$byte_count), p); - - return; - } - - event NetControl::rule_timeout(r, FlowInfo($duration=double_to_interval(duration_sec+0.0), $packet_count=packet_count, $byte_count=byte_count), p); - } - -function openflow_init(p: PluginState) - { - local name = p$of_controller$state$_name; - if ( name in of_instances ) - Reporter::error(fmt("OpenFlow instance %s added to NetControl twice.", name)); - - of_instances[name] = p; - - # let's check, if our OpenFlow controller is already active. If not, we have to wait for it to become active. - if ( p$of_controller$state$_activated ) - plugin_activated(p); - } - -event OpenFlow::controller_activated(name: string, controller: OpenFlow::Controller) - { - if ( name in of_instances ) - plugin_activated(of_instances[name]); - } - -global openflow_plugin = Plugin( - $name=openflow_name, - $can_expire = T, - $init = openflow_init, -# $done = openflow_done, - $add_rule = openflow_add_rule, - $remove_rule = openflow_remove_rule - ); - -function create_openflow(controller: OpenFlow::Controller, config: OfConfig &default=[]) : PluginState - { - local p: PluginState = [$plugin=openflow_plugin, $of_controller=controller, $of_config=config]; - - return p; - } diff --git a/scripts/base/frameworks/netcontrol/plugins/openflow.zeek b/scripts/base/frameworks/netcontrol/plugins/openflow.zeek new file mode 100644 index 0000000000..d80d7c4a41 --- /dev/null +++ b/scripts/base/frameworks/netcontrol/plugins/openflow.zeek @@ -0,0 +1,454 @@ +##! OpenFlow plugin for the NetControl framework. + +@load ../main +@load ../plugin +@load base/frameworks/openflow + +module NetControl; + +export { + ## This record specifies the configuration that is passed to :zeek:see:`NetControl::create_openflow`. + type OfConfig: record { + monitor: bool &default=T; ##< Accept rules that target the monitor path. + forward: bool &default=T; ##< Accept rules that target the forward path. + idle_timeout: count &default=0; ##< Default OpenFlow idle timeout. + table_id: count &optional; ##< Default OpenFlow table ID. + priority_offset: int &default=+0; ##< Add this to all rule priorities. Can be useful if you want the openflow priorities be offset from the netcontrol priorities without having to write a filter function. + + ## Predicate that is called on rule insertion or removal. + ## + ## p: Current plugin state. + ## + ## r: The rule to be inserted or removed. + ## + ## Returns: T if the rule can be handled by the current backend, F otherwise. + check_pred: function(p: PluginState, r: Rule): bool &optional; + + ## This predicate is called each time an OpenFlow match record is created. + ## The predicate can modify the match structure before it is sent on to the + ## device. + ## + ## p: Current plugin state. + ## + ## r: The rule to be inserted or removed. + ## + ## m: The openflow match structures that were generated for this rules. + ## + ## Returns: The modified OpenFlow match structures that will be used in place of the structures passed in m. + match_pred: function(p: PluginState, e: Entity, m: vector of OpenFlow::ofp_match): vector of OpenFlow::ofp_match &optional; + + ## This predicate is called before a FlowMod message is sent to the OpenFlow + ## device. It can modify the FlowMod message before it is passed on. + ## + ## p: Current plugin state. + ## + ## r: The rule to be inserted or removed. 
+ ## + ## m: The OpenFlow FlowMod message. + ## + ## Returns: The modified FlowMod message that is used in lieu of m. + flow_mod_pred: function(p: PluginState, r: Rule, m: OpenFlow::ofp_flow_mod): OpenFlow::ofp_flow_mod &optional; + }; + + redef record PluginState += { + ## OpenFlow controller for NetControl OpenFlow plugin. + of_controller: OpenFlow::Controller &optional; + ## OpenFlow configuration record that is passed on initialization. + of_config: OfConfig &optional; + }; + + type OfTable: record { + p: PluginState; + r: Rule; + c: count &default=0; # how many replies did we see so far? needed for ids where we have multiple rules... + packet_count: count &default=0; + byte_count: count &default=0; + duration_sec: double &default=0.0; + }; + + ## The time interval after which an openflow message is considered to be timed out + ## and we delete it from our internal tracking. + const openflow_message_timeout = 20secs &redef; + + ## The time interval after we consider a flow timed out. This should be fairly high (or + ## even disabled) if you expect a lot of long flows. However, one also will have state + ## buildup for quite a while if keeping this around... + const openflow_flow_timeout = 24hrs &redef; + + ## Instantiates an openflow plugin for the NetControl framework. + global create_openflow: function(controller: OpenFlow::Controller, config: OfConfig &default=[]) : PluginState; +} + +global of_messages: table[count, OpenFlow::ofp_flow_mod_command] of OfTable &create_expire=openflow_message_timeout + &expire_func=function(t: table[count, OpenFlow::ofp_flow_mod_command] of OfTable, idx: any): interval + { + local rid: count; + local command: OpenFlow::ofp_flow_mod_command; + [rid, command] = idx; + + local p = t[rid, command]$p; + local r = t[rid, command]$r; + event NetControl::rule_error(r, p, "Timeout during rule insertion/removal"); + return 0secs; + }; + +global of_flows: table[count] of OfTable &create_expire=openflow_flow_timeout; +global of_instances: table[string] of PluginState; + +function openflow_name(p: PluginState) : string + { + return fmt("Openflow-%s", p$of_controller$describe(p$of_controller$state)); + } + +function openflow_check_rule(p: PluginState, r: Rule) : bool + { + local c = p$of_config; + + if ( p$of_config?$check_pred ) + return p$of_config$check_pred(p, r); + + if ( r$target == MONITOR && c$monitor ) + return T; + + if ( r$target == FORWARD && c$forward ) + return T; + + return F; + } + +function openflow_match_pred(p: PluginState, e: Entity, m: vector of OpenFlow::ofp_match) : vector of OpenFlow::ofp_match + { + if ( p$of_config?$match_pred ) + return p$of_config$match_pred(p, e, m); + + return m; + } + +function openflow_flow_mod_pred(p: PluginState, r: Rule, m: OpenFlow::ofp_flow_mod): OpenFlow::ofp_flow_mod + { + if ( p$of_config?$flow_mod_pred ) + return p$of_config$flow_mod_pred(p, r, m); + + return m; + } + +function determine_dl_type(s: subnet): count + { + local pdl = OpenFlow::ETH_IPv4; + if ( is_v6_subnet(s) ) + pdl = OpenFlow::ETH_IPv6; + + return pdl; + } + +function determine_proto(p: port): count + { + local proto = OpenFlow::IP_TCP; + if ( is_udp_port(p) ) + proto = OpenFlow::IP_UDP; + else if ( is_icmp_port(p) ) + proto = OpenFlow::IP_ICMP; + + return proto; + } + +function entity_to_match(p: PluginState, e: Entity): vector of OpenFlow::ofp_match + { + local v : vector of OpenFlow::ofp_match = vector(); + + if ( e$ty == CONNECTION ) + { + v += OpenFlow::match_conn(e$conn); # forward and... 
+ v += OpenFlow::match_conn(e$conn, T); # reverse + return openflow_match_pred(p, e, v); + } + + if ( e$ty == MAC ) + { + v += OpenFlow::ofp_match( + $dl_src=e$mac + ); + v += OpenFlow::ofp_match( + $dl_dst=e$mac + ); + + return openflow_match_pred(p, e, v); + } + + local dl_type = OpenFlow::ETH_IPv4; + + if ( e$ty == ADDRESS ) + { + if ( is_v6_subnet(e$ip) ) + dl_type = OpenFlow::ETH_IPv6; + + v += OpenFlow::ofp_match( + $dl_type=dl_type, + $nw_src=e$ip + ); + + v += OpenFlow::ofp_match( + $dl_type=dl_type, + $nw_dst=e$ip + ); + + return openflow_match_pred(p, e, v); + } + + local proto = OpenFlow::IP_TCP; + + if ( e$ty == FLOW ) + { + local m = OpenFlow::ofp_match(); + local f = e$flow; + + if ( f?$src_m ) + m$dl_src=f$src_m; + if ( f?$dst_m ) + m$dl_dst=f$dst_m; + + if ( f?$src_h ) + { + m$dl_type = determine_dl_type(f$src_h); + m$nw_src = f$src_h; + } + + if ( f?$dst_h ) + { + m$dl_type = determine_dl_type(f$dst_h); + m$nw_dst = f$dst_h; + } + + if ( f?$src_p ) + { + m$nw_proto = determine_proto(f$src_p); + m$tp_src = port_to_count(f$src_p); + } + + if ( f?$dst_p ) + { + m$nw_proto = determine_proto(f$dst_p); + m$tp_dst = port_to_count(f$dst_p); + } + + v += m; + + return openflow_match_pred(p, e, v); + } + + Reporter::error(fmt("Entity type %s not supported for openflow yet", cat(e$ty))); + return openflow_match_pred(p, e, v); + } + +function openflow_rule_to_flow_mod(p: PluginState, r: Rule) : OpenFlow::ofp_flow_mod + { + local c = p$of_config; + + local flow_mod = OpenFlow::ofp_flow_mod( + $cookie=OpenFlow::generate_cookie(r$cid*2), # leave one space for the cases in which we need two rules. + $command=OpenFlow::OFPFC_ADD, + $idle_timeout=c$idle_timeout, + $priority=int_to_count(r$priority + c$priority_offset), + $flags=OpenFlow::OFPFF_SEND_FLOW_REM # please notify us when flows are removed + ); + + if ( r?$expire ) + flow_mod$hard_timeout = double_to_count(interval_to_double(r$expire)); + if ( c?$table_id ) + flow_mod$table_id = c$table_id; + + if ( r$ty == DROP ) + { + # default, nothing to do. We simply do not add an output port to the rule... + } + else if ( r$ty == WHITELIST ) + { + # at the moment our interpretation of whitelist is to hand this off to the switches L2/L3 routing. + flow_mod$actions$out_ports = vector(OpenFlow::OFPP_NORMAL); + } + else if ( r$ty == MODIFY ) + { + # if no ports are given, just assume normal pipeline... + flow_mod$actions$out_ports = vector(OpenFlow::OFPP_NORMAL); + + local mod = r$mod; + if ( mod?$redirect_port ) + flow_mod$actions$out_ports = vector(mod$redirect_port); + + if ( mod?$src_h ) + flow_mod$actions$nw_src = mod$src_h; + if ( mod?$dst_h ) + flow_mod$actions$nw_dst = mod$dst_h; + if ( mod?$src_m ) + flow_mod$actions$dl_src = mod$src_m; + if ( mod?$dst_m ) + flow_mod$actions$dl_dst = mod$dst_m; + if ( mod?$src_p ) + flow_mod$actions$tp_src = mod$src_p; + if ( mod?$dst_p ) + flow_mod$actions$tp_dst = mod$dst_p; + } + else if ( r$ty == REDIRECT ) + { + # redirect to port c + flow_mod$actions$out_ports = vector(r$out_port); + } + else + { + Reporter::error(fmt("Rule type %s not supported for openflow yet", cat(r$ty))); + } + + return openflow_flow_mod_pred(p, r, flow_mod); + } + +function openflow_add_rule(p: PluginState, r: Rule) : bool + { + if ( ! 
openflow_check_rule(p, r) ) + return F; + + local flow_mod = openflow_rule_to_flow_mod(p, r); + local matches = entity_to_match(p, r$entity); + + for ( i in matches ) + { + if ( OpenFlow::flow_mod(p$of_controller, matches[i], flow_mod) ) + { + of_messages[r$cid, flow_mod$command] = OfTable($p=p, $r=r); + flow_mod = copy(flow_mod); + ++flow_mod$cookie; + } + else + event NetControl::rule_error(r, p, "Error while executing OpenFlow::flow_mod"); + } + + return T; + } + +function openflow_remove_rule(p: PluginState, r: Rule, reason: string) : bool + { + if ( ! openflow_check_rule(p, r) ) + return F; + + local flow_mod: OpenFlow::ofp_flow_mod = [ + $cookie=OpenFlow::generate_cookie(r$cid*2), + $command=OpenFlow::OFPFC_DELETE + ]; + + if ( OpenFlow::flow_mod(p$of_controller, [], flow_mod) ) + of_messages[r$cid, flow_mod$command] = OfTable($p=p, $r=r); + else + { + event NetControl::rule_error(r, p, "Error while executing OpenFlow::flow_mod"); + return F; + } + + # if this was an address or mac match, we also need to remove the reverse + if ( r$entity$ty == ADDRESS || r$entity$ty == MAC ) + { + local flow_mod_2 = copy(flow_mod); + ++flow_mod_2$cookie; + OpenFlow::flow_mod(p$of_controller, [], flow_mod_2); + } + + return T; + } + +event OpenFlow::flow_mod_success(name: string, match: OpenFlow::ofp_match, flow_mod: OpenFlow::ofp_flow_mod, msg: string) &priority=3 + { + local id = OpenFlow::get_cookie_uid(flow_mod$cookie)/2; + if ( [id, flow_mod$command] !in of_messages ) + return; + + local r = of_messages[id,flow_mod$command]$r; + local p = of_messages[id,flow_mod$command]$p; + local c = of_messages[id,flow_mod$command]$c; + + if ( r$entity$ty == ADDRESS || r$entity$ty == MAC ) + { + ++of_messages[id,flow_mod$command]$c; + if ( of_messages[id,flow_mod$command]$c < 2 ) + return; # will do stuff once the second part arrives... + } + + delete of_messages[id,flow_mod$command]; + + if ( p$of_controller$supports_flow_removed ) + of_flows[id] = OfTable($p=p, $r=r); + + if ( flow_mod$command == OpenFlow::OFPFC_ADD ) + event NetControl::rule_added(r, p, msg); + else if ( flow_mod$command == OpenFlow::OFPFC_DELETE || flow_mod$command == OpenFlow::OFPFC_DELETE_STRICT ) + event NetControl::rule_removed(r, p, msg); + } + +event OpenFlow::flow_mod_failure(name: string, match: OpenFlow::ofp_match, flow_mod: OpenFlow::ofp_flow_mod, msg: string) &priority=3 + { + local id = OpenFlow::get_cookie_uid(flow_mod$cookie)/2; + if ( [id, flow_mod$command] !in of_messages ) + return; + + local r = of_messages[id,flow_mod$command]$r; + local p = of_messages[id,flow_mod$command]$p; + delete of_messages[id,flow_mod$command]; + + event NetControl::rule_error(r, p, msg); + } + +event OpenFlow::flow_removed(name: string, match: OpenFlow::ofp_match, cookie: count, priority: count, reason: count, duration_sec: count, idle_timeout: count, packet_count: count, byte_count: count) + { + local id = OpenFlow::get_cookie_uid(cookie)/2; + if ( id !in of_flows ) + return; + + local rec = of_flows[id]; + local r = rec$r; + local p = rec$p; + + if ( r$entity$ty == ADDRESS || r$entity$ty == MAC ) + { + ++of_flows[id]$c; + if ( of_flows[id]$c < 2 ) + return; # will do stuff once the second part arrives... 
+ else + event NetControl::rule_timeout(r, FlowInfo($duration=double_to_interval((rec$duration_sec+duration_sec)/2), $packet_count=packet_count+rec$packet_count, $byte_count=byte_count+rec$byte_count), p); + + return; + } + + event NetControl::rule_timeout(r, FlowInfo($duration=double_to_interval(duration_sec+0.0), $packet_count=packet_count, $byte_count=byte_count), p); + } + +function openflow_init(p: PluginState) + { + local name = p$of_controller$state$_name; + if ( name in of_instances ) + Reporter::error(fmt("OpenFlow instance %s added to NetControl twice.", name)); + + of_instances[name] = p; + + # let's check, if our OpenFlow controller is already active. If not, we have to wait for it to become active. + if ( p$of_controller$state$_activated ) + plugin_activated(p); + } + +event OpenFlow::controller_activated(name: string, controller: OpenFlow::Controller) + { + if ( name in of_instances ) + plugin_activated(of_instances[name]); + } + +global openflow_plugin = Plugin( + $name=openflow_name, + $can_expire = T, + $init = openflow_init, +# $done = openflow_done, + $add_rule = openflow_add_rule, + $remove_rule = openflow_remove_rule + ); + +function create_openflow(controller: OpenFlow::Controller, config: OfConfig &default=[]) : PluginState + { + local p: PluginState = [$plugin=openflow_plugin, $of_controller=controller, $of_config=config]; + + return p; + } diff --git a/scripts/base/frameworks/netcontrol/plugins/packetfilter.bro b/scripts/base/frameworks/netcontrol/plugins/packetfilter.bro deleted file mode 100644 index ac9bebdc90..0000000000 --- a/scripts/base/frameworks/netcontrol/plugins/packetfilter.bro +++ /dev/null @@ -1,113 +0,0 @@ -##! NetControl plugin for the process-level PacketFilter that comes with -##! Bro. Since the PacketFilter in Bro is quite limited in scope -##! and can only add/remove filters for addresses, this is quite -##! limited in scope at the moment. - -module NetControl; - -@load ../plugin - -export { - ## Instantiates the packetfilter plugin. - global create_packetfilter: function() : PluginState; -} - -# Check if we can handle this rule. If it specifies ports or -# anything Bro cannot handle, simply ignore it for now. -function packetfilter_check_rule(r: Rule) : bool - { - if ( r$ty != DROP ) - return F; - - if ( r$target != MONITOR ) - return F; - - local e = r$entity; - if ( e$ty == ADDRESS ) - return T; - - if ( e$ty != FLOW ) # everything else requires ports or MAC stuff - return F; - - if ( e$flow?$src_p || e$flow?$dst_p || e$flow?$src_m || e$flow?$dst_m ) - return F; - - return T; - } - - -function packetfilter_add_rule(p: PluginState, r: Rule) : bool - { - if ( ! packetfilter_check_rule(r) ) - return F; - - local e = r$entity; - if ( e$ty == ADDRESS ) - { - install_src_net_filter(e$ip, 0, 1.0); - install_dst_net_filter(e$ip, 0, 1.0); - return T; - } - - if ( e$ty == FLOW ) - { - local f = e$flow; - if ( f?$src_h ) - install_src_net_filter(f$src_h, 0, 1.0); - if ( f?$dst_h ) - install_dst_net_filter(f$dst_h, 0, 1.0); - - return T; - } - - return F; - } - -function packetfilter_remove_rule(p: PluginState, r: Rule, reason: string) : bool - { - if ( ! 
packetfilter_check_rule(r) ) - return F; - - local e = r$entity; - if ( e$ty == ADDRESS ) - { - uninstall_src_net_filter(e$ip); - uninstall_dst_net_filter(e$ip); - return T; - } - - if ( e$ty == FLOW ) - { - local f = e$flow; - if ( f?$src_h ) - uninstall_src_net_filter(f$src_h); - if ( f?$dst_h ) - uninstall_dst_net_filter(f$dst_h); - - return T; - } - - return F; - } - -function packetfilter_name(p: PluginState) : string - { - return "Packetfilter"; - } - -global packetfilter_plugin = Plugin( - $name=packetfilter_name, - $can_expire = F, -# $init = packetfilter_init, -# $done = packetfilter_done, - $add_rule = packetfilter_add_rule, - $remove_rule = packetfilter_remove_rule - ); - -function create_packetfilter() : PluginState - { - local p: PluginState = [$plugin=packetfilter_plugin]; - - return p; - } - diff --git a/scripts/base/frameworks/netcontrol/plugins/packetfilter.zeek b/scripts/base/frameworks/netcontrol/plugins/packetfilter.zeek new file mode 100644 index 0000000000..3648ed3955 --- /dev/null +++ b/scripts/base/frameworks/netcontrol/plugins/packetfilter.zeek @@ -0,0 +1,113 @@ +##! NetControl plugin for the process-level PacketFilter that comes with +##! Zeek. Since the PacketFilter in Zeek is quite limited in scope +##! and can only add/remove filters for addresses, this is quite +##! limited in scope at the moment. + +@load ../plugin + +module NetControl; + +export { + ## Instantiates the packetfilter plugin. + global create_packetfilter: function() : PluginState; +} + +# Check if we can handle this rule. If it specifies ports or +# anything Zeek cannot handle, simply ignore it for now. +function packetfilter_check_rule(r: Rule) : bool + { + if ( r$ty != DROP ) + return F; + + if ( r$target != MONITOR ) + return F; + + local e = r$entity; + if ( e$ty == ADDRESS ) + return T; + + if ( e$ty != FLOW ) # everything else requires ports or MAC stuff + return F; + + if ( e$flow?$src_p || e$flow?$dst_p || e$flow?$src_m || e$flow?$dst_m ) + return F; + + return T; + } + + +function packetfilter_add_rule(p: PluginState, r: Rule) : bool + { + if ( ! packetfilter_check_rule(r) ) + return F; + + local e = r$entity; + if ( e$ty == ADDRESS ) + { + install_src_net_filter(e$ip, 0, 1.0); + install_dst_net_filter(e$ip, 0, 1.0); + return T; + } + + if ( e$ty == FLOW ) + { + local f = e$flow; + if ( f?$src_h ) + install_src_net_filter(f$src_h, 0, 1.0); + if ( f?$dst_h ) + install_dst_net_filter(f$dst_h, 0, 1.0); + + return T; + } + + return F; + } + +function packetfilter_remove_rule(p: PluginState, r: Rule, reason: string) : bool + { + if ( ! 
packetfilter_check_rule(r) ) + return F; + + local e = r$entity; + if ( e$ty == ADDRESS ) + { + uninstall_src_net_filter(e$ip); + uninstall_dst_net_filter(e$ip); + return T; + } + + if ( e$ty == FLOW ) + { + local f = e$flow; + if ( f?$src_h ) + uninstall_src_net_filter(f$src_h); + if ( f?$dst_h ) + uninstall_dst_net_filter(f$dst_h); + + return T; + } + + return F; + } + +function packetfilter_name(p: PluginState) : string + { + return "Packetfilter"; + } + +global packetfilter_plugin = Plugin( + $name=packetfilter_name, + $can_expire = F, +# $init = packetfilter_init, +# $done = packetfilter_done, + $add_rule = packetfilter_add_rule, + $remove_rule = packetfilter_remove_rule + ); + +function create_packetfilter() : PluginState + { + local p: PluginState = [$plugin=packetfilter_plugin]; + + return p; + } + diff --git a/scripts/base/frameworks/netcontrol/shunt.bro b/scripts/base/frameworks/netcontrol/shunt.bro deleted file mode 100644 index 1275be1560..0000000000 --- a/scripts/base/frameworks/netcontrol/shunt.bro +++ /dev/null @@ -1,69 +0,0 @@ -##! Implementation of the shunt functionality for NetControl. - -module NetControl; - -@load ./main - -export { - redef enum Log::ID += { SHUNT }; - - ## Stops forwarding a uni-directional flow's packets to Bro. - ## - ## f: The flow to shunt. - ## - ## t: How long to leave the shunt in place, with 0 being indefinitely. - ## - ## location: An optional string describing where the shunt was triggered. - ## - ## Returns: The id of the inserted rule on success and zero on failure. - global shunt_flow: function(f: flow_id, t: interval, location: string &default="") : string; - - type ShuntInfo: record { - ## Time at which the recorded activity occurred. - ts: time &log; - ## ID of the rule; unique during each Bro run. - rule_id: string &log; - ## Flow ID of the shunted flow. - f: flow_id &log; - ## Expiry time of the shunt. - expire: interval &log; - ## Location where the underlying action was triggered. - location: string &log &optional; - }; - - ## Event that can be handled to access the :bro:type:`NetControl::ShuntInfo` - ## record as it is sent on to the logging framework. - global log_netcontrol_shunt: event(rec: ShuntInfo); -} - -event bro_init() &priority=5 - { - Log::create_stream(NetControl::SHUNT, [$columns=ShuntInfo, $ev=log_netcontrol_shunt, $path="netcontrol_shunt"]); - } - -function shunt_flow(f: flow_id, t: interval, location: string &default="") : string - { - local flow = NetControl::Flow( - $src_h=addr_to_subnet(f$src_h), - $src_p=f$src_p, - $dst_h=addr_to_subnet(f$dst_h), - $dst_p=f$dst_p - ); - local e: Entity = [$ty=FLOW, $flow=flow]; - local r: Rule = [$ty=DROP, $target=MONITOR, $entity=e, $expire=t, $location=location]; - - local id = add_rule(r); - - # Error should already be logged - if ( id == "" ) - return id; - - local log = ShuntInfo($ts=network_time(), $rule_id=id, $f=f, $expire=t); - if ( location != "" ) - log$location=location; - - Log::write(SHUNT, log); - - return id; - } - diff --git a/scripts/base/frameworks/netcontrol/shunt.zeek b/scripts/base/frameworks/netcontrol/shunt.zeek new file mode 100644 index 0000000000..007daffbb5 --- /dev/null +++ b/scripts/base/frameworks/netcontrol/shunt.zeek @@ -0,0 +1,69 @@ +##! Implementation of the shunt functionality for NetControl. + +module NetControl; + +@load ./main + +export { + redef enum Log::ID += { SHUNT }; + + ## Stops forwarding a uni-directional flow's packets to Zeek. + ## + ## f: The flow to shunt. 
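# Illustrative usage sketch, not part of the patch: shunting the client-to-server
# half of a connection for one hour via shunt_flow(). The triggering event and
# the timeout are placeholders.
event connection_established(c: connection)
	{
	local f = flow_id($src_h=c$id$orig_h, $src_p=c$id$orig_p,
	    $dst_h=c$id$resp_h, $dst_p=c$id$resp_p);
	NetControl::shunt_flow(f, 1hr, "example shunt");
	}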
+ ## + ## t: How long to leave the shunt in place, with 0 being indefinitely. + ## + ## location: An optional string describing where the shunt was triggered. + ## + ## Returns: The id of the inserted rule on success and zero on failure. + global shunt_flow: function(f: flow_id, t: interval, location: string &default="") : string; + + type ShuntInfo: record { + ## Time at which the recorded activity occurred. + ts: time &log; + ## ID of the rule; unique during each Zeek run. + rule_id: string &log; + ## Flow ID of the shunted flow. + f: flow_id &log; + ## Expiry time of the shunt. + expire: interval &log; + ## Location where the underlying action was triggered. + location: string &log &optional; + }; + + ## Event that can be handled to access the :zeek:type:`NetControl::ShuntInfo` + ## record as it is sent on to the logging framework. + global log_netcontrol_shunt: event(rec: ShuntInfo); +} + +event zeek_init() &priority=5 + { + Log::create_stream(NetControl::SHUNT, [$columns=ShuntInfo, $ev=log_netcontrol_shunt, $path="netcontrol_shunt"]); + } + +function shunt_flow(f: flow_id, t: interval, location: string &default="") : string + { + local flow = NetControl::Flow( + $src_h=addr_to_subnet(f$src_h), + $src_p=f$src_p, + $dst_h=addr_to_subnet(f$dst_h), + $dst_p=f$dst_p + ); + local e: Entity = [$ty=FLOW, $flow=flow]; + local r: Rule = [$ty=DROP, $target=MONITOR, $entity=e, $expire=t, $location=location]; + + local id = add_rule(r); + + # Error should already be logged + if ( id == "" ) + return id; + + local log = ShuntInfo($ts=network_time(), $rule_id=id, $f=f, $expire=t); + if ( location != "" ) + log$location=location; + + Log::write(SHUNT, log); + + return id; + } + diff --git a/scripts/base/frameworks/netcontrol/types.bro b/scripts/base/frameworks/netcontrol/types.bro deleted file mode 100644 index 7fda65ea6b..0000000000 --- a/scripts/base/frameworks/netcontrol/types.bro +++ /dev/null @@ -1,127 +0,0 @@ -##! This file defines the types that are used by the NetControl framework. -##! -##! The most important type defined in this file is :bro:see:`NetControl::Rule`, -##! which is used to describe all rules that can be expressed by the NetControl framework. - -module NetControl; - -export { - ## The default priority that is used when creating rules. - option default_priority: int = +0; - - ## The default priority that is used when using the high-level functions to - ## push whitelist entries to the backends (:bro:see:`NetControl::whitelist_address` and - ## :bro:see:`NetControl::whitelist_subnet`). - ## - ## Note that this priority is not automatically used when manually creating rules - ## that have a :bro:see:`NetControl::RuleType` of :bro:enum:`NetControl::WHITELIST`. - const whitelist_priority: int = +5 &redef; - - ## Type defining the entity that a rule applies to. - type EntityType: enum { - ADDRESS, ##< Activity involving a specific IP address. - CONNECTION, ##< Activity involving all of a bi-directional connection's activity. - FLOW, ##< Activity involving a uni-directional flow's activity. Can contain wildcards. - MAC, ##< Activity involving a MAC address. - }; - - ## Flow is used in :bro:type:`NetControl::Entity` together with :bro:enum:`NetControl::FLOW` to specify - ## a uni-directional flow that a rule applies to. - ## - ## If optional fields are not set, they are interpreted as wildcarded. - type Flow: record { - src_h: subnet &optional; ##< The source IP address/subnet. - src_p: port &optional; ##< The source port number. 
- dst_h: subnet &optional; ##< The destination IP address/subnet. - dst_p: port &optional; ##< The destination port number. - src_m: string &optional; ##< The source MAC address. - dst_m: string &optional; ##< The destination MAC address. - }; - - ## Type defining the entity a rule is operating on. - type Entity: record { - ty: EntityType; ##< Type of entity. - conn: conn_id &optional; ##< Used with :bro:enum:`NetControl::CONNECTION`. - flow: Flow &optional; ##< Used with :bro:enum:`NetControl::FLOW`. - ip: subnet &optional; ##< Used with :bro:enum:`NetControl::ADDRESS` to specifiy a CIDR subnet. - mac: string &optional; ##< Used with :bro:enum:`NetControl::MAC`. - }; - - ## Type defining the target of a rule. - ## - ## Rules can either be applied to the forward path, affecting all network traffic, or - ## on the monitor path, only affecting the traffic that is sent to Bro. The second - ## is mostly used for shunting, which allows Bro to tell the networking hardware that - ## it wants to no longer see traffic that it identified as benign. - type TargetType: enum { - FORWARD, #< Apply rule actively to traffic on forwarding path. - MONITOR, #< Apply rule passively to traffic sent to Bro for monitoring. - }; - - ## Type of rules that the framework supports. Each type lists the extra - ## :bro:type:`NetControl::Rule` fields it uses, if any. - ## - ## Plugins may extend this type to define their own. - type RuleType: enum { - ## Stop forwarding all packets matching the entity. - ## - ## No additional arguments. - DROP, - - ## Modify all packets matching entity. The packets - ## will be modified according to the `mod` entry of - ## the rule. - ## - MODIFY, - - ## Redirect all packets matching entity to a different switch port, - ## given in the `out_port` argument of the rule. - ## - REDIRECT, - - ## Whitelists all packets of an entity, meaning no restrictions will be applied. - ## While whitelisting is the default if no rule matches, this type can be - ## used to override lower-priority rules that would otherwise take effect for the - ## entity. - WHITELIST, - }; - - ## Type for defining a flow modification action. - type FlowMod: record { - src_h: addr &optional; ##< The source IP address. - src_p: count &optional; ##< The source port number. - dst_h: addr &optional; ##< The destination IP address. - dst_p: count &optional; ##< The destination port number. - src_m: string &optional; ##< The source MAC address. - dst_m: string &optional; ##< The destination MAC address. - redirect_port: count &optional; - }; - - ## A rule for the framework to put in place. Of all rules currently in - ## place, the first match will be taken, sorted by priority. All - ## further rules will be ignored. - type Rule: record { - ty: RuleType; ##< Type of rule. - target: TargetType; ##< Where to apply rule. - entity: Entity; ##< Entity to apply rule to. - expire: interval &optional; ##< Timeout after which to expire the rule. - priority: int &default=default_priority; ##< Priority if multiple rules match an entity (larger value is higher priority). - location: string &optional; ##< Optional string describing where/what installed the rule. - - out_port: count &optional; ##< Argument for :bro:enum:`NetControl::REDIRECT` rules. - mod: FlowMod &optional; ##< Argument for :bro:enum:`NetControl::MODIFY` rules. - - id: string &default=""; ##< Internally determined unique ID for this rule. Will be set when added. - cid: count &default=0; ##< Internally determined unique numeric ID for this rule. Set when added. 
- }; - - ## Information of a flow that can be provided by switches when the flow times out. - ## Currently this is heavily influenced by the data that OpenFlow returns by default. - ## That being said - their design makes sense and this is probably the data one - ## can expect to be available. - type FlowInfo: record { - duration: interval &optional; ##< Total duration of the rule. - packet_count: count &optional; ##< Number of packets exchanged over connections matched by the rule. - byte_count: count &optional; ##< Total bytes exchanged over connections matched by the rule. - }; -} diff --git a/scripts/base/frameworks/netcontrol/types.zeek b/scripts/base/frameworks/netcontrol/types.zeek new file mode 100644 index 0000000000..beac2302f6 --- /dev/null +++ b/scripts/base/frameworks/netcontrol/types.zeek @@ -0,0 +1,127 @@ +##! This file defines the types that are used by the NetControl framework. +##! +##! The most important type defined in this file is :zeek:see:`NetControl::Rule`, +##! which is used to describe all rules that can be expressed by the NetControl framework. + +module NetControl; + +export { + ## The default priority that is used when creating rules. + option default_priority: int = +0; + + ## The default priority that is used when using the high-level functions to + ## push whitelist entries to the backends (:zeek:see:`NetControl::whitelist_address` and + ## :zeek:see:`NetControl::whitelist_subnet`). + ## + ## Note that this priority is not automatically used when manually creating rules + ## that have a :zeek:see:`NetControl::RuleType` of :zeek:enum:`NetControl::WHITELIST`. + const whitelist_priority: int = +5 &redef; + + ## Type defining the entity that a rule applies to. + type EntityType: enum { + ADDRESS, ##< Activity involving a specific IP address. + CONNECTION, ##< Activity involving all of a bi-directional connection's activity. + FLOW, ##< Activity involving a uni-directional flow's activity. Can contain wildcards. + MAC, ##< Activity involving a MAC address. + }; + + ## Flow is used in :zeek:type:`NetControl::Entity` together with :zeek:enum:`NetControl::FLOW` to specify + ## a uni-directional flow that a rule applies to. + ## + ## If optional fields are not set, they are interpreted as wildcarded. + type Flow: record { + src_h: subnet &optional; ##< The source IP address/subnet. + src_p: port &optional; ##< The source port number. + dst_h: subnet &optional; ##< The destination IP address/subnet. + dst_p: port &optional; ##< The destination port number. + src_m: string &optional; ##< The source MAC address. + dst_m: string &optional; ##< The destination MAC address. + }; + + ## Type defining the entity a rule is operating on. + type Entity: record { + ty: EntityType; ##< Type of entity. + conn: conn_id &optional; ##< Used with :zeek:enum:`NetControl::CONNECTION`. + flow: Flow &optional; ##< Used with :zeek:enum:`NetControl::FLOW`. + ip: subnet &optional; ##< Used with :zeek:enum:`NetControl::ADDRESS` to specifiy a CIDR subnet. + mac: string &optional; ##< Used with :zeek:enum:`NetControl::MAC`. + }; + + ## Type defining the target of a rule. + ## + ## Rules can either be applied to the forward path, affecting all network traffic, or + ## on the monitor path, only affecting the traffic that is sent to Zeek. The second + ## is mostly used for shunting, which allows Zeek to tell the networking hardware that + ## it wants to no longer see traffic that it identified as benign. + type TargetType: enum { + FORWARD, #< Apply rule actively to traffic on forwarding path. 
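# Illustrative usage sketch, not part of the patch: building a Rule by hand from
# the types above and handing it to NetControl::add_rule(), which is declared in
# main.zeek outside this excerpt. The expiry and location are placeholders.
function example_drop_host(host: addr): string
	{
	local e = NetControl::Entity($ty=NetControl::ADDRESS, $ip=addr_to_subnet(host));
	local r = NetControl::Rule($ty=NetControl::DROP, $target=NetControl::FORWARD,
	    $entity=e, $expire=10min, $location="example_drop_host");
	return NetControl::add_rule(r);
	}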
+ MONITOR, #< Apply rule passively to traffic sent to Zeek for monitoring. + }; + + ## Type of rules that the framework supports. Each type lists the extra + ## :zeek:type:`NetControl::Rule` fields it uses, if any. + ## + ## Plugins may extend this type to define their own. + type RuleType: enum { + ## Stop forwarding all packets matching the entity. + ## + ## No additional arguments. + DROP, + + ## Modify all packets matching entity. The packets + ## will be modified according to the `mod` entry of + ## the rule. + ## + MODIFY, + + ## Redirect all packets matching entity to a different switch port, + ## given in the `out_port` argument of the rule. + ## + REDIRECT, + + ## Whitelists all packets of an entity, meaning no restrictions will be applied. + ## While whitelisting is the default if no rule matches, this type can be + ## used to override lower-priority rules that would otherwise take effect for the + ## entity. + WHITELIST, + }; + + ## Type for defining a flow modification action. + type FlowMod: record { + src_h: addr &optional; ##< The source IP address. + src_p: count &optional; ##< The source port number. + dst_h: addr &optional; ##< The destination IP address. + dst_p: count &optional; ##< The destination port number. + src_m: string &optional; ##< The source MAC address. + dst_m: string &optional; ##< The destination MAC address. + redirect_port: count &optional; + }; + + ## A rule for the framework to put in place. Of all rules currently in + ## place, the first match will be taken, sorted by priority. All + ## further rules will be ignored. + type Rule: record { + ty: RuleType; ##< Type of rule. + target: TargetType; ##< Where to apply rule. + entity: Entity; ##< Entity to apply rule to. + expire: interval &optional; ##< Timeout after which to expire the rule. + priority: int &default=default_priority; ##< Priority if multiple rules match an entity (larger value is higher priority). + location: string &optional; ##< Optional string describing where/what installed the rule. + + out_port: count &optional; ##< Argument for :zeek:enum:`NetControl::REDIRECT` rules. + mod: FlowMod &optional; ##< Argument for :zeek:enum:`NetControl::MODIFY` rules. + + id: string &default=""; ##< Internally determined unique ID for this rule. Will be set when added. + cid: count &default=0; ##< Internally determined unique numeric ID for this rule. Set when added. + }; + + ## Information of a flow that can be provided by switches when the flow times out. + ## Currently this is heavily influenced by the data that OpenFlow returns by default. + ## That being said - their design makes sense and this is probably the data one + ## can expect to be available. + type FlowInfo: record { + duration: interval &optional; ##< Total duration of the rule. + packet_count: count &optional; ##< Number of packets exchanged over connections matched by the rule. + byte_count: count &optional; ##< Total bytes exchanged over connections matched by the rule. + }; +} diff --git a/scripts/base/frameworks/notice/README b/scripts/base/frameworks/notice/README index c46a8a7e5c..8875214a9f 100644 --- a/scripts/base/frameworks/notice/README +++ b/scripts/base/frameworks/notice/README @@ -1,4 +1,4 @@ -The notice framework enables Bro to "notice" things which are odd or +The notice framework enables Zeek to "notice" things which are odd or potentially bad, leaving it to the local configuration to define which of them are actionable. 
This decoupling of detection and reporting allows -Bro to be customized to the different needs that sites have. +Zeek to be customized to the different needs that sites have. diff --git a/scripts/base/frameworks/notice/__load__.bro b/scripts/base/frameworks/notice/__load__.bro deleted file mode 100644 index 54e704c744..0000000000 --- a/scripts/base/frameworks/notice/__load__.bro +++ /dev/null @@ -1,12 +0,0 @@ -@load ./main -@load ./weird - -# There should be no overhead imposed by loading notice actions so we -# load them all. -@load ./actions/drop -@load ./actions/email_admin -@load ./actions/page -@load ./actions/add-geodata - -# Load here so that it can check whether clustering is enabled. -@load ./actions/pp-alarms diff --git a/scripts/base/frameworks/notice/__load__.zeek b/scripts/base/frameworks/notice/__load__.zeek new file mode 100644 index 0000000000..4a272574d3 --- /dev/null +++ b/scripts/base/frameworks/notice/__load__.zeek @@ -0,0 +1,11 @@ +@load ./main +@load ./weird + +# There should be no overhead imposed by loading notice actions so we +# load them all. +@load ./actions/email_admin +@load ./actions/page +@load ./actions/add-geodata + +# Load here so that it can check whether clustering is enabled. +@load ./actions/pp-alarms diff --git a/scripts/base/frameworks/notice/actions/add-geodata.bro b/scripts/base/frameworks/notice/actions/add-geodata.bro deleted file mode 100644 index 7d097f5eb6..0000000000 --- a/scripts/base/frameworks/notice/actions/add-geodata.bro +++ /dev/null @@ -1,51 +0,0 @@ -##! This script adds geographic location data to notices for the "remote" -##! host in a connection. It does make the assumption that one of the -##! addresses in a connection is "local" and one is "remote" which is -##! probably a safe assumption to make in most cases. If both addresses -##! are remote, it will use the $src address. - -@load ../main -@load base/frameworks/notice -@load base/utils/site - -module Notice; - -export { - redef enum Action += { - ## Indicates that the notice should have geodata added for the - ## "remote" host. :bro:id:`Site::local_nets` must be defined - ## in order for this to work. - ACTION_ADD_GEODATA - }; - - redef record Info += { - ## If GeoIP support is built in, notices can have geographic - ## information attached to them. - remote_location: geo_location &log &optional; - }; - - ## Notice types which should have the "remote" location looked up. - ## If GeoIP support is not built in, this does nothing. - option lookup_location_types: set[Notice::Type] = {}; -} - -hook policy(n: Notice::Info) &priority=10 - { - if ( n$note in Notice::lookup_location_types ) - add n$actions[ACTION_ADD_GEODATA]; - } - -# This is handled at a high priority in case other notice handlers -# want to use the data. -hook notice(n: Notice::Info) &priority=10 - { - if ( ACTION_ADD_GEODATA in n$actions && - |Site::local_nets| > 0 && - ! n?$remote_location ) - { - if ( n?$src && ! Site::is_local_addr(n$src) ) - n$remote_location = lookup_location(n$src); - else if ( n?$dst && ! Site::is_local_addr(n$dst) ) - n$remote_location = lookup_location(n$dst); - } - } diff --git a/scripts/base/frameworks/notice/actions/add-geodata.zeek b/scripts/base/frameworks/notice/actions/add-geodata.zeek new file mode 100644 index 0000000000..04cc10209d --- /dev/null +++ b/scripts/base/frameworks/notice/actions/add-geodata.zeek @@ -0,0 +1,51 @@ +##! This script adds geographic location data to notices for the "remote" +##! host in a connection. It does make the assumption that one of the +##! 
addresses in a connection is "local" and one is "remote" which is +##! probably a safe assumption to make in most cases. If both addresses +##! are remote, it will use the $src address. + +@load ../main +@load base/frameworks/notice +@load base/utils/site + +module Notice; + +export { + redef enum Action += { + ## Indicates that the notice should have geodata added for the + ## "remote" host. :zeek:id:`Site::local_nets` must be defined + ## in order for this to work. + ACTION_ADD_GEODATA + }; + + redef record Info += { + ## If GeoIP support is built in, notices can have geographic + ## information attached to them. + remote_location: geo_location &log &optional; + }; + + ## Notice types which should have the "remote" location looked up. + ## If GeoIP support is not built in, this does nothing. + option lookup_location_types: set[Notice::Type] = {}; +} + +hook policy(n: Notice::Info) &priority=10 + { + if ( n$note in Notice::lookup_location_types ) + add n$actions[ACTION_ADD_GEODATA]; + } + +# This is handled at a high priority in case other notice handlers +# want to use the data. +hook notice(n: Notice::Info) &priority=10 + { + if ( ACTION_ADD_GEODATA in n$actions && + |Site::local_nets| > 0 && + ! n?$remote_location ) + { + if ( n?$src && ! Site::is_local_addr(n$src) ) + n$remote_location = lookup_location(n$src); + else if ( n?$dst && ! Site::is_local_addr(n$dst) ) + n$remote_location = lookup_location(n$dst); + } + } diff --git a/scripts/base/frameworks/notice/actions/drop.bro b/scripts/base/frameworks/notice/actions/drop.bro deleted file mode 100644 index a189faaeda..0000000000 --- a/scripts/base/frameworks/notice/actions/drop.bro +++ /dev/null @@ -1,35 +0,0 @@ -##! This script extends the built in notice code to implement the IP address -##! dropping functionality. - -@load ../main -@load base/frameworks/netcontrol - -module Notice; - -export { - redef enum Action += { - ## Drops the address via :bro:see:`NetControl::drop_address_catch_release`. - ACTION_DROP - }; - - redef record Info += { - ## Indicate if the $src IP address was dropped and denied - ## network access. - dropped: bool &log &default=F; - }; -} - -hook notice(n: Notice::Info) &priority=-5 - { - if ( ACTION_DROP in n$actions ) - { - local ci = NetControl::get_catch_release_info(n$src); - if ( ci$watch_until == double_to_time(0) ) - { - # we have not seen this one yet. Drop it. - local addl = n?$msg ? fmt("ACTION_DROP: %s", n?$msg) : "ACTION_DROP"; - local res = NetControl::drop_address_catch_release(n$src, addl); - n$dropped = res$watch_until != double_to_time(0); - } - } - } diff --git a/scripts/base/frameworks/notice/actions/email_admin.bro b/scripts/base/frameworks/notice/actions/email_admin.bro deleted file mode 100644 index fb82f2b960..0000000000 --- a/scripts/base/frameworks/notice/actions/email_admin.bro +++ /dev/null @@ -1,35 +0,0 @@ -##! Adds a new notice action type which can be used to email notices -##! to the administrators of a particular address space as set by -##! :bro:id:`Site::local_admins` if the notice contains a source -##! or destination address that lies within their space. - -@load ../main -@load base/utils/site - -module Notice; - -export { - redef enum Action += { - ## Indicate that the generated email should be addressed to the - ## appropriate email addresses as found by the - ## :bro:id:`Site::get_emails` function based on the relevant - ## address or addresses indicated in the notice. 
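
The ACTION_ADD_GEODATA action ported above in actions/add-geodata.zeek only fires for notice types listed in Notice::lookup_location_types; a minimal sketch of enabling it, assuming GeoIP support is built in (the network and the Weird::Activity type are illustrative choices only):

redef Site::local_nets += { 192.168.0.0/16 };
redef Notice::lookup_location_types += { Weird::Activity };
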
- ACTION_EMAIL_ADMIN - }; -} - -hook notice(n: Notice::Info) &priority=-5 - { - if ( |Site::local_admins| > 0 && - ACTION_EMAIL_ADMIN in n$actions ) - { - local email = ""; - if ( n?$src && |Site::get_emails(n$src)| > 0 ) - email = fmt("%s, %s", email, Site::get_emails(n$src)); - if ( n?$dst && |Site::get_emails(n$dst)| > 0 ) - email = fmt("%s, %s", email, Site::get_emails(n$dst)); - - if ( email != "" ) - email_notice_to(n, email, T); - } - } diff --git a/scripts/base/frameworks/notice/actions/email_admin.zeek b/scripts/base/frameworks/notice/actions/email_admin.zeek new file mode 100644 index 0000000000..1b02e5ff0c --- /dev/null +++ b/scripts/base/frameworks/notice/actions/email_admin.zeek @@ -0,0 +1,35 @@ +##! Adds a new notice action type which can be used to email notices +##! to the administrators of a particular address space as set by +##! :zeek:id:`Site::local_admins` if the notice contains a source +##! or destination address that lies within their space. + +@load ../main +@load base/utils/site + +module Notice; + +export { + redef enum Action += { + ## Indicate that the generated email should be addressed to the + ## appropriate email addresses as found by the + ## :zeek:id:`Site::get_emails` function based on the relevant + ## address or addresses indicated in the notice. + ACTION_EMAIL_ADMIN + }; +} + +hook notice(n: Notice::Info) &priority=-5 + { + if ( |Site::local_admins| > 0 && + ACTION_EMAIL_ADMIN in n$actions ) + { + local email = ""; + if ( n?$src && |Site::get_emails(n$src)| > 0 ) + email = fmt("%s, %s", email, Site::get_emails(n$src)); + if ( n?$dst && |Site::get_emails(n$dst)| > 0 ) + email = fmt("%s, %s", email, Site::get_emails(n$dst)); + + if ( email != "" ) + email_notice_to(n, email, T); + } + } diff --git a/scripts/base/frameworks/notice/actions/page.bro b/scripts/base/frameworks/notice/actions/page.bro deleted file mode 100644 index 73432337d1..0000000000 --- a/scripts/base/frameworks/notice/actions/page.bro +++ /dev/null @@ -1,24 +0,0 @@ -##! Allows configuration of a pager email address to which notices can be sent. - -@load ../main - -module Notice; - -export { - redef enum Action += { - ## Indicates that the notice should be sent to the pager email - ## address configured in the :bro:id:`Notice::mail_page_dest` - ## variable. - ACTION_PAGE - }; - - ## Email address to send notices with the :bro:enum:`Notice::ACTION_PAGE` - ## action. - option mail_page_dest = ""; -} - -hook notice(n: Notice::Info) &priority=-5 - { - if ( ACTION_PAGE in n$actions ) - email_notice_to(n, mail_page_dest, F); - } diff --git a/scripts/base/frameworks/notice/actions/page.zeek b/scripts/base/frameworks/notice/actions/page.zeek new file mode 100644 index 0000000000..99ca44537b --- /dev/null +++ b/scripts/base/frameworks/notice/actions/page.zeek @@ -0,0 +1,24 @@ +##! Allows configuration of a pager email address to which notices can be sent. + +@load ../main + +module Notice; + +export { + redef enum Action += { + ## Indicates that the notice should be sent to the pager email + ## address configured in the :zeek:id:`Notice::mail_page_dest` + ## variable. + ACTION_PAGE + }; + + ## Email address to send notices with the :zeek:enum:`Notice::ACTION_PAGE` + ## action. 
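
The ACTION_PAGE action above is normally enabled per notice type from a site script; a small sketch, with a placeholder address and SSH::Password_Guessing (from the SSH brute-forcing policy script) chosen purely as an example:

@load protocols/ssh/detect-bruteforcing  # provides SSH::Password_Guessing

redef Notice::mail_page_dest = "oncall@example.com";

hook Notice::policy(n: Notice::Info)
	{
	if ( n$note == SSH::Password_Guessing )
		add n$actions[Notice::ACTION_PAGE];
	}
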
+ option mail_page_dest = ""; +} + +hook notice(n: Notice::Info) &priority=-5 + { + if ( ACTION_PAGE in n$actions ) + email_notice_to(n, mail_page_dest, F); + } diff --git a/scripts/base/frameworks/notice/actions/pp-alarms.bro b/scripts/base/frameworks/notice/actions/pp-alarms.bro deleted file mode 100644 index a385d8c626..0000000000 --- a/scripts/base/frameworks/notice/actions/pp-alarms.bro +++ /dev/null @@ -1,254 +0,0 @@ -##! Notice extension that mails out a pretty-printed version of alarm.log -##! in regular intervals, formatted for better human readability. If activated, -##! that replaces the default summary mail having the raw log output. - -@load base/frameworks/cluster -@load ../main - -module Notice; - -export { - ## Activate pretty-printed alarm summaries. - const pretty_print_alarms = T &redef; - - ## Address to send the pretty-printed reports to. Default if not set is - ## :bro:id:`Notice::mail_dest`. - ## - ## Note that this is overridden by the BroControl MailAlarmsTo option. - const mail_dest_pretty_printed = "" &redef; - ## If an address from one of these networks is reported, we mark - ## the entry with an additional quote symbol (i.e., ">"). Many MUAs - ## then highlight such lines differently. - global flag_nets: set[subnet] &redef; - - ## Function that renders a single alarm. Can be overridden. - global pretty_print_alarm: function(out: file, n: Info) &redef; - - ## Force generating mail file, even if reading from traces or no mail - ## destination is defined. This is mainly for testing. - global force_email_summaries = F &redef; -} - -# We maintain an old-style file recording the pretty-printed alarms. -const pp_alarms_name = "alarm-mail.txt"; -global pp_alarms: file; -global pp_alarms_open: bool = F; - -# Returns True if pretty-printed alarm summaries are activated. -function want_pp() : bool - { - if ( force_email_summaries ) - return T; - - return (pretty_print_alarms && ! reading_traces() - && (mail_dest != "" || mail_dest_pretty_printed != "")); - } - -# Opens and intializes the output file. -function pp_open() - { - if ( pp_alarms_open ) - return; - - pp_alarms_open = T; - pp_alarms = open(pp_alarms_name); - } - -# Closes and mails out the current output file. -function pp_send(rinfo: Log::RotationInfo) - { - if ( ! pp_alarms_open ) - return; - - write_file(pp_alarms, "\n\n--\n[Automatically generated]\n\n"); - close(pp_alarms); - pp_alarms_open = F; - - local from = strftime("%H:%M:%S", rinfo$open); - local to = strftime("%H:%M:%S", rinfo$close); - local subject = fmt("Alarm summary from %s-%s", from, to); - local dest = mail_dest_pretty_printed != "" ? mail_dest_pretty_printed - : mail_dest; - - if ( dest == "" ) - # No mail destination configured, just leave the file alone. This is mainly for - # testing. - return; - - local headers = email_headers(subject, dest); - - local header_name = pp_alarms_name + ".tmp"; - local header = open(header_name); - write_file(header, headers + "\n"); - close(header); - - system(fmt("/bin/cat %s %s | %s -t -oi && /bin/rm -f %s %s", - header_name, pp_alarms_name, sendmail, header_name, pp_alarms_name)); - } - -# Postprocessor function that triggers the email. -function pp_postprocessor(info: Log::RotationInfo): bool - { - if ( want_pp() ) - pp_send(info); - - return T; - } - -event bro_init() - { - if ( ! want_pp() ) - return; - - # This replaces the standard non-pretty-printing filter. 
- Log::add_filter(Notice::ALARM_LOG, - [$name="alarm-mail", $writer=Log::WRITER_NONE, - $interv=Log::default_mail_alarms_interval, - $postprocessor=pp_postprocessor]); - } - -hook notice(n: Notice::Info) &priority=-5 - { - if ( ! want_pp() ) - return; - - if ( ACTION_ALARM !in n$actions ) - return; - - if ( ! pp_alarms_open ) - pp_open(); - - pretty_print_alarm(pp_alarms, n); - } - -function do_msg(out: file, n: Info, line1: string, line2: string, line3: string, host1: addr, name1: string, host2: addr, name2: string) - { - local country = ""; -@ifdef ( Notice::ACTION_ADD_GEODATA ) # Make tests happy, cyclic dependency. - if ( n?$remote_location && n$remote_location?$country_code ) - country = fmt(" (remote location %s)", n$remote_location$country_code); -@endif - - line1 = cat(line1, country); - - local resolved = ""; - - if ( host1 != 0.0.0.0 ) - resolved = fmt("%s # %s = %s", resolved, host1, name1); - - if ( host2 != 0.0.0.0 ) - resolved = fmt("%s %s = %s", resolved, host2, name2); - - print out, line1; - print out, line2; - if ( line3 != "" ) - print out, line3; - if ( resolved != "" ) - print out, resolved; - print out, ""; - } - -# Default pretty-printer. -function pretty_print_alarm(out: file, n: Info) - { - local pdescr = ""; - -@if ( Cluster::is_enabled() ) - pdescr = "local"; - - if ( n?$peer_descr ) - pdescr = n$peer_descr; - else if ( n?$peer_name ) - pdescr = n$peer_name; - - pdescr = fmt("<%s> ", pdescr); -@endif - - local msg = fmt( "%s%s", pdescr, n$msg); - - local who = ""; - local h1 = 0.0.0.0; - local h2 = 0.0.0.0; - - local orig_p = ""; - local resp_p = ""; - - if ( n?$id ) - { - h1 = n$id$orig_h; - h2 = n$id$resp_h; - who = fmt("%s:%s -> %s:%s", h1, n$id$orig_p, h2, n$id$resp_p); - } - else if ( n?$src && n?$dst ) - { - h1 = n$src; - h2 = n$dst; - who = fmt("%s -> %s", h1, h2); - } - else if ( n?$src ) - { - h1 = n$src; - who = fmt("%s%s", h1, (n?$p ? fmt(":%s", n$p) : "")); - } - - if ( n?$uid ) - who = fmt("%s (uid %s)", who, n$uid ); - - local flag = (h1 in flag_nets || h2 in flag_nets); - - local line1 = fmt(">%s %D %s %s", (flag ? ">" : " "), network_time(), n$note, who); - local line2 = fmt(" %s", msg); - local line3 = n?$sub ? fmt(" %s", n$sub) : ""; - - if ( h1 == 0.0.0.0 ) - { - do_msg(out, n, line1, line2, line3, h1, "", h2, ""); - return; - } - - if ( reading_traces() ) - { - do_msg(out, n, line1, line2, line3, h1, "", h2, ""); - return; - } - - when ( local h1name = lookup_addr(h1) ) - { - if ( h2 == 0.0.0.0 ) - { - do_msg(out, n, line1, line2, line3, h1, h1name, h2, ""); - return; - } - - when ( local h2name = lookup_addr(h2) ) - { - do_msg(out, n, line1, line2, line3, h1, h1name, h2, h2name); - return; - } - timeout 5secs - { - do_msg(out, n, line1, line2, line3, h1, h1name, h2, "(dns timeout)"); - return; - } - } - - timeout 5secs - { - if ( h2 == 0.0.0.0 ) - { - do_msg(out, n, line1, line2, line3, h1, "(dns timeout)", h2, ""); - return; - } - - when ( local h2name_ = lookup_addr(h2) ) - { - do_msg(out, n, line1, line2, line3, h1, "(dns timeout)", h2, h2name_); - return; - } - timeout 5secs - { - do_msg(out, n, line1, line2, line3, h1, "(dns timeout)", h2, "(dns timeout)"); - return; - } - } - } diff --git a/scripts/base/frameworks/notice/actions/pp-alarms.zeek b/scripts/base/frameworks/notice/actions/pp-alarms.zeek new file mode 100644 index 0000000000..ddfc45af6e --- /dev/null +++ b/scripts/base/frameworks/notice/actions/pp-alarms.zeek @@ -0,0 +1,254 @@ +##! Notice extension that mails out a pretty-printed version of alarm.log +##! 
in regular intervals, formatted for better human readability. If activated, +##! that replaces the default summary mail having the raw log output. + +@load base/frameworks/cluster +@load ../main + +module Notice; + +export { + ## Activate pretty-printed alarm summaries. + const pretty_print_alarms = T &redef; + + ## Address to send the pretty-printed reports to. Default if not set is + ## :zeek:id:`Notice::mail_dest`. + ## + ## Note that this is overridden by the ZeekControl MailAlarmsTo option. + const mail_dest_pretty_printed = "" &redef; + ## If an address from one of these networks is reported, we mark + ## the entry with an additional quote symbol (i.e., ">"). Many MUAs + ## then highlight such lines differently. + global flag_nets: set[subnet] &redef; + + ## Function that renders a single alarm. Can be overridden. + global pretty_print_alarm: function(out: file, n: Info) &redef; + + ## Force generating mail file, even if reading from traces or no mail + ## destination is defined. This is mainly for testing. + global force_email_summaries = F &redef; +} + +# We maintain an old-style file recording the pretty-printed alarms. +const pp_alarms_name = "alarm-mail.txt"; +global pp_alarms: file; +global pp_alarms_open: bool = F; + +# Returns True if pretty-printed alarm summaries are activated. +function want_pp() : bool + { + if ( force_email_summaries ) + return T; + + return (pretty_print_alarms && ! reading_traces() + && (mail_dest != "" || mail_dest_pretty_printed != "")); + } + +# Opens and intializes the output file. +function pp_open() + { + if ( pp_alarms_open ) + return; + + pp_alarms_open = T; + pp_alarms = open(pp_alarms_name); + } + +# Closes and mails out the current output file. +function pp_send(rinfo: Log::RotationInfo) + { + if ( ! pp_alarms_open ) + return; + + write_file(pp_alarms, "\n\n--\n[Automatically generated]\n\n"); + close(pp_alarms); + pp_alarms_open = F; + + local from = strftime("%H:%M:%S", rinfo$open); + local to = strftime("%H:%M:%S", rinfo$close); + local subject = fmt("Alarm summary from %s-%s", from, to); + local dest = mail_dest_pretty_printed != "" ? mail_dest_pretty_printed + : mail_dest; + + if ( dest == "" ) + # No mail destination configured, just leave the file alone. This is mainly for + # testing. + return; + + local headers = email_headers(subject, dest); + + local header_name = pp_alarms_name + ".tmp"; + local header = open(header_name); + write_file(header, headers + "\n"); + close(header); + + system(fmt("/bin/cat %s %s | %s -t -oi && /bin/rm -f %s %s", + header_name, pp_alarms_name, sendmail, header_name, pp_alarms_name)); + } + +# Postprocessor function that triggers the email. +function pp_postprocessor(info: Log::RotationInfo): bool + { + if ( want_pp() ) + pp_send(info); + + return T; + } + +event zeek_init() + { + if ( ! want_pp() ) + return; + + # This replaces the standard non-pretty-printing filter. + Log::add_filter(Notice::ALARM_LOG, + [$name="alarm-mail", $writer=Log::WRITER_NONE, + $interv=Log::default_mail_alarms_interval, + $postprocessor=pp_postprocessor]); + } + +hook notice(n: Notice::Info) &priority=-5 + { + if ( ! want_pp() ) + return; + + if ( ACTION_ALARM !in n$actions ) + return; + + if ( ! pp_alarms_open ) + pp_open(); + + pretty_print_alarm(pp_alarms, n); + } + +function do_msg(out: file, n: Info, line1: string, line2: string, line3: string, host1: addr, name1: string, host2: addr, name2: string) + { + local country = ""; +@ifdef ( Notice::ACTION_ADD_GEODATA ) # Make tests happy, cyclic dependency. 
+ if ( n?$remote_location && n$remote_location?$country_code ) + country = fmt(" (remote location %s)", n$remote_location$country_code); +@endif + + line1 = cat(line1, country); + + local resolved = ""; + + if ( host1 != 0.0.0.0 ) + resolved = fmt("%s # %s = %s", resolved, host1, name1); + + if ( host2 != 0.0.0.0 ) + resolved = fmt("%s %s = %s", resolved, host2, name2); + + print out, line1; + print out, line2; + if ( line3 != "" ) + print out, line3; + if ( resolved != "" ) + print out, resolved; + print out, ""; + } + +# Default pretty-printer. +function pretty_print_alarm(out: file, n: Info) + { + local pdescr = ""; + +@if ( Cluster::is_enabled() ) + pdescr = "local"; + + if ( n?$peer_descr ) + pdescr = n$peer_descr; + else if ( n?$peer_name ) + pdescr = n$peer_name; + + pdescr = fmt("<%s> ", pdescr); +@endif + + local msg = fmt( "%s%s", pdescr, n$msg); + + local who = ""; + local h1 = 0.0.0.0; + local h2 = 0.0.0.0; + + local orig_p = ""; + local resp_p = ""; + + if ( n?$id ) + { + h1 = n$id$orig_h; + h2 = n$id$resp_h; + who = fmt("%s:%s -> %s:%s", h1, n$id$orig_p, h2, n$id$resp_p); + } + else if ( n?$src && n?$dst ) + { + h1 = n$src; + h2 = n$dst; + who = fmt("%s -> %s", h1, h2); + } + else if ( n?$src ) + { + h1 = n$src; + who = fmt("%s%s", h1, (n?$p ? fmt(":%s", n$p) : "")); + } + + if ( n?$uid ) + who = fmt("%s (uid %s)", who, n$uid ); + + local flag = (h1 in flag_nets || h2 in flag_nets); + + local line1 = fmt(">%s %D %s %s", (flag ? ">" : " "), network_time(), n$note, who); + local line2 = fmt(" %s", msg); + local line3 = n?$sub ? fmt(" %s", n$sub) : ""; + + if ( h1 == 0.0.0.0 ) + { + do_msg(out, n, line1, line2, line3, h1, "", h2, ""); + return; + } + + if ( reading_traces() ) + { + do_msg(out, n, line1, line2, line3, h1, "", h2, ""); + return; + } + + when ( local h1name = lookup_addr(h1) ) + { + if ( h2 == 0.0.0.0 ) + { + do_msg(out, n, line1, line2, line3, h1, h1name, h2, ""); + return; + } + + when ( local h2name = lookup_addr(h2) ) + { + do_msg(out, n, line1, line2, line3, h1, h1name, h2, h2name); + return; + } + timeout 5secs + { + do_msg(out, n, line1, line2, line3, h1, h1name, h2, "(dns timeout)"); + return; + } + } + + timeout 5secs + { + if ( h2 == 0.0.0.0 ) + { + do_msg(out, n, line1, line2, line3, h1, "(dns timeout)", h2, ""); + return; + } + + when ( local h2name_ = lookup_addr(h2) ) + { + do_msg(out, n, line1, line2, line3, h1, "(dns timeout)", h2, h2name_); + return; + } + timeout 5secs + { + do_msg(out, n, line1, line2, line3, h1, "(dns timeout)", h2, "(dns timeout)"); + return; + } + } + } diff --git a/scripts/base/frameworks/notice/main.bro b/scripts/base/frameworks/notice/main.bro deleted file mode 100644 index 881e5d7467..0000000000 --- a/scripts/base/frameworks/notice/main.bro +++ /dev/null @@ -1,679 +0,0 @@ -##! This is the notice framework which enables Bro to "notice" things which -##! are odd or potentially bad. Decisions of the meaning of various notices -##! need to be done per site because Bro does not ship with assumptions about -##! what is bad activity for sites. More extensive documentation about using -##! the notice framework can be found in :doc:`/frameworks/notice`. - -@load base/frameworks/cluster - -module Notice; - -export { - redef enum Log::ID += { - ## This is the primary logging stream for notices. - LOG, - ## This is the alarm stream. - ALARM_LOG, - }; - - ## Scripts creating new notices need to redef this enum to add their - ## own specific notice types which would then get used when they call - ## the :bro:id:`NOTICE` function. 
The convention is to give a general - ## category along with the specific notice separating words with - ## underscores and using leading capitals on each word except for - ## abbreviations which are kept in all capitals. For example, - ## SSH::Password_Guessing is for hosts that have crossed a threshold of - ## failed SSH logins. - type Type: enum { - ## Notice reporting a count of how often a notice occurred. - Tally, - }; - - ## These are values representing actions that can be taken with notices. - type Action: enum { - ## Indicates that there is no action to be taken. - ACTION_NONE, - ## Indicates that the notice should be sent to the notice - ## logging stream. - ACTION_LOG, - ## Indicates that the notice should be sent to the email - ## address(es) configured in the :bro:id:`Notice::mail_dest` - ## variable. - ACTION_EMAIL, - ## Indicates that the notice should be alarmed. A readable - ## ASCII version of the alarm log is emailed in bulk to the - ## address(es) configured in :bro:id:`Notice::mail_dest`. - ACTION_ALARM, - }; - - ## Type that represents a set of actions. - type ActionSet: set[Notice::Action]; - - ## The notice framework is able to do automatic notice suppression by - ## utilizing the *identifier* field in :bro:type:`Notice::Info` records. - ## Set this to "0secs" to completely disable automated notice - ## suppression. - option default_suppression_interval = 1hrs; - - ## The record type that is used for representing and logging notices. - type Info: record { - ## An absolute time indicating when the notice occurred, - ## defaults to the current network time. - ts: time &log &optional; - - ## A connection UID which uniquely identifies the endpoints - ## concerned with the notice. - uid: string &log &optional; - - ## A connection 4-tuple identifying the endpoints concerned - ## with the notice. - id: conn_id &log &optional; - - ## A shorthand way of giving the uid and id to a notice. The - ## reference to the actual connection will be deleted after - ## applying the notice policy. - conn: connection &optional; - ## A shorthand way of giving the uid and id to a notice. The - ## reference to the actual connection will be deleted after - ## applying the notice policy. - iconn: icmp_conn &optional; - - ## A file record if the notice is related to a file. The - ## reference to the actual fa_file record will be deleted after - ## applying the notice policy. - f: fa_file &optional; - - ## A file unique ID if this notice is related to a file. If - ## the *f* field is provided, this will be automatically filled - ## out. - fuid: string &log &optional; - - ## A mime type if the notice is related to a file. If the *f* - ## field is provided, this will be automatically filled out. - file_mime_type: string &log &optional; - - ## Frequently files can be "described" to give a bit more - ## context. This field will typically be automatically filled - ## out from an fa_file record. For example, if a notice was - ## related to a file over HTTP, the URL of the request would - ## be shown. - file_desc: string &log &optional; - - ## The transport protocol. Filled automatically when either - ## *conn*, *iconn* or *p* is specified. - proto: transport_proto &log &optional; - - ## The :bro:type:`Notice::Type` of the notice. - note: Type &log; - ## The human readable message for the notice. - msg: string &log &optional; - ## The human readable sub-message. - sub: string &log &optional; - - ## Source address, if we don't have a :bro:type:`conn_id`. 
- src: addr &log &optional; - ## Destination address. - dst: addr &log &optional; - ## Associated port, if we don't have a :bro:type:`conn_id`. - p: port &log &optional; - ## Associated count, or perhaps a status code. - n: count &log &optional; - - ## Name of remote peer that raised this notice. - peer_name: string &optional; - ## Textual description for the peer that raised this notice, - ## including name, host address and port. - peer_descr: string &log &optional; - - ## The actions which have been applied to this notice. - actions: ActionSet &log &default=ActionSet(); - - ## By adding chunks of text into this element, other scripts - ## can expand on notices that are being emailed. The normal - ## way to add text is to extend the vector by handling the - ## :bro:id:`Notice::notice` event and modifying the notice in - ## place. - email_body_sections: vector of string &optional; - - ## Adding a string "token" to this set will cause the notice - ## framework's built-in emailing functionality to delay sending - ## the email until either the token has been removed or the - ## email has been delayed for :bro:id:`Notice::max_email_delay`. - email_delay_tokens: set[string] &optional; - - ## This field is to be provided when a notice is generated for - ## the purpose of deduplicating notices. The identifier string - ## should be unique for a single instance of the notice. This - ## field should be filled out in almost all cases when - ## generating notices to define when a notice is conceptually - ## a duplicate of a previous notice. - ## - ## For example, an SSL certificate that is going to expire soon - ## should always have the same identifier no matter the client - ## IP address that connected and resulted in the certificate - ## being exposed. In this case, the resp_h, resp_p, and hash - ## of the certificate would be used to create this value. The - ## hash of the cert is included because servers can return - ## multiple certificates on the same port. - ## - ## Another example might be a host downloading a file which - ## triggered a notice because the MD5 sum of the file it - ## downloaded was known by some set of intelligence. In that - ## case, the orig_h (client) and MD5 sum would be used in this - ## field to dedup because if the same file is downloaded over - ## and over again you really only want to know about it a - ## single time. This makes it possible to send those notices - ## to email without worrying so much about sending thousands - ## of emails. - identifier: string &optional; - - ## This field indicates the length of time that this - ## unique notice should be suppressed. - suppress_for: interval &log &default=default_suppression_interval; - }; - - ## Ignored notice types. - option ignored_types: set[Notice::Type] = {}; - ## Emailed notice types. - option emailed_types: set[Notice::Type] = {}; - ## Alarmed notice types. - option alarmed_types: set[Notice::Type] = {}; - ## Types that should be suppressed for the default suppression interval. - option not_suppressed_types: set[Notice::Type] = {}; - ## This table can be used as a shorthand way to modify suppression - ## intervals for entire notice types. - const type_suppression_intervals: table[Notice::Type] of interval = {} &redef; - - ## The hook to modify notice handling. - global policy: hook(n: Notice::Info); - - ## Local system sendmail program. - ## - ## Note that this is overridden by the BroControl SendMail option. 
- option sendmail = "/usr/sbin/sendmail"; - ## Email address to send notices with the - ## :bro:enum:`Notice::ACTION_EMAIL` action or to send bulk alarm logs - ## on rotation with :bro:enum:`Notice::ACTION_ALARM`. - ## - ## Note that this is overridden by the BroControl MailTo option. - const mail_dest = "" &redef; - - ## Address that emails will be from. - ## - ## Note that this is overridden by the BroControl MailFrom option. - option mail_from = "Big Brother "; - ## Reply-to address used in outbound email. - option reply_to = ""; - ## Text string prefixed to the subject of all emails sent out. - ## - ## Note that this is overridden by the BroControl MailSubjectPrefix - ## option. - option mail_subject_prefix = "[Bro]"; - ## The maximum amount of time a plugin can delay email from being sent. - const max_email_delay = 15secs &redef; - - ## Contains a portion of :bro:see:`fa_file` that's also contained in - ## :bro:see:`Notice::Info`. - type FileInfo: record { - fuid: string; ##< File UID. - desc: string; ##< File description from e.g. - ##< :bro:see:`Files::describe`. - mime: string &optional; ##< Strongest mime type match for file. - cid: conn_id &optional; ##< Connection tuple over which file is sent. - cuid: string &optional; ##< Connection UID over which file is sent. - }; - - ## Creates a record containing a subset of a full :bro:see:`fa_file` record. - ## - ## f: record containing metadata about a file. - ## - ## Returns: record containing a subset of fields copied from *f*. - global create_file_info: function(f: fa_file): Notice::FileInfo; - - ## Populates file-related fields in a notice info record. - ## - ## f: record containing metadata about a file. - ## - ## n: a notice record that needs file-related fields populated. - global populate_file_info: function(f: fa_file, n: Notice::Info); - - ## Populates file-related fields in a notice info record. - ## - ## fi: record containing metadata about a file. - ## - ## n: a notice record that needs file-related fields populated. - global populate_file_info2: function(fi: Notice::FileInfo, n: Notice::Info); - - ## A log postprocessing function that implements emailing the contents - ## of a log upon rotation to any configured :bro:id:`Notice::mail_dest`. - ## The rotated log is removed upon being sent. - ## - ## info: A record containing the rotated log file information. - ## - ## Returns: True. - global log_mailing_postprocessor: function(info: Log::RotationInfo): bool; - - ## This is the event that is called as the entry point to the - ## notice framework by the global :bro:id:`NOTICE` function. By the - ## time this event is generated, default values have already been - ## filled out in the :bro:type:`Notice::Info` record and the notice - ## policy has also been applied. - ## - ## n: The record containing notice data. - global notice: hook(n: Info); - - ## This event is generated when a notice begins to be suppressed. - ## - ## ts: time indicating then when the notice to be suppressed occured. - ## - ## suppress_for: length of time that this notice should be suppressed. - ## - ## note: The :bro:type:`Notice::Type` of the notice. - ## - ## identifier: The identifier string of the notice that should be suppressed. - global begin_suppression: event(ts: time, suppress_for: interval, note: Type, identifier: string); - - ## A function to determine if an event is supposed to be suppressed. - ## - ## n: The record containing the notice in question. 
- global is_being_suppressed: function(n: Notice::Info): bool; - - ## This event is generated on each occurrence of an event being - ## suppressed. - ## - ## n: The record containing notice data regarding the notice type - ## being suppressed. - global suppressed: event(n: Notice::Info); - - ## Call this function to send a notice in an email. It is already used - ## by default with the built in :bro:enum:`Notice::ACTION_EMAIL` and - ## :bro:enum:`Notice::ACTION_PAGE` actions. - ## - ## n: The record of notice data to email. - ## - ## dest: The intended recipient of the notice email. - ## - ## extend: Whether to extend the email using the - ## ``email_body_sections`` field of *n*. - global email_notice_to: function(n: Info, dest: string, extend: bool); - - ## Constructs mail headers to which an email body can be appended for - ## sending with sendmail. - ## - ## subject_desc: a subject string to use for the mail. - ## - ## dest: recipient string to use for the mail. - ## - ## Returns: a string of mail headers to which an email body can be - ## appended. - global email_headers: function(subject_desc: string, dest: string): string; - - ## This event can be handled to access the :bro:type:`Notice::Info` - ## record as it is sent on to the logging framework. - ## - ## rec: The record containing notice data before it is logged. - global log_notice: event(rec: Info); - - ## This is an internal wrapper for the global :bro:id:`NOTICE` - ## function; disregard. - ## - ## n: The record of notice data. - global internal_NOTICE: function(n: Notice::Info); - - ## This is the event used to transport notices on the cluster. - ## - ## n: The notice information to be sent to the cluster manager for - ## further processing. - global cluster_notice: event(n: Notice::Info); -} - -module GLOBAL; - -function NOTICE(n: Notice::Info) - { - if ( Notice::is_being_suppressed(n) ) - return; - - @if ( Cluster::is_enabled() ) - if ( Cluster::local_node_type() == Cluster::MANAGER ) - Notice::internal_NOTICE(n); - else - { - n$peer_name = n$peer_descr = Cluster::node; - Broker::publish(Cluster::manager_topic, Notice::cluster_notice, n); - } - @else - Notice::internal_NOTICE(n); - @endif - } - -module Notice; - -# This is used as a hack to implement per-item expiration intervals. -function per_notice_suppression_interval(t: table[Notice::Type, string] of time, idx: any): interval - { - local n: Notice::Type; - local s: string; - [n,s] = idx; - - local suppress_time = t[n,s] - network_time(); - if ( suppress_time < 0secs ) - suppress_time = 0secs; - - return suppress_time; - } - -# This is the internally maintained notice suppression table. It's -# indexed on the Notice::Type and the $identifier field from the notice. -global suppressing: table[Type, string] of time = {} - &create_expire=0secs - &expire_func=per_notice_suppression_interval; - -function log_mailing_postprocessor(info: Log::RotationInfo): bool - { - if ( ! 
reading_traces() && mail_dest != "" ) - { - local headers = email_headers(fmt("Log Contents: %s", info$fname), - mail_dest); - local tmpfilename = fmt("%s.mailheaders.tmp", info$fname); - local tmpfile = open(tmpfilename); - write_file(tmpfile, headers); - close(tmpfile); - system(fmt("/bin/cat %s %s | %s -t -oi && /bin/rm %s %s", - tmpfilename, info$fname, sendmail, tmpfilename, info$fname)); - } - return T; - } - -event bro_init() &priority=5 - { - Log::create_stream(Notice::LOG, [$columns=Info, $ev=log_notice, $path="notice"]); - - Log::create_stream(Notice::ALARM_LOG, [$columns=Notice::Info, $path="notice_alarm"]); - # If Bro is configured for mailing notices, set up mailing for alarms. - # Make sure that this alarm log is also output as text so that it can - # be packaged up and emailed later. - if ( ! reading_traces() && mail_dest != "" ) - Log::add_filter(Notice::ALARM_LOG, - [$name="alarm-mail", $path="alarm-mail", $writer=Log::WRITER_ASCII, - $interv=24hrs, $postprocessor=log_mailing_postprocessor]); - } - -function email_headers(subject_desc: string, dest: string): string - { - local header_text = string_cat( - "From: ", mail_from, "\n", - "Subject: ", mail_subject_prefix, " ", subject_desc, "\n", - "To: ", dest, "\n", - "User-Agent: Bro-IDS/", bro_version(), "\n"); - if ( reply_to != "" ) - header_text = string_cat(header_text, "Reply-To: ", reply_to, "\n"); - return header_text; - } - -event delay_sending_email(n: Notice::Info, dest: string, extend: bool) - { - email_notice_to(n, dest, extend); - } - -function email_notice_to(n: Notice::Info, dest: string, extend: bool) - { - if ( reading_traces() || dest == "" ) - return; - - if ( extend ) - { - if ( |n$email_delay_tokens| > 0 ) - { - # If we still are within the max_email_delay, keep delaying. - if ( n$ts + max_email_delay > network_time() ) - { - schedule 1sec { delay_sending_email(n, dest, extend) }; - return; - } - else - { - Reporter::info(fmt("Notice email delay tokens weren't released in time (%s).", n$email_delay_tokens)); - } - } - } - - local email_text = email_headers(fmt("%s", n$note), dest); - - # First off, finish the headers and include the human readable messages - # then leave a blank line after the message. - email_text = string_cat(email_text, "\nMessage: ", n$msg, "\n"); - - if ( n?$sub ) - email_text = string_cat(email_text, "Sub-message: ", n$sub, "\n"); - - email_text = string_cat(email_text, "\n"); - - # Add information about the file if it exists. - if ( n?$file_desc ) - email_text = string_cat(email_text, "File Description: ", n$file_desc, "\n"); - - if ( n?$file_mime_type ) - email_text = string_cat(email_text, "File MIME Type: ", n$file_mime_type, "\n"); - - if ( n?$file_desc || n?$file_mime_type ) - email_text = string_cat(email_text, "\n"); - - # Next, add information about the connection if it exists. - if ( n?$id ) - { - email_text = string_cat(email_text, "Connection: ", - fmt("%s", n$id$orig_h), ":", fmt("%d", n$id$orig_p), " -> ", - fmt("%s", n$id$resp_h), ":", fmt("%d", n$id$resp_p), "\n"); - if ( n?$uid ) - email_text = string_cat(email_text, "Connection uid: ", n$uid, "\n"); - } - else if ( n?$src ) - email_text = string_cat(email_text, "Address: ", fmt("%s", n$src), "\n"); - - # Add the extended information if it's requested. 
- if ( extend ) - { - email_text = string_cat(email_text, "\nEmail Extensions\n"); - email_text = string_cat(email_text, "----------------\n"); - for ( i in n$email_body_sections ) - { - email_text = string_cat(email_text, n$email_body_sections[i], "\n"); - } - } - - email_text = string_cat(email_text, "\n\n--\n[Automatically generated]\n\n"); - piped_exec(fmt("%s -t -oi", sendmail), email_text); - } - -hook Notice::policy(n: Notice::Info) &priority=10 - { - if ( n$note in Notice::ignored_types ) - break; - - if ( n$note in Notice::not_suppressed_types ) - n$suppress_for=0secs; - if ( n$note in Notice::alarmed_types ) - add n$actions[ACTION_ALARM]; - if ( n$note in Notice::emailed_types ) - add n$actions[ACTION_EMAIL]; - - if ( n$note in Notice::type_suppression_intervals ) - n$suppress_for=Notice::type_suppression_intervals[n$note]; - - # Logging is a default action. It can be removed in a later hook if desired. - add n$actions[ACTION_LOG]; - } - -hook Notice::notice(n: Notice::Info) &priority=-5 - { - if ( ACTION_EMAIL in n$actions ) - email_notice_to(n, mail_dest, T); - if ( ACTION_LOG in n$actions ) - Log::write(Notice::LOG, n); - if ( ACTION_ALARM in n$actions ) - Log::write(Notice::ALARM_LOG, n); - - # Normally suppress further notices like this one unless directed not to. - # n$identifier *must* be specified for suppression to function at all. - if ( n?$identifier && - [n$note, n$identifier] !in suppressing && - n$suppress_for != 0secs ) - { - event Notice::begin_suppression(n$ts, n$suppress_for, n$note, n$identifier); - } - } - -event Notice::begin_suppression(ts: time, suppress_for: interval, note: Type, - identifier: string) - { - local suppress_until = ts + suppress_for; - suppressing[note, identifier] = suppress_until; - } - -event bro_init() - { - if ( ! Cluster::is_enabled() ) - return; - - Broker::auto_publish(Cluster::worker_topic, Notice::begin_suppression); - Broker::auto_publish(Cluster::proxy_topic, Notice::begin_suppression); - } - -function is_being_suppressed(n: Notice::Info): bool - { - if ( n?$identifier && [n$note, n$identifier] in suppressing ) - { - event Notice::suppressed(n); - return T; - } - else - return F; - } - -# Executes a script with all of the notice fields put into the -# new process' environment as "BRO_ARG_" variables. -function execute_with_notice(cmd: string, n: Notice::Info) - { - # TODO: fix system calls - #local tgs = tags(n); - #system_env(cmd, tags); - } - -function create_file_info(f: fa_file): Notice::FileInfo - { - local fi: Notice::FileInfo = Notice::FileInfo($fuid = f$id, - $desc = Files::describe(f)); - - if ( f?$info && f$info?$mime_type ) - fi$mime = f$info$mime_type; - - if ( f?$conns && |f$conns| == 1 ) - for ( id, c in f$conns ) - { - fi$cid = id; - fi$cuid = c$uid; - } - - return fi; - } - -function populate_file_info(f: fa_file, n: Notice::Info) - { - populate_file_info2(create_file_info(f), n); - } - -function populate_file_info2(fi: Notice::FileInfo, n: Notice::Info) - { - if ( ! n?$fuid ) - n$fuid = fi$fuid; - - if ( ! n?$file_mime_type && fi?$mime ) - n$file_mime_type = fi$mime; - - n$file_desc = fi$desc; - n$id = fi$cid; - n$uid = fi$cuid; - } - -# This is run synchronously as a function before all of the other -# notice related functions and events. It also modifies the -# :bro:type:`Notice::Info` record in place. -function apply_policy(n: Notice::Info) - { - # Fill in some defaults. - if ( ! n?$ts ) - n$ts = network_time(); - - if ( n?$f ) - populate_file_info(n$f, n); - - if ( n?$conn ) - { - if ( ! 
n?$id ) - n$id = n$conn$id; - - if ( ! n?$uid ) - n$uid = n$conn$uid; - } - - if ( n?$id ) - { - if ( ! n?$src ) - n$src = n$id$orig_h; - if ( ! n?$dst ) - n$dst = n$id$resp_h; - if ( ! n?$p ) - n$p = n$id$resp_p; - } - - if ( n?$p ) - n$proto = get_port_transport_proto(n$p); - - if ( n?$iconn ) - { - n$proto = icmp; - if ( ! n?$src ) - n$src = n$iconn$orig_h; - if ( ! n?$dst ) - n$dst = n$iconn$resp_h; - } - - if ( ! n?$email_body_sections ) - n$email_body_sections = vector(); - if ( ! n?$email_delay_tokens ) - n$email_delay_tokens = set(); - - # Apply the hook based policy. - hook Notice::policy(n); - - # Apply the suppression time after applying the policy so that policy - # items can give custom suppression intervals. If there is no - # suppression interval given yet, the default is applied. - if ( ! n?$suppress_for ) - n$suppress_for = default_suppression_interval; - - # Delete the connection and file records if they're there so we - # aren't sending that to remote machines. It can cause problems - # due to the size of those records. - if ( n?$conn ) - delete n$conn; - if ( n?$iconn ) - delete n$iconn; - if ( n?$f ) - delete n$f; - } - -function internal_NOTICE(n: Notice::Info) - { - # Fill out fields that might be empty and do the policy processing. - apply_policy(n); - - # Generate the notice event with the notice. - hook Notice::notice(n); - } - -event Notice::cluster_notice(n: Notice::Info) - { - NOTICE(n); - } diff --git a/scripts/base/frameworks/notice/main.zeek b/scripts/base/frameworks/notice/main.zeek new file mode 100644 index 0000000000..ab4288de1a --- /dev/null +++ b/scripts/base/frameworks/notice/main.zeek @@ -0,0 +1,679 @@ +##! This is the notice framework which enables Zeek to "notice" things which +##! are odd or potentially bad. Decisions of the meaning of various notices +##! need to be done per site because Zeek does not ship with assumptions about +##! what is bad activity for sites. More extensive documentation about using +##! the notice framework can be found in :doc:`/frameworks/notice`. + +@load base/frameworks/cluster + +module Notice; + +export { + redef enum Log::ID += { + ## This is the primary logging stream for notices. + LOG, + ## This is the alarm stream. + ALARM_LOG, + }; + + ## Scripts creating new notices need to redef this enum to add their + ## own specific notice types which would then get used when they call + ## the :zeek:id:`NOTICE` function. The convention is to give a general + ## category along with the specific notice separating words with + ## underscores and using leading capitals on each word except for + ## abbreviations which are kept in all capitals. For example, + ## SSH::Password_Guessing is for hosts that have crossed a threshold of + ## failed SSH logins. + type Type: enum { + ## Notice reporting a count of how often a notice occurred. + Tally, + }; + + ## These are values representing actions that can be taken with notices. + type Action: enum { + ## Indicates that there is no action to be taken. + ACTION_NONE, + ## Indicates that the notice should be sent to the notice + ## logging stream. + ACTION_LOG, + ## Indicates that the notice should be sent to the email + ## address(es) configured in the :zeek:id:`Notice::mail_dest` + ## variable. + ACTION_EMAIL, + ## Indicates that the notice should be alarmed. A readable + ## ASCII version of the alarm log is emailed in bulk to the + ## address(es) configured in :zeek:id:`Notice::mail_dest`. + ACTION_ALARM, + }; + + ## Type that represents a set of actions. 
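
The naming convention described above is easiest to see in a short sketch of a site-local script; the module name, notice type, port and message below are illustrative only:

module Site;

export {
	redef enum Notice::Type += {
		## Example type following the naming convention above.
		Suspicious_Port_Connection,
	};
}

event connection_established(c: connection)
	{
	if ( c$id$resp_p == 4444/tcp )
		NOTICE([$note=Suspicious_Port_Connection,
		        $msg="connection to an unusual port",
		        $conn=c,
		        $identifier=cat(c$id$orig_h, c$id$resp_h)]);
	}
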
+ type ActionSet: set[Notice::Action]; + + ## The notice framework is able to do automatic notice suppression by + ## utilizing the *identifier* field in :zeek:type:`Notice::Info` records. + ## Set this to "0secs" to completely disable automated notice + ## suppression. + option default_suppression_interval = 1hrs; + + ## The record type that is used for representing and logging notices. + type Info: record { + ## An absolute time indicating when the notice occurred, + ## defaults to the current network time. + ts: time &log &optional; + + ## A connection UID which uniquely identifies the endpoints + ## concerned with the notice. + uid: string &log &optional; + + ## A connection 4-tuple identifying the endpoints concerned + ## with the notice. + id: conn_id &log &optional; + + ## A shorthand way of giving the uid and id to a notice. The + ## reference to the actual connection will be deleted after + ## applying the notice policy. + conn: connection &optional; + ## A shorthand way of giving the uid and id to a notice. The + ## reference to the actual connection will be deleted after + ## applying the notice policy. + iconn: icmp_conn &optional; + + ## A file record if the notice is related to a file. The + ## reference to the actual fa_file record will be deleted after + ## applying the notice policy. + f: fa_file &optional; + + ## A file unique ID if this notice is related to a file. If + ## the *f* field is provided, this will be automatically filled + ## out. + fuid: string &log &optional; + + ## A mime type if the notice is related to a file. If the *f* + ## field is provided, this will be automatically filled out. + file_mime_type: string &log &optional; + + ## Frequently files can be "described" to give a bit more + ## context. This field will typically be automatically filled + ## out from an fa_file record. For example, if a notice was + ## related to a file over HTTP, the URL of the request would + ## be shown. + file_desc: string &log &optional; + + ## The transport protocol. Filled automatically when either + ## *conn*, *iconn* or *p* is specified. + proto: transport_proto &log &optional; + + ## The :zeek:type:`Notice::Type` of the notice. + note: Type &log; + ## The human readable message for the notice. + msg: string &log &optional; + ## The human readable sub-message. + sub: string &log &optional; + + ## Source address, if we don't have a :zeek:type:`conn_id`. + src: addr &log &optional; + ## Destination address. + dst: addr &log &optional; + ## Associated port, if we don't have a :zeek:type:`conn_id`. + p: port &log &optional; + ## Associated count, or perhaps a status code. + n: count &log &optional; + + ## Name of remote peer that raised this notice. + peer_name: string &optional; + ## Textual description for the peer that raised this notice, + ## including name, host address and port. + peer_descr: string &log &optional; + + ## The actions which have been applied to this notice. + actions: ActionSet &log &default=ActionSet(); + + ## By adding chunks of text into this element, other scripts + ## can expand on notices that are being emailed. The normal + ## way to add text is to extend the vector by handling the + ## :zeek:id:`Notice::notice` event and modifying the notice in + ## place. 
+ email_body_sections: vector of string &optional; + + ## Adding a string "token" to this set will cause the notice + ## framework's built-in emailing functionality to delay sending + ## the email until either the token has been removed or the + ## email has been delayed for :zeek:id:`Notice::max_email_delay`. + email_delay_tokens: set[string] &optional; + + ## This field is to be provided when a notice is generated for + ## the purpose of deduplicating notices. The identifier string + ## should be unique for a single instance of the notice. This + ## field should be filled out in almost all cases when + ## generating notices to define when a notice is conceptually + ## a duplicate of a previous notice. + ## + ## For example, an SSL certificate that is going to expire soon + ## should always have the same identifier no matter the client + ## IP address that connected and resulted in the certificate + ## being exposed. In this case, the resp_h, resp_p, and hash + ## of the certificate would be used to create this value. The + ## hash of the cert is included because servers can return + ## multiple certificates on the same port. + ## + ## Another example might be a host downloading a file which + ## triggered a notice because the MD5 sum of the file it + ## downloaded was known by some set of intelligence. In that + ## case, the orig_h (client) and MD5 sum would be used in this + ## field to dedup because if the same file is downloaded over + ## and over again you really only want to know about it a + ## single time. This makes it possible to send those notices + ## to email without worrying so much about sending thousands + ## of emails. + identifier: string &optional; + + ## This field indicates the length of time that this + ## unique notice should be suppressed. + suppress_for: interval &log &default=default_suppression_interval; + }; + + ## Ignored notice types. + option ignored_types: set[Notice::Type] = {}; + ## Emailed notice types. + option emailed_types: set[Notice::Type] = {}; + ## Alarmed notice types. + option alarmed_types: set[Notice::Type] = {}; + ## Types that should be suppressed for the default suppression interval. + option not_suppressed_types: set[Notice::Type] = {}; + ## This table can be used as a shorthand way to modify suppression + ## intervals for entire notice types. + const type_suppression_intervals: table[Notice::Type] of interval = {} &redef; + + ## The hook to modify notice handling. + global policy: hook(n: Notice::Info); + + ## Local system sendmail program. + ## + ## Note that this is overridden by the ZeekControl SendMail option. + option sendmail = "/usr/sbin/sendmail"; + ## Email address to send notices with the + ## :zeek:enum:`Notice::ACTION_EMAIL` action or to send bulk alarm logs + ## on rotation with :zeek:enum:`Notice::ACTION_ALARM`. + ## + ## Note that this is overridden by the ZeekControl MailTo option. + const mail_dest = "" &redef; + + ## Address that emails will be from. + ## + ## Note that this is overridden by the ZeekControl MailFrom option. + option mail_from = "Zeek "; + ## Reply-to address used in outbound email. + option reply_to = ""; + ## Text string prefixed to the subject of all emails sent out. + ## + ## Note that this is overridden by the ZeekControl MailSubjectPrefix + ## option. + option mail_subject_prefix = "[Zeek]"; + ## The maximum amount of time a plugin can delay email from being sent. 
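
The tuning knobs above are normally set from a site script; a sketch of a typical configuration, with placeholder addresses and Weird::Activity chosen only as an example type:

redef Notice::mail_dest = "security-team@example.com";
redef Notice::mail_subject_prefix = "[Zeek notice]";
redef Notice::emailed_types += { Weird::Activity };
redef Notice::type_suppression_intervals += { [Weird::Activity] = 1day };
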
+ const max_email_delay = 15secs &redef; + + ## Contains a portion of :zeek:see:`fa_file` that's also contained in + ## :zeek:see:`Notice::Info`. + type FileInfo: record { + fuid: string; ##< File UID. + desc: string; ##< File description from e.g. + ##< :zeek:see:`Files::describe`. + mime: string &optional; ##< Strongest mime type match for file. + cid: conn_id &optional; ##< Connection tuple over which file is sent. + cuid: string &optional; ##< Connection UID over which file is sent. + }; + + ## Creates a record containing a subset of a full :zeek:see:`fa_file` record. + ## + ## f: record containing metadata about a file. + ## + ## Returns: record containing a subset of fields copied from *f*. + global create_file_info: function(f: fa_file): Notice::FileInfo; + + ## Populates file-related fields in a notice info record. + ## + ## f: record containing metadata about a file. + ## + ## n: a notice record that needs file-related fields populated. + global populate_file_info: function(f: fa_file, n: Notice::Info); + + ## Populates file-related fields in a notice info record. + ## + ## fi: record containing metadata about a file. + ## + ## n: a notice record that needs file-related fields populated. + global populate_file_info2: function(fi: Notice::FileInfo, n: Notice::Info); + + ## A log postprocessing function that implements emailing the contents + ## of a log upon rotation to any configured :zeek:id:`Notice::mail_dest`. + ## The rotated log is removed upon being sent. + ## + ## info: A record containing the rotated log file information. + ## + ## Returns: True. + global log_mailing_postprocessor: function(info: Log::RotationInfo): bool; + + ## This is the event that is called as the entry point to the + ## notice framework by the global :zeek:id:`NOTICE` function. By the + ## time this event is generated, default values have already been + ## filled out in the :zeek:type:`Notice::Info` record and the notice + ## policy has also been applied. + ## + ## n: The record containing notice data. + global notice: hook(n: Info); + + ## This event is generated when a notice begins to be suppressed. + ## + ## ts: time indicating then when the notice to be suppressed occured. + ## + ## suppress_for: length of time that this notice should be suppressed. + ## + ## note: The :zeek:type:`Notice::Type` of the notice. + ## + ## identifier: The identifier string of the notice that should be suppressed. + global begin_suppression: event(ts: time, suppress_for: interval, note: Type, identifier: string); + + ## A function to determine if an event is supposed to be suppressed. + ## + ## n: The record containing the notice in question. + global is_being_suppressed: function(n: Notice::Info): bool; + + ## This event is generated on each occurrence of an event being + ## suppressed. + ## + ## n: The record containing notice data regarding the notice type + ## being suppressed. + global suppressed: event(n: Notice::Info); + + ## Call this function to send a notice in an email. It is already used + ## by default with the built in :zeek:enum:`Notice::ACTION_EMAIL` and + ## :zeek:enum:`Notice::ACTION_PAGE` actions. + ## + ## n: The record of notice data to email. + ## + ## dest: The intended recipient of the notice email. + ## + ## extend: Whether to extend the email using the + ## ``email_body_sections`` field of *n*. + global email_notice_to: function(n: Info, dest: string, extend: bool); + + ## Constructs mail headers to which an email body can be appended for + ## sending with sendmail. 
+ ## + ## subject_desc: a subject string to use for the mail. + ## + ## dest: recipient string to use for the mail. + ## + ## Returns: a string of mail headers to which an email body can be + ## appended. + global email_headers: function(subject_desc: string, dest: string): string; + + ## This event can be handled to access the :zeek:type:`Notice::Info` + ## record as it is sent on to the logging framework. + ## + ## rec: The record containing notice data before it is logged. + global log_notice: event(rec: Info); + + ## This is an internal wrapper for the global :zeek:id:`NOTICE` + ## function; disregard. + ## + ## n: The record of notice data. + global internal_NOTICE: function(n: Notice::Info); + + ## This is the event used to transport notices on the cluster. + ## + ## n: The notice information to be sent to the cluster manager for + ## further processing. + global cluster_notice: event(n: Notice::Info); +} + +module GLOBAL; + +function NOTICE(n: Notice::Info) + { + if ( Notice::is_being_suppressed(n) ) + return; + + @if ( Cluster::is_enabled() ) + if ( Cluster::local_node_type() == Cluster::MANAGER ) + Notice::internal_NOTICE(n); + else + { + n$peer_name = n$peer_descr = Cluster::node; + Broker::publish(Cluster::manager_topic, Notice::cluster_notice, n); + } + @else + Notice::internal_NOTICE(n); + @endif + } + +module Notice; + +# This is used as a hack to implement per-item expiration intervals. +function per_notice_suppression_interval(t: table[Notice::Type, string] of time, idx: any): interval + { + local n: Notice::Type; + local s: string; + [n,s] = idx; + + local suppress_time = t[n,s] - network_time(); + if ( suppress_time < 0secs ) + suppress_time = 0secs; + + return suppress_time; + } + +# This is the internally maintained notice suppression table. It's +# indexed on the Notice::Type and the $identifier field from the notice. +global suppressing: table[Type, string] of time = {} + &create_expire=0secs + &expire_func=per_notice_suppression_interval; + +function log_mailing_postprocessor(info: Log::RotationInfo): bool + { + if ( ! reading_traces() && mail_dest != "" ) + { + local headers = email_headers(fmt("Log Contents: %s", info$fname), + mail_dest); + local tmpfilename = fmt("%s.mailheaders.tmp", info$fname); + local tmpfile = open(tmpfilename); + write_file(tmpfile, headers); + close(tmpfile); + system(fmt("/bin/cat %s %s | %s -t -oi && /bin/rm %s %s", + tmpfilename, info$fname, sendmail, tmpfilename, info$fname)); + } + return T; + } + +event zeek_init() &priority=5 + { + Log::create_stream(Notice::LOG, [$columns=Info, $ev=log_notice, $path="notice"]); + + Log::create_stream(Notice::ALARM_LOG, [$columns=Notice::Info, $path="notice_alarm"]); + # If Zeek is configured for mailing notices, set up mailing for alarms. + # Make sure that this alarm log is also output as text so that it can + # be packaged up and emailed later. + if ( ! 
reading_traces() && mail_dest != "" ) + Log::add_filter(Notice::ALARM_LOG, + [$name="alarm-mail", $path="alarm-mail", $writer=Log::WRITER_ASCII, + $interv=24hrs, $postprocessor=log_mailing_postprocessor]); + } + +function email_headers(subject_desc: string, dest: string): string + { + local header_text = string_cat( + "From: ", mail_from, "\n", + "Subject: ", mail_subject_prefix, " ", subject_desc, "\n", + "To: ", dest, "\n", + "User-Agent: Bro-IDS/", zeek_version(), "\n"); + if ( reply_to != "" ) + header_text = string_cat(header_text, "Reply-To: ", reply_to, "\n"); + return header_text; + } + +event delay_sending_email(n: Notice::Info, dest: string, extend: bool) + { + email_notice_to(n, dest, extend); + } + +function email_notice_to(n: Notice::Info, dest: string, extend: bool) + { + if ( reading_traces() || dest == "" ) + return; + + if ( extend ) + { + if ( |n$email_delay_tokens| > 0 ) + { + # If we still are within the max_email_delay, keep delaying. + if ( n$ts + max_email_delay > network_time() ) + { + schedule 1sec { delay_sending_email(n, dest, extend) }; + return; + } + else + { + Reporter::info(fmt("Notice email delay tokens weren't released in time (%s).", n$email_delay_tokens)); + } + } + } + + local email_text = email_headers(fmt("%s", n$note), dest); + + # First off, finish the headers and include the human readable messages + # then leave a blank line after the message. + email_text = string_cat(email_text, "\nMessage: ", n$msg, "\n"); + + if ( n?$sub ) + email_text = string_cat(email_text, "Sub-message: ", n$sub, "\n"); + + email_text = string_cat(email_text, "\n"); + + # Add information about the file if it exists. + if ( n?$file_desc ) + email_text = string_cat(email_text, "File Description: ", n$file_desc, "\n"); + + if ( n?$file_mime_type ) + email_text = string_cat(email_text, "File MIME Type: ", n$file_mime_type, "\n"); + + if ( n?$file_desc || n?$file_mime_type ) + email_text = string_cat(email_text, "\n"); + + # Next, add information about the connection if it exists. + if ( n?$id ) + { + email_text = string_cat(email_text, "Connection: ", + fmt("%s", n$id$orig_h), ":", fmt("%d", n$id$orig_p), " -> ", + fmt("%s", n$id$resp_h), ":", fmt("%d", n$id$resp_p), "\n"); + if ( n?$uid ) + email_text = string_cat(email_text, "Connection uid: ", n$uid, "\n"); + } + else if ( n?$src ) + email_text = string_cat(email_text, "Address: ", fmt("%s", n$src), "\n"); + + # Add the extended information if it's requested. + if ( extend ) + { + email_text = string_cat(email_text, "\nEmail Extensions\n"); + email_text = string_cat(email_text, "----------------\n"); + for ( i in n$email_body_sections ) + { + email_text = string_cat(email_text, n$email_body_sections[i], "\n"); + } + } + + email_text = string_cat(email_text, "\n\n--\n[Automatically generated]\n\n"); + piped_exec(fmt("%s -t -oi", sendmail), email_text); + } + +hook Notice::policy(n: Notice::Info) &priority=10 + { + if ( n$note in Notice::ignored_types ) + break; + + if ( n$note in Notice::not_suppressed_types ) + n$suppress_for=0secs; + if ( n$note in Notice::alarmed_types ) + add n$actions[ACTION_ALARM]; + if ( n$note in Notice::emailed_types ) + add n$actions[ACTION_EMAIL]; + + if ( n$note in Notice::type_suppression_intervals ) + n$suppress_for=Notice::type_suppression_intervals[n$note]; + + # Logging is a default action. It can be removed in a later hook if desired. 
+ add n$actions[ACTION_LOG]; + } + +hook Notice::notice(n: Notice::Info) &priority=-5 + { + if ( ACTION_EMAIL in n$actions ) + email_notice_to(n, mail_dest, T); + if ( ACTION_LOG in n$actions ) + Log::write(Notice::LOG, n); + if ( ACTION_ALARM in n$actions ) + Log::write(Notice::ALARM_LOG, n); + + # Normally suppress further notices like this one unless directed not to. + # n$identifier *must* be specified for suppression to function at all. + if ( n?$identifier && + [n$note, n$identifier] !in suppressing && + n$suppress_for != 0secs ) + { + event Notice::begin_suppression(n$ts, n$suppress_for, n$note, n$identifier); + } + } + +event Notice::begin_suppression(ts: time, suppress_for: interval, note: Type, + identifier: string) + { + local suppress_until = ts + suppress_for; + suppressing[note, identifier] = suppress_until; + } + +event zeek_init() + { + if ( ! Cluster::is_enabled() ) + return; + + Broker::auto_publish(Cluster::worker_topic, Notice::begin_suppression); + Broker::auto_publish(Cluster::proxy_topic, Notice::begin_suppression); + } + +function is_being_suppressed(n: Notice::Info): bool + { + if ( n?$identifier && [n$note, n$identifier] in suppressing ) + { + event Notice::suppressed(n); + return T; + } + else + return F; + } + +# Executes a script with all of the notice fields put into the +# new process' environment as "ZEEK_ARG_" variables. +function execute_with_notice(cmd: string, n: Notice::Info) + { + # TODO: fix system calls + #local tgs = tags(n); + #system_env(cmd, tags); + } + +function create_file_info(f: fa_file): Notice::FileInfo + { + local fi: Notice::FileInfo = Notice::FileInfo($fuid = f$id, + $desc = Files::describe(f)); + + if ( f?$info && f$info?$mime_type ) + fi$mime = f$info$mime_type; + + if ( f?$conns && |f$conns| == 1 ) + for ( id, c in f$conns ) + { + fi$cid = id; + fi$cuid = c$uid; + } + + return fi; + } + +function populate_file_info(f: fa_file, n: Notice::Info) + { + populate_file_info2(create_file_info(f), n); + } + +function populate_file_info2(fi: Notice::FileInfo, n: Notice::Info) + { + if ( ! n?$fuid ) + n$fuid = fi$fuid; + + if ( ! n?$file_mime_type && fi?$mime ) + n$file_mime_type = fi$mime; + + n$file_desc = fi$desc; + n$id = fi$cid; + n$uid = fi$cuid; + } + +# This is run synchronously as a function before all of the other +# notice related functions and events. It also modifies the +# :zeek:type:`Notice::Info` record in place. +function apply_policy(n: Notice::Info) + { + # Fill in some defaults. + if ( ! n?$ts ) + n$ts = network_time(); + + if ( n?$f ) + populate_file_info(n$f, n); + + if ( n?$conn ) + { + if ( ! n?$id ) + n$id = n$conn$id; + + if ( ! n?$uid ) + n$uid = n$conn$uid; + } + + if ( n?$id ) + { + if ( ! n?$src ) + n$src = n$id$orig_h; + if ( ! n?$dst ) + n$dst = n$id$resp_h; + if ( ! n?$p ) + n$p = n$id$resp_p; + } + + if ( n?$p ) + n$proto = get_port_transport_proto(n$p); + + if ( n?$iconn ) + { + n$proto = icmp; + if ( ! n?$src ) + n$src = n$iconn$orig_h; + if ( ! n?$dst ) + n$dst = n$iconn$resp_h; + } + + if ( ! n?$email_body_sections ) + n$email_body_sections = vector(); + if ( ! n?$email_delay_tokens ) + n$email_delay_tokens = set(); + + # Apply the hook based policy. + hook Notice::policy(n); + + # Apply the suppression time after applying the policy so that policy + # items can give custom suppression intervals. If there is no + # suppression interval given yet, the default is applied. + if ( ! 
n?$suppress_for ) + n$suppress_for = default_suppression_interval; + + # Delete the connection and file records if they're there so we + # aren't sending that to remote machines. It can cause problems + # due to the size of those records. + if ( n?$conn ) + delete n$conn; + if ( n?$iconn ) + delete n$iconn; + if ( n?$f ) + delete n$f; + } + +function internal_NOTICE(n: Notice::Info) + { + # Fill out fields that might be empty and do the policy processing. + apply_policy(n); + + # Generate the notice event with the notice. + hook Notice::notice(n); + } + +event Notice::cluster_notice(n: Notice::Info) + { + NOTICE(n); + } diff --git a/scripts/base/frameworks/notice/weird.bro b/scripts/base/frameworks/notice/weird.bro deleted file mode 100644 index c7a1f3aefb..0000000000 --- a/scripts/base/frameworks/notice/weird.bro +++ /dev/null @@ -1,434 +0,0 @@ -##! This script provides a default set of actions to take for "weird activity" -##! events generated from Bro's event engine. Weird activity is defined as -##! unusual or exceptional activity that can indicate malformed connections, -##! traffic that doesn't conform to a particular protocol, malfunctioning -##! or misconfigured hardware, or even an attacker attempting to avoid/confuse -##! a sensor. Without context, it's hard to judge whether a particular -##! category of weird activity is interesting, but this script provides -##! a starting point for the user. - -@load base/utils/conn-ids -@load base/utils/site -@load ./main - -module Weird; - -export { - ## The weird logging stream identifier. - redef enum Log::ID += { LOG }; - - redef enum Notice::Type += { - ## Generic unusual but notice-worthy weird activity. - Activity, - }; - - ## The record which is used for representing and logging weirds. - type Info: record { - ## The time when the weird occurred. - ts: time &log; - - ## If a connection is associated with this weird, this will be - ## the connection's unique ID. - uid: string &log &optional; - - ## conn_id for the optional connection. - id: conn_id &log &optional; - - ## A shorthand way of giving the uid and id to a weird. - conn: connection &optional; - - ## The name of the weird that occurred. - name: string &log; - - ## Additional information accompanying the weird if any. - addl: string &log &optional; - - ## Indicate if this weird was also turned into a notice. - notice: bool &log &default=F; - - ## The peer that originated this weird. This is helpful in - ## cluster deployments if a particular cluster node is having - ## trouble to help identify which node is having trouble. - peer: string &log &optional &default=peer_description; - - ## This field is to be provided when a weird is generated for - ## the purpose of deduplicating weirds. The identifier string - ## should be unique for a single instance of the weird. This field - ## is used to define when a weird is conceptually a duplicate of - ## a previous weird. - identifier: string &optional; - }; - - ## Types of actions that may be taken when handling weird activity events. - type Action: enum { - ## A dummy action indicating the user does not care what - ## internal decision is made regarding a given type of weird. - ACTION_UNSPECIFIED, - ## No action is to be taken. - ACTION_IGNORE, - ## Log the weird event every time it occurs. - ACTION_LOG, - ## Log the weird event only once. - ACTION_LOG_ONCE, - ## Log the weird event once per connection. - ACTION_LOG_PER_CONN, - ## Log the weird event once per originator host. 
- ACTION_LOG_PER_ORIG, - ## Always generate a notice associated with the weird event. - ACTION_NOTICE, - ## Generate a notice associated with the weird event only once. - ACTION_NOTICE_ONCE, - ## Generate a notice for the weird event once per connection. - ACTION_NOTICE_PER_CONN, - ## Generate a notice for the weird event once per originator host. - ACTION_NOTICE_PER_ORIG, - }; - - ## A table specifying default/recommended actions per weird type. - const actions: table[string] of Action = { - ["unsolicited_SYN_response"] = ACTION_IGNORE, - ["above_hole_data_without_any_acks"] = ACTION_LOG, - ["active_connection_reuse"] = ACTION_LOG, - ["bad_HTTP_reply"] = ACTION_LOG, - ["bad_HTTP_version"] = ACTION_LOG, - ["bad_ICMP_checksum"] = ACTION_LOG_PER_ORIG, - ["bad_ident_port"] = ACTION_LOG, - ["bad_ident_reply"] = ACTION_LOG, - ["bad_ident_request"] = ACTION_LOG, - ["bad_rlogin_prolog"] = ACTION_LOG, - ["bad_rsh_prolog"] = ACTION_LOG, - ["rsh_text_after_rejected"] = ACTION_LOG, - ["bad_RPC"] = ACTION_LOG_PER_ORIG, - ["bad_RPC_program"] = ACTION_LOG, - ["bad_SYN_ack"] = ACTION_LOG, - ["bad_TCP_checksum"] = ACTION_LOG_PER_ORIG, - ["bad_UDP_checksum"] = ACTION_LOG_PER_ORIG, - ["baroque_SYN"] = ACTION_LOG, - ["base64_illegal_encoding"] = ACTION_LOG, - ["connection_originator_SYN_ack"] = ACTION_LOG_PER_ORIG, - ["contentline_size_exceeded"] = ACTION_LOG, - ["crud_trailing_HTTP_request"] = ACTION_LOG, - ["data_after_reset"] = ACTION_LOG, - ["data_before_established"] = ACTION_LOG, - ["DNS_AAAA_neg_length"] = ACTION_LOG, - ["DNS_Conn_count_too_large"] = ACTION_LOG, - ["DNS_NAME_too_long"] = ACTION_LOG, - ["DNS_RR_bad_length"] = ACTION_LOG, - ["DNS_RR_length_mismatch"] = ACTION_LOG, - ["DNS_RR_unknown_type"] = ACTION_LOG, - ["DNS_label_forward_compress_offset"] = ACTION_LOG_PER_ORIG, - ["DNS_label_len_gt_name_len"] = ACTION_LOG_PER_ORIG, - ["DNS_label_len_gt_pkt"] = ACTION_LOG_PER_ORIG, - ["DNS_label_too_long"] = ACTION_LOG_PER_ORIG, - ["DNS_truncated_RR_rdlength_lt_len"] = ACTION_LOG, - ["DNS_truncated_ans_too_short"] = ACTION_LOG, - ["DNS_truncated_len_lt_hdr_len"] = ACTION_LOG, - ["DNS_truncated_quest_too_short"] = ACTION_LOG, - ["excessive_data_without_further_acks"] = ACTION_LOG, - ["excess_RPC"] = ACTION_LOG_PER_ORIG, - ["FIN_advanced_last_seq"] = ACTION_LOG, - ["FIN_after_reset"] = ACTION_IGNORE, - ["FIN_storm"] = ACTION_NOTICE_PER_ORIG, - ["HTTP_bad_chunk_size"] = ACTION_LOG, - ["HTTP_chunked_transfer_for_multipart_message"] = ACTION_LOG, - ["HTTP_overlapping_messages"] = ACTION_LOG, - ["unknown_HTTP_method"] = ACTION_LOG, - ["HTTP_version_mismatch"] = ACTION_LOG, - ["ident_request_addendum"] = ACTION_LOG, - ["inappropriate_FIN"] = ACTION_LOG, - ["inflate_failed"] = ACTION_LOG, - ["invalid_irc_global_users_reply"] = ACTION_LOG, - ["irc_invalid_command"] = ACTION_LOG, - ["irc_invalid_dcc_message_format"] = ACTION_LOG, - ["irc_invalid_invite_message_format"] = ACTION_LOG, - ["irc_invalid_join_line"] = ACTION_LOG, - ["irc_invalid_kick_message_format"] = ACTION_LOG, - ["irc_invalid_line"] = ACTION_LOG, - ["irc_invalid_mode_message_format"] = ACTION_LOG, - ["irc_invalid_names_line"] = ACTION_LOG, - ["irc_invalid_njoin_line"] = ACTION_LOG, - ["irc_invalid_notice_message_format"] = ACTION_LOG, - ["irc_invalid_oper_message_format"] = ACTION_LOG, - ["irc_invalid_privmsg_message_format"] = ACTION_LOG, - ["irc_invalid_reply_number"] = ACTION_LOG, - ["irc_invalid_squery_message_format"] = ACTION_LOG, - ["irc_invalid_topic_reply"] = ACTION_LOG, - ["irc_invalid_who_line"] = ACTION_LOG, - 
["irc_invalid_who_message_format"] = ACTION_LOG, - ["irc_invalid_whois_channel_line"] = ACTION_LOG, - ["irc_invalid_whois_message_format"] = ACTION_LOG, - ["irc_invalid_whois_operator_line"] = ACTION_LOG, - ["irc_invalid_whois_user_line"] = ACTION_LOG, - ["irc_line_size_exceeded"] = ACTION_LOG, - ["irc_line_too_short"] = ACTION_LOG, - ["irc_too_many_invalid"] = ACTION_LOG, - ["line_terminated_with_single_CR"] = ACTION_LOG, - ["line_terminated_with_single_LF"] = ACTION_LOG, - ["malformed_ssh_identification"] = ACTION_LOG, - ["malformed_ssh_version"] = ACTION_LOG, - ["multiple_HTTP_request_elements"] = ACTION_LOG, - ["NUL_in_line"] = ACTION_LOG, - ["originator_RPC_reply"] = ACTION_LOG_PER_ORIG, - ["partial_finger_request"] = ACTION_LOG, - ["partial_ftp_request"] = ACTION_LOG, - ["partial_ident_request"] = ACTION_LOG, - ["partial_RPC"] = ACTION_LOG_PER_ORIG, - ["pending_data_when_closed"] = ACTION_LOG, - ["pop3_bad_base64_encoding"] = ACTION_LOG, - ["pop3_client_command_unknown"] = ACTION_LOG, - ["pop3_client_sending_server_commands"] = ACTION_LOG, - ["pop3_malformed_auth_plain"] = ACTION_LOG, - ["pop3_server_command_unknown"] = ACTION_LOG, - ["pop3_server_sending_client_commands"] = ACTION_LOG, - ["possible_split_routing"] = ACTION_LOG, - ["premature_connection_reuse"] = ACTION_LOG, - ["repeated_SYN_reply_wo_ack"] = ACTION_LOG, - ["repeated_SYN_with_ack"] = ACTION_LOG, - ["responder_RPC_call"] = ACTION_LOG_PER_ORIG, - ["rlogin_text_after_rejected"] = ACTION_LOG, - ["RPC_rexmit_inconsistency"] = ACTION_LOG, - ["RPC_underflow"] = ACTION_LOG, - ["RST_storm"] = ACTION_LOG, - ["RST_with_data"] = ACTION_LOG, - ["SSL_many_server_names"] = ACTION_LOG, - ["simultaneous_open"] = ACTION_LOG_PER_CONN, - ["spontaneous_FIN"] = ACTION_IGNORE, - ["spontaneous_RST"] = ACTION_IGNORE, - ["SMB_parsing_error"] = ACTION_LOG, - ["no_smb_session_using_parsesambamsg"] = ACTION_LOG, - ["smb_andx_command_failed_to_parse"] = ACTION_LOG, - ["transaction_subcmd_missing"] = ACTION_LOG, - ["successful_RPC_reply_to_invalid_request"] = ACTION_NOTICE_PER_ORIG, - ["SYN_after_close"] = ACTION_LOG, - ["SYN_after_partial"] = ACTION_NOTICE_PER_ORIG, - ["SYN_after_reset"] = ACTION_LOG, - ["SYN_inside_connection"] = ACTION_LOG, - ["SYN_seq_jump"] = ACTION_LOG, - ["SYN_with_data"] = ACTION_LOG_PER_ORIG, - ["TCP_christmas"] = ACTION_LOG, - ["truncated_ARP"] = ACTION_LOG, - ["truncated_NTP"] = ACTION_LOG, - ["UDP_datagram_length_mismatch"] = ACTION_LOG_PER_ORIG, - ["unexpected_client_HTTP_data"] = ACTION_LOG, - ["unexpected_multiple_HTTP_requests"] = ACTION_LOG, - ["unexpected_server_HTTP_data"] = ACTION_LOG, - ["unmatched_HTTP_reply"] = ACTION_LOG, - ["unpaired_RPC_response"] = ACTION_LOG, - ["window_recision"] = ACTION_LOG, - ["double_%_in_URI"] = ACTION_LOG, - ["illegal_%_at_end_of_URI"] = ACTION_LOG, - ["unescaped_%_in_URI"] = ACTION_LOG, - ["unescaped_special_URI_char"] = ACTION_LOG, - ["deficit_netbios_hdr_len"] = ACTION_LOG, - ["excess_netbios_hdr_len"] = ACTION_LOG, - ["netbios_client_session_reply"] = ACTION_LOG, - ["netbios_raw_session_msg"] = ACTION_LOG, - ["netbios_server_session_request"] = ACTION_LOG, - ["unknown_netbios_type"] = ACTION_LOG, - ["excessively_large_fragment"] = ACTION_LOG, - ["excessively_small_fragment"] = ACTION_LOG_PER_ORIG, - ["fragment_inconsistency"] = ACTION_LOG_PER_ORIG, - ["fragment_overlap"] = ACTION_LOG_PER_ORIG, - ["fragment_protocol_inconsistency"] = ACTION_LOG, - ["fragment_size_inconsistency"] = ACTION_LOG_PER_ORIG, - # These do indeed happen! 
- ["fragment_with_DF"] = ACTION_LOG, - ["incompletely_captured_fragment"] = ACTION_LOG, - ["bad_IP_checksum"] = ACTION_LOG_PER_ORIG, - ["bad_TCP_header_len"] = ACTION_LOG, - ["internally_truncated_header"] = ACTION_LOG, - ["truncated_IP"] = ACTION_LOG, - ["truncated_header"] = ACTION_LOG, - } &default=ACTION_LOG &redef; - - ## To completely ignore a specific weird for a host, add the host - ## and weird name into this set. - option ignore_hosts: set[addr, string] = {}; - - ## Don't ignore repeats for weirds in this set. For example, - ## it's handy keeping track of clustered checksum errors. - option weird_do_not_ignore_repeats = { - "bad_IP_checksum", "bad_TCP_checksum", "bad_UDP_checksum", - "bad_ICMP_checksum", - }; - - ## This table is used to track identifier and name pairs that should be - ## temporarily ignored because the problem has already been reported. - ## This helps reduce the volume of high volume weirds by only allowing - ## a unique weird every ``create_expire`` interval. - global weird_ignore: set[string, string] &create_expire=10min &redef; - - ## A state set which tracks unique weirds solely by name to reduce - ## duplicate logging. This is deliberately not synchronized because it - ## could cause overload during storms. - global did_log: set[string, string] &create_expire=1day &redef; - - ## A state set which tracks unique weirds solely by name to reduce - ## duplicate notices from being raised. - global did_notice: set[string, string] &create_expire=1day &redef; - - ## Handlers of this event are invoked once per write to the weird - ## logging stream before the data is actually written. - ## - ## rec: The weird columns about to be logged to the weird stream. - global log_weird: event(rec: Info); - - global weird: function(w: Weird::Info); -} - -# These actions result in the output being limited and further redundant -# weirds not progressing to being logged or noticed. -const limiting_actions = { - ACTION_LOG_ONCE, - ACTION_LOG_PER_CONN, - ACTION_LOG_PER_ORIG, - ACTION_NOTICE_ONCE, - ACTION_NOTICE_PER_CONN, - ACTION_NOTICE_PER_ORIG, -}; - -# This is an internal set to track which Weird::Action values lead to notice -# creation. -const notice_actions = { - ACTION_NOTICE, - ACTION_NOTICE_PER_CONN, - ACTION_NOTICE_PER_ORIG, - ACTION_NOTICE_ONCE, -}; - -event bro_init() &priority=5 - { - Log::create_stream(Weird::LOG, [$columns=Info, $ev=log_weird, $path="weird"]); - } - -function flow_id_string(src: addr, dst: addr): string - { - return fmt("%s -> %s", src, dst); - } - -function weird(w: Weird::Info) - { - local action = actions[w$name]; - - local identifier = ""; - if ( w?$identifier ) - identifier = w$identifier; - else - { - if ( w?$id ) - identifier = id_string(w$id); - } - - # If this weird is to be ignored let's drop out of here very early. - if ( action == ACTION_IGNORE || [w$name, identifier] in weird_ignore ) - return; - - if ( w?$conn ) - { - w$uid = w$conn$uid; - w$id = w$conn$id; - } - - if ( w?$id ) - { - if ( [w$id$orig_h, w$name] in ignore_hosts || - [w$id$resp_h, w$name] in ignore_hosts ) - return; - } - - if ( action in limiting_actions ) - { - local notice_identifier = identifier; - if ( action in notice_actions ) - { - # Handle notices - if ( w?$id && action == ACTION_NOTICE_PER_ORIG ) - notice_identifier = fmt("%s", w$id$orig_h); - else if ( action == ACTION_NOTICE_ONCE ) - notice_identifier = ""; - - # If this weird was already noticed then we're done. 
- if ( [w$name, notice_identifier] in did_notice ) - return; - add did_notice[w$name, notice_identifier]; - } - else - { - # Handle logging. - if ( w?$id && action == ACTION_LOG_PER_ORIG ) - notice_identifier = fmt("%s", w$id$orig_h); - else if ( action == ACTION_LOG_ONCE ) - notice_identifier = ""; - - # If this weird was already logged then we're done. - if ( [w$name, notice_identifier] in did_log ) - return; - - add did_log[w$name, notice_identifier]; - } - } - - if ( action in notice_actions ) - { - w$notice = T; - - local n: Notice::Info; - n$note = Activity; - n$msg = w$name; - if ( w?$conn ) - n$conn = w$conn; - else - { - if ( w?$uid ) - n$uid = w$uid; - if ( w?$id ) - n$id = w$id; - } - if ( w?$addl ) - n$sub = w$addl; - NOTICE(n); - } - - # This is for the temporary ignoring to reduce volume for identical weirds. - if ( w$name !in weird_do_not_ignore_repeats ) - add weird_ignore[w$name, identifier]; - - Log::write(Weird::LOG, w); - } - -# The following events come from core generated weirds typically. -event conn_weird(name: string, c: connection, addl: string) - { - local i = Info($ts=network_time(), $name=name, $conn=c, $identifier=id_string(c$id)); - if ( addl != "" ) - i$addl = addl; - - weird(i); - } - -event flow_weird(name: string, src: addr, dst: addr) - { - # We add the source and destination as port 0/unknown because that is - # what fits best here. - local id = conn_id($orig_h=src, $orig_p=count_to_port(0, unknown_transport), - $resp_h=dst, $resp_p=count_to_port(0, unknown_transport)); - - local i = Info($ts=network_time(), $name=name, $id=id, $identifier=flow_id_string(src,dst)); - weird(i); - } - -event net_weird(name: string) - { - local i = Info($ts=network_time(), $name=name); - weird(i); - } - -event file_weird(name: string, f: fa_file, addl: string) - { - local i = Info($ts=network_time(), $name=name, $addl=f$id); - - if ( addl != "" ) - i$addl += fmt(": %s", addl); - - weird(i); - } diff --git a/scripts/base/frameworks/notice/weird.zeek b/scripts/base/frameworks/notice/weird.zeek new file mode 100644 index 0000000000..d6d381406a --- /dev/null +++ b/scripts/base/frameworks/notice/weird.zeek @@ -0,0 +1,434 @@ +##! This script provides a default set of actions to take for "weird activity" +##! events generated from Zeek's event engine. Weird activity is defined as +##! unusual or exceptional activity that can indicate malformed connections, +##! traffic that doesn't conform to a particular protocol, malfunctioning +##! or misconfigured hardware, or even an attacker attempting to avoid/confuse +##! a sensor. Without context, it's hard to judge whether a particular +##! category of weird activity is interesting, but this script provides +##! a starting point for the user. + +@load base/utils/conn-ids +@load base/utils/site +@load ./main + +module Weird; + +export { + ## The weird logging stream identifier. + redef enum Log::ID += { LOG }; + + redef enum Notice::Type += { + ## Generic unusual but notice-worthy weird activity. + Activity, + }; + + ## The record which is used for representing and logging weirds. + type Info: record { + ## The time when the weird occurred. + ts: time &log; + + ## If a connection is associated with this weird, this will be + ## the connection's unique ID. + uid: string &log &optional; + + ## conn_id for the optional connection. + id: conn_id &log &optional; + + ## A shorthand way of giving the uid and id to a weird. + conn: connection &optional; + + ## The name of the weird that occurred. 
+ name: string &log; + + ## Additional information accompanying the weird if any. + addl: string &log &optional; + + ## Indicate if this weird was also turned into a notice. + notice: bool &log &default=F; + + ## The peer that originated this weird. This is helpful in + ## cluster deployments if a particular cluster node is having + ## trouble to help identify which node is having trouble. + peer: string &log &optional &default=peer_description; + + ## This field is to be provided when a weird is generated for + ## the purpose of deduplicating weirds. The identifier string + ## should be unique for a single instance of the weird. This field + ## is used to define when a weird is conceptually a duplicate of + ## a previous weird. + identifier: string &optional; + }; + + ## Types of actions that may be taken when handling weird activity events. + type Action: enum { + ## A dummy action indicating the user does not care what + ## internal decision is made regarding a given type of weird. + ACTION_UNSPECIFIED, + ## No action is to be taken. + ACTION_IGNORE, + ## Log the weird event every time it occurs. + ACTION_LOG, + ## Log the weird event only once. + ACTION_LOG_ONCE, + ## Log the weird event once per connection. + ACTION_LOG_PER_CONN, + ## Log the weird event once per originator host. + ACTION_LOG_PER_ORIG, + ## Always generate a notice associated with the weird event. + ACTION_NOTICE, + ## Generate a notice associated with the weird event only once. + ACTION_NOTICE_ONCE, + ## Generate a notice for the weird event once per connection. + ACTION_NOTICE_PER_CONN, + ## Generate a notice for the weird event once per originator host. + ACTION_NOTICE_PER_ORIG, + }; + + ## A table specifying default/recommended actions per weird type. + const actions: table[string] of Action = { + ["unsolicited_SYN_response"] = ACTION_IGNORE, + ["above_hole_data_without_any_acks"] = ACTION_LOG, + ["active_connection_reuse"] = ACTION_LOG, + ["bad_HTTP_reply"] = ACTION_LOG, + ["bad_HTTP_version"] = ACTION_LOG, + ["bad_ICMP_checksum"] = ACTION_LOG_PER_ORIG, + ["bad_ident_port"] = ACTION_LOG, + ["bad_ident_reply"] = ACTION_LOG, + ["bad_ident_request"] = ACTION_LOG, + ["bad_rlogin_prolog"] = ACTION_LOG, + ["bad_rsh_prolog"] = ACTION_LOG, + ["rsh_text_after_rejected"] = ACTION_LOG, + ["bad_RPC"] = ACTION_LOG_PER_ORIG, + ["bad_RPC_program"] = ACTION_LOG, + ["bad_SYN_ack"] = ACTION_LOG, + ["bad_TCP_checksum"] = ACTION_LOG_PER_ORIG, + ["bad_UDP_checksum"] = ACTION_LOG_PER_ORIG, + ["baroque_SYN"] = ACTION_LOG, + ["base64_illegal_encoding"] = ACTION_LOG, + ["connection_originator_SYN_ack"] = ACTION_LOG_PER_ORIG, + ["contentline_size_exceeded"] = ACTION_LOG, + ["crud_trailing_HTTP_request"] = ACTION_LOG, + ["data_after_reset"] = ACTION_LOG, + ["data_before_established"] = ACTION_LOG, + ["DNS_AAAA_neg_length"] = ACTION_LOG, + ["DNS_Conn_count_too_large"] = ACTION_LOG, + ["DNS_NAME_too_long"] = ACTION_LOG, + ["DNS_RR_bad_length"] = ACTION_LOG, + ["DNS_RR_length_mismatch"] = ACTION_LOG, + ["DNS_RR_unknown_type"] = ACTION_LOG, + ["DNS_label_forward_compress_offset"] = ACTION_LOG_PER_ORIG, + ["DNS_label_len_gt_name_len"] = ACTION_LOG_PER_ORIG, + ["DNS_label_len_gt_pkt"] = ACTION_LOG_PER_ORIG, + ["DNS_label_too_long"] = ACTION_LOG_PER_ORIG, + ["DNS_truncated_RR_rdlength_lt_len"] = ACTION_LOG, + ["DNS_truncated_ans_too_short"] = ACTION_LOG, + ["DNS_truncated_len_lt_hdr_len"] = ACTION_LOG, + ["DNS_truncated_quest_too_short"] = ACTION_LOG, + ["excessive_data_without_further_acks"] = ACTION_LOG, + ["excess_RPC"] = ACTION_LOG_PER_ORIG, 
+ ["FIN_advanced_last_seq"] = ACTION_LOG, + ["FIN_after_reset"] = ACTION_IGNORE, + ["FIN_storm"] = ACTION_NOTICE_PER_ORIG, + ["HTTP_bad_chunk_size"] = ACTION_LOG, + ["HTTP_chunked_transfer_for_multipart_message"] = ACTION_LOG, + ["HTTP_overlapping_messages"] = ACTION_LOG, + ["unknown_HTTP_method"] = ACTION_LOG, + ["HTTP_version_mismatch"] = ACTION_LOG, + ["ident_request_addendum"] = ACTION_LOG, + ["inappropriate_FIN"] = ACTION_LOG, + ["inflate_failed"] = ACTION_LOG, + ["invalid_irc_global_users_reply"] = ACTION_LOG, + ["irc_invalid_command"] = ACTION_LOG, + ["irc_invalid_dcc_message_format"] = ACTION_LOG, + ["irc_invalid_invite_message_format"] = ACTION_LOG, + ["irc_invalid_join_line"] = ACTION_LOG, + ["irc_invalid_kick_message_format"] = ACTION_LOG, + ["irc_invalid_line"] = ACTION_LOG, + ["irc_invalid_mode_message_format"] = ACTION_LOG, + ["irc_invalid_names_line"] = ACTION_LOG, + ["irc_invalid_njoin_line"] = ACTION_LOG, + ["irc_invalid_notice_message_format"] = ACTION_LOG, + ["irc_invalid_oper_message_format"] = ACTION_LOG, + ["irc_invalid_privmsg_message_format"] = ACTION_LOG, + ["irc_invalid_reply_number"] = ACTION_LOG, + ["irc_invalid_squery_message_format"] = ACTION_LOG, + ["irc_invalid_topic_reply"] = ACTION_LOG, + ["irc_invalid_who_line"] = ACTION_LOG, + ["irc_invalid_who_message_format"] = ACTION_LOG, + ["irc_invalid_whois_channel_line"] = ACTION_LOG, + ["irc_invalid_whois_message_format"] = ACTION_LOG, + ["irc_invalid_whois_operator_line"] = ACTION_LOG, + ["irc_invalid_whois_user_line"] = ACTION_LOG, + ["irc_line_size_exceeded"] = ACTION_LOG, + ["irc_line_too_short"] = ACTION_LOG, + ["irc_too_many_invalid"] = ACTION_LOG, + ["line_terminated_with_single_CR"] = ACTION_LOG, + ["line_terminated_with_single_LF"] = ACTION_LOG, + ["malformed_ssh_identification"] = ACTION_LOG, + ["malformed_ssh_version"] = ACTION_LOG, + ["multiple_HTTP_request_elements"] = ACTION_LOG, + ["NUL_in_line"] = ACTION_LOG, + ["originator_RPC_reply"] = ACTION_LOG_PER_ORIG, + ["partial_finger_request"] = ACTION_LOG, + ["partial_ftp_request"] = ACTION_LOG, + ["partial_ident_request"] = ACTION_LOG, + ["partial_RPC"] = ACTION_LOG_PER_ORIG, + ["pending_data_when_closed"] = ACTION_LOG, + ["pop3_bad_base64_encoding"] = ACTION_LOG, + ["pop3_client_command_unknown"] = ACTION_LOG, + ["pop3_client_sending_server_commands"] = ACTION_LOG, + ["pop3_malformed_auth_plain"] = ACTION_LOG, + ["pop3_server_command_unknown"] = ACTION_LOG, + ["pop3_server_sending_client_commands"] = ACTION_LOG, + ["possible_split_routing"] = ACTION_LOG, + ["premature_connection_reuse"] = ACTION_LOG, + ["repeated_SYN_reply_wo_ack"] = ACTION_LOG, + ["repeated_SYN_with_ack"] = ACTION_LOG, + ["responder_RPC_call"] = ACTION_LOG_PER_ORIG, + ["rlogin_text_after_rejected"] = ACTION_LOG, + ["RPC_rexmit_inconsistency"] = ACTION_LOG, + ["RPC_underflow"] = ACTION_LOG, + ["RST_storm"] = ACTION_LOG, + ["RST_with_data"] = ACTION_LOG, + ["SSL_many_server_names"] = ACTION_LOG, + ["simultaneous_open"] = ACTION_LOG_PER_CONN, + ["spontaneous_FIN"] = ACTION_IGNORE, + ["spontaneous_RST"] = ACTION_IGNORE, + ["SMB_parsing_error"] = ACTION_LOG, + ["no_smb_session_using_parsesambamsg"] = ACTION_LOG, + ["smb_andx_command_failed_to_parse"] = ACTION_LOG, + ["transaction_subcmd_missing"] = ACTION_LOG, + ["successful_RPC_reply_to_invalid_request"] = ACTION_NOTICE_PER_ORIG, + ["SYN_after_close"] = ACTION_LOG, + ["SYN_after_partial"] = ACTION_NOTICE_PER_ORIG, + ["SYN_after_reset"] = ACTION_LOG, + ["SYN_inside_connection"] = ACTION_LOG, + ["SYN_seq_jump"] = ACTION_LOG, + 
["SYN_with_data"] = ACTION_LOG_PER_ORIG, + ["TCP_christmas"] = ACTION_LOG, + ["truncated_ARP"] = ACTION_LOG, + ["truncated_NTP"] = ACTION_LOG, + ["UDP_datagram_length_mismatch"] = ACTION_LOG_PER_ORIG, + ["unexpected_client_HTTP_data"] = ACTION_LOG, + ["unexpected_multiple_HTTP_requests"] = ACTION_LOG, + ["unexpected_server_HTTP_data"] = ACTION_LOG, + ["unmatched_HTTP_reply"] = ACTION_LOG, + ["unpaired_RPC_response"] = ACTION_LOG, + ["window_recision"] = ACTION_LOG, + ["double_%_in_URI"] = ACTION_LOG, + ["illegal_%_at_end_of_URI"] = ACTION_LOG, + ["unescaped_%_in_URI"] = ACTION_LOG, + ["unescaped_special_URI_char"] = ACTION_LOG, + ["deficit_netbios_hdr_len"] = ACTION_LOG, + ["excess_netbios_hdr_len"] = ACTION_LOG, + ["netbios_client_session_reply"] = ACTION_LOG, + ["netbios_raw_session_msg"] = ACTION_LOG, + ["netbios_server_session_request"] = ACTION_LOG, + ["unknown_netbios_type"] = ACTION_LOG, + ["excessively_large_fragment"] = ACTION_LOG, + ["excessively_small_fragment"] = ACTION_LOG_PER_ORIG, + ["fragment_inconsistency"] = ACTION_LOG_PER_ORIG, + ["fragment_overlap"] = ACTION_LOG_PER_ORIG, + ["fragment_protocol_inconsistency"] = ACTION_LOG, + ["fragment_size_inconsistency"] = ACTION_LOG_PER_ORIG, + # These do indeed happen! + ["fragment_with_DF"] = ACTION_LOG, + ["incompletely_captured_fragment"] = ACTION_LOG, + ["bad_IP_checksum"] = ACTION_LOG_PER_ORIG, + ["bad_TCP_header_len"] = ACTION_LOG, + ["internally_truncated_header"] = ACTION_LOG, + ["truncated_IP"] = ACTION_LOG, + ["truncated_header"] = ACTION_LOG, + } &default=ACTION_LOG &redef; + + ## To completely ignore a specific weird for a host, add the host + ## and weird name into this set. + option ignore_hosts: set[addr, string] = {}; + + ## Don't ignore repeats for weirds in this set. For example, + ## it's handy keeping track of clustered checksum errors. + option weird_do_not_ignore_repeats = { + "bad_IP_checksum", "bad_TCP_checksum", "bad_UDP_checksum", + "bad_ICMP_checksum", + }; + + ## This table is used to track identifier and name pairs that should be + ## temporarily ignored because the problem has already been reported. + ## This helps reduce the volume of high volume weirds by only allowing + ## a unique weird every ``create_expire`` interval. + global weird_ignore: set[string, string] &create_expire=10min &redef; + + ## A state set which tracks unique weirds solely by name to reduce + ## duplicate logging. This is deliberately not synchronized because it + ## could cause overload during storms. + global did_log: set[string, string] &create_expire=1day &redef; + + ## A state set which tracks unique weirds solely by name to reduce + ## duplicate notices from being raised. + global did_notice: set[string, string] &create_expire=1day &redef; + + ## Handlers of this event are invoked once per write to the weird + ## logging stream before the data is actually written. + ## + ## rec: The weird columns about to be logged to the weird stream. + global log_weird: event(rec: Info); + + global weird: function(w: Weird::Info); +} + +# These actions result in the output being limited and further redundant +# weirds not progressing to being logged or noticed. +const limiting_actions = { + ACTION_LOG_ONCE, + ACTION_LOG_PER_CONN, + ACTION_LOG_PER_ORIG, + ACTION_NOTICE_ONCE, + ACTION_NOTICE_PER_CONN, + ACTION_NOTICE_PER_ORIG, +}; + +# This is an internal set to track which Weird::Action values lead to notice +# creation. 
+const notice_actions = { + ACTION_NOTICE, + ACTION_NOTICE_PER_CONN, + ACTION_NOTICE_PER_ORIG, + ACTION_NOTICE_ONCE, +}; + +event zeek_init() &priority=5 + { + Log::create_stream(Weird::LOG, [$columns=Info, $ev=log_weird, $path="weird"]); + } + +function flow_id_string(src: addr, dst: addr): string + { + return fmt("%s -> %s", src, dst); + } + +function weird(w: Weird::Info) + { + local action = actions[w$name]; + + local identifier = ""; + if ( w?$identifier ) + identifier = w$identifier; + else + { + if ( w?$id ) + identifier = id_string(w$id); + } + + # If this weird is to be ignored let's drop out of here very early. + if ( action == ACTION_IGNORE || [w$name, identifier] in weird_ignore ) + return; + + if ( w?$conn ) + { + w$uid = w$conn$uid; + w$id = w$conn$id; + } + + if ( w?$id ) + { + if ( [w$id$orig_h, w$name] in ignore_hosts || + [w$id$resp_h, w$name] in ignore_hosts ) + return; + } + + if ( action in limiting_actions ) + { + local notice_identifier = identifier; + if ( action in notice_actions ) + { + # Handle notices + if ( w?$id && action == ACTION_NOTICE_PER_ORIG ) + notice_identifier = fmt("%s", w$id$orig_h); + else if ( action == ACTION_NOTICE_ONCE ) + notice_identifier = ""; + + # If this weird was already noticed then we're done. + if ( [w$name, notice_identifier] in did_notice ) + return; + add did_notice[w$name, notice_identifier]; + } + else + { + # Handle logging. + if ( w?$id && action == ACTION_LOG_PER_ORIG ) + notice_identifier = fmt("%s", w$id$orig_h); + else if ( action == ACTION_LOG_ONCE ) + notice_identifier = ""; + + # If this weird was already logged then we're done. + if ( [w$name, notice_identifier] in did_log ) + return; + + add did_log[w$name, notice_identifier]; + } + } + + if ( action in notice_actions ) + { + w$notice = T; + + local n: Notice::Info; + n$note = Activity; + n$msg = w$name; + if ( w?$conn ) + n$conn = w$conn; + else + { + if ( w?$uid ) + n$uid = w$uid; + if ( w?$id ) + n$id = w$id; + } + if ( w?$addl ) + n$sub = w$addl; + NOTICE(n); + } + + # This is for the temporary ignoring to reduce volume for identical weirds. + if ( w$name !in weird_do_not_ignore_repeats ) + add weird_ignore[w$name, identifier]; + + Log::write(Weird::LOG, w); + } + +# The following events come from core generated weirds typically. +event conn_weird(name: string, c: connection, addl: string) + { + local i = Info($ts=network_time(), $name=name, $conn=c, $identifier=id_string(c$id)); + if ( addl != "" ) + i$addl = addl; + + weird(i); + } + +event flow_weird(name: string, src: addr, dst: addr) + { + # We add the source and destination as port 0/unknown because that is + # what fits best here. 
+ local id = conn_id($orig_h=src, $orig_p=count_to_port(0, unknown_transport), + $resp_h=dst, $resp_p=count_to_port(0, unknown_transport)); + + local i = Info($ts=network_time(), $name=name, $id=id, $identifier=flow_id_string(src,dst)); + weird(i); + } + +event net_weird(name: string) + { + local i = Info($ts=network_time(), $name=name); + weird(i); + } + +event file_weird(name: string, f: fa_file, addl: string) + { + local i = Info($ts=network_time(), $name=name, $addl=f$id); + + if ( addl != "" ) + i$addl += fmt(": %s", addl); + + weird(i); + } diff --git a/scripts/base/frameworks/openflow/__load__.bro b/scripts/base/frameworks/openflow/__load__.zeek similarity index 100% rename from scripts/base/frameworks/openflow/__load__.bro rename to scripts/base/frameworks/openflow/__load__.zeek diff --git a/scripts/base/frameworks/openflow/cluster.bro b/scripts/base/frameworks/openflow/cluster.bro deleted file mode 100644 index 9ae4274bb7..0000000000 --- a/scripts/base/frameworks/openflow/cluster.bro +++ /dev/null @@ -1,126 +0,0 @@ -##! Cluster support for the OpenFlow framework. - -@load ./main -@load base/frameworks/cluster - -module OpenFlow; - -export { - ## This is the event used to transport flow_mod messages to the manager. - global cluster_flow_mod: event(name: string, match: ofp_match, flow_mod: ofp_flow_mod); - - ## This is the event used to transport flow_clear messages to the manager. - global cluster_flow_clear: event(name: string); -} - -@if ( Cluster::local_node_type() != Cluster::MANAGER ) -# Workers need ability to forward commands to manager. -event bro_init() - { - Broker::auto_publish(Cluster::manager_topic, OpenFlow::cluster_flow_mod); - Broker::auto_publish(Cluster::manager_topic, OpenFlow::cluster_flow_clear); - } -@endif - -# the flow_mod function wrapper -function flow_mod(controller: Controller, match: ofp_match, flow_mod: ofp_flow_mod): bool - { - if ( ! controller?$flow_mod ) - return F; - - if ( Cluster::local_node_type() == Cluster::MANAGER ) - return controller$flow_mod(controller$state, match, flow_mod); - else - event OpenFlow::cluster_flow_mod(controller$state$_name, match, flow_mod); - - return T; - } - -function flow_clear(controller: Controller): bool - { - if ( ! controller?$flow_clear ) - return F; - - if ( Cluster::local_node_type() == Cluster::MANAGER ) - return controller$flow_clear(controller$state); - else - event OpenFlow::cluster_flow_clear(controller$state$_name); - - return T; - } - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) -event OpenFlow::cluster_flow_mod(name: string, match: ofp_match, flow_mod: ofp_flow_mod) - { - if ( name !in name_to_controller ) - { - Reporter::error(fmt("OpenFlow controller %s not found in mapping on master", name)); - return; - } - - local c = name_to_controller[name]; - - if ( ! c$state$_activated ) - return; - - if ( c?$flow_mod ) - c$flow_mod(c$state, match, flow_mod); - } - -event OpenFlow::cluster_flow_clear(name: string) - { - if ( name !in name_to_controller ) - { - Reporter::error(fmt("OpenFlow controller %s not found in mapping on master", name)); - return; - } - - local c = name_to_controller[name]; - - if ( ! c$state$_activated ) - return; - - if ( c?$flow_clear ) - c$flow_clear(c$state); - } -@endif - -function register_controller(tpe: OpenFlow::Plugin, name: string, controller: Controller) - { - controller$state$_name = cat(tpe, name); - controller$state$_plugin = tpe; - - # we only run the init functions on the manager. 
- if ( Cluster::local_node_type() != Cluster::MANAGER ) - return; - - register_controller_impl(tpe, name, controller); - } - -function unregister_controller(controller: Controller) - { - # we only run the on the manager. - if ( Cluster::local_node_type() != Cluster::MANAGER ) - return; - - unregister_controller_impl(controller); - } - -function lookup_controller(name: string): vector of Controller - { - # we only run the on the manager. Otherwhise we don't have a mapping or state -> return empty - if ( Cluster::local_node_type() != Cluster::MANAGER ) - return vector(); - - # I am not quite sure if we can actually get away with this - in the - # current state, this means that the individual nodes cannot lookup - # a controller by name. - # - # This means that there can be no reactions to things on the actual - # worker nodes - because they cannot look up a name. On the other hand - - # currently we also do not even send the events to the worker nodes (at least - # not if we are using broker). Because of that I am not really feeling that - # badly about it... - - return lookup_controller_impl(name); - } diff --git a/scripts/base/frameworks/openflow/cluster.zeek b/scripts/base/frameworks/openflow/cluster.zeek new file mode 100644 index 0000000000..6ff005b877 --- /dev/null +++ b/scripts/base/frameworks/openflow/cluster.zeek @@ -0,0 +1,126 @@ +##! Cluster support for the OpenFlow framework. + +@load ./main +@load base/frameworks/cluster + +module OpenFlow; + +export { + ## This is the event used to transport flow_mod messages to the manager. + global cluster_flow_mod: event(name: string, match: ofp_match, flow_mod: ofp_flow_mod); + + ## This is the event used to transport flow_clear messages to the manager. + global cluster_flow_clear: event(name: string); +} + +@if ( Cluster::local_node_type() != Cluster::MANAGER ) +# Workers need ability to forward commands to manager. +event zeek_init() + { + Broker::auto_publish(Cluster::manager_topic, OpenFlow::cluster_flow_mod); + Broker::auto_publish(Cluster::manager_topic, OpenFlow::cluster_flow_clear); + } +@endif + +# the flow_mod function wrapper +function flow_mod(controller: Controller, match: ofp_match, flow_mod: ofp_flow_mod): bool + { + if ( ! controller?$flow_mod ) + return F; + + if ( Cluster::local_node_type() == Cluster::MANAGER ) + return controller$flow_mod(controller$state, match, flow_mod); + else + event OpenFlow::cluster_flow_mod(controller$state$_name, match, flow_mod); + + return T; + } + +function flow_clear(controller: Controller): bool + { + if ( ! controller?$flow_clear ) + return F; + + if ( Cluster::local_node_type() == Cluster::MANAGER ) + return controller$flow_clear(controller$state); + else + event OpenFlow::cluster_flow_clear(controller$state$_name); + + return T; + } + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) +event OpenFlow::cluster_flow_mod(name: string, match: ofp_match, flow_mod: ofp_flow_mod) + { + if ( name !in name_to_controller ) + { + Reporter::error(fmt("OpenFlow controller %s not found in mapping on master", name)); + return; + } + + local c = name_to_controller[name]; + + if ( ! c$state$_activated ) + return; + + if ( c?$flow_mod ) + c$flow_mod(c$state, match, flow_mod); + } + +event OpenFlow::cluster_flow_clear(name: string) + { + if ( name !in name_to_controller ) + { + Reporter::error(fmt("OpenFlow controller %s not found in mapping on master", name)); + return; + } + + local c = name_to_controller[name]; + + if ( ! 
c$state$_activated )
+ return;
+
+ if ( c?$flow_clear )
+ c$flow_clear(c$state);
+ }
+@endif
+
+function register_controller(tpe: OpenFlow::Plugin, name: string, controller: Controller)
+ {
+ controller$state$_name = cat(tpe, name);
+ controller$state$_plugin = tpe;
+
+ # we only run the init functions on the manager.
+ if ( Cluster::local_node_type() != Cluster::MANAGER )
+ return;
+
+ register_controller_impl(tpe, name, controller);
+ }
+
+function unregister_controller(controller: Controller)
+ {
+ # We only run this on the manager.
+ if ( Cluster::local_node_type() != Cluster::MANAGER )
+ return;
+
+ unregister_controller_impl(controller);
+ }
+
+function lookup_controller(name: string): vector of Controller
+ {
+ # We only run this on the manager. Otherwise we don't have a mapping or state -> return empty.
+ if ( Cluster::local_node_type() != Cluster::MANAGER )
+ return vector();
+
+ # I am not quite sure if we can actually get away with this - in the
+ # current state, this means that the individual nodes cannot look up
+ # a controller by name.
+ #
+ # This means that there can be no reactions to things on the actual
+ # worker nodes - because they cannot look up a name. On the other hand -
+ # currently we also do not even send the events to the worker nodes (at least
+ # not if we are using broker). Because of that I am not really feeling that
+ # badly about it...
+
+ return lookup_controller_impl(name);
+ }
diff --git a/scripts/base/frameworks/openflow/consts.bro b/scripts/base/frameworks/openflow/consts.bro
deleted file mode 100644
index 3564137701..0000000000
--- a/scripts/base/frameworks/openflow/consts.bro
+++ /dev/null
@@ -1,229 +0,0 @@
-##! Constants used by the OpenFlow framework.
-
-# All types/constants not specific to OpenFlow will be defined here
-# until they somehow get into Bro.
-
-module OpenFlow;
-
-# Some cookie specific constants.
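A brief usage sketch for the OpenFlow framework above (not part of the patch; the log wording is invented, and only event signatures declared by the framework are used): scripts can subscribe to the result events raised for queued rule changes.

@load base/frameworks/openflow

# Surface controller feedback in reporter.log so operators can tell whether
# queued flow_mod requests actually took effect on the switch.
event OpenFlow::flow_mod_success(name: string, match: OpenFlow::ofp_match,
                                 flow_mod: OpenFlow::ofp_flow_mod, msg: string)
	{
	Reporter::info(fmt("OpenFlow rule applied via controller %s", name));
	}

event OpenFlow::flow_mod_failure(name: string, match: OpenFlow::ofp_match,
                                 flow_mod: OpenFlow::ofp_flow_mod, msg: string)
	{
	Reporter::warning(fmt("OpenFlow rule failed via controller %s: %s", name, msg));
	}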
-# first 24 bits -const COOKIE_BID_SIZE = 16777216; -# start at bit 40 (1 << 40) -const COOKIE_BID_START = 1099511627776; -# bro specific cookie ID shall have the 42 bit set (1 << 42) -const BRO_COOKIE_ID = 4; -# 8 bits group identifier -const COOKIE_GID_SIZE = 256; -# start at bit 32 (1 << 32) -const COOKIE_GID_START = 4294967296; -# 32 bits unique identifier -const COOKIE_UID_SIZE = 4294967296; -# start at bit 0 (1 << 0) -const COOKIE_UID_START = 0; - -export { - # All ethertypes can be found at - # http://standards.ieee.org/develop/regauth/ethertype/eth.txt - # but are not interesting for us at this point -#type ethertype: enum { - # Internet protocol version 4 - const ETH_IPv4 = 0x0800; - # Address resolution protocol - const ETH_ARP = 0x0806; - # Wake on LAN - const ETH_WOL = 0x0842; - # Reverse address resolution protocol - const ETH_RARP = 0x8035; - # Appletalk - const ETH_APPLETALK = 0x809B; - # Appletalk address resolution protocol - const ETH_APPLETALK_ARP = 0x80F3; - # IEEE 802.1q & IEEE 802.1aq - const ETH_VLAN = 0x8100; - # Novell IPX old - const ETH_IPX_OLD = 0x8137; - # Novell IPX - const ETH_IPX = 0x8138; - # Internet protocol version 6 - const ETH_IPv6 = 0x86DD; - # IEEE 802.3x - const ETH_ETHER_FLOW_CONTROL = 0x8808; - # Multiprotocol Label Switching unicast - const ETH_MPLS_UNICAST = 0x8847; - # Multiprotocol Label Switching multicast - const ETH_MPLS_MULTICAST = 0x8848; - # Point-to-point protocol over Ethernet discovery phase (rfc2516) - const ETH_PPPOE_DISCOVERY = 0x8863; - # Point-to-point protocol over Ethernet session phase (rfc2516) - const ETH_PPPOE_SESSION = 0x8864; - # Jumbo frames - const ETH_JUMBO_FRAMES = 0x8870; - # IEEE 802.1X - const ETH_EAP_OVER_LAN = 0x888E; - # IEEE 802.1ad & IEEE 802.1aq - const ETH_PROVIDER_BRIDING = 0x88A8; - # IEEE 802.1ae - const ETH_MAC_SECURITY = 0x88E5; - # IEEE 802.1ad (QinQ) - const ETH_QINQ = 0x9100; -#}; - - # A list of ip protocol numbers can be found at - # http://en.wikipedia.org/wiki/List_of_IP_protocol_numbers -#type iptype: enum { - # IPv6 Hop-by-Hop Option (RFC2460) - const IP_HOPOPT = 0x00; - # Internet Control Message Protocol (RFC792) - const IP_ICMP = 0x01; - # Internet Group Management Protocol (RFC1112) - const IP_IGMP = 0x02; - # Gateway-to-Gateway Protocol (RFC823) - const IP_GGP = 0x03; - # IP-Within-IP (encapsulation) (RFC2003) - const IP_IPIP = 0x04; - # Internet Stream Protocol (RFC1190;RFC1819) - const IP_ST = 0x05; - # Tansmission Control Protocol (RFC793) - const IP_TCP = 0x06; - # Core-based trees (RFC2189) - const IP_CBT = 0x07; - # Exterior Gateway Protocol (RFC888) - const IP_EGP = 0x08; - # Interior Gateway Protocol (any private interior - # gateway (used by Cisco for their IGRP)) - const IP_IGP = 0x09; - # User Datagram Protocol (RFC768) - const IP_UDP = 0x11; - # Reliable Datagram Protocol (RFC908) - const IP_RDP = 0x1B; - # IPv6 Encapsulation (RFC2473) - const IP_IPv6 = 0x29; - # Resource Reservation Protocol (RFC2205) - const IP_RSVP = 0x2E; - # Generic Routing Encapsulation (RFC2784;RFC2890) - const IP_GRE = 0x2F; - # Open Shortest Path First (RFC1583) - const IP_OSPF = 0x59; - # Multicast Transport Protocol - const IP_MTP = 0x5C; - # IP-within-IP Encapsulation Protocol (RFC2003) - ### error 0x5E; - # Ethernet-within-IP Encapsulation Protocol (RFC3378) - const IP_ETHERIP = 0x61; - # Layer Two Tunneling Protocol Version 3 (RFC3931) - const IP_L2TP = 0x73; - # Intermediate System to Intermediate System (IS-IS) Protocol over IPv4 (RFC1142;RFC1195) - const IP_ISIS = 0x7C; - # Fibre Channel - 
const IP_FC = 0x85; - # Multiprotocol Label Switching Encapsulated in IP (RFC4023) - const IP_MPLS = 0x89; -#}; - - ## Return value for a cookie from a flow - ## which is not added, modified or deleted - ## from the bro openflow framework. - const INVALID_COOKIE = 0xffffffffffffffff; - # Openflow physical port definitions - ## Send the packet out the input port. This - ## virual port must be explicitly used in - ## order to send back out of the input port. - const OFPP_IN_PORT = 0xfffffff8; - ## Perform actions in flow table. - ## NB: This can only be the destination port - ## for packet-out messages. - const OFPP_TABLE = 0xfffffff9; - ## Process with normal L2/L3 switching. - const OFPP_NORMAL = 0xfffffffa; - ## All physical ports except input port and - ## those disabled by STP. - const OFPP_FLOOD = 0xfffffffb; - ## All physical ports except input port. - const OFPP_ALL = 0xfffffffc; - ## Send to controller. - const OFPP_CONTROLLER = 0xfffffffd; - ## Local openflow "port". - const OFPP_LOCAL = 0xfffffffe; - ## Wildcard port used only for flow mod (delete) and flow stats requests. - const OFPP_ANY = 0xffffffff; - # Openflow no buffer constant. - const OFP_NO_BUFFER = 0xffffffff; - ## Send flow removed message when flow - ## expires or is deleted. - const OFPFF_SEND_FLOW_REM = 0x1; - ## Check for overlapping entries first. - const OFPFF_CHECK_OVERLAP = 0x2; - ## Remark this is for emergency. - ## Flows added with this are only used - ## when the controller is disconnected. - const OFPFF_EMERG = 0x4; - - # Wildcard table used for table config, - # flow stats and flow deletes. - const OFPTT_ALL = 0xff; - - ## Openflow action_type definitions. - ## - ## The openflow action type defines - ## what actions openflow can take - ## to modify a packet - type ofp_action_type: enum { - ## Output to switch port. - OFPAT_OUTPUT = 0x0000, - ## Set the 802.1q VLAN id. - OFPAT_SET_VLAN_VID = 0x0001, - ## Set the 802.1q priority. - OFPAT_SET_VLAN_PCP = 0x0002, - ## Strip the 802.1q header. - OFPAT_STRIP_VLAN = 0x0003, - ## Ethernet source address. - OFPAT_SET_DL_SRC = 0x0004, - ## Ethernet destination address. - OFPAT_SET_DL_DST = 0x0005, - ## IP source address. - OFPAT_SET_NW_SRC = 0x0006, - ## IP destination address. - OFPAT_SET_NW_DST = 0x0007, - ## IP ToS (DSCP field, 6 bits). - OFPAT_SET_NW_TOS = 0x0008, - ## TCP/UDP source port. - OFPAT_SET_TP_SRC = 0x0009, - ## TCP/UDP destination port. - OFPAT_SET_TP_DST = 0x000a, - ## Output to queue. - OFPAT_ENQUEUE = 0x000b, - ## Vendor specific. - OFPAT_VENDOR = 0xffff, - }; - - ## Openflow flow_mod_command definitions. - ## - ## The openflow flow_mod_command describes - ## of what kind an action is. - type ofp_flow_mod_command: enum { - ## New flow. - OFPFC_ADD = 0x0, - ## Modify all matching flows. - OFPFC_MODIFY = 0x1, - ## Modify entry strictly matching wildcards. - OFPFC_MODIFY_STRICT = 0x2, - ## Delete all matching flows. - OFPFC_DELETE = 0x3, - ## Strictly matching wildcards and priority. - OFPFC_DELETE_STRICT = 0x4, - }; - - ## Openflow config flag definitions. - ## - ## TODO: describe - type ofp_config_flags: enum { - ## No special handling for fragments. - OFPC_FRAG_NORMAL = 0, - ## Drop fragments. - OFPC_FRAG_DROP = 1, - ## Reassemble (only if OFPC_IP_REASM set). 
- OFPC_FRAG_REASM = 2, - OFPC_FRAG_MASK = 3, - }; - -} diff --git a/scripts/base/frameworks/openflow/consts.zeek b/scripts/base/frameworks/openflow/consts.zeek new file mode 100644 index 0000000000..ea6a5e5eec --- /dev/null +++ b/scripts/base/frameworks/openflow/consts.zeek @@ -0,0 +1,229 @@ +##! Constants used by the OpenFlow framework. + +# All types/constants not specific to OpenFlow will be defined here +# until they somehow get into Zeek. + +module OpenFlow; + +# Some cookie specific constants. +# first 24 bits +const COOKIE_BID_SIZE = 16777216; +# start at bit 40 (1 << 40) +const COOKIE_BID_START = 1099511627776; +# Zeek specific cookie ID shall have the 42 bit set (1 << 42) +const ZEEK_COOKIE_ID = 4; +# 8 bits group identifier +const COOKIE_GID_SIZE = 256; +# start at bit 32 (1 << 32) +const COOKIE_GID_START = 4294967296; +# 32 bits unique identifier +const COOKIE_UID_SIZE = 4294967296; +# start at bit 0 (1 << 0) +const COOKIE_UID_START = 0; + +export { + # All ethertypes can be found at + # http://standards.ieee.org/develop/regauth/ethertype/eth.txt + # but are not interesting for us at this point +#type ethertype: enum { + # Internet protocol version 4 + const ETH_IPv4 = 0x0800; + # Address resolution protocol + const ETH_ARP = 0x0806; + # Wake on LAN + const ETH_WOL = 0x0842; + # Reverse address resolution protocol + const ETH_RARP = 0x8035; + # Appletalk + const ETH_APPLETALK = 0x809B; + # Appletalk address resolution protocol + const ETH_APPLETALK_ARP = 0x80F3; + # IEEE 802.1q & IEEE 802.1aq + const ETH_VLAN = 0x8100; + # Novell IPX old + const ETH_IPX_OLD = 0x8137; + # Novell IPX + const ETH_IPX = 0x8138; + # Internet protocol version 6 + const ETH_IPv6 = 0x86DD; + # IEEE 802.3x + const ETH_ETHER_FLOW_CONTROL = 0x8808; + # Multiprotocol Label Switching unicast + const ETH_MPLS_UNICAST = 0x8847; + # Multiprotocol Label Switching multicast + const ETH_MPLS_MULTICAST = 0x8848; + # Point-to-point protocol over Ethernet discovery phase (rfc2516) + const ETH_PPPOE_DISCOVERY = 0x8863; + # Point-to-point protocol over Ethernet session phase (rfc2516) + const ETH_PPPOE_SESSION = 0x8864; + # Jumbo frames + const ETH_JUMBO_FRAMES = 0x8870; + # IEEE 802.1X + const ETH_EAP_OVER_LAN = 0x888E; + # IEEE 802.1ad & IEEE 802.1aq + const ETH_PROVIDER_BRIDING = 0x88A8; + # IEEE 802.1ae + const ETH_MAC_SECURITY = 0x88E5; + # IEEE 802.1ad (QinQ) + const ETH_QINQ = 0x9100; +#}; + + # A list of ip protocol numbers can be found at + # http://en.wikipedia.org/wiki/List_of_IP_protocol_numbers +#type iptype: enum { + # IPv6 Hop-by-Hop Option (RFC2460) + const IP_HOPOPT = 0x00; + # Internet Control Message Protocol (RFC792) + const IP_ICMP = 0x01; + # Internet Group Management Protocol (RFC1112) + const IP_IGMP = 0x02; + # Gateway-to-Gateway Protocol (RFC823) + const IP_GGP = 0x03; + # IP-Within-IP (encapsulation) (RFC2003) + const IP_IPIP = 0x04; + # Internet Stream Protocol (RFC1190;RFC1819) + const IP_ST = 0x05; + # Tansmission Control Protocol (RFC793) + const IP_TCP = 0x06; + # Core-based trees (RFC2189) + const IP_CBT = 0x07; + # Exterior Gateway Protocol (RFC888) + const IP_EGP = 0x08; + # Interior Gateway Protocol (any private interior + # gateway (used by Cisco for their IGRP)) + const IP_IGP = 0x09; + # User Datagram Protocol (RFC768) + const IP_UDP = 0x11; + # Reliable Datagram Protocol (RFC908) + const IP_RDP = 0x1B; + # IPv6 Encapsulation (RFC2473) + const IP_IPv6 = 0x29; + # Resource Reservation Protocol (RFC2205) + const IP_RSVP = 0x2E; + # Generic Routing Encapsulation (RFC2784;RFC2890) + 
const IP_GRE = 0x2F; + # Open Shortest Path First (RFC1583) + const IP_OSPF = 0x59; + # Multicast Transport Protocol + const IP_MTP = 0x5C; + # IP-within-IP Encapsulation Protocol (RFC2003) + ### error 0x5E; + # Ethernet-within-IP Encapsulation Protocol (RFC3378) + const IP_ETHERIP = 0x61; + # Layer Two Tunneling Protocol Version 3 (RFC3931) + const IP_L2TP = 0x73; + # Intermediate System to Intermediate System (IS-IS) Protocol over IPv4 (RFC1142;RFC1195) + const IP_ISIS = 0x7C; + # Fibre Channel + const IP_FC = 0x85; + # Multiprotocol Label Switching Encapsulated in IP (RFC4023) + const IP_MPLS = 0x89; +#}; + + ## Return value for a cookie from a flow + ## which is not added, modified or deleted + ## from the Zeek openflow framework. + const INVALID_COOKIE = 0xffffffffffffffff; + # Openflow physical port definitions + ## Send the packet out the input port. This + ## virual port must be explicitly used in + ## order to send back out of the input port. + const OFPP_IN_PORT = 0xfffffff8; + ## Perform actions in flow table. + ## NB: This can only be the destination port + ## for packet-out messages. + const OFPP_TABLE = 0xfffffff9; + ## Process with normal L2/L3 switching. + const OFPP_NORMAL = 0xfffffffa; + ## All physical ports except input port and + ## those disabled by STP. + const OFPP_FLOOD = 0xfffffffb; + ## All physical ports except input port. + const OFPP_ALL = 0xfffffffc; + ## Send to controller. + const OFPP_CONTROLLER = 0xfffffffd; + ## Local openflow "port". + const OFPP_LOCAL = 0xfffffffe; + ## Wildcard port used only for flow mod (delete) and flow stats requests. + const OFPP_ANY = 0xffffffff; + # Openflow no buffer constant. + const OFP_NO_BUFFER = 0xffffffff; + ## Send flow removed message when flow + ## expires or is deleted. + const OFPFF_SEND_FLOW_REM = 0x1; + ## Check for overlapping entries first. + const OFPFF_CHECK_OVERLAP = 0x2; + ## Remark this is for emergency. + ## Flows added with this are only used + ## when the controller is disconnected. + const OFPFF_EMERG = 0x4; + + # Wildcard table used for table config, + # flow stats and flow deletes. + const OFPTT_ALL = 0xff; + + ## Openflow action_type definitions. + ## + ## The openflow action type defines + ## what actions openflow can take + ## to modify a packet + type ofp_action_type: enum { + ## Output to switch port. + OFPAT_OUTPUT = 0x0000, + ## Set the 802.1q VLAN id. + OFPAT_SET_VLAN_VID = 0x0001, + ## Set the 802.1q priority. + OFPAT_SET_VLAN_PCP = 0x0002, + ## Strip the 802.1q header. + OFPAT_STRIP_VLAN = 0x0003, + ## Ethernet source address. + OFPAT_SET_DL_SRC = 0x0004, + ## Ethernet destination address. + OFPAT_SET_DL_DST = 0x0005, + ## IP source address. + OFPAT_SET_NW_SRC = 0x0006, + ## IP destination address. + OFPAT_SET_NW_DST = 0x0007, + ## IP ToS (DSCP field, 6 bits). + OFPAT_SET_NW_TOS = 0x0008, + ## TCP/UDP source port. + OFPAT_SET_TP_SRC = 0x0009, + ## TCP/UDP destination port. + OFPAT_SET_TP_DST = 0x000a, + ## Output to queue. + OFPAT_ENQUEUE = 0x000b, + ## Vendor specific. + OFPAT_VENDOR = 0xffff, + }; + + ## Openflow flow_mod_command definitions. + ## + ## The openflow flow_mod_command describes + ## of what kind an action is. + type ofp_flow_mod_command: enum { + ## New flow. + OFPFC_ADD = 0x0, + ## Modify all matching flows. + OFPFC_MODIFY = 0x1, + ## Modify entry strictly matching wildcards. + OFPFC_MODIFY_STRICT = 0x2, + ## Delete all matching flows. + OFPFC_DELETE = 0x3, + ## Strictly matching wildcards and priority. 
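Side note for readers of this changeset: the cookie constants at the top of the new consts.zeek pack three fields into a single 64-bit flow cookie: the framework-owned marker bit (bit 42), an 8-bit group id (bits 32-39) and a 32-bit unique id (bits 0-31). The sketch below illustrates that arithmetic; it is not part of the patch, the gid/uid values are made up, and generate_cookie() in main.zeek further down only packs the uid portion.

event zeek_init()
	{
	# ZEEK_COOKIE_ID * COOKIE_BID_START == 4 * 2^40, i.e. bit 42 set,
	# which marks the cookie as owned by the Zeek OpenFlow framework.
	local bid = 4 * 1099511627776;
	# Hypothetical group id 5, shifted to bits 32-39 (COOKIE_GID_START == 2^32).
	local gid = 5 * 4294967296;
	# Hypothetical unique id, occupying bits 0-31.
	local uid = 1000;
	print bid + gid + uid;	# prints 4419521348584
	}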
+ OFPFC_DELETE_STRICT = 0x4, + }; + + ## Openflow config flag definitions. + ## + ## TODO: describe + type ofp_config_flags: enum { + ## No special handling for fragments. + OFPC_FRAG_NORMAL = 0, + ## Drop fragments. + OFPC_FRAG_DROP = 1, + ## Reassemble (only if OFPC_IP_REASM set). + OFPC_FRAG_REASM = 2, + OFPC_FRAG_MASK = 3, + }; + +} diff --git a/scripts/base/frameworks/openflow/main.bro b/scripts/base/frameworks/openflow/main.bro deleted file mode 100644 index 5740e90056..0000000000 --- a/scripts/base/frameworks/openflow/main.bro +++ /dev/null @@ -1,289 +0,0 @@ -##! Bro's OpenFlow control framework. -##! -##! This plugin-based framework allows to control OpenFlow capable -##! switches by implementing communication to an OpenFlow controller -##! via plugins. The framework has to be instantiated via the new function -##! in one of the plugins. This framework only offers very low-level -##! functionality; if you want to use OpenFlow capable switches, e.g., -##! for shunting, please look at the NetControl framework, which provides higher -##! level functions and can use the OpenFlow framework as a backend. - -module OpenFlow; - -@load ./consts -@load ./types - -export { - ## Global flow_mod function. - ## - ## controller: The controller which should execute the flow modification. - ## - ## match: The ofp_match record which describes the flow to match. - ## - ## flow_mod: The openflow flow_mod record which describes the action to take. - ## - ## Returns: F on error or if the plugin does not support the operation, T when the operation was queued. - global flow_mod: function(controller: Controller, match: ofp_match, flow_mod: ofp_flow_mod): bool; - - ## Clear the current flow table of the controller. - ## - ## controller: The controller which should execute the flow modification. - ## - ## Returns: F on error or if the plugin does not support the operation, T when the operation was queued. - global flow_clear: function(controller: Controller): bool; - - ## Event confirming successful modification of a flow rule. - ## - ## name: The unique name of the OpenFlow controller from which this event originated. - ## - ## match: The ofp_match record which describes the flow to match. - ## - ## flow_mod: The openflow flow_mod record which describes the action to take. - ## - ## msg: An optional informational message by the plugin. - global flow_mod_success: event(name: string, match: ofp_match, flow_mod: ofp_flow_mod, msg: string &default=""); - - ## Reports an error while installing a flow Rule. - ## - ## name: The unique name of the OpenFlow controller from which this event originated. - ## - ## match: The ofp_match record which describes the flow to match. - ## - ## flow_mod: The openflow flow_mod record which describes the action to take. - ## - ## msg: Message to describe the event. - global flow_mod_failure: event(name: string, match: ofp_match, flow_mod: ofp_flow_mod, msg: string &default=""); - - ## Reports that a flow was removed by the switch because of either the hard or the idle timeout. - ## This message is only generated by controllers that indicate that they support flow removal - ## in supports_flow_removed. - ## - ## name: The unique name of the OpenFlow controller from which this event originated. - ## - ## match: The ofp_match record which was used to create the flow. - ## - ## cookie: The cookie that was specified when creating the flow. - ## - ## priority: The priority that was specified when creating the flow. - ## - ## reason: The reason for flow removal (OFPRR_*). 
- ## - ## duration_sec: Duration of the flow in seconds. - ## - ## packet_count: Packet count of the flow. - ## - ## byte_count: Byte count of the flow. - global flow_removed: event(name: string, match: ofp_match, cookie: count, priority: count, reason: count, duration_sec: count, idle_timeout: count, packet_count: count, byte_count: count); - - ## Convert a conn_id record into an ofp_match record that can be used to - ## create match objects for OpenFlow. - ## - ## id: The conn_id record that describes the record. - ## - ## reverse: Reverse the sources and destinations when creating the match record (default F). - ## - ## Returns: ofp_match object for the conn_id record. - global match_conn: function(id: conn_id, reverse: bool &default=F): ofp_match; - - # ### - # ### Low-level functions for cookie handling and plugin registration. - # ### - - ## Function to get the unique id out of a given cookie. - ## - ## cookie: The openflow match cookie. - ## - ## Returns: The cookie unique id. - global get_cookie_uid: function(cookie: count): count; - - ## Function to get the group id out of a given cookie. - ## - ## cookie: The openflow match cookie. - ## - ## Returns: The cookie group id. - global get_cookie_gid: function(cookie: count): count; - - ## Function to generate a new cookie using our group id. - ## - ## cookie: The openflow match cookie. - ## - ## Returns: The cookie group id. - global generate_cookie: function(cookie: count &default=0): count; - - ## Function to register a controller instance. This function - ## is called automatically by the plugin _new functions. - ## - ## tpe: Type of this plugin. - ## - ## name: Unique name of this controller instance. - ## - ## controller: The controller to register. - global register_controller: function(tpe: OpenFlow::Plugin, name: string, controller: Controller); - - ## Function to unregister a controller instance. This function - ## should be called when a specific controller should no longer - ## be used. - ## - ## controller: The controller to unregister. - global unregister_controller: function(controller: Controller); - - ## Function to signal that a controller finished activation and is - ## ready to use. Will throw the ``OpenFlow::controller_activated`` - ## event. - global controller_init_done: function(controller: Controller); - - ## Event that is raised once a controller finishes initialization - ## and is completely activated. - ## name: Unique name of this controller instance. - ## - ## controller: The controller that finished activation. - global OpenFlow::controller_activated: event(name: string, controller: Controller); - - ## Function to lookup a controller instance by name. - ## - ## name: Unique name of the controller to look up. - ## - ## Returns: One element vector with controller, if found. Empty vector otherwise. 
- global lookup_controller: function(name: string): vector of Controller; -} - -global name_to_controller: table[string] of Controller; - - -function match_conn(id: conn_id, reverse: bool &default=F): ofp_match - { - local dl_type = ETH_IPv4; - local proto = IP_TCP; - - local orig_h: addr; - local orig_p: port; - local resp_h: addr; - local resp_p: port; - - if ( reverse == F ) - { - orig_h = id$orig_h; - orig_p = id$orig_p; - resp_h = id$resp_h; - resp_p = id$resp_p; - } - else - { - orig_h = id$resp_h; - orig_p = id$resp_p; - resp_h = id$orig_h; - resp_p = id$orig_p; - } - - if ( is_v6_addr(orig_h) ) - dl_type = ETH_IPv6; - - if ( is_udp_port(orig_p) ) - proto = IP_UDP; - else if ( is_icmp_port(orig_p) ) - proto = IP_ICMP; - - return ofp_match( - $dl_type=dl_type, - $nw_proto=proto, - $nw_src=addr_to_subnet(orig_h), - $tp_src=port_to_count(orig_p), - $nw_dst=addr_to_subnet(resp_h), - $tp_dst=port_to_count(resp_p) - ); - } - -# local function to forge a flow_mod cookie for this framework. -# all flow entries from the openflow framework should have the -# 42 bit of the cookie set. -function generate_cookie(cookie: count &default=0): count - { - local c = BRO_COOKIE_ID * COOKIE_BID_START; - - if ( cookie >= COOKIE_UID_SIZE ) - Reporter::warning(fmt("The given cookie uid '%d' is > 32bit and will be discarded", cookie)); - else - c += cookie; - - return c; - } - -# local function to check if a given flow_mod cookie is forged from this framework. -function is_valid_cookie(cookie: count): bool - { - if ( cookie / COOKIE_BID_START == BRO_COOKIE_ID ) - return T; - - Reporter::warning(fmt("The given Openflow cookie '%d' is not valid", cookie)); - - return F; - } - -function get_cookie_uid(cookie: count): count - { - if( is_valid_cookie(cookie) ) - return (cookie - ((cookie / COOKIE_GID_START) * COOKIE_GID_START)); - - return INVALID_COOKIE; - } - -function get_cookie_gid(cookie: count): count - { - if( is_valid_cookie(cookie) ) - return ( - (cookie - (COOKIE_BID_START * BRO_COOKIE_ID) - - (cookie - ((cookie / COOKIE_GID_START) * COOKIE_GID_START))) / - COOKIE_GID_START - ); - - return INVALID_COOKIE; - } - -function controller_init_done(controller: Controller) - { - if ( controller$state$_name !in name_to_controller ) - { - Reporter::error(fmt("Openflow initialized unknown plugin %s successfully?", controller$state$_name)); - return; - } - - controller$state$_activated = T; - event OpenFlow::controller_activated(controller$state$_name, controller); - } - -# Functions that are called from cluster.bro and non-cluster.bro - -function register_controller_impl(tpe: OpenFlow::Plugin, name: string, controller: Controller) - { - if ( controller$state$_name in name_to_controller ) - { - Reporter::error(fmt("OpenFlow Controller %s was already registered. 
Ignored duplicate registration", controller$state$_name)); - return; - } - - name_to_controller[controller$state$_name] = controller; - - if ( controller?$init ) - controller$init(controller$state); - else - controller_init_done(controller); - } - -function unregister_controller_impl(controller: Controller) - { - if ( controller$state$_name in name_to_controller ) - delete name_to_controller[controller$state$_name]; - else - Reporter::error("OpenFlow Controller %s was not registered in unregister."); - - if ( controller?$destroy ) - controller$destroy(controller$state); - } - -function lookup_controller_impl(name: string): vector of Controller - { - if ( name in name_to_controller ) - return vector(name_to_controller[name]); - else - return vector(); - } diff --git a/scripts/base/frameworks/openflow/main.zeek b/scripts/base/frameworks/openflow/main.zeek new file mode 100644 index 0000000000..9649000b21 --- /dev/null +++ b/scripts/base/frameworks/openflow/main.zeek @@ -0,0 +1,289 @@ +##! Zeek's OpenFlow control framework. +##! +##! This plugin-based framework allows to control OpenFlow capable +##! switches by implementing communication to an OpenFlow controller +##! via plugins. The framework has to be instantiated via the new function +##! in one of the plugins. This framework only offers very low-level +##! functionality; if you want to use OpenFlow capable switches, e.g., +##! for shunting, please look at the NetControl framework, which provides higher +##! level functions and can use the OpenFlow framework as a backend. + +module OpenFlow; + +@load ./consts +@load ./types + +export { + ## Global flow_mod function. + ## + ## controller: The controller which should execute the flow modification. + ## + ## match: The ofp_match record which describes the flow to match. + ## + ## flow_mod: The openflow flow_mod record which describes the action to take. + ## + ## Returns: F on error or if the plugin does not support the operation, T when the operation was queued. + global flow_mod: function(controller: Controller, match: ofp_match, flow_mod: ofp_flow_mod): bool; + + ## Clear the current flow table of the controller. + ## + ## controller: The controller which should execute the flow modification. + ## + ## Returns: F on error or if the plugin does not support the operation, T when the operation was queued. + global flow_clear: function(controller: Controller): bool; + + ## Event confirming successful modification of a flow rule. + ## + ## name: The unique name of the OpenFlow controller from which this event originated. + ## + ## match: The ofp_match record which describes the flow to match. + ## + ## flow_mod: The openflow flow_mod record which describes the action to take. + ## + ## msg: An optional informational message by the plugin. + global flow_mod_success: event(name: string, match: ofp_match, flow_mod: ofp_flow_mod, msg: string &default=""); + + ## Reports an error while installing a flow Rule. + ## + ## name: The unique name of the OpenFlow controller from which this event originated. + ## + ## match: The ofp_match record which describes the flow to match. + ## + ## flow_mod: The openflow flow_mod record which describes the action to take. + ## + ## msg: Message to describe the event. + global flow_mod_failure: event(name: string, match: ofp_match, flow_mod: ofp_flow_mod, msg: string &default=""); + + ## Reports that a flow was removed by the switch because of either the hard or the idle timeout. 
+ ## This message is only generated by controllers that indicate that they support flow removal + ## in supports_flow_removed. + ## + ## name: The unique name of the OpenFlow controller from which this event originated. + ## + ## match: The ofp_match record which was used to create the flow. + ## + ## cookie: The cookie that was specified when creating the flow. + ## + ## priority: The priority that was specified when creating the flow. + ## + ## reason: The reason for flow removal (OFPRR_*). + ## + ## duration_sec: Duration of the flow in seconds. + ## + ## packet_count: Packet count of the flow. + ## + ## byte_count: Byte count of the flow. + global flow_removed: event(name: string, match: ofp_match, cookie: count, priority: count, reason: count, duration_sec: count, idle_timeout: count, packet_count: count, byte_count: count); + + ## Convert a conn_id record into an ofp_match record that can be used to + ## create match objects for OpenFlow. + ## + ## id: The conn_id record that describes the record. + ## + ## reverse: Reverse the sources and destinations when creating the match record (default F). + ## + ## Returns: ofp_match object for the conn_id record. + global match_conn: function(id: conn_id, reverse: bool &default=F): ofp_match; + + # ### + # ### Low-level functions for cookie handling and plugin registration. + # ### + + ## Function to get the unique id out of a given cookie. + ## + ## cookie: The openflow match cookie. + ## + ## Returns: The cookie unique id. + global get_cookie_uid: function(cookie: count): count; + + ## Function to get the group id out of a given cookie. + ## + ## cookie: The openflow match cookie. + ## + ## Returns: The cookie group id. + global get_cookie_gid: function(cookie: count): count; + + ## Function to generate a new cookie using our group id. + ## + ## cookie: The openflow match cookie. + ## + ## Returns: The cookie group id. + global generate_cookie: function(cookie: count &default=0): count; + + ## Function to register a controller instance. This function + ## is called automatically by the plugin _new functions. + ## + ## tpe: Type of this plugin. + ## + ## name: Unique name of this controller instance. + ## + ## controller: The controller to register. + global register_controller: function(tpe: OpenFlow::Plugin, name: string, controller: Controller); + + ## Function to unregister a controller instance. This function + ## should be called when a specific controller should no longer + ## be used. + ## + ## controller: The controller to unregister. + global unregister_controller: function(controller: Controller); + + ## Function to signal that a controller finished activation and is + ## ready to use. Will throw the ``OpenFlow::controller_activated`` + ## event. + global controller_init_done: function(controller: Controller); + + ## Event that is raised once a controller finishes initialization + ## and is completely activated. + ## name: Unique name of this controller instance. + ## + ## controller: The controller that finished activation. + global OpenFlow::controller_activated: event(name: string, controller: Controller); + + ## Function to lookup a controller instance by name. + ## + ## name: Unique name of the controller to look up. + ## + ## Returns: One element vector with controller, if found. Empty vector otherwise. 
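As context for the events re-declared in the new main.zeek above, here is a user-side sketch of handling them; the handler bodies (print and warning text) are purely illustrative and not part of this changeset.

@load base/frameworks/openflow

event OpenFlow::flow_mod_success(name: string, match: OpenFlow::ofp_match,
    flow_mod: OpenFlow::ofp_flow_mod, msg: string)
	{
	print fmt("controller %s applied flow_mod: %s", name, msg);
	}

event OpenFlow::flow_mod_failure(name: string, match: OpenFlow::ofp_match,
    flow_mod: OpenFlow::ofp_flow_mod, msg: string)
	{
	Reporter::warning(fmt("controller %s rejected flow_mod: %s", name, msg));
	}

event OpenFlow::flow_removed(name: string, match: OpenFlow::ofp_match,
    cookie: count, priority: count, reason: count, duration_sec: count,
    idle_timeout: count, packet_count: count, byte_count: count)
	{
	print fmt("flow removed on %s after %d seconds (%d packets, %d bytes)",
	    name, duration_sec, packet_count, byte_count);
	}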
+ global lookup_controller: function(name: string): vector of Controller; +} + +global name_to_controller: table[string] of Controller; + + +function match_conn(id: conn_id, reverse: bool &default=F): ofp_match + { + local dl_type = ETH_IPv4; + local proto = IP_TCP; + + local orig_h: addr; + local orig_p: port; + local resp_h: addr; + local resp_p: port; + + if ( reverse == F ) + { + orig_h = id$orig_h; + orig_p = id$orig_p; + resp_h = id$resp_h; + resp_p = id$resp_p; + } + else + { + orig_h = id$resp_h; + orig_p = id$resp_p; + resp_h = id$orig_h; + resp_p = id$orig_p; + } + + if ( is_v6_addr(orig_h) ) + dl_type = ETH_IPv6; + + if ( is_udp_port(orig_p) ) + proto = IP_UDP; + else if ( is_icmp_port(orig_p) ) + proto = IP_ICMP; + + return ofp_match( + $dl_type=dl_type, + $nw_proto=proto, + $nw_src=addr_to_subnet(orig_h), + $tp_src=port_to_count(orig_p), + $nw_dst=addr_to_subnet(resp_h), + $tp_dst=port_to_count(resp_p) + ); + } + +# local function to forge a flow_mod cookie for this framework. +# all flow entries from the openflow framework should have the +# 42 bit of the cookie set. +function generate_cookie(cookie: count &default=0): count + { + local c = ZEEK_COOKIE_ID * COOKIE_BID_START; + + if ( cookie >= COOKIE_UID_SIZE ) + Reporter::warning(fmt("The given cookie uid '%d' is > 32bit and will be discarded", cookie)); + else + c += cookie; + + return c; + } + +# local function to check if a given flow_mod cookie is forged from this framework. +function is_valid_cookie(cookie: count): bool + { + if ( cookie / COOKIE_BID_START == ZEEK_COOKIE_ID ) + return T; + + Reporter::warning(fmt("The given Openflow cookie '%d' is not valid", cookie)); + + return F; + } + +function get_cookie_uid(cookie: count): count + { + if( is_valid_cookie(cookie) ) + return (cookie - ((cookie / COOKIE_GID_START) * COOKIE_GID_START)); + + return INVALID_COOKIE; + } + +function get_cookie_gid(cookie: count): count + { + if( is_valid_cookie(cookie) ) + return ( + (cookie - (COOKIE_BID_START * ZEEK_COOKIE_ID) - + (cookie - ((cookie / COOKIE_GID_START) * COOKIE_GID_START))) / + COOKIE_GID_START + ); + + return INVALID_COOKIE; + } + +function controller_init_done(controller: Controller) + { + if ( controller$state$_name !in name_to_controller ) + { + Reporter::error(fmt("Openflow initialized unknown plugin %s successfully?", controller$state$_name)); + return; + } + + controller$state$_activated = T; + event OpenFlow::controller_activated(controller$state$_name, controller); + } + +# Functions that are called from cluster.zeek and non-cluster.zeek + +function register_controller_impl(tpe: OpenFlow::Plugin, name: string, controller: Controller) + { + if ( controller$state$_name in name_to_controller ) + { + Reporter::error(fmt("OpenFlow Controller %s was already registered. 
Ignored duplicate registration", controller$state$_name)); + return; + } + + name_to_controller[controller$state$_name] = controller; + + if ( controller?$init ) + controller$init(controller$state); + else + controller_init_done(controller); + } + +function unregister_controller_impl(controller: Controller) + { + if ( controller$state$_name in name_to_controller ) + delete name_to_controller[controller$state$_name]; + else + Reporter::error("OpenFlow Controller %s was not registered in unregister."); + + if ( controller?$destroy ) + controller$destroy(controller$state); + } + +function lookup_controller_impl(name: string): vector of Controller + { + if ( name in name_to_controller ) + return vector(name_to_controller[name]); + else + return vector(); + } diff --git a/scripts/base/frameworks/openflow/non-cluster.bro b/scripts/base/frameworks/openflow/non-cluster.zeek similarity index 100% rename from scripts/base/frameworks/openflow/non-cluster.bro rename to scripts/base/frameworks/openflow/non-cluster.zeek diff --git a/scripts/base/frameworks/openflow/plugins/__load__.bro b/scripts/base/frameworks/openflow/plugins/__load__.zeek similarity index 100% rename from scripts/base/frameworks/openflow/plugins/__load__.bro rename to scripts/base/frameworks/openflow/plugins/__load__.zeek diff --git a/scripts/base/frameworks/openflow/plugins/broker.bro b/scripts/base/frameworks/openflow/plugins/broker.bro deleted file mode 100644 index f37f0b8afc..0000000000 --- a/scripts/base/frameworks/openflow/plugins/broker.bro +++ /dev/null @@ -1,95 +0,0 @@ -##! OpenFlow plugin for interfacing to controllers via Broker. - -@load base/frameworks/openflow -@load base/frameworks/broker - -module OpenFlow; - -export { - redef enum Plugin += { - BROKER, - }; - - ## Broker controller constructor. - ## - ## host: Controller ip. - ## - ## host_port: Controller listen port. - ## - ## topic: Broker topic to send messages to. - ## - ## dpid: OpenFlow switch datapath id. - ## - ## Returns: OpenFlow::Controller record. - global broker_new: function(name: string, host: addr, host_port: port, topic: string, dpid: count): OpenFlow::Controller; - - redef record ControllerState += { - ## Controller ip. - broker_host: addr &optional; - ## Controller listen port. - broker_port: port &optional; - ## OpenFlow switch datapath id. - broker_dpid: count &optional; - ## Topic to send events for this controller to. 
- broker_topic: string &optional; - }; - - global broker_flow_mod: event(name: string, dpid: count, match: ofp_match, flow_mod: ofp_flow_mod); - global broker_flow_clear: event(name: string, dpid: count); -} - -global broker_peers: table[port, string] of Controller; - -function broker_describe(state: ControllerState): string - { - return fmt("Broker-%s:%d-%d", state$broker_host, state$broker_port, state$broker_dpid); - } - -function broker_flow_mod_fun(state: ControllerState, match: ofp_match, flow_mod: OpenFlow::ofp_flow_mod): bool - { - Broker::publish(state$broker_topic, Broker::make_event(broker_flow_mod, state$_name, state$broker_dpid, match, flow_mod)); - - return T; - } - -function broker_flow_clear_fun(state: OpenFlow::ControllerState): bool - { - Broker::publish(state$broker_topic, Broker::make_event(broker_flow_clear, state$_name, state$broker_dpid)); - - return T; - } - -function broker_init(state: OpenFlow::ControllerState) - { - Broker::peer(cat(state$broker_host), state$broker_port); - Broker::subscribe(state$broker_topic); # openflow success and failure events are directly sent back via the other plugin via broker. - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - local peer_address = cat(endpoint$network$address); - local peer_port = endpoint$network$bound_port; - if ( [peer_port, peer_address] !in broker_peers ) - # ok, this one was none of ours... - return; - - local p = broker_peers[peer_port, peer_address]; - controller_init_done(p); - delete broker_peers[peer_port, peer_address]; - } - -# broker controller constructor -function broker_new(name: string, host: addr, host_port: port, topic: string, dpid: count): OpenFlow::Controller - { - local c = OpenFlow::Controller($state=OpenFlow::ControllerState($broker_host=host, $broker_port=host_port, $broker_dpid=dpid, $broker_topic=topic), - $flow_mod=broker_flow_mod_fun, $flow_clear=broker_flow_clear_fun, $describe=broker_describe, $supports_flow_removed=T, $init=broker_init); - - register_controller(OpenFlow::BROKER, name, c); - - if ( [host_port, cat(host)] in broker_peers ) - Reporter::warning(fmt("Peer %s:%s was added to NetControl acld plugin twice.", host, host_port)); - else - broker_peers[host_port, cat(host)] = c; - - return c; - } diff --git a/scripts/base/frameworks/openflow/plugins/broker.zeek b/scripts/base/frameworks/openflow/plugins/broker.zeek new file mode 100644 index 0000000000..e6a594822e --- /dev/null +++ b/scripts/base/frameworks/openflow/plugins/broker.zeek @@ -0,0 +1,95 @@ +##! OpenFlow plugin for interfacing to controllers via Broker. + +@load base/frameworks/openflow +@load base/frameworks/broker + +module OpenFlow; + +export { + redef enum Plugin += { + BROKER, + }; + + ## Broker controller constructor. + ## + ## host: Controller ip. + ## + ## host_port: Controller listen port. + ## + ## topic: Broker topic to send messages to. + ## + ## dpid: OpenFlow switch datapath id. + ## + ## Returns: OpenFlow::Controller record. + global broker_new: function(name: string, host: addr, host_port: port, topic: string, dpid: count): OpenFlow::Controller; + + redef record ControllerState += { + ## Controller ip. + broker_host: addr &optional; + ## Controller listen port. + broker_port: port &optional; + ## OpenFlow switch datapath id. + broker_dpid: count &optional; + ## Topic to send events for this controller to. 
+ broker_topic: string &optional; + }; + + global broker_flow_mod: event(name: string, dpid: count, match: ofp_match, flow_mod: ofp_flow_mod); + global broker_flow_clear: event(name: string, dpid: count); +} + +global broker_peers: table[port, string] of Controller; + +function broker_describe(state: ControllerState): string + { + return fmt("Broker-%s:%d-%d", state$broker_host, state$broker_port, state$broker_dpid); + } + +function broker_flow_mod_fun(state: ControllerState, match: ofp_match, flow_mod: OpenFlow::ofp_flow_mod): bool + { + Broker::publish(state$broker_topic, Broker::make_event(broker_flow_mod, state$_name, state$broker_dpid, match, flow_mod)); + + return T; + } + +function broker_flow_clear_fun(state: OpenFlow::ControllerState): bool + { + Broker::publish(state$broker_topic, Broker::make_event(broker_flow_clear, state$_name, state$broker_dpid)); + + return T; + } + +function broker_init(state: OpenFlow::ControllerState) + { + Broker::subscribe(state$broker_topic); # openflow success and failure events are directly sent back via the other plugin via broker. + Broker::peer(cat(state$broker_host), state$broker_port); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + local peer_address = cat(endpoint$network$address); + local peer_port = endpoint$network$bound_port; + if ( [peer_port, peer_address] !in broker_peers ) + # ok, this one was none of ours... + return; + + local p = broker_peers[peer_port, peer_address]; + controller_init_done(p); + delete broker_peers[peer_port, peer_address]; + } + +# broker controller constructor +function broker_new(name: string, host: addr, host_port: port, topic: string, dpid: count): OpenFlow::Controller + { + local c = OpenFlow::Controller($state=OpenFlow::ControllerState($broker_host=host, $broker_port=host_port, $broker_dpid=dpid, $broker_topic=topic), + $flow_mod=broker_flow_mod_fun, $flow_clear=broker_flow_clear_fun, $describe=broker_describe, $supports_flow_removed=T, $init=broker_init); + + register_controller(OpenFlow::BROKER, name, c); + + if ( [host_port, cat(host)] in broker_peers ) + Reporter::warning(fmt("Peer %s:%s was added to NetControl acld plugin twice.", host, host_port)); + else + broker_peers[host_port, cat(host)] = c; + + return c; + } diff --git a/scripts/base/frameworks/openflow/plugins/log.bro b/scripts/base/frameworks/openflow/plugins/log.bro deleted file mode 100644 index 2fd961cd4f..0000000000 --- a/scripts/base/frameworks/openflow/plugins/log.bro +++ /dev/null @@ -1,76 +0,0 @@ -##! OpenFlow plugin that outputs flow-modification commands -##! to a Bro log file. - -@load base/frameworks/openflow -@load base/frameworks/logging - -module OpenFlow; - -export { - redef enum Plugin += { - OFLOG, - }; - - redef enum Log::ID += { LOG }; - - ## Log controller constructor. - ## - ## dpid: OpenFlow switch datapath id. - ## - ## success_event: If true, flow_mod_success is raised for each logged line. - ## - ## Returns: OpenFlow::Controller record. - global log_new: function(dpid: count, success_event: bool &default=T): OpenFlow::Controller; - - redef record ControllerState += { - ## OpenFlow switch datapath id. - log_dpid: count &optional; - ## Raise or do not raise success event. - log_success_event: bool &optional; - }; - - ## The record type which contains column fields of the OpenFlow log. - type Info: record { - ## Network time. - ts: time &log; - ## OpenFlow switch datapath id. - dpid: count &log; - ## OpenFlow match fields. - match: ofp_match &log; - ## OpenFlow modify flow entry message. 
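To illustrate how the Broker-backed plugin above is meant to be instantiated (not part of this changeset; the controller address, port, topic and datapath id are placeholder values, and the explicit @load of the plugin path is only there to keep the sketch self-contained):

@load base/frameworks/openflow
@load base/frameworks/openflow/plugins/broker

event zeek_init()
	{
	# Hypothetical OpenFlow controller endpoint and switch datapath id.
	OpenFlow::broker_new("of-ctl", 192.0.2.10, 9999/tcp, "zeek/openflow/of-ctl", 42);
	}

event OpenFlow::controller_activated(name: string, controller: OpenFlow::Controller)
	{
	# The Broker peering is asynchronous, so only issue commands once the
	# controller reports itself activated.
	OpenFlow::flow_clear(controller);
	}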
- flow_mod: ofp_flow_mod &log; - }; - - ## Event that can be handled to access the :bro:type:`OpenFlow::Info` - ## record as it is sent on to the logging framework. - global log_openflow: event(rec: Info); -} - -event bro_init() &priority=5 - { - Log::create_stream(OpenFlow::LOG, [$columns=Info, $ev=log_openflow, $path="openflow"]); - } - -function log_flow_mod(state: ControllerState, match: ofp_match, flow_mod: OpenFlow::ofp_flow_mod): bool - { - Log::write(OpenFlow::LOG, [$ts=network_time(), $dpid=state$log_dpid, $match=match, $flow_mod=flow_mod]); - if ( state$log_success_event ) - event OpenFlow::flow_mod_success(state$_name, match, flow_mod); - - return T; - } - -function log_describe(state: ControllerState): string - { - return fmt("Log-%d", state$log_dpid); - } - -function log_new(dpid: count, success_event: bool &default=T): OpenFlow::Controller - { - local c = OpenFlow::Controller($state=OpenFlow::ControllerState($log_dpid=dpid, $log_success_event=success_event), - $flow_mod=log_flow_mod, $describe=log_describe, $supports_flow_removed=F); - - register_controller(OpenFlow::OFLOG, cat(dpid), c); - - return c; - } diff --git a/scripts/base/frameworks/openflow/plugins/log.zeek b/scripts/base/frameworks/openflow/plugins/log.zeek new file mode 100644 index 0000000000..a9d397fab4 --- /dev/null +++ b/scripts/base/frameworks/openflow/plugins/log.zeek @@ -0,0 +1,76 @@ +##! OpenFlow plugin that outputs flow-modification commands +##! to a Zeek log file. + +@load base/frameworks/openflow +@load base/frameworks/logging + +module OpenFlow; + +export { + redef enum Plugin += { + OFLOG, + }; + + redef enum Log::ID += { LOG }; + + ## Log controller constructor. + ## + ## dpid: OpenFlow switch datapath id. + ## + ## success_event: If true, flow_mod_success is raised for each logged line. + ## + ## Returns: OpenFlow::Controller record. + global log_new: function(dpid: count, success_event: bool &default=T): OpenFlow::Controller; + + redef record ControllerState += { + ## OpenFlow switch datapath id. + log_dpid: count &optional; + ## Raise or do not raise success event. + log_success_event: bool &optional; + }; + + ## The record type which contains column fields of the OpenFlow log. + type Info: record { + ## Network time. + ts: time &log; + ## OpenFlow switch datapath id. + dpid: count &log; + ## OpenFlow match fields. + match: ofp_match &log; + ## OpenFlow modify flow entry message. + flow_mod: ofp_flow_mod &log; + }; + + ## Event that can be handled to access the :zeek:type:`OpenFlow::Info` + ## record as it is sent on to the logging framework. 
+ global log_openflow: event(rec: Info); +} + +event zeek_init() &priority=5 + { + Log::create_stream(OpenFlow::LOG, [$columns=Info, $ev=log_openflow, $path="openflow"]); + } + +function log_flow_mod(state: ControllerState, match: ofp_match, flow_mod: OpenFlow::ofp_flow_mod): bool + { + Log::write(OpenFlow::LOG, [$ts=network_time(), $dpid=state$log_dpid, $match=match, $flow_mod=flow_mod]); + if ( state$log_success_event ) + event OpenFlow::flow_mod_success(state$_name, match, flow_mod); + + return T; + } + +function log_describe(state: ControllerState): string + { + return fmt("Log-%d", state$log_dpid); + } + +function log_new(dpid: count, success_event: bool &default=T): OpenFlow::Controller + { + local c = OpenFlow::Controller($state=OpenFlow::ControllerState($log_dpid=dpid, $log_success_event=success_event), + $flow_mod=log_flow_mod, $describe=log_describe, $supports_flow_removed=F); + + register_controller(OpenFlow::OFLOG, cat(dpid), c); + + return c; + } diff --git a/scripts/base/frameworks/openflow/plugins/ryu.bro b/scripts/base/frameworks/openflow/plugins/ryu.zeek similarity index 100% rename from scripts/base/frameworks/openflow/plugins/ryu.bro rename to scripts/base/frameworks/openflow/plugins/ryu.zeek diff --git a/scripts/base/frameworks/openflow/types.bro b/scripts/base/frameworks/openflow/types.bro deleted file mode 100644 index ef57b25e2e..0000000000 --- a/scripts/base/frameworks/openflow/types.bro +++ /dev/null @@ -1,132 +0,0 @@ -##! Types used by the OpenFlow framework. - -module OpenFlow; - -@load ./consts - -export { - ## Available openflow plugins. - type Plugin: enum { - ## Internal placeholder plugin. - INVALID, - }; - - ## Controller related state. - ## Can be redefined by plugins to - ## add state. - type ControllerState: record { - ## Internally set to the type of plugin used. - _plugin: Plugin &optional; - ## Internally set to the unique name of the controller. - _name: string &optional; - ## Internally set to true once the controller is activated. - _activated: bool &default=F; - } &redef; - - ## Openflow match definition. - ## - ## The openflow match record describes - ## which packets match to a specific - ## rule in a flow table. - type ofp_match: record { - # Input switch port. - in_port: count &optional; - # Ethernet source address. - dl_src: string &optional; - # Ethernet destination address. - dl_dst: string &optional; - # Input VLAN id. - dl_vlan: count &optional; - # Input VLAN priority. - dl_vlan_pcp: count &optional; - # Ethernet frame type. - dl_type: count &optional; - # IP ToS (actually DSCP field, 6bits). - nw_tos: count &optional; - # IP protocol or lower 8 bits of ARP opcode. - nw_proto: count &optional; - # At the moment, we store both v4 and v6 in the same fields. - # This is not how OpenFlow does it, we might want to change that... - # IP source address. - nw_src: subnet &optional; - # IP destination address. - nw_dst: subnet &optional; - # TCP/UDP source port. - tp_src: count &optional; - # TCP/UDP destination port. - tp_dst: count &optional; - } &log; - - ## The actions that can be taken in a flow. - ## (Separate record to make ofp_flow_mod less crowded) - type ofp_flow_action: record { - ## Output ports to send data to. - out_ports: vector of count &default=vector(); - ## Set vlan vid to this value. - vlan_vid: count &optional; - ## Set vlan priority to this value. - vlan_pcp: count &optional; - ## Strip vlan tag. - vlan_strip: bool &default=F; - ## Set ethernet source address. 
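A sketch of driving the log plugin above end-to-end (not part of this changeset; the datapath id, cookie uid, timeout, priority and the use of connection_established as a trigger are arbitrary examples):

@load base/frameworks/openflow
@load base/frameworks/openflow/plugins/log

global of_logger: OpenFlow::Controller;

event zeek_init()
	{
	# Placeholder dpid; every flow_mod issued against this controller is
	# written to openflow.log instead of being sent to a switch.
	of_logger = OpenFlow::log_new(42);
	}

event connection_established(c: connection)
	{
	local m = OpenFlow::match_conn(c$id);
	local fm = OpenFlow::ofp_flow_mod(
	    $cookie=OpenFlow::generate_cookie(1),
	    $command=OpenFlow::OFPFC_ADD,
	    $idle_timeout=30,
	    $priority=5);
	OpenFlow::flow_mod(of_logger, m, fm);
	}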
- dl_src: string &optional; - ## Set ethernet destination address. - dl_dst: string &optional; - ## Set ip tos to this value. - nw_tos: count &optional; - ## Set source to this ip. - nw_src: addr &optional; - ## Set destination to this ip. - nw_dst: addr &optional; - ## Set tcp/udp source port. - tp_src: count &optional; - ## Set tcp/udp destination port. - tp_dst: count &optional; - } &log; - - ## Openflow flow_mod definition, describing the action to perform. - type ofp_flow_mod: record { - ## Opaque controller-issued identifier. - # This is optional in the specification - but let's force - # it so we always can identify our flows... - cookie: count; # &default=BRO_COOKIE_ID * COOKIE_BID_START; - # Flow actions - ## Table to put the flow in. OFPTT_ALL can be used for delete, - ## to delete flows from all matching tables. - table_id: count &optional; - ## One of OFPFC_*. - command: ofp_flow_mod_command; # &default=OFPFC_ADD; - ## Idle time before discarding (seconds). - idle_timeout: count &default=0; - ## Max time before discarding (seconds). - hard_timeout: count &default=0; - ## Priority level of flow entry. - priority: count &default=0; - ## For OFPFC_DELETE* commands, require matching entried to include - ## this as an output port/group. OFPP_ANY/OFPG_ANY means no restrictions. - out_port: count &optional; - out_group: count &optional; - ## Bitmap of the OFPFF_* flags - flags: count &default=0; - ## Actions to take on match - actions: ofp_flow_action &default=ofp_flow_action(); - } &log; - - ## Controller record representing an openflow controller. - type Controller: record { - ## Controller related state. - state: ControllerState; - ## Does the controller support the flow_removed event? - supports_flow_removed: bool; - ## Function that describes the controller. Has to be implemented. - describe: function(state: ControllerState): string; - ## One-time initialization function. If defined, controller_init_done has to be called once initialization finishes. - init: function (state: ControllerState) &optional; - ## One-time destruction function. - destroy: function (state: ControllerState) &optional; - ## flow_mod function. - flow_mod: function(state: ControllerState, match: ofp_match, flow_mod: ofp_flow_mod): bool &optional; - ## flow_clear function. - flow_clear: function(state: ControllerState): bool &optional; - }; -} diff --git a/scripts/base/frameworks/openflow/types.zeek b/scripts/base/frameworks/openflow/types.zeek new file mode 100644 index 0000000000..e208775549 --- /dev/null +++ b/scripts/base/frameworks/openflow/types.zeek @@ -0,0 +1,132 @@ +##! Types used by the OpenFlow framework. + +module OpenFlow; + +@load ./consts + +export { + ## Available openflow plugins. + type Plugin: enum { + ## Internal placeholder plugin. + INVALID, + }; + + ## Controller related state. + ## Can be redefined by plugins to + ## add state. + type ControllerState: record { + ## Internally set to the type of plugin used. + _plugin: Plugin &optional; + ## Internally set to the unique name of the controller. + _name: string &optional; + ## Internally set to true once the controller is activated. + _activated: bool &default=F; + } &redef; + + ## Openflow match definition. + ## + ## The openflow match record describes + ## which packets match to a specific + ## rule in a flow table. + type ofp_match: record { + # Input switch port. + in_port: count &optional; + # Ethernet source address. + dl_src: string &optional; + # Ethernet destination address. + dl_dst: string &optional; + # Input VLAN id. 
+ dl_vlan: count &optional; + # Input VLAN priority. + dl_vlan_pcp: count &optional; + # Ethernet frame type. + dl_type: count &optional; + # IP ToS (actually DSCP field, 6bits). + nw_tos: count &optional; + # IP protocol or lower 8 bits of ARP opcode. + nw_proto: count &optional; + # At the moment, we store both v4 and v6 in the same fields. + # This is not how OpenFlow does it, we might want to change that... + # IP source address. + nw_src: subnet &optional; + # IP destination address. + nw_dst: subnet &optional; + # TCP/UDP source port. + tp_src: count &optional; + # TCP/UDP destination port. + tp_dst: count &optional; + } &log; + + ## The actions that can be taken in a flow. + ## (Separate record to make ofp_flow_mod less crowded) + type ofp_flow_action: record { + ## Output ports to send data to. + out_ports: vector of count &default=vector(); + ## Set vlan vid to this value. + vlan_vid: count &optional; + ## Set vlan priority to this value. + vlan_pcp: count &optional; + ## Strip vlan tag. + vlan_strip: bool &default=F; + ## Set ethernet source address. + dl_src: string &optional; + ## Set ethernet destination address. + dl_dst: string &optional; + ## Set ip tos to this value. + nw_tos: count &optional; + ## Set source to this ip. + nw_src: addr &optional; + ## Set destination to this ip. + nw_dst: addr &optional; + ## Set tcp/udp source port. + tp_src: count &optional; + ## Set tcp/udp destination port. + tp_dst: count &optional; + } &log; + + ## Openflow flow_mod definition, describing the action to perform. + type ofp_flow_mod: record { + ## Opaque controller-issued identifier. + # This is optional in the specification - but let's force + # it so we always can identify our flows... + cookie: count; # &default=ZEEK_COOKIE_ID * COOKIE_BID_START; + # Flow actions + ## Table to put the flow in. OFPTT_ALL can be used for delete, + ## to delete flows from all matching tables. + table_id: count &optional; + ## One of OFPFC_*. + command: ofp_flow_mod_command; # &default=OFPFC_ADD; + ## Idle time before discarding (seconds). + idle_timeout: count &default=0; + ## Max time before discarding (seconds). + hard_timeout: count &default=0; + ## Priority level of flow entry. + priority: count &default=0; + ## For OFPFC_DELETE* commands, require matching entried to include + ## this as an output port/group. OFPP_ANY/OFPG_ANY means no restrictions. + out_port: count &optional; + out_group: count &optional; + ## Bitmap of the OFPFF_* flags + flags: count &default=0; + ## Actions to take on match + actions: ofp_flow_action &default=ofp_flow_action(); + } &log; + + ## Controller record representing an openflow controller. + type Controller: record { + ## Controller related state. + state: ControllerState; + ## Does the controller support the flow_removed event? + supports_flow_removed: bool; + ## Function that describes the controller. Has to be implemented. + describe: function(state: ControllerState): string; + ## One-time initialization function. If defined, controller_init_done has to be called once initialization finishes. + init: function (state: ControllerState) &optional; + ## One-time destruction function. + destroy: function (state: ControllerState) &optional; + ## flow_mod function. + flow_mod: function(state: ControllerState, match: ofp_match, flow_mod: ofp_flow_mod): bool &optional; + ## flow_clear function. 
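For orientation: the Plugin, ControllerState and Controller types above are the entire contract a plugin has to satisfy, and the broker and log plugins in this changeset are the real implementations. Below is a hypothetical minimal skeleton following the same pattern; all example_* names are invented for illustration.

@load base/frameworks/openflow

module OpenFlow;

export {
	redef enum Plugin += {
		EXAMPLE,	# hypothetical plugin tag
	};

	redef record ControllerState += {
		## Hypothetical per-controller setting.
		example_dpid: count &optional;
	};

	global example_new: function(dpid: count): Controller;
}

function example_describe(state: ControllerState): string
	{
	return fmt("Example-%d", state$example_dpid);
	}

function example_flow_mod(state: ControllerState, match: ofp_match, flow_mod: ofp_flow_mod): bool
	{
	# A real plugin would hand the flow_mod to a switch or controller here.
	return T;
	}

function example_new(dpid: count): Controller
	{
	local c = Controller($state=ControllerState($example_dpid=dpid),
	    $flow_mod=example_flow_mod, $describe=example_describe,
	    $supports_flow_removed=F);

	register_controller(EXAMPLE, cat(dpid), c);

	return c;
	}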
+ flow_clear: function(state: ControllerState): bool &optional; + }; +} diff --git a/scripts/base/frameworks/packet-filter/README b/scripts/base/frameworks/packet-filter/README index 536c1527db..7cc194ec17 100644 --- a/scripts/base/frameworks/packet-filter/README +++ b/scripts/base/frameworks/packet-filter/README @@ -1 +1 @@ -The packet filter framework supports how Bro sets its BPF capture filter. +The packet filter framework supports how Zeek sets its BPF capture filter. diff --git a/scripts/base/frameworks/packet-filter/__load__.bro b/scripts/base/frameworks/packet-filter/__load__.zeek similarity index 100% rename from scripts/base/frameworks/packet-filter/__load__.bro rename to scripts/base/frameworks/packet-filter/__load__.zeek diff --git a/scripts/base/frameworks/packet-filter/cluster.bro b/scripts/base/frameworks/packet-filter/cluster.bro deleted file mode 100644 index 6e41a6045f..0000000000 --- a/scripts/base/frameworks/packet-filter/cluster.bro +++ /dev/null @@ -1,17 +0,0 @@ - -@load base/frameworks/cluster -@load ./main - -module PacketFilter; - -event remote_connection_handshake_done(p: event_peer) &priority=3 - { - if ( Cluster::local_node_type() == Cluster::WORKER && - p$descr in Cluster::nodes && - Cluster::nodes[p$descr]$node_type == Cluster::MANAGER ) - { - # This ensures that a packet filter is installed and logged - # after the manager connects to us. - install(); - } - } diff --git a/scripts/base/frameworks/packet-filter/cluster.zeek b/scripts/base/frameworks/packet-filter/cluster.zeek new file mode 100644 index 0000000000..b1e1ceaddf --- /dev/null +++ b/scripts/base/frameworks/packet-filter/cluster.zeek @@ -0,0 +1,17 @@ + +@load base/frameworks/cluster +@load ./main + +module PacketFilter; + +event Cluster::hello(name: string, id: string) &priority=-3 + { + if ( Cluster::local_node_type() == Cluster::WORKER && + name in Cluster::nodes && + Cluster::nodes[name]$node_type == Cluster::MANAGER ) + { + # This ensures that a packet filter is installed and logged + # after the manager connects to us. + install(); + } + } diff --git a/scripts/base/frameworks/packet-filter/main.bro b/scripts/base/frameworks/packet-filter/main.bro deleted file mode 100644 index 9657f14c44..0000000000 --- a/scripts/base/frameworks/packet-filter/main.bro +++ /dev/null @@ -1,321 +0,0 @@ -##! This script supports how Bro sets its BPF capture filter. By default -##! Bro sets a capture filter that allows all traffic. If a filter -##! is set on the command line, that filter takes precedence over the default -##! open filter and all filters defined in Bro scripts with the -##! :bro:id:`capture_filters` and :bro:id:`restrict_filters` variables. - -@load base/frameworks/notice -@load base/frameworks/analyzer -@load ./utils - -module PacketFilter; - -export { - ## Add the packet filter logging stream. - redef enum Log::ID += { LOG }; - - ## Add notice types related to packet filter errors. - redef enum Notice::Type += { - ## This notice is generated if a packet filter cannot be compiled. - Compile_Failure, - - ## Generated if a packet filter fails to install. - Install_Failure, - - ## Generated when a notice takes too long to compile. - Too_Long_To_Compile_Filter - }; - - ## The record type defining columns to be logged in the packet filter - ## logging stream. - type Info: record { - ## The time at which the packet filter installation attempt was made. - ts: time &log; - - ## This is a string representation of the node that applied this - ## packet filter. 
It's mostly useful in the context of - ## dynamically changing filters on clusters. - node: string &log &optional; - - ## The packet filter that is being set. - filter: string &log; - - ## Indicate if this is the filter set during initialization. - init: bool &log &default=F; - - ## Indicate if the filter was applied successfully. - success: bool &log &default=T; - }; - - ## The BPF filter that is used by default to define what traffic should - ## be captured. Filters defined in :bro:id:`restrict_filters` will - ## still be applied to reduce the captured traffic. - const default_capture_filter = "ip or not ip" &redef; - - ## Filter string which is unconditionally or'ed to the beginning of - ## every dynamically built filter. - const unrestricted_filter = "" &redef; - - ## Filter string which is unconditionally and'ed to the beginning of - ## every dynamically built filter. This is mostly used when a custom - ## filter is being used but MPLS or VLAN tags are on the traffic. - const restricted_filter = "" &redef; - - ## The maximum amount of time that you'd like to allow for BPF filters to compile. - ## If this time is exceeded, compensation measures may be taken by the framework - ## to reduce the filter size. This threshold being crossed also results - ## in the :bro:see:`PacketFilter::Too_Long_To_Compile_Filter` notice. - const max_filter_compile_time = 100msec &redef; - - ## Install a BPF filter to exclude some traffic. The filter should - ## positively match what is to be excluded, it will be wrapped in - ## a "not". - ## - ## filter_id: An arbitrary string that can be used to identify - ## the filter. - ## - ## filter: A BPF expression of traffic that should be excluded. - ## - ## Returns: A boolean value to indicate if the filter was successfully - ## installed or not. - global exclude: function(filter_id: string, filter: string): bool; - - ## Install a temporary filter to traffic which should not be passed - ## through the BPF filter. The filter should match the traffic you - ## don't want to see (it will be wrapped in a "not" condition). - ## - ## filter_id: An arbitrary string that can be used to identify - ## the filter. - ## - ## filter: A BPF expression of traffic that should be excluded. - ## - ## length: The duration for which this filter should be put in place. - ## - ## Returns: A boolean value to indicate if the filter was successfully - ## installed or not. - global exclude_for: function(filter_id: string, filter: string, span: interval): bool; - - ## Call this function to build and install a new dynamically built - ## packet filter. - global install: function(): bool; - - ## A data structure to represent filter generating plugins. - type FilterPlugin: record { - ## A function that is directly called when generating the complete filter. - func : function(); - }; - - ## API function to register a new plugin for dynamic restriction filters. - global register_filter_plugin: function(fp: FilterPlugin); - - ## Enables the old filtering approach of "only watch common ports for - ## analyzed protocols". - ## - ## Unless you know what you are doing, leave this set to F. - const enable_auto_protocol_capture_filters = F &redef; - - ## This is where the default packet filter is stored and it should not - ## normally be modified by users. 
- global current_filter = ""; -} - -global dynamic_restrict_filters: table[string] of string = {}; - -# Track if a filter is currently building so functions that would ultimately -# install a filter immediately can still be used but they won't try to build or -# install the filter. -global currently_building = F; - -# Internal tracking for if the filter being built has possibly been changed. -global filter_changed = F; - -global filter_plugins: set[FilterPlugin] = {}; - -redef enum PcapFilterID += { - DefaultPcapFilter, - FilterTester, -}; - -function test_filter(filter: string): bool - { - if ( ! Pcap::precompile_pcap_filter(FilterTester, filter) ) - { - # The given filter was invalid - # TODO: generate a notice. - return F; - } - return T; - } - -# This tracks any changes for filtering mechanisms that play along nice -# and set filter_changed to T. -event filter_change_tracking() - { - if ( filter_changed ) - install(); - - schedule 5min { filter_change_tracking() }; - } - -event bro_init() &priority=5 - { - Log::create_stream(PacketFilter::LOG, [$columns=Info, $path="packet_filter"]); - - # Preverify the capture and restrict filters to give more granular failure messages. - for ( id, cf in capture_filters ) - { - if ( ! test_filter(cf) ) - Reporter::fatal(fmt("Invalid capture_filter named '%s' - '%s'", id, cf)); - } - - for ( id, rf in restrict_filters ) - { - if ( ! test_filter(restrict_filters[id]) ) - Reporter::fatal(fmt("Invalid restrict filter named '%s' - '%s'", id, rf)); - } - } - -event bro_init() &priority=-5 - { - install(); - - event filter_change_tracking(); - } - -function register_filter_plugin(fp: FilterPlugin) - { - add filter_plugins[fp]; - } - -event remove_dynamic_filter(filter_id: string) - { - if ( filter_id in dynamic_restrict_filters ) - { - delete dynamic_restrict_filters[filter_id]; - install(); - } - } - -function exclude(filter_id: string, filter: string): bool - { - if ( ! test_filter(filter) ) - return F; - - dynamic_restrict_filters[filter_id] = filter; - install(); - return T; - } - -function exclude_for(filter_id: string, filter: string, span: interval): bool - { - if ( exclude(filter_id, filter) ) - { - schedule span { remove_dynamic_filter(filter_id) }; - return T; - } - return F; - } - -function build(): string - { - if ( cmd_line_bpf_filter != "" ) - # Return what the user specified on the command line; - return cmd_line_bpf_filter; - - currently_building = T; - - # Generate all of the plugin based filters. - for ( plugin in filter_plugins ) - { - plugin$func(); - } - - local cfilter = ""; - if ( |capture_filters| == 0 && ! enable_auto_protocol_capture_filters ) - cfilter = default_capture_filter; - - for ( id, cf in capture_filters ) - cfilter = combine_filters(cfilter, "or", cf); - - if ( enable_auto_protocol_capture_filters ) - cfilter = combine_filters(cfilter, "or", Analyzer::get_bpf()); - - # Apply the restriction filters. - local rfilter = ""; - for ( id, rf in restrict_filters ) - rfilter = combine_filters(rfilter, "and", rf); - - # Apply the dynamic restriction filters. - for ( filt, drf in dynamic_restrict_filters ) - rfilter = combine_filters(rfilter, "and", string_cat("not (", drf, ")")); - - # Finally, join them into one filter. 
- local filter = combine_filters(cfilter, "and", rfilter); - - if ( unrestricted_filter != "" ) - filter = combine_filters(unrestricted_filter, "or", filter); - if ( restricted_filter != "" ) - filter = combine_filters(restricted_filter, "and", filter); - - currently_building = F; - return filter; - } - -function install(): bool - { - if ( currently_building ) - return F; - - local tmp_filter = build(); - - # No need to proceed if the filter hasn't changed. - if ( tmp_filter == current_filter ) - return F; - - local ts = current_time(); - if ( ! Pcap::precompile_pcap_filter(DefaultPcapFilter, tmp_filter) ) - { - NOTICE([$note=Compile_Failure, - $msg=fmt("Compiling packet filter failed"), - $sub=tmp_filter]); - if ( network_time() == 0.0 ) - Reporter::fatal(fmt("Bad pcap filter '%s'", tmp_filter)); - else - Reporter::warning(fmt("Bad pcap filter '%s'", tmp_filter)); - } - local diff = current_time()-ts; - if ( diff > max_filter_compile_time ) - NOTICE([$note=Too_Long_To_Compile_Filter, - $msg=fmt("A BPF filter is taking longer than %0.1f seconds to compile", diff)]); - - # Set it to the current filter if it passed precompiling - current_filter = tmp_filter; - - # Do an audit log for the packet filter. - local info: Info; - info$ts = network_time(); - info$node = peer_description; - # If network_time() is 0.0 we're at init time so use the wall clock. - if ( info$ts == 0.0 ) - { - info$ts = current_time(); - info$init = T; - } - info$filter = current_filter; - - if ( ! Pcap::install_pcap_filter(DefaultPcapFilter) ) - { - # Installing the filter failed for some reason. - info$success = F; - NOTICE([$note=Install_Failure, - $msg=fmt("Installing packet filter failed"), - $sub=current_filter]); - } - - if ( reading_live_traffic() || reading_traces() ) - Log::write(PacketFilter::LOG, info); - - # Update the filter change tracking - filter_changed = F; - return T; - } diff --git a/scripts/base/frameworks/packet-filter/main.zeek b/scripts/base/frameworks/packet-filter/main.zeek new file mode 100644 index 0000000000..5d616eb7e6 --- /dev/null +++ b/scripts/base/frameworks/packet-filter/main.zeek @@ -0,0 +1,321 @@ +##! This script supports how Zeek sets its BPF capture filter. By default +##! Zeek sets a capture filter that allows all traffic. If a filter +##! is set on the command line, that filter takes precedence over the default +##! open filter and all filters defined in Zeek scripts with the +##! :zeek:id:`capture_filters` and :zeek:id:`restrict_filters` variables. + +@load base/frameworks/notice +@load base/frameworks/analyzer +@load ./utils + +module PacketFilter; + +export { + ## Add the packet filter logging stream. + redef enum Log::ID += { LOG }; + + ## Add notice types related to packet filter errors. + redef enum Notice::Type += { + ## This notice is generated if a packet filter cannot be compiled. + Compile_Failure, + + ## Generated if a packet filter fails to install. + Install_Failure, + + ## Generated when a notice takes too long to compile. + Too_Long_To_Compile_Filter + }; + + ## The record type defining columns to be logged in the packet filter + ## logging stream. + type Info: record { + ## The time at which the packet filter installation attempt was made. + ts: time &log; + + ## This is a string representation of the node that applied this + ## packet filter. It's mostly useful in the context of + ## dynamically changing filters on clusters. + node: string &log &optional; + + ## The packet filter that is being set. 
+ filter: string &log; + + ## Indicate if this is the filter set during initialization. + init: bool &log &default=F; + + ## Indicate if the filter was applied successfully. + success: bool &log &default=T; + }; + + ## The BPF filter that is used by default to define what traffic should + ## be captured. Filters defined in :zeek:id:`restrict_filters` will + ## still be applied to reduce the captured traffic. + const default_capture_filter = "ip or not ip" &redef; + + ## Filter string which is unconditionally or'ed to the beginning of + ## every dynamically built filter. + const unrestricted_filter = "" &redef; + + ## Filter string which is unconditionally and'ed to the beginning of + ## every dynamically built filter. This is mostly used when a custom + ## filter is being used but MPLS or VLAN tags are on the traffic. + const restricted_filter = "" &redef; + + ## The maximum amount of time that you'd like to allow for BPF filters to compile. + ## If this time is exceeded, compensation measures may be taken by the framework + ## to reduce the filter size. This threshold being crossed also results + ## in the :zeek:see:`PacketFilter::Too_Long_To_Compile_Filter` notice. + const max_filter_compile_time = 100msec &redef; + + ## Install a BPF filter to exclude some traffic. The filter should + ## positively match what is to be excluded, it will be wrapped in + ## a "not". + ## + ## filter_id: An arbitrary string that can be used to identify + ## the filter. + ## + ## filter: A BPF expression of traffic that should be excluded. + ## + ## Returns: A boolean value to indicate if the filter was successfully + ## installed or not. + global exclude: function(filter_id: string, filter: string): bool; + + ## Install a temporary filter to traffic which should not be passed + ## through the BPF filter. The filter should match the traffic you + ## don't want to see (it will be wrapped in a "not" condition). + ## + ## filter_id: An arbitrary string that can be used to identify + ## the filter. + ## + ## filter: A BPF expression of traffic that should be excluded. + ## + ## length: The duration for which this filter should be put in place. + ## + ## Returns: A boolean value to indicate if the filter was successfully + ## installed or not. + global exclude_for: function(filter_id: string, filter: string, span: interval): bool; + + ## Call this function to build and install a new dynamically built + ## packet filter. + global install: function(): bool; + + ## A data structure to represent filter generating plugins. + type FilterPlugin: record { + ## A function that is directly called when generating the complete filter. + func : function(); + }; + + ## API function to register a new plugin for dynamic restriction filters. + global register_filter_plugin: function(fp: FilterPlugin); + + ## Enables the old filtering approach of "only watch common ports for + ## analyzed protocols". + ## + ## Unless you know what you are doing, leave this set to F. + const enable_auto_protocol_capture_filters = F &redef; + + ## This is where the default packet filter is stored and it should not + ## normally be modified by users. + global current_filter = ""; +} + +global dynamic_restrict_filters: table[string] of string = {}; + +# Track if a filter is currently building so functions that would ultimately +# install a filter immediately can still be used but they won't try to build or +# install the filter. +global currently_building = F; + +# Internal tracking for if the filter being built has possibly been changed. 
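As a usage sketch of the API exported above (the filter IDs and BPF expressions here are hypothetical), a site policy script could exclude traffic like this:

    @load base/frameworks/packet-filter

    event zeek_init()
        {
        # Drop NTP traffic from capture; the expression is wrapped in "not (...)".
        PacketFilter::exclude("no-ntp", "udp port 123");

        # Ignore a noisy host for one hour, after which the exclusion is removed.
        PacketFilter::exclude_for("noisy-host", "host 192.0.2.1", 1hr);
        }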
+global filter_changed = F; + +global filter_plugins: set[FilterPlugin] = {}; + +redef enum PcapFilterID += { + DefaultPcapFilter, + FilterTester, +}; + +function test_filter(filter: string): bool + { + if ( ! Pcap::precompile_pcap_filter(FilterTester, filter) ) + { + # The given filter was invalid + # TODO: generate a notice. + return F; + } + return T; + } + +# This tracks any changes for filtering mechanisms that play along nice +# and set filter_changed to T. +event filter_change_tracking() + { + if ( filter_changed ) + install(); + + schedule 5min { filter_change_tracking() }; + } + +event zeek_init() &priority=5 + { + Log::create_stream(PacketFilter::LOG, [$columns=Info, $path="packet_filter"]); + + # Preverify the capture and restrict filters to give more granular failure messages. + for ( id, cf in capture_filters ) + { + if ( ! test_filter(cf) ) + Reporter::fatal(fmt("Invalid capture_filter named '%s' - '%s'", id, cf)); + } + + for ( id, rf in restrict_filters ) + { + if ( ! test_filter(restrict_filters[id]) ) + Reporter::fatal(fmt("Invalid restrict filter named '%s' - '%s'", id, rf)); + } + } + +event zeek_init() &priority=-5 + { + install(); + + event filter_change_tracking(); + } + +function register_filter_plugin(fp: FilterPlugin) + { + add filter_plugins[fp]; + } + +event remove_dynamic_filter(filter_id: string) + { + if ( filter_id in dynamic_restrict_filters ) + { + delete dynamic_restrict_filters[filter_id]; + install(); + } + } + +function exclude(filter_id: string, filter: string): bool + { + if ( ! test_filter(filter) ) + return F; + + dynamic_restrict_filters[filter_id] = filter; + install(); + return T; + } + +function exclude_for(filter_id: string, filter: string, span: interval): bool + { + if ( exclude(filter_id, filter) ) + { + schedule span { remove_dynamic_filter(filter_id) }; + return T; + } + return F; + } + +function build(): string + { + if ( cmd_line_bpf_filter != "" ) + # Return what the user specified on the command line; + return cmd_line_bpf_filter; + + currently_building = T; + + # Generate all of the plugin based filters. + for ( plugin in filter_plugins ) + { + plugin$func(); + } + + local cfilter = ""; + if ( |capture_filters| == 0 && ! enable_auto_protocol_capture_filters ) + cfilter = default_capture_filter; + + for ( id, cf in capture_filters ) + cfilter = combine_filters(cfilter, "or", cf); + + if ( enable_auto_protocol_capture_filters ) + cfilter = combine_filters(cfilter, "or", Analyzer::get_bpf()); + + # Apply the restriction filters. + local rfilter = ""; + for ( id, rf in restrict_filters ) + rfilter = combine_filters(rfilter, "and", rf); + + # Apply the dynamic restriction filters. + for ( filt, drf in dynamic_restrict_filters ) + rfilter = combine_filters(rfilter, "and", string_cat("not (", drf, ")")); + + # Finally, join them into one filter. + local filter = combine_filters(cfilter, "and", rfilter); + + if ( unrestricted_filter != "" ) + filter = combine_filters(unrestricted_filter, "or", filter); + if ( restricted_filter != "" ) + filter = combine_filters(restricted_filter, "and", filter); + + currently_building = F; + return filter; + } + +function install(): bool + { + if ( currently_building ) + return F; + + local tmp_filter = build(); + + # No need to proceed if the filter hasn't changed. + if ( tmp_filter == current_filter ) + return F; + + local ts = current_time(); + if ( ! 
Pcap::precompile_pcap_filter(DefaultPcapFilter, tmp_filter) ) + { + NOTICE([$note=Compile_Failure, + $msg=fmt("Compiling packet filter failed"), + $sub=tmp_filter]); + if ( network_time() == 0.0 ) + Reporter::fatal(fmt("Bad pcap filter '%s'", tmp_filter)); + else + Reporter::warning(fmt("Bad pcap filter '%s'", tmp_filter)); + } + local diff = current_time()-ts; + if ( diff > max_filter_compile_time ) + NOTICE([$note=Too_Long_To_Compile_Filter, + $msg=fmt("A BPF filter is taking longer than %0.1f seconds to compile", diff)]); + + # Set it to the current filter if it passed precompiling + current_filter = tmp_filter; + + # Do an audit log for the packet filter. + local info: Info; + info$ts = network_time(); + info$node = peer_description; + # If network_time() is 0.0 we're at init time so use the wall clock. + if ( info$ts == 0.0 ) + { + info$ts = current_time(); + info$init = T; + } + info$filter = current_filter; + + if ( ! Pcap::install_pcap_filter(DefaultPcapFilter) ) + { + # Installing the filter failed for some reason. + info$success = F; + NOTICE([$note=Install_Failure, + $msg=fmt("Installing packet filter failed"), + $sub=current_filter]); + } + + if ( reading_live_traffic() || reading_traces() ) + Log::write(PacketFilter::LOG, info); + + # Update the filter change tracking + filter_changed = F; + return T; + } diff --git a/scripts/base/frameworks/packet-filter/netstats.bro b/scripts/base/frameworks/packet-filter/netstats.bro deleted file mode 100644 index 14545243d2..0000000000 --- a/scripts/base/frameworks/packet-filter/netstats.bro +++ /dev/null @@ -1,42 +0,0 @@ -##! This script reports on packet loss from the various packet sources. -##! When Bro is reading input from trace files, this script will not -##! report any packet loss statistics. - -@load base/frameworks/notice - -module PacketFilter; - -export { - redef enum Notice::Type += { - ## Indicates packets were dropped by the packet filter. - Dropped_Packets, - }; - - ## This is the interval between individual statistics collection. - const stats_collection_interval = 5min; -} - -event net_stats_update(last_stat: NetStats) - { - local ns = get_net_stats(); - local new_dropped = ns$pkts_dropped - last_stat$pkts_dropped; - if ( new_dropped > 0 ) - { - local new_recvd = ns$pkts_recvd - last_stat$pkts_recvd; - local new_link = ns$pkts_link - last_stat$pkts_link; - NOTICE([$note=Dropped_Packets, - $msg=fmt("%d packets dropped after filtering, %d received%s", - new_dropped, new_recvd + new_dropped, - new_link != 0 ? fmt(", %d on link", new_link) : "")]); - } - - schedule stats_collection_interval { net_stats_update(ns) }; - } - -event bro_init() - { - # Since this currently only calculates packet drops, let's skip the stats - # collection if reading traces. - if ( ! reading_traces() ) - schedule stats_collection_interval { net_stats_update(get_net_stats()) }; - } diff --git a/scripts/base/frameworks/packet-filter/netstats.zeek b/scripts/base/frameworks/packet-filter/netstats.zeek new file mode 100644 index 0000000000..173f4371cd --- /dev/null +++ b/scripts/base/frameworks/packet-filter/netstats.zeek @@ -0,0 +1,42 @@ +##! This script reports on packet loss from the various packet sources. +##! When Zeek is reading input from trace files, this script will not +##! report any packet loss statistics. + +@load base/frameworks/notice + +module PacketFilter; + +export { + redef enum Notice::Type += { + ## Indicates packets were dropped by the packet filter. 
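Because compile and install failures surface only as notices, a deployment might want to escalate them through the notice framework. A minimal sketch, assuming email delivery is configured:

    hook Notice::policy(n: Notice::Info)
        {
        if ( n$note == PacketFilter::Compile_Failure ||
             n$note == PacketFilter::Install_Failure )
            add n$actions[Notice::ACTION_EMAIL];
        }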
+ Dropped_Packets, + }; + + ## This is the interval between individual statistics collection. + const stats_collection_interval = 5min; +} + +event net_stats_update(last_stat: NetStats) + { + local ns = get_net_stats(); + local new_dropped = ns$pkts_dropped - last_stat$pkts_dropped; + if ( new_dropped > 0 ) + { + local new_recvd = ns$pkts_recvd - last_stat$pkts_recvd; + local new_link = ns$pkts_link - last_stat$pkts_link; + NOTICE([$note=Dropped_Packets, + $msg=fmt("%d packets dropped after filtering, %d received%s", + new_dropped, new_recvd + new_dropped, + new_link != 0 ? fmt(", %d on link", new_link) : "")]); + } + + schedule stats_collection_interval { net_stats_update(ns) }; + } + +event zeek_init() + { + # Since this currently only calculates packet drops, let's skip the stats + # collection if reading traces. + if ( ! reading_traces() ) + schedule stats_collection_interval { net_stats_update(get_net_stats()) }; + } diff --git a/scripts/base/frameworks/packet-filter/utils.bro b/scripts/base/frameworks/packet-filter/utils.bro deleted file mode 100644 index 29b54229af..0000000000 --- a/scripts/base/frameworks/packet-filter/utils.bro +++ /dev/null @@ -1,58 +0,0 @@ -module PacketFilter; - -export { - ## Takes a :bro:type:`port` and returns a BPF expression which will - ## match the port. - ## - ## p: The port. - ## - ## Returns: A valid BPF filter string for matching the port. - global port_to_bpf: function(p: port): string; - - ## Create a BPF filter to sample IPv4 and IPv6 traffic. - ## - ## num_parts: The number of parts the traffic should be split into. - ## - ## this_part: The part of the traffic this filter will accept (0-based). - global sampling_filter: function(num_parts: count, this_part: count): string; - - ## Combines two valid BPF filter strings with a string based operator - ## to form a new filter. - ## - ## lfilter: Filter which will go on the left side. - ## - ## op: Operation being applied (typically "or" or "and"). - ## - ## rfilter: Filter which will go on the right side. - ## - ## Returns: A new string representing the two filters combined with - ## the operator. Either filter being an empty string will - ## still result in a valid filter. - global combine_filters: function(lfilter: string, op: string, rfilter: string): string; -} - -function port_to_bpf(p: port): string - { - local tp = get_port_transport_proto(p); - return cat(tp, " and ", fmt("port %d", p)); - } - -function combine_filters(lfilter: string, op: string, rfilter: string): string - { - if ( lfilter == "" && rfilter == "" ) - return ""; - else if ( lfilter == "" ) - return rfilter; - else if ( rfilter == "" ) - return lfilter; - else - return fmt("(%s) %s (%s)", lfilter, op, rfilter); - } - -function sampling_filter(num_parts: count, this_part: count): string - { - local v4_filter = fmt("ip and ((ip[14:2]+ip[18:2]) - (%d*((ip[14:2]+ip[18:2])/%d)) == %d)", num_parts, num_parts, this_part); - # TODO: this is probably a fairly suboptimal filter, but it should work for now. 
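For reference, a rough sketch of what the utility helpers return, based on the utils implementations shown in this patch (output values are given in the comments):

    @load base/frameworks/packet-filter

    event zeek_init()
        {
        print PacketFilter::port_to_bpf(53/udp);
        # -> udp and port 53

        print PacketFilter::combine_filters("port 53", "or", "tcp port 80");
        # -> (port 53) or (tcp port 80)

        # Accept roughly half of all traffic (part 0 of 2), e.g. for sampling:
        print PacketFilter::sampling_filter(2, 0);
        }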
- local v6_filter = fmt("ip6 and ((ip6[22:2]+ip6[38:2]) - (%d*((ip6[22:2]+ip6[38:2])/%d)) == %d)", num_parts, num_parts, this_part); - return combine_filters(v4_filter, "or", v6_filter); - } diff --git a/scripts/base/frameworks/packet-filter/utils.zeek b/scripts/base/frameworks/packet-filter/utils.zeek new file mode 100644 index 0000000000..cbf07f64ad --- /dev/null +++ b/scripts/base/frameworks/packet-filter/utils.zeek @@ -0,0 +1,58 @@ +module PacketFilter; + +export { + ## Takes a :zeek:type:`port` and returns a BPF expression which will + ## match the port. + ## + ## p: The port. + ## + ## Returns: A valid BPF filter string for matching the port. + global port_to_bpf: function(p: port): string; + + ## Create a BPF filter to sample IPv4 and IPv6 traffic. + ## + ## num_parts: The number of parts the traffic should be split into. + ## + ## this_part: The part of the traffic this filter will accept (0-based). + global sampling_filter: function(num_parts: count, this_part: count): string; + + ## Combines two valid BPF filter strings with a string based operator + ## to form a new filter. + ## + ## lfilter: Filter which will go on the left side. + ## + ## op: Operation being applied (typically "or" or "and"). + ## + ## rfilter: Filter which will go on the right side. + ## + ## Returns: A new string representing the two filters combined with + ## the operator. Either filter being an empty string will + ## still result in a valid filter. + global combine_filters: function(lfilter: string, op: string, rfilter: string): string; +} + +function port_to_bpf(p: port): string + { + local tp = get_port_transport_proto(p); + return cat(tp, " and ", fmt("port %d", p)); + } + +function combine_filters(lfilter: string, op: string, rfilter: string): string + { + if ( lfilter == "" && rfilter == "" ) + return ""; + else if ( lfilter == "" ) + return rfilter; + else if ( rfilter == "" ) + return lfilter; + else + return fmt("(%s) %s (%s)", lfilter, op, rfilter); + } + +function sampling_filter(num_parts: count, this_part: count): string + { + local v4_filter = fmt("ip and ((ip[14:2]+ip[18:2]) - (%d*((ip[14:2]+ip[18:2])/%d)) == %d)", num_parts, num_parts, this_part); + # TODO: this is probably a fairly suboptimal filter, but it should work for now. + local v6_filter = fmt("ip6 and ((ip6[22:2]+ip6[38:2]) - (%d*((ip6[22:2]+ip6[38:2])/%d)) == %d)", num_parts, num_parts, this_part); + return combine_filters(v4_filter, "or", v6_filter); + } diff --git a/scripts/base/frameworks/dpd/__load__.bro b/scripts/base/frameworks/reporter/__load__.zeek similarity index 100% rename from scripts/base/frameworks/dpd/__load__.bro rename to scripts/base/frameworks/reporter/__load__.zeek diff --git a/scripts/base/frameworks/reporter/main.bro b/scripts/base/frameworks/reporter/main.bro deleted file mode 100644 index 8cba29bdc2..0000000000 --- a/scripts/base/frameworks/reporter/main.bro +++ /dev/null @@ -1,56 +0,0 @@ -##! This framework is intended to create an output and filtering path for -##! internal messages/warnings/errors. It should typically be loaded to -##! log such messages to a file in a standard way. For the options to -##! toggle whether messages are additionally written to STDERR, see -##! :bro:see:`Reporter::info_to_stderr`, -##! :bro:see:`Reporter::warnings_to_stderr`, and -##! :bro:see:`Reporter::errors_to_stderr`. -##! -##! Note that this framework deals with the handling of internally generated -##! reporter messages, for the interface -##! into actually creating reporter messages from the scripting layer, use -##! 
the built-in functions in :doc:`/scripts/base/bif/reporter.bif.bro`. - -module Reporter; - -export { - ## The reporter logging stream identifier. - redef enum Log::ID += { LOG }; - - ## The record type which contains the column fields of the reporter log. - type Info: record { - ## The network time at which the reporter event was generated. - ts: time &log; - ## The severity of the reporter message. Levels are INFO for informational - ## messages, not needing specific attention; WARNING for warning of a potential - ## problem, and ERROR for a non-fatal error that should be addressed, but doesn't - ## terminate program execution. - level: Level &log; - ## An info/warning/error message that could have either been - ## generated from the internal Bro core or at the scripting-layer. - message: string &log; - ## This is the location in a Bro script where the message originated. - ## Not all reporter messages will have locations in them though. - location: string &log &optional; - }; -} - -event bro_init() &priority=5 - { - Log::create_stream(Reporter::LOG, [$columns=Info, $path="reporter"]); - } - -event reporter_info(t: time, msg: string, location: string) &priority=-5 - { - Log::write(Reporter::LOG, [$ts=t, $level=INFO, $message=msg, $location=location]); - } - -event reporter_warning(t: time, msg: string, location: string) &priority=-5 - { - Log::write(Reporter::LOG, [$ts=t, $level=WARNING, $message=msg, $location=location]); - } - -event reporter_error(t: time, msg: string, location: string) &priority=-5 - { - Log::write(Reporter::LOG, [$ts=t, $level=ERROR, $message=msg, $location=location]); - } diff --git a/scripts/base/frameworks/reporter/main.zeek b/scripts/base/frameworks/reporter/main.zeek new file mode 100644 index 0000000000..ce66e8f86a --- /dev/null +++ b/scripts/base/frameworks/reporter/main.zeek @@ -0,0 +1,56 @@ +##! This framework is intended to create an output and filtering path for +##! internal messages/warnings/errors. It should typically be loaded to +##! log such messages to a file in a standard way. For the options to +##! toggle whether messages are additionally written to STDERR, see +##! :zeek:see:`Reporter::info_to_stderr`, +##! :zeek:see:`Reporter::warnings_to_stderr`, and +##! :zeek:see:`Reporter::errors_to_stderr`. +##! +##! Note that this framework deals with the handling of internally generated +##! reporter messages, for the interface +##! into actually creating reporter messages from the scripting layer, use +##! the built-in functions in :doc:`/scripts/base/bif/reporter.bif.zeek`. + +module Reporter; + +export { + ## The reporter logging stream identifier. + redef enum Log::ID += { LOG }; + + ## The record type which contains the column fields of the reporter log. + type Info: record { + ## The network time at which the reporter event was generated. + ts: time &log; + ## The severity of the reporter message. Levels are INFO for informational + ## messages, not needing specific attention; WARNING for warning of a potential + ## problem, and ERROR for a non-fatal error that should be addressed, but doesn't + ## terminate program execution. + level: Level &log; + ## An info/warning/error message that could have either been + ## generated from the internal Zeek core or at the scripting-layer. + message: string &log; + ## This is the location in a Zeek script where the message originated. + ## Not all reporter messages will have locations in them though. 
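For context, entries in this stream can also be produced from script land through the reporter built-in functions; a minimal sketch:

    event zeek_init()
        {
        Reporter::info("informational message");
        Reporter::warning("something looks off");
        Reporter::error("something went wrong, but not fatally");
        }

Each call raises the corresponding reporter_* event handled at the bottom of this script and ends up as one line in reporter.log.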
+ location: string &log &optional; + }; +} + +event zeek_init() &priority=5 + { + Log::create_stream(Reporter::LOG, [$columns=Info, $path="reporter"]); + } + +event reporter_info(t: time, msg: string, location: string) &priority=-5 + { + Log::write(Reporter::LOG, [$ts=t, $level=INFO, $message=msg, $location=location]); + } + +event reporter_warning(t: time, msg: string, location: string) &priority=-5 + { + Log::write(Reporter::LOG, [$ts=t, $level=WARNING, $message=msg, $location=location]); + } + +event reporter_error(t: time, msg: string, location: string) &priority=-5 + { + Log::write(Reporter::LOG, [$ts=t, $level=ERROR, $message=msg, $location=location]); + } diff --git a/scripts/base/frameworks/signatures/README b/scripts/base/frameworks/signatures/README index fd45cd3a19..7bc1926309 100644 --- a/scripts/base/frameworks/signatures/README +++ b/scripts/base/frameworks/signatures/README @@ -1,4 +1,4 @@ The signature framework provides for doing low-level pattern matching. While -signatures are not Bro's preferred detection tool, they sometimes come in +signatures are not Zeek's preferred detection tool, they sometimes come in handy and are closer to what many people are familiar with from using other NIDS. diff --git a/scripts/base/frameworks/signatures/__load__.bro b/scripts/base/frameworks/signatures/__load__.zeek similarity index 100% rename from scripts/base/frameworks/signatures/__load__.bro rename to scripts/base/frameworks/signatures/__load__.zeek diff --git a/scripts/base/frameworks/signatures/main.bro b/scripts/base/frameworks/signatures/main.bro deleted file mode 100644 index 70c446d046..0000000000 --- a/scripts/base/frameworks/signatures/main.bro +++ /dev/null @@ -1,311 +0,0 @@ -##! Script level signature support. See the -##! :doc:`signature documentation ` for more -##! information about Bro's signature engine. - -@load base/frameworks/notice - -module Signatures; - -export { - ## Add various signature-related notice types. - redef enum Notice::Type += { - ## Generic notice type for notice-worthy signature matches. - Sensitive_Signature, - ## Host has triggered many signatures on the same host. The - ## number of signatures is defined by the - ## :bro:id:`Signatures::vert_scan_thresholds` variable. - Multiple_Signatures, - ## Host has triggered the same signature on multiple hosts as - ## defined by the :bro:id:`Signatures::horiz_scan_thresholds` - ## variable. - Multiple_Sig_Responders, - ## The same signature has triggered multiple times for a host. - ## The number of times the signature has been triggered is - ## defined by the :bro:id:`Signatures::count_thresholds` - ## variable. To generate this notice, the - ## :bro:enum:`Signatures::SIG_COUNT_PER_RESP` action must be - ## set for the signature. - Count_Signature, - ## Summarize the number of times a host triggered a signature. - ## The interval between summaries is defined by the - ## :bro:id:`Signatures::summary_interval` variable. - Signature_Summary, - }; - - ## The signature logging stream identifier. - redef enum Log::ID += { LOG }; - - ## These are the default actions you can apply to signature matches. - ## All of them write the signature record to the logging stream unless - ## declared otherwise. - type Action: enum { - ## Ignore this signature completely (even for scan detection). - ## Don't write to the signatures logging stream. - SIG_IGNORE, - ## Process through the various aggregate techniques, but don't - ## report individually and don't write to the signatures logging - ## stream. 
- SIG_QUIET, - ## Generate a notice. - SIG_LOG, - ## The same as :bro:enum:`Signatures::SIG_LOG`, but ignore for - ## aggregate/scan processing. - SIG_FILE_BUT_NO_SCAN, - ## Generate a notice and set it to be alarmed upon. - SIG_ALARM, - ## Alarm once per originator. - SIG_ALARM_PER_ORIG, - ## Alarm once and then never again. - SIG_ALARM_ONCE, - ## Count signatures per responder host and alarm with the - ## :bro:enum:`Signatures::Count_Signature` notice if a threshold - ## defined by :bro:id:`Signatures::count_thresholds` is reached. - SIG_COUNT_PER_RESP, - ## Don't alarm, but generate per-orig summary. - SIG_SUMMARY, - }; - - ## The record type which contains the column fields of the signature log. - type Info: record { - ## The network time at which a signature matching type of event - ## to be logged has occurred. - ts: time &log; - ## A unique identifier of the connection which triggered the - ## signature match event. - uid: string &log &optional; - ## The host which triggered the signature match event. - src_addr: addr &log &optional; - ## The host port on which the signature-matching activity - ## occurred. - src_port: port &log &optional; - ## The destination host which was sent the payload that - ## triggered the signature match. - dst_addr: addr &log &optional; - ## The destination host port which was sent the payload that - ## triggered the signature match. - dst_port: port &log &optional; - ## Notice associated with signature event. - note: Notice::Type &log; - ## The name of the signature that matched. - sig_id: string &log &optional; - ## A more descriptive message of the signature-matching event. - event_msg: string &log &optional; - ## Extracted payload data or extra message. - sub_msg: string &log &optional; - ## Number of sigs, usually from summary count. - sig_count: count &log &optional; - ## Number of hosts, from a summary count. - host_count: count &log &optional; - }; - - ## Actions for a signature. - const actions: table[string] of Action = { - ["unspecified"] = SIG_IGNORE, # place-holder - } &redef &default = SIG_ALARM; - - ## Signature IDs that should always be ignored. - option ignored_ids = /NO_DEFAULT_MATCHES/; - - ## Generate a notice if, for a pair [orig, signature], the number of - ## different responders has reached one of the thresholds. - const horiz_scan_thresholds = { 5, 10, 50, 100, 500, 1000 } &redef; - - ## Generate a notice if, for a pair [orig, resp], the number of - ## different signature matches has reached one of the thresholds. - const vert_scan_thresholds = { 5, 10, 50, 100, 500, 1000 } &redef; - - ## Generate a notice if a :bro:enum:`Signatures::SIG_COUNT_PER_RESP` - ## signature is triggered as often as given by one of these thresholds. - const count_thresholds = { 5, 10, 50, 100, 500, 1000, 10000, 1000000, } &redef; - - ## The interval between when :bro:enum:`Signatures::Signature_Summary` - ## notices are generated. - option summary_interval = 1 day; - - ## This event can be handled to access/alter data about to be logged - ## to the signature logging stream. - ## - ## rec: The record of signature data about to be logged. 
- global log_signature: event(rec: Info); -} - -global horiz_table: table[addr, string] of addr_set &read_expire = 1 hr; -global vert_table: table[addr, addr] of string_set &read_expire = 1 hr; -global last_hthresh: table[addr] of count &default = 0 &read_expire = 1 hr; -global last_vthresh: table[addr] of count &default = 0 &read_expire = 1 hr; -global count_per_resp: table[addr, string] of count - &default = 0 &read_expire = 1 hr; -global count_per_orig: table[addr, string] of count - &default = 0 &read_expire = 1 hr; -global did_sig_log: set[string] &read_expire = 1 hr; - - -event bro_init() - { - Log::create_stream(Signatures::LOG, [$columns=Info, $ev=log_signature, $path="signatures"]); - } - -# Returns true if the given signature has already been triggered for the given -# [orig, resp] pair. -function has_signature_matched(id: string, orig: addr, resp: addr): bool - { - return [orig, resp] in vert_table ? id in vert_table[orig, resp] : F; - } - -event sig_summary(orig: addr, id: string, msg: string) - { - NOTICE([$note=Signature_Summary, $src=orig, - $msg=fmt("%s: %s", orig, msg), - $n=count_per_orig[orig,id] ]); - } - -event signature_match(state: signature_state, msg: string, data: string) - { - local sig_id = state$sig_id; - local action = actions[sig_id]; - - if ( action == SIG_IGNORE || ignored_ids in sig_id ) - return; - - # Trim the matched data down to something reasonable - if ( |data| > 140 ) - data = fmt("%s...", sub_bytes(data, 0, 140)); - - local src_addr: addr; - local src_port: port; - local dst_addr: addr; - local dst_port: port; - - if ( state$is_orig ) - { - src_addr = state$conn$id$orig_h; - src_port = state$conn$id$orig_p; - dst_addr = state$conn$id$resp_h; - dst_port = state$conn$id$resp_p; - } - else - { - src_addr = state$conn$id$resp_h; - src_port = state$conn$id$resp_p; - dst_addr = state$conn$id$orig_h; - dst_port = state$conn$id$orig_p; - } - - if ( action != SIG_QUIET && action != SIG_COUNT_PER_RESP ) - { - local info: Info = [$ts=network_time(), - $note=Sensitive_Signature, - $uid=state$conn$uid, - $src_addr=src_addr, - $src_port=src_port, - $dst_addr=dst_addr, - $dst_port=dst_port, - $event_msg=fmt("%s: %s", src_addr, msg), - $sig_id=sig_id, - $sub_msg=data]; - Log::write(Signatures::LOG, info); - } - - local notice = F; - if ( action == SIG_ALARM ) - notice = T; - - if ( action == SIG_COUNT_PER_RESP ) - { - local dst = state$conn$id$resp_h; - if ( ++count_per_resp[dst,sig_id] in count_thresholds ) - { - NOTICE([$note=Count_Signature, $conn=state$conn, - $msg=msg, - $n=count_per_resp[dst,sig_id], - $sub=fmt("%d matches of signature %s on host %s", - count_per_resp[dst,sig_id], - sig_id, dst)]); - } - } - - if ( (action == SIG_ALARM_PER_ORIG || action == SIG_SUMMARY) && - ++count_per_orig[state$conn$id$orig_h, sig_id] == 1 ) - { - if ( action == SIG_ALARM_PER_ORIG ) - notice = T; - else - schedule summary_interval { - sig_summary(state$conn$id$orig_h, sig_id, msg) - }; - } - - if ( action == SIG_ALARM_ONCE ) - { - if ( [sig_id] !in did_sig_log ) - { - notice = T; - add did_sig_log[sig_id]; - } - } - - if ( notice ) - NOTICE([$note=Sensitive_Signature, - $conn=state$conn, $src=src_addr, - $dst=dst_addr, $msg=fmt("%s: %s", src_addr, msg), - $sub=data]); - - if ( action == SIG_FILE_BUT_NO_SCAN || action == SIG_SUMMARY ) - return; - - # Keep track of scans. 
- local orig = state$conn$id$orig_h; - local resp = state$conn$id$resp_h; - - if ( [orig, sig_id] !in horiz_table ) - horiz_table[orig, sig_id] = set(); - - add horiz_table[orig, sig_id][resp]; - - if ( [orig, resp] !in vert_table ) - vert_table[orig, resp] = set(); - - add vert_table[orig, resp][sig_id]; - - local hcount = |horiz_table[orig, sig_id]|; - local vcount = |vert_table[orig, resp]|; - - if ( hcount in horiz_scan_thresholds && hcount != last_hthresh[orig] ) - { - local horz_scan_msg = - fmt("%s has triggered signature %s on %d hosts", - orig, sig_id, hcount); - - Log::write(Signatures::LOG, - [$ts=network_time(), $note=Multiple_Sig_Responders, - $src_addr=orig, $sig_id=sig_id, $event_msg=msg, - $host_count=hcount, $sub_msg=horz_scan_msg]); - - NOTICE([$note=Multiple_Sig_Responders, $src=orig, - $msg=msg, $n=hcount, $sub=horz_scan_msg]); - - last_hthresh[orig] = hcount; - } - - if ( vcount in vert_scan_thresholds && vcount != last_vthresh[orig] ) - { - local vert_scan_msg = - fmt("%s has triggered %d different signatures on host %s", - orig, vcount, resp); - - Log::write(Signatures::LOG, - [$ts=network_time(), - $note=Multiple_Signatures, - $src_addr=orig, - $dst_addr=resp, $sig_id=sig_id, $sig_count=vcount, - $event_msg=fmt("%s different signatures triggered", vcount), - $sub_msg=vert_scan_msg]); - - NOTICE([$note=Multiple_Signatures, $src=orig, $dst=resp, - $msg=fmt("%s different signatures triggered", vcount), - $n=vcount, $sub=vert_scan_msg]); - - last_vthresh[orig] = vcount; - } - } - diff --git a/scripts/base/frameworks/signatures/main.zeek b/scripts/base/frameworks/signatures/main.zeek new file mode 100644 index 0000000000..eb2143d4ec --- /dev/null +++ b/scripts/base/frameworks/signatures/main.zeek @@ -0,0 +1,311 @@ +##! Script level signature support. See the +##! :doc:`signature documentation ` for more +##! information about Zeek's signature engine. + +@load base/frameworks/notice + +module Signatures; + +export { + ## Add various signature-related notice types. + redef enum Notice::Type += { + ## Generic notice type for notice-worthy signature matches. + Sensitive_Signature, + ## Host has triggered many signatures on the same host. The + ## number of signatures is defined by the + ## :zeek:id:`Signatures::vert_scan_thresholds` variable. + Multiple_Signatures, + ## Host has triggered the same signature on multiple hosts as + ## defined by the :zeek:id:`Signatures::horiz_scan_thresholds` + ## variable. + Multiple_Sig_Responders, + ## The same signature has triggered multiple times for a host. + ## The number of times the signature has been triggered is + ## defined by the :zeek:id:`Signatures::count_thresholds` + ## variable. To generate this notice, the + ## :zeek:enum:`Signatures::SIG_COUNT_PER_RESP` action must be + ## set for the signature. + Count_Signature, + ## Summarize the number of times a host triggered a signature. + ## The interval between summaries is defined by the + ## :zeek:id:`Signatures::summary_interval` variable. + Signature_Summary, + }; + + ## The signature logging stream identifier. + redef enum Log::ID += { LOG }; + + ## These are the default actions you can apply to signature matches. + ## All of them write the signature record to the logging stream unless + ## declared otherwise. + type Action: enum { + ## Ignore this signature completely (even for scan detection). + ## Don't write to the signatures logging stream. 
+ SIG_IGNORE, + ## Process through the various aggregate techniques, but don't + ## report individually and don't write to the signatures logging + ## stream. + SIG_QUIET, + ## Generate a notice. + SIG_LOG, + ## The same as :zeek:enum:`Signatures::SIG_LOG`, but ignore for + ## aggregate/scan processing. + SIG_FILE_BUT_NO_SCAN, + ## Generate a notice and set it to be alarmed upon. + SIG_ALARM, + ## Alarm once per originator. + SIG_ALARM_PER_ORIG, + ## Alarm once and then never again. + SIG_ALARM_ONCE, + ## Count signatures per responder host and alarm with the + ## :zeek:enum:`Signatures::Count_Signature` notice if a threshold + ## defined by :zeek:id:`Signatures::count_thresholds` is reached. + SIG_COUNT_PER_RESP, + ## Don't alarm, but generate per-orig summary. + SIG_SUMMARY, + }; + + ## The record type which contains the column fields of the signature log. + type Info: record { + ## The network time at which a signature matching type of event + ## to be logged has occurred. + ts: time &log; + ## A unique identifier of the connection which triggered the + ## signature match event. + uid: string &log &optional; + ## The host which triggered the signature match event. + src_addr: addr &log &optional; + ## The host port on which the signature-matching activity + ## occurred. + src_port: port &log &optional; + ## The destination host which was sent the payload that + ## triggered the signature match. + dst_addr: addr &log &optional; + ## The destination host port which was sent the payload that + ## triggered the signature match. + dst_port: port &log &optional; + ## Notice associated with signature event. + note: Notice::Type &log; + ## The name of the signature that matched. + sig_id: string &log &optional; + ## A more descriptive message of the signature-matching event. + event_msg: string &log &optional; + ## Extracted payload data or extra message. + sub_msg: string &log &optional; + ## Number of sigs, usually from summary count. + sig_count: count &log &optional; + ## Number of hosts, from a summary count. + host_count: count &log &optional; + }; + + ## Actions for a signature. + const actions: table[string] of Action = { + ["unspecified"] = SIG_IGNORE, # place-holder + } &redef &default = SIG_ALARM; + + ## Signature IDs that should always be ignored. + option ignored_ids = /NO_DEFAULT_MATCHES/; + + ## Generate a notice if, for a pair [orig, signature], the number of + ## different responders has reached one of the thresholds. + const horiz_scan_thresholds = { 5, 10, 50, 100, 500, 1000 } &redef; + + ## Generate a notice if, for a pair [orig, resp], the number of + ## different signature matches has reached one of the thresholds. + const vert_scan_thresholds = { 5, 10, 50, 100, 500, 1000 } &redef; + + ## Generate a notice if a :zeek:enum:`Signatures::SIG_COUNT_PER_RESP` + ## signature is triggered as often as given by one of these thresholds. + const count_thresholds = { 5, 10, 50, 100, 500, 1000, 10000, 1000000, } &redef; + + ## The interval between when :zeek:enum:`Signatures::Signature_Summary` + ## notices are generated. + option summary_interval = 1 day; + + ## This event can be handled to access/alter data about to be logged + ## to the signature logging stream. + ## + ## rec: The record of signature data about to be logged. 
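As a usage sketch (the signature IDs here are hypothetical), sites typically tune per-signature behavior by redefining the actions table declared above:

    @load base/frameworks/signatures

    redef Signatures::actions += {
        # Alarm only the first time this signature ever fires.
        ["my-exploit-sig"] = Signatures::SIG_ALARM_ONCE,
        # Keep a known-noisy signature out of the log and scan tracking.
        ["noisy-sig"] = Signatures::SIG_IGNORE
    };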
+ global log_signature: event(rec: Info); +} + +global horiz_table: table[addr, string] of addr_set &read_expire = 1 hr; +global vert_table: table[addr, addr] of string_set &read_expire = 1 hr; +global last_hthresh: table[addr] of count &default = 0 &read_expire = 1 hr; +global last_vthresh: table[addr] of count &default = 0 &read_expire = 1 hr; +global count_per_resp: table[addr, string] of count + &default = 0 &read_expire = 1 hr; +global count_per_orig: table[addr, string] of count + &default = 0 &read_expire = 1 hr; +global did_sig_log: set[string] &read_expire = 1 hr; + + +event zeek_init() + { + Log::create_stream(Signatures::LOG, [$columns=Info, $ev=log_signature, $path="signatures"]); + } + +# Returns true if the given signature has already been triggered for the given +# [orig, resp] pair. +function has_signature_matched(id: string, orig: addr, resp: addr): bool + { + return [orig, resp] in vert_table ? id in vert_table[orig, resp] : F; + } + +event sig_summary(orig: addr, id: string, msg: string) + { + NOTICE([$note=Signature_Summary, $src=orig, + $msg=fmt("%s: %s", orig, msg), + $n=count_per_orig[orig,id] ]); + } + +event signature_match(state: signature_state, msg: string, data: string) + { + local sig_id = state$sig_id; + local action = actions[sig_id]; + + if ( action == SIG_IGNORE || ignored_ids in sig_id ) + return; + + # Trim the matched data down to something reasonable + if ( |data| > 140 ) + data = fmt("%s...", sub_bytes(data, 0, 140)); + + local src_addr: addr; + local src_port: port; + local dst_addr: addr; + local dst_port: port; + + if ( state$is_orig ) + { + src_addr = state$conn$id$orig_h; + src_port = state$conn$id$orig_p; + dst_addr = state$conn$id$resp_h; + dst_port = state$conn$id$resp_p; + } + else + { + src_addr = state$conn$id$resp_h; + src_port = state$conn$id$resp_p; + dst_addr = state$conn$id$orig_h; + dst_port = state$conn$id$orig_p; + } + + if ( action != SIG_QUIET && action != SIG_COUNT_PER_RESP ) + { + local info: Info = [$ts=network_time(), + $note=Sensitive_Signature, + $uid=state$conn$uid, + $src_addr=src_addr, + $src_port=src_port, + $dst_addr=dst_addr, + $dst_port=dst_port, + $event_msg=fmt("%s: %s", src_addr, msg), + $sig_id=sig_id, + $sub_msg=data]; + Log::write(Signatures::LOG, info); + } + + local notice = F; + if ( action == SIG_ALARM ) + notice = T; + + if ( action == SIG_COUNT_PER_RESP ) + { + local dst = state$conn$id$resp_h; + if ( ++count_per_resp[dst,sig_id] in count_thresholds ) + { + NOTICE([$note=Count_Signature, $conn=state$conn, + $msg=msg, + $n=count_per_resp[dst,sig_id], + $sub=fmt("%d matches of signature %s on host %s", + count_per_resp[dst,sig_id], + sig_id, dst)]); + } + } + + if ( (action == SIG_ALARM_PER_ORIG || action == SIG_SUMMARY) && + ++count_per_orig[state$conn$id$orig_h, sig_id] == 1 ) + { + if ( action == SIG_ALARM_PER_ORIG ) + notice = T; + else + schedule summary_interval { + sig_summary(state$conn$id$orig_h, sig_id, msg) + }; + } + + if ( action == SIG_ALARM_ONCE ) + { + if ( [sig_id] !in did_sig_log ) + { + notice = T; + add did_sig_log[sig_id]; + } + } + + if ( notice ) + NOTICE([$note=Sensitive_Signature, + $conn=state$conn, $src=src_addr, + $dst=dst_addr, $msg=fmt("%s: %s", src_addr, msg), + $sub=data]); + + if ( action == SIG_FILE_BUT_NO_SCAN || action == SIG_SUMMARY ) + return; + + # Keep track of scans. 
+ local orig = state$conn$id$orig_h; + local resp = state$conn$id$resp_h; + + if ( [orig, sig_id] !in horiz_table ) + horiz_table[orig, sig_id] = set(); + + add horiz_table[orig, sig_id][resp]; + + if ( [orig, resp] !in vert_table ) + vert_table[orig, resp] = set(); + + add vert_table[orig, resp][sig_id]; + + local hcount = |horiz_table[orig, sig_id]|; + local vcount = |vert_table[orig, resp]|; + + if ( hcount in horiz_scan_thresholds && hcount != last_hthresh[orig] ) + { + local horz_scan_msg = + fmt("%s has triggered signature %s on %d hosts", + orig, sig_id, hcount); + + Log::write(Signatures::LOG, + [$ts=network_time(), $note=Multiple_Sig_Responders, + $src_addr=orig, $sig_id=sig_id, $event_msg=msg, + $host_count=hcount, $sub_msg=horz_scan_msg]); + + NOTICE([$note=Multiple_Sig_Responders, $src=orig, + $msg=msg, $n=hcount, $sub=horz_scan_msg]); + + last_hthresh[orig] = hcount; + } + + if ( vcount in vert_scan_thresholds && vcount != last_vthresh[orig] ) + { + local vert_scan_msg = + fmt("%s has triggered %d different signatures on host %s", + orig, vcount, resp); + + Log::write(Signatures::LOG, + [$ts=network_time(), + $note=Multiple_Signatures, + $src_addr=orig, + $dst_addr=resp, $sig_id=sig_id, $sig_count=vcount, + $event_msg=fmt("%s different signatures triggered", vcount), + $sub_msg=vert_scan_msg]); + + NOTICE([$note=Multiple_Signatures, $src=orig, $dst=resp, + $msg=fmt("%s different signatures triggered", vcount), + $n=vcount, $sub=vert_scan_msg]); + + last_vthresh[orig] = vcount; + } + } + diff --git a/scripts/base/frameworks/software/__load__.bro b/scripts/base/frameworks/software/__load__.zeek similarity index 100% rename from scripts/base/frameworks/software/__load__.bro rename to scripts/base/frameworks/software/__load__.zeek diff --git a/scripts/base/frameworks/software/main.bro b/scripts/base/frameworks/software/main.bro deleted file mode 100644 index 068f34d1cf..0000000000 --- a/scripts/base/frameworks/software/main.bro +++ /dev/null @@ -1,530 +0,0 @@ -##! This script provides the framework for software version detection and -##! parsing but doesn't actually do any detection on it's own. It relies on -##! other protocol specific scripts to parse out software from the protocols -##! that they analyze. The entry point for providing new software detections -##! to this framework is through the :bro:id:`Software::found` function. - -@load base/utils/directions-and-hosts -@load base/utils/numbers -@load base/frameworks/cluster - -module Software; - -export { - ## The software logging stream identifier. - redef enum Log::ID += { LOG }; - - ## Scripts detecting new types of software need to redef this enum to add - ## their own specific software types which would then be used when they - ## create :bro:type:`Software::Info` records. - type Type: enum { - ## A placeholder type for when the type of software is not known. - UNKNOWN, - }; - - ## A structure to represent the numeric version of software. - type Version: record { - ## Major version number. - major: count &optional; - ## Minor version number. - minor: count &optional; - ## Minor subversion number. - minor2: count &optional; - ## Minor updates number. - minor3: count &optional; - ## Additional version string (e.g. "beta42"). - addl: string &optional; - } &log; - - ## The record type that is used for representing and logging software. - type Info: record { - ## The time at which the software was detected. - ts: time &log &optional; - ## The IP address detected running the software. 
- host: addr &log; - ## The port on which the software is running. Only sensible for - ## server software. - host_p: port &log &optional; - ## The type of software detected (e.g. :bro:enum:`HTTP::SERVER`). - software_type: Type &log &default=UNKNOWN; - ## Name of the software (e.g. Apache). - name: string &log &optional; - ## Version of the software. - version: Version &log &optional; - ## The full unparsed version string found because the version - ## parsing doesn't always work reliably in all cases and this - ## acts as a fallback in the logs. - unparsed_version: string &log &optional; - - ## This can indicate that this software being detected should - ## definitely be sent onward to the logging framework. By - ## default, only software that is "interesting" due to a change - ## in version or it being currently unknown is sent to the - ## logging framework. This can be set to T to force the record - ## to be sent to the logging framework if some amount of this - ## tracking needs to happen in a specific way to the software. - force_log: bool &default=F; - }; - - ## Hosts whose software should be detected and tracked. - ## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS. - option asset_tracking = LOCAL_HOSTS; - - ## Other scripts should call this function when they detect software. - ## - ## id: The connection id where the software was discovered. - ## - ## info: A record representing the software discovered. - ## - ## Returns: T if the software was logged, F otherwise. - global found: function(id: conn_id, info: Info): bool; - - ## Compare two version records. - ## - ## Returns: -1 for v1 < v2, 0 for v1 == v2, 1 for v1 > v2. - ## If the numerical version numbers match, the *addl* string - ## is compared lexicographically. - global cmp_versions: function(v1: Version, v2: Version): int; - - ## Sometimes software will expose itself on the network with - ## slight naming variations. This table provides a mechanism - ## for a piece of software to be renamed to a single name - ## even if it exposes itself with an alternate name. The - ## yielded string is the name that will be logged and generally - ## used for everything. - global alternate_names: table[string] of string { - ["Flash Player"] = "Flash", - } &default=function(a: string): string { return a; }; - - ## Type to represent a collection of :bro:type:`Software::Info` records. - ## It's indexed with the name of a piece of software such as "Firefox" - ## and it yields a :bro:type:`Software::Info` record with more - ## information about the software. - type SoftwareSet: table[string] of Info; - - ## The set of software associated with an address. Data expires from - ## this table after one day by default so that a detected piece of - ## software will be logged once each day. In a cluster, this table is - ## uniformly distributed among proxy nodes. - global tracked: table[addr] of SoftwareSet &create_expire=1day; - - ## This event can be handled to access the :bro:type:`Software::Info` - ## record as it is sent on to the logging framework. - global log_software: event(rec: Info); - - ## This event can be handled to access software information whenever it's - ## version is found to have changed. - global version_change: event(old: Info, new: Info); - - ## This event is raised when software is about to be registered for - ## tracking in :bro:see:`Software::tracked`. 
- global register: event(info: Info); -} - -event bro_init() &priority=5 - { - Log::create_stream(Software::LOG, [$columns=Info, $ev=log_software, $path="software"]); - } - -type Description: record { - name: string; - version: Version; - unparsed_version: string; -}; - -# Defining this here because of a circular dependency between two functions. -global parse_mozilla: function(unparsed_version: string): Description; - -# Don't even try to understand this now, just make sure the tests are -# working. -function parse(unparsed_version: string): Description - { - local software_name = ""; - local v: Version; - - # Parse browser-alike versions separately - if ( /^(Mozilla|Opera)\/[0-9]+\./ in unparsed_version ) - { - return parse_mozilla(unparsed_version); - } - else if ( /A\/[0-9\.]*\/Google\/Pixel/ in unparsed_version ) - { - software_name = "Android (Google Pixel)"; - local parts = split_string_all(unparsed_version, /\//); - if ( 2 in parts ) - { - local vs = parts[2]; - - if ( "." in vs ) - v = parse(vs)$version; - else - v = Version($major=extract_count(vs)); - - return [$version=v, $unparsed_version=unparsed_version, $name=software_name]; - } - } - else - { - # The regular expression should match the complete version number - # and software name. - local clean_unparsed_version = gsub(unparsed_version, /\\x/, "%"); - clean_unparsed_version = unescape_URI(clean_unparsed_version); - local version_parts = split_string_n(clean_unparsed_version, /([\/\-_]|( [\(v]+))?[0-9\-\._, ]{2,}/, T, 1); - if ( 0 in version_parts ) - { - # Remove any bits of junk at end of first part. - if ( /([\/\-_]|( [\(v]+))$/ in version_parts[0] ) - version_parts[0] = strip(sub(version_parts[0], /([\/\-_]|( [\(v]+))/, "")); - - if ( /^\(/ in version_parts[0] ) - software_name = strip(sub(version_parts[0], /\(/, "")); - else - software_name = strip(version_parts[0]); - } - if ( |version_parts| >= 2 ) - { - # Remove the name/version separator if it's left at the beginning - # of the version number from the previous split_all. 
- local sv = strip(version_parts[1]); - if ( /^[\/\-\._v\(]/ in sv ) - sv = strip(sub(version_parts[1], /^\(?[\/\-\._v\(]/, "")); - local version_numbers = split_string_n(sv, /[\-\._,\[\(\{ ]/, F, 3); - if ( 4 in version_numbers && version_numbers[4] != "" ) - v$addl = strip(version_numbers[4]); - else if ( 2 in version_parts && version_parts[2] != "" && - version_parts[2] != ")" ) - { - if ( /^[[:blank:]]*\([a-zA-Z0-9\-\._[:blank:]]*\)/ in version_parts[2] ) - { - v$addl = split_string_n(version_parts[2], /[\(\)]/, F, 2)[1]; - } - else - { - local vp = split_string_n(version_parts[2], /[\-\._,;\[\]\(\)\{\} ]/, F, 3); - if ( |vp| >= 1 && vp[0] != "" ) - { - v$addl = strip(vp[0]); - } - else if ( |vp| >= 2 && vp[1] != "" ) - { - v$addl = strip(vp[1]); - } - else if ( |vp| >= 3 && vp[2] != "" ) - { - v$addl = strip(vp[2]); - } - else - { - v$addl = strip(version_parts[2]); - } - - } - } - - if ( 3 in version_numbers && version_numbers[3] != "" ) - v$minor3 = extract_count(version_numbers[3]); - if ( 2 in version_numbers && version_numbers[2] != "" ) - v$minor2 = extract_count(version_numbers[2]); - if ( 1 in version_numbers && version_numbers[1] != "" ) - v$minor = extract_count(version_numbers[1]); - if ( 0 in version_numbers && version_numbers[0] != "" ) - v$major = extract_count(version_numbers[0]); - } - } - - return [$version=v, $unparsed_version=unparsed_version, $name=alternate_names[software_name]]; - } - - -function parse_mozilla(unparsed_version: string): Description - { - local software_name = ""; - local v: Version; - local parts: string_vec; - - if ( /Opera [0-9\.]*$/ in unparsed_version ) - { - software_name = "Opera"; - parts = split_string_all(unparsed_version, /Opera [0-9\.]*$/); - if ( 1 in parts ) - v = parse(parts[1])$version; - } - else if ( / MSIE |Trident\// in unparsed_version ) - { - software_name = "MSIE"; - if ( /Trident\/4\.0/ in unparsed_version ) - v = [$major=8,$minor=0]; - else if ( /Trident\/5\.0/ in unparsed_version ) - v = [$major=9,$minor=0]; - else if ( /Trident\/6\.0/ in unparsed_version ) - v = [$major=10,$minor=0]; - else if ( /Trident\/7\.0/ in unparsed_version ) - v = [$major=11,$minor=0]; - else - { - parts = split_string_all(unparsed_version, /MSIE [0-9]{1,2}\.*[0-9]*b?[0-9]*/); - if ( 1 in parts ) - v = parse(parts[1])$version; - } - } - else if ( /Edge\// in unparsed_version ) - { - software_name="Edge"; - parts = split_string_all(unparsed_version, /Edge\/[0-9\.]*/); - if ( 1 in parts ) - v = parse(parts[1])$version; - } - else if ( /Version\/.*Safari\// in unparsed_version ) - { - software_name = "Safari"; - parts = split_string_all(unparsed_version, /Version\/[0-9\.]*/); - if ( 1 in parts ) - { - v = parse(parts[1])$version; - if ( / Mobile\/?.* Safari/ in unparsed_version ) - v$addl = "Mobile"; - } - } - else if ( /(Firefox|Netscape|Thunderbird)\/[0-9\.]*/ in unparsed_version ) - { - parts = split_string_all(unparsed_version, /(Firefox|Netscape|Thunderbird)\/[0-9\.]*/); - if ( 1 in parts ) - { - local tmp_s = parse(parts[1]); - software_name = tmp_s$name; - v = tmp_s$version; - } - } - else if ( /Chrome\/.*Safari\// in unparsed_version ) - { - software_name = "Chrome"; - parts = split_string_all(unparsed_version, /Chrome\/[0-9\.]*/); - if ( 1 in parts ) - v = parse(parts[1])$version; - } - else if ( /^Opera\// in unparsed_version ) - { - if ( /Opera M(ini|obi)\// in unparsed_version ) - { - parts = split_string_all(unparsed_version, /Opera M(ini|obi)/); - if ( 1 in parts ) - software_name = parts[1]; - parts = 
split_string_all(unparsed_version, /Version\/[0-9\.]*/); - if ( 1 in parts ) - v = parse(parts[1])$version; - else - { - parts = split_string_all(unparsed_version, /Opera Mini\/[0-9\.]*/); - if ( 1 in parts ) - v = parse(parts[1])$version; - } - } - else - { - software_name = "Opera"; - parts = split_string_all(unparsed_version, /Version\/[0-9\.]*/); - if ( 1 in parts ) - v = parse(parts[1])$version; - } - } - else if ( /Flash%20Player/ in unparsed_version ) - { - software_name = "Flash"; - parts = split_string_all(unparsed_version, /[\/ ]/); - if ( 2 in parts ) - v = parse(parts[2])$version; - } - - else if ( /AdobeAIR\/[0-9\.]*/ in unparsed_version ) - { - software_name = "AdobeAIR"; - parts = split_string_all(unparsed_version, /AdobeAIR\/[0-9\.]*/); - if ( 1 in parts ) - v = parse(parts[1])$version; - } - else if ( /AppleWebKit\/[0-9\.]*/ in unparsed_version ) - { - software_name = "Unspecified WebKit"; - parts = split_string_all(unparsed_version, /AppleWebKit\/[0-9\.]*/); - if ( 1 in parts ) - v = parse(parts[1])$version; - } - else if ( / Java\/[0-9]\./ in unparsed_version ) - { - software_name = "Java"; - parts = split_string_all(unparsed_version, /Java\/[0-9\._]*/); - if ( 1 in parts ) - v = parse(parts[1])$version; - } - - return [$version=v, $unparsed_version=unparsed_version, $name=software_name]; - } - - -function cmp_versions(v1: Version, v2: Version): int - { - if ( v1?$major && v2?$major ) - { - if ( v1$major < v2$major ) - return -1; - if ( v1$major > v2$major ) - return 1; - } - else - { - if ( !v1?$major && !v2?$major ) - { } - else - return v1?$major ? 1 : -1; - } - - if ( v1?$minor && v2?$minor ) - { - if ( v1$minor < v2$minor ) - return -1; - if ( v1$minor > v2$minor ) - return 1; - } - else - { - if ( !v1?$minor && !v2?$minor ) - { } - else - return v1?$minor ? 1 : -1; - } - - if ( v1?$minor2 && v2?$minor2 ) - { - if ( v1$minor2 < v2$minor2 ) - return -1; - if ( v1$minor2 > v2$minor2 ) - return 1; - } - else - { - if ( !v1?$minor2 && !v2?$minor2 ) - { } - else - return v1?$minor2 ? 1 : -1; - } - - if ( v1?$minor3 && v2?$minor3 ) - { - if ( v1$minor3 < v2$minor3 ) - return -1; - if ( v1$minor3 > v2$minor3 ) - return 1; - } - else - { - if ( !v1?$minor3 && !v2?$minor3 ) - { } - else - return v1?$minor3 ? 1 : -1; - } - - if ( v1?$addl && v2?$addl ) - { - return strcmp(v1$addl, v2$addl); - } - else - { - if ( !v1?$addl && !v2?$addl ) - return 0; - else - return v1?$addl ? 1 : -1; - } - - # A catcher return that should never be reached...hopefully - return 0; - } - -function software_endpoint_name(id: conn_id, host: addr): string - { - return fmt("%s %s", host, (host == id$orig_h ? "client" : "server")); - } - -# Convert a version into a string "a.b.c-x". -function software_fmt_version(v: Version): string - { - return fmt("%s%s%s%s%s", - v?$major ? fmt("%d", v$major) : "0", - v?$minor ? fmt(".%d", v$minor) : "", - v?$minor2 ? fmt(".%d", v$minor2) : "", - v?$minor3 ? fmt(".%d", v$minor3) : "", - v?$addl ? fmt("-%s", v$addl) : ""); - } - -# Convert a software into a string "name a.b.cx". -function software_fmt(i: Info): string - { - return fmt("%s %s", i$name, software_fmt_version(i$version)); - } - -event Software::register(info: Info) - { - local ts: SoftwareSet; - - if ( info$host in tracked ) - ts = tracked[info$host]; - else - ts = tracked[info$host] = SoftwareSet(); - - # Software already registered for this host? We don't want to endlessly - # log the same thing. 
- if ( info$name in ts ) - { - local old = ts[info$name]; - local changed = cmp_versions(old$version, info$version) != 0; - - if ( changed ) - event Software::version_change(old, info); - else if ( ! info$force_log ) - # If the version hasn't changed, then we're just redetecting the - # same thing, then we don't care. - return; - } - - ts[info$name] = info; - Log::write(Software::LOG, info); - } - -function found(id: conn_id, info: Info): bool - { - if ( ! info$force_log && ! addr_matches_host(info$host, asset_tracking) ) - return F; - - if ( ! info?$ts ) - info$ts = network_time(); - - if ( info?$version ) - { - if ( ! info?$name ) - { - Reporter::error("Required field name not present in Software::found"); - return F; - } - } - else if ( ! info?$unparsed_version ) - { - Reporter::error("No unparsed version string present in Info record with version in Software::found"); - return F; - } - - if ( ! info?$version ) - { - local sw = parse(info$unparsed_version); - info$unparsed_version = sw$unparsed_version; - info$name = sw$name; - info$version = sw$version; - } - - @if ( Cluster::is_enabled() ) - Cluster::publish_hrw(Cluster::proxy_pool, info$host, Software::register, - info); - @else - event Software::register(info); - @endif - - return T; - } diff --git a/scripts/base/frameworks/software/main.zeek b/scripts/base/frameworks/software/main.zeek new file mode 100644 index 0000000000..83669cbc82 --- /dev/null +++ b/scripts/base/frameworks/software/main.zeek @@ -0,0 +1,530 @@ +##! This script provides the framework for software version detection and +##! parsing but doesn't actually do any detection on it's own. It relies on +##! other protocol specific scripts to parse out software from the protocols +##! that they analyze. The entry point for providing new software detections +##! to this framework is through the :zeek:id:`Software::found` function. + +@load base/utils/directions-and-hosts +@load base/utils/numbers +@load base/frameworks/cluster + +module Software; + +export { + ## The software logging stream identifier. + redef enum Log::ID += { LOG }; + + ## Scripts detecting new types of software need to redef this enum to add + ## their own specific software types which would then be used when they + ## create :zeek:type:`Software::Info` records. + type Type: enum { + ## A placeholder type for when the type of software is not known. + UNKNOWN, + }; + + ## A structure to represent the numeric version of software. + type Version: record { + ## Major version number. + major: count &optional; + ## Minor version number. + minor: count &optional; + ## Minor subversion number. + minor2: count &optional; + ## Minor updates number. + minor3: count &optional; + ## Additional version string (e.g. "beta42"). + addl: string &optional; + } &log; + + ## The record type that is used for representing and logging software. + type Info: record { + ## The time at which the software was detected. + ts: time &log &optional; + ## The IP address detected running the software. + host: addr &log; + ## The port on which the software is running. Only sensible for + ## server software. + host_p: port &log &optional; + ## The type of software detected (e.g. :zeek:enum:`HTTP::SERVER`). + software_type: Type &log &default=UNKNOWN; + ## Name of the software (e.g. Apache). + name: string &log &optional; + ## Version of the software. 
+ version: Version &log &optional; + ## The full unparsed version string found because the version + ## parsing doesn't always work reliably in all cases and this + ## acts as a fallback in the logs. + unparsed_version: string &log &optional; + + ## This can indicate that this software being detected should + ## definitely be sent onward to the logging framework. By + ## default, only software that is "interesting" due to a change + ## in version or it being currently unknown is sent to the + ## logging framework. This can be set to T to force the record + ## to be sent to the logging framework if some amount of this + ## tracking needs to happen in a specific way to the software. + force_log: bool &default=F; + }; + + ## Hosts whose software should be detected and tracked. + ## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS. + option asset_tracking = LOCAL_HOSTS; + + ## Other scripts should call this function when they detect software. + ## + ## id: The connection id where the software was discovered. + ## + ## info: A record representing the software discovered. + ## + ## Returns: T if the software was logged, F otherwise. + global found: function(id: conn_id, info: Info): bool; + + ## Compare two version records. + ## + ## Returns: -1 for v1 < v2, 0 for v1 == v2, 1 for v1 > v2. + ## If the numerical version numbers match, the *addl* string + ## is compared lexicographically. + global cmp_versions: function(v1: Version, v2: Version): int; + + ## Sometimes software will expose itself on the network with + ## slight naming variations. This table provides a mechanism + ## for a piece of software to be renamed to a single name + ## even if it exposes itself with an alternate name. The + ## yielded string is the name that will be logged and generally + ## used for everything. + global alternate_names: table[string] of string { + ["Flash Player"] = "Flash", + } &default=function(a: string): string { return a; }; + + ## Type to represent a collection of :zeek:type:`Software::Info` records. + ## It's indexed with the name of a piece of software such as "Firefox" + ## and it yields a :zeek:type:`Software::Info` record with more + ## information about the software. + type SoftwareSet: table[string] of Info; + + ## The set of software associated with an address. Data expires from + ## this table after one day by default so that a detected piece of + ## software will be logged once each day. In a cluster, this table is + ## uniformly distributed among proxy nodes. + global tracked: table[addr] of SoftwareSet &create_expire=1day; + + ## This event can be handled to access the :zeek:type:`Software::Info` + ## record as it is sent on to the logging framework. + global log_software: event(rec: Info); + + ## This event can be handled to access software information whenever it's + ## version is found to have changed. + global version_change: event(old: Info, new: Info); + + ## This event is raised when software is about to be registered for + ## tracking in :zeek:see:`Software::tracked`. + global register: event(info: Info); +} + +event zeek_init() &priority=5 + { + Log::create_stream(Software::LOG, [$columns=Info, $ev=log_software, $path="software"]); + } + +type Description: record { + name: string; + version: Version; + unparsed_version: string; +}; + +# Defining this here because of a circular dependency between two functions. +global parse_mozilla: function(unparsed_version: string): Description; + +# Don't even try to understand this now, just make sure the tests are +# working. 
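For illustration, a minimal sketch (not part of this patch) of how a protocol script can feed detections into the exported API above. It reuses the stock http_header event and the placeholder UNKNOWN type; real detection scripts redef Software::Type with their own enum values and pass those instead.

event http_header(c: connection, is_orig: bool, name: string, value: string)
	{
	# Server banners describe the responder, so attribute them to resp_h/resp_p
	# and let Software::found() parse the raw banner string.
	if ( ! is_orig && name == "SERVER" )
		Software::found(c$id, [$unparsed_version=value,
		                       $host=c$id$resp_h,
		                       $host_p=c$id$resp_p,
		                       $software_type=Software::UNKNOWN]);
	}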
+function parse(unparsed_version: string): Description + { + local software_name = ""; + local v: Version; + + # Parse browser-alike versions separately + if ( /^(Mozilla|Opera)\/[0-9]+\./ in unparsed_version ) + { + return parse_mozilla(unparsed_version); + } + else if ( /A\/[0-9\.]*\/Google\/Pixel/ in unparsed_version ) + { + software_name = "Android (Google Pixel)"; + local parts = split_string_all(unparsed_version, /\//); + if ( 2 in parts ) + { + local vs = parts[2]; + + if ( "." in vs ) + v = parse(vs)$version; + else + v = Version($major=extract_count(vs)); + + return [$version=v, $unparsed_version=unparsed_version, $name=software_name]; + } + } + else + { + # The regular expression should match the complete version number + # and software name. + local clean_unparsed_version = gsub(unparsed_version, /\\x/, "%"); + clean_unparsed_version = unescape_URI(clean_unparsed_version); + local version_parts = split_string_n(clean_unparsed_version, /([\/\-_]|( [\(v]+))?[0-9\-\._, ]{2,}/, T, 1); + if ( 0 in version_parts ) + { + # Remove any bits of junk at end of first part. + if ( /([\/\-_]|( [\(v]+))$/ in version_parts[0] ) + version_parts[0] = strip(sub(version_parts[0], /([\/\-_]|( [\(v]+))/, "")); + + if ( /^\(/ in version_parts[0] ) + software_name = strip(sub(version_parts[0], /\(/, "")); + else + software_name = strip(version_parts[0]); + } + if ( |version_parts| >= 2 ) + { + # Remove the name/version separator if it's left at the beginning + # of the version number from the previous split_all. + local sv = strip(version_parts[1]); + if ( /^[\/\-\._v\(]/ in sv ) + sv = strip(sub(version_parts[1], /^\(?[\/\-\._v\(]/, "")); + local version_numbers = split_string_n(sv, /[\-\._,\[\(\{ ]/, F, 3); + if ( 4 in version_numbers && version_numbers[4] != "" ) + v$addl = strip(version_numbers[4]); + else if ( 2 in version_parts && version_parts[2] != "" && + version_parts[2] != ")" ) + { + if ( /^[[:blank:]]*\([a-zA-Z0-9\-\._[:blank:]]*\)/ in version_parts[2] ) + { + v$addl = split_string_n(version_parts[2], /[\(\)]/, F, 2)[1]; + } + else + { + local vp = split_string_n(version_parts[2], /[\-\._,;\[\]\(\)\{\} ]/, F, 3); + if ( |vp| >= 1 && vp[0] != "" ) + { + v$addl = strip(vp[0]); + } + else if ( |vp| >= 2 && vp[1] != "" ) + { + v$addl = strip(vp[1]); + } + else if ( |vp| >= 3 && vp[2] != "" ) + { + v$addl = strip(vp[2]); + } + else + { + v$addl = strip(version_parts[2]); + } + + } + } + + if ( 3 in version_numbers && version_numbers[3] != "" ) + v$minor3 = extract_count(version_numbers[3]); + if ( 2 in version_numbers && version_numbers[2] != "" ) + v$minor2 = extract_count(version_numbers[2]); + if ( 1 in version_numbers && version_numbers[1] != "" ) + v$minor = extract_count(version_numbers[1]); + if ( 0 in version_numbers && version_numbers[0] != "" ) + v$major = extract_count(version_numbers[0]); + } + } + + return [$version=v, $unparsed_version=unparsed_version, $name=alternate_names[software_name]]; + } + + +function parse_mozilla(unparsed_version: string): Description + { + local software_name = ""; + local v: Version; + local parts: string_vec; + + if ( /Opera [0-9\.]*$/ in unparsed_version ) + { + software_name = "Opera"; + parts = split_string_all(unparsed_version, /Opera [0-9\.]*$/); + if ( 1 in parts ) + v = parse(parts[1])$version; + } + else if ( / MSIE |Trident\// in unparsed_version ) + { + software_name = "MSIE"; + if ( /Trident\/4\.0/ in unparsed_version ) + v = [$major=8,$minor=0]; + else if ( /Trident\/5\.0/ in unparsed_version ) + v = [$major=9,$minor=0]; + else if ( 
/Trident\/6\.0/ in unparsed_version ) + v = [$major=10,$minor=0]; + else if ( /Trident\/7\.0/ in unparsed_version ) + v = [$major=11,$minor=0]; + else + { + parts = split_string_all(unparsed_version, /MSIE [0-9]{1,2}\.*[0-9]*b?[0-9]*/); + if ( 1 in parts ) + v = parse(parts[1])$version; + } + } + else if ( /Edge\// in unparsed_version ) + { + software_name="Edge"; + parts = split_string_all(unparsed_version, /Edge\/[0-9\.]*/); + if ( 1 in parts ) + v = parse(parts[1])$version; + } + else if ( /Version\/.*Safari\// in unparsed_version ) + { + software_name = "Safari"; + parts = split_string_all(unparsed_version, /Version\/[0-9\.]*/); + if ( 1 in parts ) + { + v = parse(parts[1])$version; + if ( / Mobile\/?.* Safari/ in unparsed_version ) + v$addl = "Mobile"; + } + } + else if ( /(Firefox|Netscape|Thunderbird)\/[0-9\.]*/ in unparsed_version ) + { + parts = split_string_all(unparsed_version, /(Firefox|Netscape|Thunderbird)\/[0-9\.]*/); + if ( 1 in parts ) + { + local tmp_s = parse(parts[1]); + software_name = tmp_s$name; + v = tmp_s$version; + } + } + else if ( /Chrome\/.*Safari\// in unparsed_version ) + { + software_name = "Chrome"; + parts = split_string_all(unparsed_version, /Chrome\/[0-9\.]*/); + if ( 1 in parts ) + v = parse(parts[1])$version; + } + else if ( /^Opera\// in unparsed_version ) + { + if ( /Opera M(ini|obi)\// in unparsed_version ) + { + parts = split_string_all(unparsed_version, /Opera M(ini|obi)/); + if ( 1 in parts ) + software_name = parts[1]; + parts = split_string_all(unparsed_version, /Version\/[0-9\.]*/); + if ( 1 in parts ) + v = parse(parts[1])$version; + else + { + parts = split_string_all(unparsed_version, /Opera Mini\/[0-9\.]*/); + if ( 1 in parts ) + v = parse(parts[1])$version; + } + } + else + { + software_name = "Opera"; + parts = split_string_all(unparsed_version, /Version\/[0-9\.]*/); + if ( 1 in parts ) + v = parse(parts[1])$version; + } + } + else if ( /Flash%20Player/ in unparsed_version ) + { + software_name = "Flash"; + parts = split_string_all(unparsed_version, /[\/ ]/); + if ( 2 in parts ) + v = parse(parts[2])$version; + } + + else if ( /AdobeAIR\/[0-9\.]*/ in unparsed_version ) + { + software_name = "AdobeAIR"; + parts = split_string_all(unparsed_version, /AdobeAIR\/[0-9\.]*/); + if ( 1 in parts ) + v = parse(parts[1])$version; + } + else if ( /AppleWebKit\/[0-9\.]*/ in unparsed_version ) + { + software_name = "Unspecified WebKit"; + parts = split_string_all(unparsed_version, /AppleWebKit\/[0-9\.]*/); + if ( 1 in parts ) + v = parse(parts[1])$version; + } + else if ( / Java\/[0-9]\./ in unparsed_version ) + { + software_name = "Java"; + parts = split_string_all(unparsed_version, /Java\/[0-9\._]*/); + if ( 1 in parts ) + v = parse(parts[1])$version; + } + + return [$version=v, $unparsed_version=unparsed_version, $name=software_name]; + } + + +function cmp_versions(v1: Version, v2: Version): int + { + if ( v1?$major && v2?$major ) + { + if ( v1$major < v2$major ) + return -1; + if ( v1$major > v2$major ) + return 1; + } + else + { + if ( !v1?$major && !v2?$major ) + { } + else + return v1?$major ? 1 : -1; + } + + if ( v1?$minor && v2?$minor ) + { + if ( v1$minor < v2$minor ) + return -1; + if ( v1$minor > v2$minor ) + return 1; + } + else + { + if ( !v1?$minor && !v2?$minor ) + { } + else + return v1?$minor ? 1 : -1; + } + + if ( v1?$minor2 && v2?$minor2 ) + { + if ( v1$minor2 < v2$minor2 ) + return -1; + if ( v1$minor2 > v2$minor2 ) + return 1; + } + else + { + if ( !v1?$minor2 && !v2?$minor2 ) + { } + else + return v1?$minor2 ? 
1 : -1; + } + + if ( v1?$minor3 && v2?$minor3 ) + { + if ( v1$minor3 < v2$minor3 ) + return -1; + if ( v1$minor3 > v2$minor3 ) + return 1; + } + else + { + if ( !v1?$minor3 && !v2?$minor3 ) + { } + else + return v1?$minor3 ? 1 : -1; + } + + if ( v1?$addl && v2?$addl ) + { + return strcmp(v1$addl, v2$addl); + } + else + { + if ( !v1?$addl && !v2?$addl ) + return 0; + else + return v1?$addl ? 1 : -1; + } + + # A catcher return that should never be reached...hopefully + return 0; + } + +function software_endpoint_name(id: conn_id, host: addr): string + { + return fmt("%s %s", host, (host == id$orig_h ? "client" : "server")); + } + +# Convert a version into a string "a.b.c-x". +function software_fmt_version(v: Version): string + { + return fmt("%s%s%s%s%s", + v?$major ? fmt("%d", v$major) : "0", + v?$minor ? fmt(".%d", v$minor) : "", + v?$minor2 ? fmt(".%d", v$minor2) : "", + v?$minor3 ? fmt(".%d", v$minor3) : "", + v?$addl ? fmt("-%s", v$addl) : ""); + } + +# Convert a software into a string "name a.b.cx". +function software_fmt(i: Info): string + { + return fmt("%s %s", i$name, software_fmt_version(i$version)); + } + +event Software::register(info: Info) + { + local ts: SoftwareSet; + + if ( info$host in tracked ) + ts = tracked[info$host]; + else + ts = tracked[info$host] = SoftwareSet(); + + # Software already registered for this host? We don't want to endlessly + # log the same thing. + if ( info$name in ts ) + { + local old = ts[info$name]; + local changed = cmp_versions(old$version, info$version) != 0; + + if ( changed ) + event Software::version_change(old, info); + else if ( ! info$force_log ) + # If the version hasn't changed, then we're just redetecting the + # same thing, then we don't care. + return; + } + + ts[info$name] = info; + Log::write(Software::LOG, info); + } + +function found(id: conn_id, info: Info): bool + { + if ( ! info$force_log && ! addr_matches_host(info$host, asset_tracking) ) + return F; + + if ( ! info?$ts ) + info$ts = network_time(); + + if ( info?$version ) + { + if ( ! info?$name ) + { + Reporter::error("Required field name not present in Software::found"); + return F; + } + } + else if ( ! info?$unparsed_version ) + { + Reporter::error("No unparsed version string present in Info record with version in Software::found"); + return F; + } + + if ( ! info?$version ) + { + local sw = parse(info$unparsed_version); + info$unparsed_version = sw$unparsed_version; + info$name = sw$name; + info$version = sw$version; + } + + @if ( Cluster::is_enabled() ) + Cluster::publish_hrw(Cluster::proxy_pool, info$host, Software::register, + info); + @else + event Software::register(info); + @endif + + return T; + } diff --git a/scripts/base/frameworks/sumstats/__load__.bro b/scripts/base/frameworks/sumstats/__load__.zeek similarity index 100% rename from scripts/base/frameworks/sumstats/__load__.bro rename to scripts/base/frameworks/sumstats/__load__.zeek diff --git a/scripts/base/frameworks/sumstats/cluster.bro b/scripts/base/frameworks/sumstats/cluster.bro deleted file mode 100644 index f92b4112ff..0000000000 --- a/scripts/base/frameworks/sumstats/cluster.bro +++ /dev/null @@ -1,498 +0,0 @@ -##! This implements transparent cluster support for the SumStats framework. -##! Do not load this file directly. It's only meant to be loaded automatically -##! and will be if the cluster framework has been enabled. -##! The goal of this script is to make sumstats calculation completely and -##! transparently automated when running on a cluster. 
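To make the comparison semantics concrete, a tiny illustrative snippet (made-up version values, not part of this patch) for the Software::cmp_versions helper defined earlier in software/main.zeek: a missing sub-field compares as older than a present one, so 2.6 sorts before 2.6.1.

event zeek_init()
	{
	local a = Software::Version($major=2, $minor=6);
	local b = Software::Version($major=2, $minor=6, $minor2=1);
	# Prints -1: the record that lacks minor2 is treated as the older version.
	print Software::cmp_versions(a, b);
	}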
- -@load base/frameworks/cluster -@load ./main - -module SumStats; - -export { - ## The percent of the full threshold value that needs to be met on a - ## single worker for that worker to send the value to its manager in - ## order for it to request a global view for that value. There is no - ## requirement that the manager requests a global view for the key since - ## it may opt not to if it requested a global view for the key recently. - const cluster_request_global_view_percent = 0.2 &redef; - - ## This is to deal with intermediate update overload. A manager will - ## only allow this many intermediate update requests to the workers to - ## be inflight at any given time. Requested intermediate updates are - ## currently thrown out and not performed. In practice this should - ## hopefully have a minimal effect. - const max_outstanding_global_views = 10 &redef; - - ## Event sent by the manager in a cluster to initiate the collection of - ## values for a sumstat. - global cluster_ss_request: event(uid: string, ss_name: string, cleanup: bool); - - ## This event is sent by the manager in a cluster to initiate the - ## collection of a single key value from a sumstat. It's typically used - ## to get intermediate updates before the break interval triggers to - ## speed detection of a value crossing a threshold. - global cluster_get_result: event(uid: string, ss_name: string, key: Key, cleanup: bool); - - ## This event is sent by nodes in response to a - ## :bro:id:`SumStats::cluster_get_result` event. - global cluster_send_result: event(uid: string, ss_name: string, key: Key, result: Result, cleanup: bool); - - ## This is sent by workers to indicate that they crossed the percent - ## of the current threshold by the percentage defined globally in - ## :bro:id:`SumStats::cluster_request_global_view_percent`. - global cluster_key_intermediate_response: event(ss_name: string, key: SumStats::Key); - - ## This event is scheduled internally on workers to send result chunks. - global send_data: event(uid: string, ss_name: string, data: ResultTable, cleanup: bool); - - global get_a_key: event(uid: string, ss_name: string, cleanup: bool &default=F); - - global send_a_key: event(uid: string, ss_name: string, key: Key); - global send_no_key: event(uid: string, ss_name: string); - - ## This event is generated when a threshold is crossed. - global cluster_threshold_crossed: event(ss_name: string, key: SumStats::Key, thold_index: count); -} - -# This variable is maintained to know what keys have recently sent or received -# intermediate updates so they don't overwhelm the manager. -global recent_global_view_keys: set[string, Key] &create_expire=1min; - -@if ( Cluster::local_node_type() != Cluster::MANAGER ) - -event bro_init() &priority=100 - { - Broker::auto_publish(Cluster::manager_topic, SumStats::cluster_send_result); - Broker::auto_publish(Cluster::manager_topic, SumStats::cluster_key_intermediate_response); - Broker::auto_publish(Cluster::manager_topic, SumStats::send_a_key); - Broker::auto_publish(Cluster::manager_topic, SumStats::send_no_key); - } - -# Result tables indexed on a uid that are currently being sent to the -# manager. -global sending_results: table[string] of ResultTable = table() &read_expire=1min; - -# This is done on all non-manager node types in the event that a sumstat is -# being collected somewhere other than a worker. 
-function data_added(ss: SumStat, key: Key, result: Result) - { - # If an intermediate update for this key was sent recently, don't send it again - if ( [ss$name, key] in recent_global_view_keys ) - return; - - # If val is 5 and global view % is 0.1 (10%), pct_val will be 50. If that - # crosses the full threshold then it's a candidate to send as an - # intermediate update. - if ( check_thresholds(ss, key, result, cluster_request_global_view_percent) ) - { - # kick off intermediate update - event SumStats::cluster_key_intermediate_response(ss$name, key); - add recent_global_view_keys[ss$name, key]; - } - } - -event SumStats::get_a_key(uid: string, ss_name: string, cleanup: bool) - { - if ( uid in sending_results ) - { - if ( |sending_results[uid]| == 0 ) - { - event SumStats::send_no_key(uid, ss_name); - } - else - { - for ( key in sending_results[uid] ) - { - event SumStats::send_a_key(uid, ss_name, key); - # break to only send one. - break; - } - } - } - else if ( !cleanup && ss_name in result_store && |result_store[ss_name]| > 0 ) - { - if ( |result_store[ss_name]| == 0 ) - { - event SumStats::send_no_key(uid, ss_name); - } - else - { - for ( key in result_store[ss_name] ) - { - event SumStats::send_a_key(uid, ss_name, key); - # break to only send one. - break; - } - } - } - else - { - event SumStats::send_no_key(uid, ss_name); - } - } - -event SumStats::cluster_ss_request(uid: string, ss_name: string, cleanup: bool) - { - #print fmt("WORKER %s: received the cluster_ss_request event for %s.", Cluster::node, id); - - # Create a back store for the result - sending_results[uid] = (ss_name in result_store) ? result_store[ss_name] : table(); - - # Lookup the actual sumstats and reset it, the reference to the data - # currently stored will be maintained internally from the - # sending_results table. - if ( cleanup && ss_name in stats_store ) - reset(stats_store[ss_name]); - } - -event SumStats::cluster_get_result(uid: string, ss_name: string, key: Key, cleanup: bool) - { - #print fmt("WORKER %s: received the cluster_get_result event for %s=%s.", Cluster::node, key2str(key), data); - - if ( cleanup ) # data will implicitly be in sending_results (i know this isn't great) - { - if ( uid in sending_results && key in sending_results[uid] ) - { - # Note: copy is needed to compensate serialization caching issue. This should be - # changed to something else later. - event SumStats::cluster_send_result(uid, ss_name, key, copy(sending_results[uid][key]), cleanup); - delete sending_results[uid][key]; - } - else - { - # We need to send an empty response if we don't have the data so that the manager - # can know that it heard back from all of the workers. - event SumStats::cluster_send_result(uid, ss_name, key, table(), cleanup); - } - } - else - { - if ( ss_name in result_store && key in result_store[ss_name] ) - { - # Note: copy is needed to compensate serialization caching issue. This should be - # changed to something else later. - event SumStats::cluster_send_result(uid, ss_name, key, copy(result_store[ss_name][key]), cleanup); - } - else - { - # We need to send an empty response if we don't have the data so that the manager - # can know that it heard back from all of the workers. 
- event SumStats::cluster_send_result(uid, ss_name, key, table(), cleanup); - } - } - } - -event SumStats::cluster_threshold_crossed(ss_name: string, key: SumStats::Key, thold_index: count) - { - if ( ss_name !in threshold_tracker ) - threshold_tracker[ss_name] = table(); - - threshold_tracker[ss_name][key] = thold_index; - } - -# request-key is a non-op on the workers. -# It only should be called by the manager. Due to the fact that we usually run the same scripts on the -# workers and the manager, it might also be called by the workers, so we just ignore it here. -# -# There is a small chance that people will try running it on events that are just thrown on the workers. -# This does not work at the moment and we cannot throw an error message, because we cannot distinguish it -# from the "script is running it everywhere" case. But - people should notice that they do not get results. -# Not entirely pretty, sorry :( -function request_key(ss_name: string, key: Key): Result - { - return Result(); - } - -@endif - - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) - -event bro_init() &priority=100 - { - Broker::auto_publish(Cluster::worker_topic, SumStats::cluster_ss_request); - Broker::auto_publish(Cluster::worker_topic, SumStats::cluster_get_result); - Broker::auto_publish(Cluster::worker_topic, SumStats::cluster_threshold_crossed); - Broker::auto_publish(Cluster::worker_topic, SumStats::get_a_key); - } - -# This variable is maintained by manager nodes as they collect and aggregate -# results. -# Index on a uid. -global stats_keys: table[string] of set[Key] &read_expire=1min - &expire_func=function(s: table[string] of set[Key], idx: string): interval - { - Reporter::warning(fmt("SumStat key request for the %s SumStat uid took longer than 1 minute and was automatically cancelled.", idx)); - return 0secs; - }; - -# This variable is maintained by manager nodes to track how many "dones" they -# collected per collection unique id. Once the number of results for a uid -# matches the number of peer nodes that results should be coming from, the -# result is written out and deleted from here. -# Indexed on a uid. -global done_with: table[string] of count &read_expire=1min &default=0; - -# This variable is maintained by managers to track intermediate responses as -# they are getting a global view for a certain key. -# Indexed on a uid. -global key_requests: table[string] of Result &read_expire=1min; - -# Store uids for dynamic requests here to avoid cleanup on the uid. -# (This needs to be done differently!) -global dynamic_requests: set[string] &read_expire=1min; - -# This variable is maintained by managers to prevent overwhelming communication due -# to too many intermediate updates. Each sumstat is tracked separately so that -# one won't overwhelm and degrade other quieter sumstats. -# Indexed on a sumstat id. -global outstanding_global_views: table[string] of set[string] &read_expire=1min; - -const zero_time = double_to_time(0.0); -# Managers handle logging. -event SumStats::finish_epoch(ss: SumStat) - { - if ( network_time() > zero_time ) - { - #print fmt("%.6f MANAGER: breaking %s sumstat", network_time(), ss$name); - local uid = unique_id(""); - - if ( uid in stats_keys ) - delete stats_keys[uid]; - stats_keys[uid] = set(); - - # Request data from peers. - event SumStats::cluster_ss_request(uid, ss$name, T); - - done_with[uid] = 0; - - #print fmt("get_key by uid: %s", uid); - event SumStats::get_a_key(uid, ss$name, T); - } - - # Schedule the next finish_epoch event. 
- schedule ss$epoch { SumStats::finish_epoch(ss) }; - } - -# This is unlikely to be called often, but it's here in -# case there are sumstats being collected by managers. -function data_added(ss: SumStat, key: Key, result: Result) - { - if ( check_thresholds(ss, key, result, 1.0) ) - { - threshold_crossed(ss, key, result); - event SumStats::cluster_threshold_crossed(ss$name, key, threshold_tracker[ss$name][key]); - } - } - -function handle_end_of_result_collection(uid: string, ss_name: string, key: Key, cleanup: bool) - { - if ( uid !in key_requests ) - { - Reporter::warning(fmt("Tried to handle end of result collection with missing uid in key_request sumstat:%s, key:%s.", ss_name, key)); - return; - } - - #print fmt("worker_count:%d :: done_with:%d", Cluster::worker_count, done_with[uid]); - local ss = stats_store[ss_name]; - local ir = key_requests[uid]; - if ( check_thresholds(ss, key, ir, 1.0) ) - { - threshold_crossed(ss, key, ir); - event SumStats::cluster_threshold_crossed(ss_name, key, threshold_tracker[ss_name][key]); - } - - if ( cleanup ) - { - # This is done here because "cleanup" implicitly means - # it's the end of an epoch. - if ( ss?$epoch_result && |ir| > 0 ) - { - local now = network_time(); - ss$epoch_result(now, key, ir); - } - - } - # Check if this was an intermediate update - if ( ss_name in outstanding_global_views ) - delete outstanding_global_views[ss_name][uid]; - - delete key_requests[uid]; - delete done_with[uid]; - } - -function request_all_current_keys(uid: string, ss_name: string, cleanup: bool) - { - #print "request_all_current_keys"; - if ( uid in stats_keys && |stats_keys[uid]| > 0 ) - { - #print fmt(" -- %d remaining keys here", |stats_keys[uid]|); - for ( key in stats_keys[uid] ) - { - done_with[uid] = 0; - event SumStats::cluster_get_result(uid, ss_name, key, cleanup); - delete stats_keys[uid][key]; - break; # only a single key - } - } - else - { - # Get more keys! And this breaks us out of the evented loop. - done_with[uid] = 0; - #print fmt("get_key by uid: %s", uid); - event SumStats::get_a_key(uid, ss_name, cleanup); - } - } - -event SumStats::send_no_key(uid: string, ss_name: string) - { - #print "send_no_key"; - - if ( uid !in done_with ) - done_with[uid] = 0; - - ++done_with[uid]; - if ( Cluster::worker_count == done_with[uid] ) - { - delete done_with[uid]; - - if ( uid in stats_keys && |stats_keys[uid]| > 0 ) - { - #print "we need more keys!"; - # Now that we have a key from each worker, lets - # grab all of the results. - request_all_current_keys(uid, ss_name, T); - } - else - { - #print "we're out of keys!"; - local ss = stats_store[ss_name]; - if ( ss?$epoch_finished ) - ss$epoch_finished(network_time()); - - delete stats_keys[uid]; - reset(ss); - } - } - } - -event SumStats::send_a_key(uid: string, ss_name: string, key: Key) - { - #print fmt("send_a_key %s", key); - if ( uid !in stats_keys ) - { - Reporter::warning(fmt("Manager received a uid for an unknown request. SumStat: %s, Key: %s", ss_name, key)); - return; - } - - if ( key !in stats_keys[uid] ) - add stats_keys[uid][key]; - - ++done_with[uid]; - if ( Cluster::worker_count == done_with[uid] ) - { - delete done_with[uid]; - - if ( |stats_keys[uid]| > 0 ) - { - #print "we need more keys!"; - # Now that we have a key from each worker, lets - # grab all of the results. 
- request_all_current_keys(uid, ss_name, T); - } - else - { - #print "we're out of keys!"; - local ss = stats_store[ss_name]; - if ( ss?$epoch_finished ) - ss$epoch_finished(network_time()); - - reset(ss); - } - } - } - -event SumStats::cluster_send_result(uid: string, ss_name: string, key: Key, result: Result, cleanup: bool) - { - #print "cluster_send_result"; - #print fmt("%0.6f MANAGER: receiving key data from %s - %s=%s", network_time(), get_event_peer()$descr, key2str(key), result); - - # We only want to try and do a value merge if there are actually measured datapoints - # in the Result. - if ( uid !in key_requests || |key_requests[uid]| == 0 ) - key_requests[uid] = result; - else - key_requests[uid] = compose_results(key_requests[uid], result); - - # Mark that a worker is done. - if ( uid !in done_with ) - done_with[uid] = 0; - - #print fmt("MANAGER: got a result for %s %s from %s", uid, key, get_event_peer()$descr); - ++done_with[uid]; - - if ( uid !in dynamic_requests && - uid in done_with && Cluster::worker_count == done_with[uid] ) - { - handle_end_of_result_collection(uid, ss_name, key, cleanup); - - if ( cleanup ) - request_all_current_keys(uid, ss_name, cleanup); - } - } - -# Managers handle intermediate updates here. -event SumStats::cluster_key_intermediate_response(ss_name: string, key: Key) - { - #print fmt("MANAGER: receiving intermediate key data from %s", get_event_peer()$descr); - #print fmt("MANAGER: requesting key data for %s", key); - # If an intermediate update for this key was handled recently, don't do it again - if ( [ss_name, key] in recent_global_view_keys ) - return; - add recent_global_view_keys[ss_name, key]; - - if ( ss_name !in outstanding_global_views) - outstanding_global_views[ss_name] = set(); - else if ( |outstanding_global_views[ss_name]| > max_outstanding_global_views ) - { - # Don't do this intermediate update. Perhaps at some point in the future - # we will queue and randomly select from these ignored intermediate - # update requests. - return; - } - - local uid = unique_id(""); - add outstanding_global_views[ss_name][uid]; - done_with[uid] = 0; - #print fmt("requesting results for: %s", uid); - event SumStats::cluster_get_result(uid, ss_name, key, F); - } - -function request_key(ss_name: string, key: Key): Result - { - local uid = unique_id(""); - done_with[uid] = 0; - key_requests[uid] = table(); - add dynamic_requests[uid]; - - event SumStats::cluster_get_result(uid, ss_name, key, F); - return when ( uid in done_with && Cluster::worker_count == done_with[uid] ) - { - #print "done with request_key"; - local result = key_requests[uid]; - # Clean up - delete key_requests[uid]; - delete done_with[uid]; - delete dynamic_requests[uid]; - - return result; - } - timeout 1.1min - { - Reporter::warning(fmt("Dynamic SumStat key request for %s in SumStat %s took longer than 1 minute and was automatically cancelled.", key, ss_name)); - return Result(); - } - } - -@endif diff --git a/scripts/base/frameworks/sumstats/cluster.zeek b/scripts/base/frameworks/sumstats/cluster.zeek new file mode 100644 index 0000000000..d2633afd87 --- /dev/null +++ b/scripts/base/frameworks/sumstats/cluster.zeek @@ -0,0 +1,498 @@ +##! This implements transparent cluster support for the SumStats framework. +##! Do not load this file directly. It's only meant to be loaded automatically +##! and will be if the cluster framework has been enabled. +##! The goal of this script is to make sumstats calculation completely and +##! transparently automated when running on a cluster. 
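For orientation, a rough sketch of the collection round-trip that the handlers below implement, using the event names defined in this file (ordering simplified, one key in flight at a time):

#   manager : finish_epoch fires and publishes cluster_ss_request plus get_a_key to the workers
#   workers : answer with send_a_key / send_no_key so the manager learns which keys exist
#   manager : requests each key via cluster_get_result
#   workers : reply with cluster_send_result (an empty result still counts as an answer)
#   manager : compose_results(), threshold checks, then epoch_result() / epoch_finished()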
+ +@load base/frameworks/cluster +@load ./main + +module SumStats; + +export { + ## The percent of the full threshold value that needs to be met on a + ## single worker for that worker to send the value to its manager in + ## order for it to request a global view for that value. There is no + ## requirement that the manager requests a global view for the key since + ## it may opt not to if it requested a global view for the key recently. + const cluster_request_global_view_percent = 0.2 &redef; + + ## This is to deal with intermediate update overload. A manager will + ## only allow this many intermediate update requests to the workers to + ## be inflight at any given time. Requested intermediate updates are + ## currently thrown out and not performed. In practice this should + ## hopefully have a minimal effect. + const max_outstanding_global_views = 10 &redef; + + ## Event sent by the manager in a cluster to initiate the collection of + ## values for a sumstat. + global cluster_ss_request: event(uid: string, ss_name: string, cleanup: bool); + + ## This event is sent by the manager in a cluster to initiate the + ## collection of a single key value from a sumstat. It's typically used + ## to get intermediate updates before the break interval triggers to + ## speed detection of a value crossing a threshold. + global cluster_get_result: event(uid: string, ss_name: string, key: Key, cleanup: bool); + + ## This event is sent by nodes in response to a + ## :zeek:id:`SumStats::cluster_get_result` event. + global cluster_send_result: event(uid: string, ss_name: string, key: Key, result: Result, cleanup: bool); + + ## This is sent by workers to indicate that they crossed the percent + ## of the current threshold by the percentage defined globally in + ## :zeek:id:`SumStats::cluster_request_global_view_percent`. + global cluster_key_intermediate_response: event(ss_name: string, key: SumStats::Key); + + ## This event is scheduled internally on workers to send result chunks. + global send_data: event(uid: string, ss_name: string, data: ResultTable, cleanup: bool); + + global get_a_key: event(uid: string, ss_name: string, cleanup: bool &default=F); + + global send_a_key: event(uid: string, ss_name: string, key: Key); + global send_no_key: event(uid: string, ss_name: string); + + ## This event is generated when a threshold is crossed. + global cluster_threshold_crossed: event(ss_name: string, key: SumStats::Key, thold_index: count); +} + +# This variable is maintained to know what keys have recently sent or received +# intermediate updates so they don't overwhelm the manager. +global recent_global_view_keys: set[string, Key] &create_expire=1min; + +@if ( Cluster::local_node_type() != Cluster::MANAGER ) + +event zeek_init() &priority=100 + { + Broker::auto_publish(Cluster::manager_topic, SumStats::cluster_send_result); + Broker::auto_publish(Cluster::manager_topic, SumStats::cluster_key_intermediate_response); + Broker::auto_publish(Cluster::manager_topic, SumStats::send_a_key); + Broker::auto_publish(Cluster::manager_topic, SumStats::send_no_key); + } + +# Result tables indexed on a uid that are currently being sent to the +# manager. +global sending_results: table[string] of ResultTable = table() &read_expire=1min; + +# This is done on all non-manager node types in the event that a sumstat is +# being collected somewhere other than a worker. 
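As a concrete illustration of the worker-side trigger documented in the export block above (hypothetical numbers, and assuming threshold_val simply returns the observed count): check_thresholds() divides the worker's local value by cluster_request_global_view_percent, so with the default of 0.2 a local value of 20 already "crosses" a threshold of 100, and data_added() below then publishes cluster_key_intermediate_response.

event zeek_init()
	{
	local pct = 0.2;        # default of SumStats::cluster_request_global_view_percent
	local local_val = 20.0; # what this worker has seen so far
	if ( local_val / pct >= 100.0 )
		print "worker would request an intermediate global view";
	}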
+function data_added(ss: SumStat, key: Key, result: Result) + { + # If an intermediate update for this key was sent recently, don't send it again + if ( [ss$name, key] in recent_global_view_keys ) + return; + + # If val is 5 and global view % is 0.1 (10%), pct_val will be 50. If that + # crosses the full threshold then it's a candidate to send as an + # intermediate update. + if ( check_thresholds(ss, key, result, cluster_request_global_view_percent) ) + { + # kick off intermediate update + event SumStats::cluster_key_intermediate_response(ss$name, key); + add recent_global_view_keys[ss$name, key]; + } + } + +event SumStats::get_a_key(uid: string, ss_name: string, cleanup: bool) + { + if ( uid in sending_results ) + { + if ( |sending_results[uid]| == 0 ) + { + event SumStats::send_no_key(uid, ss_name); + } + else + { + for ( key in sending_results[uid] ) + { + event SumStats::send_a_key(uid, ss_name, key); + # break to only send one. + break; + } + } + } + else if ( !cleanup && ss_name in result_store && |result_store[ss_name]| > 0 ) + { + if ( |result_store[ss_name]| == 0 ) + { + event SumStats::send_no_key(uid, ss_name); + } + else + { + for ( key in result_store[ss_name] ) + { + event SumStats::send_a_key(uid, ss_name, key); + # break to only send one. + break; + } + } + } + else + { + event SumStats::send_no_key(uid, ss_name); + } + } + +event SumStats::cluster_ss_request(uid: string, ss_name: string, cleanup: bool) + { + #print fmt("WORKER %s: received the cluster_ss_request event for %s.", Cluster::node, id); + + # Create a back store for the result + sending_results[uid] = (ss_name in result_store) ? result_store[ss_name] : table(); + + # Lookup the actual sumstats and reset it, the reference to the data + # currently stored will be maintained internally from the + # sending_results table. + if ( cleanup && ss_name in stats_store ) + reset(stats_store[ss_name]); + } + +event SumStats::cluster_get_result(uid: string, ss_name: string, key: Key, cleanup: bool) + { + #print fmt("WORKER %s: received the cluster_get_result event for %s=%s.", Cluster::node, key2str(key), data); + + if ( cleanup ) # data will implicitly be in sending_results (i know this isn't great) + { + if ( uid in sending_results && key in sending_results[uid] ) + { + # Note: copy is needed to compensate serialization caching issue. This should be + # changed to something else later. + event SumStats::cluster_send_result(uid, ss_name, key, copy(sending_results[uid][key]), cleanup); + delete sending_results[uid][key]; + } + else + { + # We need to send an empty response if we don't have the data so that the manager + # can know that it heard back from all of the workers. + event SumStats::cluster_send_result(uid, ss_name, key, table(), cleanup); + } + } + else + { + if ( ss_name in result_store && key in result_store[ss_name] ) + { + # Note: copy is needed to compensate serialization caching issue. This should be + # changed to something else later. + event SumStats::cluster_send_result(uid, ss_name, key, copy(result_store[ss_name][key]), cleanup); + } + else + { + # We need to send an empty response if we don't have the data so that the manager + # can know that it heard back from all of the workers. 
+ event SumStats::cluster_send_result(uid, ss_name, key, table(), cleanup); + } + } + } + +event SumStats::cluster_threshold_crossed(ss_name: string, key: SumStats::Key, thold_index: count) + { + if ( ss_name !in threshold_tracker ) + threshold_tracker[ss_name] = table(); + + threshold_tracker[ss_name][key] = thold_index; + } + +# request-key is a non-op on the workers. +# It only should be called by the manager. Due to the fact that we usually run the same scripts on the +# workers and the manager, it might also be called by the workers, so we just ignore it here. +# +# There is a small chance that people will try running it on events that are just thrown on the workers. +# This does not work at the moment and we cannot throw an error message, because we cannot distinguish it +# from the "script is running it everywhere" case. But - people should notice that they do not get results. +# Not entirely pretty, sorry :( +function request_key(ss_name: string, key: Key): Result + { + return Result(); + } + +@endif + + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + +event zeek_init() &priority=100 + { + Broker::auto_publish(Cluster::worker_topic, SumStats::cluster_ss_request); + Broker::auto_publish(Cluster::worker_topic, SumStats::cluster_get_result); + Broker::auto_publish(Cluster::worker_topic, SumStats::cluster_threshold_crossed); + Broker::auto_publish(Cluster::worker_topic, SumStats::get_a_key); + } + +# This variable is maintained by manager nodes as they collect and aggregate +# results. +# Index on a uid. +global stats_keys: table[string] of set[Key] &read_expire=1min + &expire_func=function(s: table[string] of set[Key], idx: string): interval + { + Reporter::warning(fmt("SumStat key request for the %s SumStat uid took longer than 1 minute and was automatically cancelled.", idx)); + return 0secs; + }; + +# This variable is maintained by manager nodes to track how many "dones" they +# collected per collection unique id. Once the number of results for a uid +# matches the number of peer nodes that results should be coming from, the +# result is written out and deleted from here. +# Indexed on a uid. +global done_with: table[string] of count &read_expire=1min &default=0; + +# This variable is maintained by managers to track intermediate responses as +# they are getting a global view for a certain key. +# Indexed on a uid. +global key_requests: table[string] of Result &read_expire=1min; + +# Store uids for dynamic requests here to avoid cleanup on the uid. +# (This needs to be done differently!) +global dynamic_requests: set[string] &read_expire=1min; + +# This variable is maintained by managers to prevent overwhelming communication due +# to too many intermediate updates. Each sumstat is tracked separately so that +# one won't overwhelm and degrade other quieter sumstats. +# Indexed on a sumstat id. +global outstanding_global_views: table[string] of set[string] &read_expire=1min; + +const zero_time = double_to_time(0.0); +# Managers handle logging. +event SumStats::finish_epoch(ss: SumStat) + { + if ( network_time() > zero_time ) + { + #print fmt("%.6f MANAGER: breaking %s sumstat", network_time(), ss$name); + local uid = unique_id(""); + + if ( uid in stats_keys ) + delete stats_keys[uid]; + stats_keys[uid] = set(); + + # Request data from peers. + event SumStats::cluster_ss_request(uid, ss$name, T); + + done_with[uid] = 0; + + #print fmt("get_key by uid: %s", uid); + event SumStats::get_a_key(uid, ss$name, T); + } + + # Schedule the next finish_epoch event. 
+ schedule ss$epoch { SumStats::finish_epoch(ss) }; + } + +# This is unlikely to be called often, but it's here in +# case there are sumstats being collected by managers. +function data_added(ss: SumStat, key: Key, result: Result) + { + if ( check_thresholds(ss, key, result, 1.0) ) + { + threshold_crossed(ss, key, result); + event SumStats::cluster_threshold_crossed(ss$name, key, threshold_tracker[ss$name][key]); + } + } + +function handle_end_of_result_collection(uid: string, ss_name: string, key: Key, cleanup: bool) + { + if ( uid !in key_requests ) + { + Reporter::warning(fmt("Tried to handle end of result collection with missing uid in key_request sumstat:%s, key:%s.", ss_name, key)); + return; + } + + #print fmt("worker_count:%d :: done_with:%d", Cluster::worker_count, done_with[uid]); + local ss = stats_store[ss_name]; + local ir = key_requests[uid]; + if ( check_thresholds(ss, key, ir, 1.0) ) + { + threshold_crossed(ss, key, ir); + event SumStats::cluster_threshold_crossed(ss_name, key, threshold_tracker[ss_name][key]); + } + + if ( cleanup ) + { + # This is done here because "cleanup" implicitly means + # it's the end of an epoch. + if ( ss?$epoch_result && |ir| > 0 ) + { + local now = network_time(); + ss$epoch_result(now, key, ir); + } + + } + # Check if this was an intermediate update + if ( ss_name in outstanding_global_views ) + delete outstanding_global_views[ss_name][uid]; + + delete key_requests[uid]; + delete done_with[uid]; + } + +function request_all_current_keys(uid: string, ss_name: string, cleanup: bool) + { + #print "request_all_current_keys"; + if ( uid in stats_keys && |stats_keys[uid]| > 0 ) + { + #print fmt(" -- %d remaining keys here", |stats_keys[uid]|); + for ( key in stats_keys[uid] ) + { + done_with[uid] = 0; + event SumStats::cluster_get_result(uid, ss_name, key, cleanup); + delete stats_keys[uid][key]; + break; # only a single key + } + } + else + { + # Get more keys! And this breaks us out of the evented loop. + done_with[uid] = 0; + #print fmt("get_key by uid: %s", uid); + event SumStats::get_a_key(uid, ss_name, cleanup); + } + } + +event SumStats::send_no_key(uid: string, ss_name: string) + { + #print "send_no_key"; + + if ( uid !in done_with ) + done_with[uid] = 0; + + ++done_with[uid]; + if ( Cluster::worker_count == done_with[uid] ) + { + delete done_with[uid]; + + if ( uid in stats_keys && |stats_keys[uid]| > 0 ) + { + #print "we need more keys!"; + # Now that we have a key from each worker, lets + # grab all of the results. + request_all_current_keys(uid, ss_name, T); + } + else + { + #print "we're out of keys!"; + local ss = stats_store[ss_name]; + if ( ss?$epoch_finished ) + ss$epoch_finished(network_time()); + + delete stats_keys[uid]; + reset(ss); + } + } + } + +event SumStats::send_a_key(uid: string, ss_name: string, key: Key) + { + #print fmt("send_a_key %s", key); + if ( uid !in stats_keys ) + { + Reporter::warning(fmt("Manager received a uid for an unknown request. SumStat: %s, Key: %s", ss_name, key)); + return; + } + + if ( key !in stats_keys[uid] ) + add stats_keys[uid][key]; + + ++done_with[uid]; + if ( Cluster::worker_count == done_with[uid] ) + { + delete done_with[uid]; + + if ( |stats_keys[uid]| > 0 ) + { + #print "we need more keys!"; + # Now that we have a key from each worker, lets + # grab all of the results. 
+ request_all_current_keys(uid, ss_name, T); + } + else + { + #print "we're out of keys!"; + local ss = stats_store[ss_name]; + if ( ss?$epoch_finished ) + ss$epoch_finished(network_time()); + + reset(ss); + } + } + } + +event SumStats::cluster_send_result(uid: string, ss_name: string, key: Key, result: Result, cleanup: bool) + { + #print "cluster_send_result"; + #print fmt("%0.6f MANAGER: receiving key data from %s - %s=%s", network_time(), get_event_peer()$descr, key2str(key), result); + + # We only want to try and do a value merge if there are actually measured datapoints + # in the Result. + if ( uid !in key_requests || |key_requests[uid]| == 0 ) + key_requests[uid] = result; + else + key_requests[uid] = compose_results(key_requests[uid], result); + + # Mark that a worker is done. + if ( uid !in done_with ) + done_with[uid] = 0; + + #print fmt("MANAGER: got a result for %s %s from %s", uid, key, get_event_peer()$descr); + ++done_with[uid]; + + if ( uid !in dynamic_requests && + uid in done_with && Cluster::worker_count == done_with[uid] ) + { + handle_end_of_result_collection(uid, ss_name, key, cleanup); + + if ( cleanup ) + request_all_current_keys(uid, ss_name, cleanup); + } + } + +# Managers handle intermediate updates here. +event SumStats::cluster_key_intermediate_response(ss_name: string, key: Key) + { + #print fmt("MANAGER: receiving intermediate key data from %s", get_event_peer()$descr); + #print fmt("MANAGER: requesting key data for %s", key); + # If an intermediate update for this key was handled recently, don't do it again + if ( [ss_name, key] in recent_global_view_keys ) + return; + add recent_global_view_keys[ss_name, key]; + + if ( ss_name !in outstanding_global_views) + outstanding_global_views[ss_name] = set(); + else if ( |outstanding_global_views[ss_name]| > max_outstanding_global_views ) + { + # Don't do this intermediate update. Perhaps at some point in the future + # we will queue and randomly select from these ignored intermediate + # update requests. + return; + } + + local uid = unique_id(""); + add outstanding_global_views[ss_name][uid]; + done_with[uid] = 0; + #print fmt("requesting results for: %s", uid); + event SumStats::cluster_get_result(uid, ss_name, key, F); + } + +function request_key(ss_name: string, key: Key): Result + { + local uid = unique_id(""); + done_with[uid] = 0; + key_requests[uid] = table(); + add dynamic_requests[uid]; + + event SumStats::cluster_get_result(uid, ss_name, key, F); + return when ( uid in done_with && Cluster::worker_count == done_with[uid] ) + { + #print "done with request_key"; + local result = key_requests[uid]; + # Clean up + delete key_requests[uid]; + delete done_with[uid]; + delete dynamic_requests[uid]; + + return result; + } + timeout 1.1min + { + Reporter::warning(fmt("Dynamic SumStat key request for %s in SumStat %s took longer than 1 minute and was automatically cancelled.", key, ss_name)); + return Result(); + } + } + +@endif diff --git a/scripts/base/frameworks/sumstats/main.bro b/scripts/base/frameworks/sumstats/main.bro deleted file mode 100644 index a37877f7e8..0000000000 --- a/scripts/base/frameworks/sumstats/main.bro +++ /dev/null @@ -1,534 +0,0 @@ -##! The summary statistics framework provides a way to -##! summarize large streams of data into simple reduced -##! measurements. - -module SumStats; - -export { - ## Type to represent the calculations that are available. The calculations - ## are all defined as plugins. 
- type Calculation: enum { - PLACEHOLDER - }; - - ## Represents a thing which is having summarization - ## results collected for it. - type Key: record { - ## A non-address related summarization or a sub-key for - ## an address based summarization. An example might be - ## successful SSH connections by client IP address - ## where the client string would be the key value. - ## Another example might be number of HTTP requests to - ## a particular value in a Host header. This is an - ## example of a non-host based metric since multiple - ## IP addresses could respond for the same Host - ## header value. - str: string &optional; - - ## Host is the value to which this metric applies. - host: addr &optional; - }; - - ## Represents data being added for a single observation. - ## Only supply a single field at a time! - type Observation: record { - ## Count value. - num: count &optional; - ## Double value. - dbl: double &optional; - ## String value. - str: string &optional; - }; - - ## Represents a reducer. - type Reducer: record { - ## Observation stream identifier for the reducer - ## to attach to. - stream: string; - - ## The calculations to perform on the data points. - apply: set[Calculation]; - - ## A predicate so that you can decide per key if you - ## would like to accept the data being inserted. - pred: function(key: SumStats::Key, obs: SumStats::Observation): bool &optional; - - ## A function to normalize the key. This can be used to - ## aggregate or normalize the entire key. - normalize_key: function(key: SumStats::Key): Key &optional; - }; - - ## Result calculated for an observation stream fed into a reducer. - ## Most of the fields are added by plugins. - type ResultVal: record { - ## The time when the first observation was added to - ## this result value. - begin: time; - - ## The time when the last observation was added to - ## this result value. - end: time; - - ## The number of observations received. - num: count &default=0; - }; - - ## Type to store a table of results for multiple reducers indexed by - ## observation stream identifier. - type Result: table[string] of ResultVal; - - ## Type to store a table of sumstats results indexed by keys. - type ResultTable: table[Key] of Result; - - ## Represents a SumStat, which consists of an aggregation of reducers along - ## with mechanisms to handle various situations like the epoch ending - ## or thresholds being crossed. - ## - ## It's best to not access any global state outside - ## of the variables given to the callbacks because there - ## is no assurance provided as to where the callbacks - ## will be executed on clusters. - type SumStat: record { - ## An arbitrary name for the sumstat so that it can - ## be referred to later. - name: string; - - ## The interval at which this filter should be "broken" - ## and the *epoch_result* callback called. The - ## results are also reset at this time so any threshold - ## based detection needs to be set to a - ## value that should be expected to happen within - ## this epoch. - epoch: interval; - - ## The reducers for the SumStat. - reducers: set[Reducer]; - - ## A function that will be called once for each observation in order - ## to calculate a value from the :bro:see:`SumStats::Result` structure - ## which will be used for thresholding. - ## This function is required if a *threshold* value or - ## a *threshold_series* is given. 
- threshold_val: function(key: SumStats::Key, result: SumStats::Result): double &optional; - - ## The threshold value for calling the *threshold_crossed* callback. - ## If you need more than one threshold value, then use - ## *threshold_series* instead. - threshold: double &optional; - - ## A series of thresholds for calling the *threshold_crossed* - ## callback. These thresholds must be listed in ascending order, - ## because a threshold is not checked until the preceding one has - ## been crossed. - threshold_series: vector of double &optional; - - ## A callback that is called when a threshold is crossed. - ## A threshold is crossed when the value returned from *threshold_val* - ## is greater than or equal to the threshold value, but only the first - ## time this happens within an epoch. - threshold_crossed: function(key: SumStats::Key, result: SumStats::Result) &optional; - - ## A callback that receives each of the results at the - ## end of the analysis epoch. The function will be - ## called once for each key. - epoch_result: function(ts: time, key: SumStats::Key, result: SumStats::Result) &optional; - - ## A callback that will be called when a single collection - ## interval is completed. The *ts* value will be the time of - ## when the collection started. - epoch_finished: function(ts:time) &optional; - }; - - ## Create a summary statistic. - ## - ## ss: The SumStat to create. - global create: function(ss: SumStats::SumStat); - - ## Add data into an observation stream. This should be - ## called when a script has measured some point value. - ## - ## id: The observation stream identifier that the data - ## point represents. - ## - ## key: The key that the value is related to. - ## - ## obs: The data point to send into the stream. - global observe: function(id: string, key: SumStats::Key, obs: SumStats::Observation); - - ## Dynamically request a sumstat key. This function should be - ## used sparingly and not as a replacement for the callbacks - ## from the :bro:see:`SumStats::SumStat` record. The function is only - ## available for use within "when" statements as an asynchronous - ## function. - ## - ## ss_name: SumStat name. - ## - ## key: The SumStat key being requested. - ## - ## Returns: The result for the requested sumstat key. - global request_key: function(ss_name: string, key: Key): Result; - - ## Helper function to represent a :bro:type:`SumStats::Key` value as - ## a simple string. - ## - ## key: The metric key that is to be converted into a string. - ## - ## Returns: A string representation of the metric key. - global key2str: function(key: SumStats::Key): string; -} - -# The function prototype for plugins to do calculations. -type ObserveFunc: function(r: Reducer, val: double, data: Observation, rv: ResultVal); - -redef record Reducer += { - # Internal use only. Provides a reference back to the related SumStats by its name. - ssname: string &optional; - - calc_funcs: vector of Calculation &optional; -}; - -# Internal use only. For tracking thresholds per sumstat and key. -# In the case of a single threshold, 0 means the threshold isn't crossed. -# In the case of a threshold series, the number tracks the threshold offset. 
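A small usage sketch of the entry points documented in the export block above (the API is the same in the renamed main.zeek). The stream name, epoch, and sumstat name are invented, and SumStats::SUM comes from the sum plugin under base/frameworks/sumstats/plugins, which is not part of this hunk; the $num field read in epoch_result is the plugin-independent observation counter from ResultVal.

event connection_attempt(c: connection)
	{
	SumStats::observe("conn.attempt",
	                  SumStats::Key($host=c$id$orig_h),
	                  SumStats::Observation($num=1));
	}

event zeek_init()
	{
	local r = SumStats::Reducer($stream="conn.attempt", $apply=set(SumStats::SUM));
	SumStats::create([$name="conn-attempt-counter",
	                  $epoch=5min,
	                  $reducers=set(r),
	                  $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) =
	                      {
	                      print fmt("%s made %d connection attempts this epoch",
	                                key$host, result["conn.attempt"]$num);
	                      }]);
	}

On a cluster the same script works unchanged: the cluster.zeek handlers shown earlier gather the per-key results from the workers before epoch_result runs on the manager.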
-global threshold_tracker: table[string] of table[Key] of count; - -function increment_threshold_tracker(ss_name: string, key: Key) - { - if ( ss_name !in threshold_tracker ) - threshold_tracker[ss_name] = table(); - if ( key !in threshold_tracker[ss_name] ) - threshold_tracker[ss_name][key] = 0; - - ++threshold_tracker[ss_name][key]; - } - -function get_threshold_index(ss_name: string, key: Key): count - { - if ( ss_name !in threshold_tracker ) - return 0; - if ( key !in threshold_tracker[ss_name] ) - return 0; - - return threshold_tracker[ss_name][key]; - } - -# Prototype the hook point for plugins to initialize any result values. -global init_resultval_hook: hook(r: Reducer, rv: ResultVal); - -# Prototype the hook point for plugins to merge Results. -global compose_resultvals_hook: hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal); - -# Store of sumstats indexed on the sumstat id. -global stats_store: table[string] of SumStat = table(); - -# Store of reducers indexed on the data point stream id. -global reducer_store: table[string] of set[Reducer] = table(); - -# Store of results indexed on the measurement id. -global result_store: table[string] of ResultTable = table(); - -# Store of threshold information. -global thresholds_store: table[string, Key] of bool = table(); - -# Store the calculations. -global calc_store: table[Calculation] of ObserveFunc = table(); - -# Store the dependencies for Calculations. -global calc_deps: table[Calculation] of vector of Calculation = table(); - -# Hook for registering observation calculation plugins. -global register_observe_plugins: hook(); - -# This is called whenever key values are updated and the new val is given as the -# `val` argument. It's only prototyped here because cluster and non-cluster have -# separate implementations. -global data_added: function(ss: SumStat, key: Key, result: Result); - -# Event that is used to "finish" measurements and adapt the measurement -# framework for clustered or non-clustered usage. -global finish_epoch: event(ss: SumStat); - -function key2str(key: Key): string - { - local out = ""; - if ( key?$host ) - out = fmt("%shost=%s", out, key$host); - if ( key?$str ) - out = fmt("%s%sstr=%s", out, |out|==0 ? "" : ", ", key$str); - return fmt("sumstats_key(%s)", out); - } - -function register_observe_plugin(calc: Calculation, func: ObserveFunc) - { - calc_store[calc] = func; - } - -function add_observe_plugin_dependency(calc: Calculation, depends_on: Calculation) - { - if ( calc !in calc_deps ) - calc_deps[calc] = vector(); - calc_deps[calc] += depends_on; - } - -event bro_init() &priority=100000 - { - # Call all of the plugin registration hooks - hook register_observe_plugins(); - } - -function init_resultval(r: Reducer): ResultVal - { - local rv: ResultVal = [$begin=network_time(), $end=network_time()]; - hook init_resultval_hook(r, rv); - return rv; - } - -function compose_resultvals(rv1: ResultVal, rv2: ResultVal): ResultVal - { - local result: ResultVal; - - result$begin = (rv1$begin < rv2$begin) ? rv1$begin : rv2$begin; - result$end = (rv1$end > rv2$end) ? rv1$end : rv2$end; - result$num = rv1$num + rv2$num; - - # Run the plugin composition hooks. 
- hook compose_resultvals_hook(result, rv1, rv2); - return result; - } - -function compose_results(r1: Result, r2: Result): Result - { - local result: Result = table(); - - for ( id, rv in r1 ) - { - result[id] = rv; - } - - for ( id, rv in r2 ) - { - if ( id in r1 ) - result[id] = compose_resultvals(r1[id], rv); - else - result[id] = rv; - } - - return result; - } - - -function reset(ss: SumStat) - { - if ( ss$name in result_store ) - delete result_store[ss$name]; - - result_store[ss$name] = table(); - - if ( ss$name in threshold_tracker ) - { - delete threshold_tracker[ss$name]; - threshold_tracker[ss$name] = table(); - } - } - -# This could potentially recurse forever, but plugin authors -# should be making sure they aren't causing reflexive dependencies. -function add_calc_deps(calcs: vector of Calculation, c: Calculation) - { - #print fmt("Checking for deps for %s", c); - for ( i in calc_deps[c] ) - { - local skip_calc=F; - for ( j in calcs ) - { - if ( calcs[j] == calc_deps[c][i] ) - skip_calc=T; - } - if ( ! skip_calc ) - { - if ( calc_deps[c][i] in calc_deps ) - add_calc_deps(calcs, calc_deps[c][i]); - calcs += calc_deps[c][i]; - #print fmt("add dep for %s [%s] ", c, calc_deps[c][i]); - } - } - - } - -function create(ss: SumStat) - { - if ( (ss?$threshold || ss?$threshold_series) && ! ss?$threshold_val ) - { - Reporter::error("SumStats given a threshold with no $threshold_val function"); - } - - stats_store[ss$name] = ss; - - if ( ss?$threshold || ss?$threshold_series ) - threshold_tracker[ss$name] = table(); - - for ( reducer in ss$reducers ) - { - reducer$ssname = ss$name; - reducer$calc_funcs = vector(); - for ( calc in reducer$apply ) - { - # Add in dependencies recursively. - if ( calc in calc_deps ) - add_calc_deps(reducer$calc_funcs, calc); - - # Don't add this calculation to the vector if - # it was already added by something else as a - # dependency. - local skip_calc=F; - for ( j in reducer$calc_funcs ) - { - if ( calc == reducer$calc_funcs[j] ) - skip_calc=T; - } - if ( ! skip_calc ) - reducer$calc_funcs += calc; - } - - if ( reducer$stream !in reducer_store ) - reducer_store[reducer$stream] = set(); - add reducer_store[reducer$stream][reducer]; - } - - reset(ss); - schedule ss$epoch { SumStats::finish_epoch(ss) }; - } - -function observe(id: string, orig_key: Key, obs: Observation) - { - if ( id !in reducer_store ) - return; - - # Try to add the data to all of the defined reducers. - for ( r in reducer_store[id] ) - { - local key = r?$normalize_key ? r$normalize_key(copy(orig_key)) : orig_key; - - # If this reducer has a predicate, run the predicate - # and skip this key if the predicate return false. - if ( r?$pred && ! r$pred(key, obs) ) - next; - - local ss = stats_store[r$ssname]; - - # If there is a threshold and no epoch_result callback - # we don't need to continue counting since the data will - # never be accessed. This was leading - # to some state management issues when measuring - # uniqueness. - # NOTE: this optimization could need removed in the - # future if on demand access is provided to the - # SumStats results. - if ( ! 
ss?$epoch_result && - r$ssname in threshold_tracker && - ( ss?$threshold && - key in threshold_tracker[r$ssname] && - threshold_tracker[r$ssname][key] != 0 ) || - ( ss?$threshold_series && - key in threshold_tracker[r$ssname] && - threshold_tracker[r$ssname][key] == |ss$threshold_series| ) ) - { - next; - } - - if ( r$ssname !in result_store ) - result_store[r$ssname] = table(); - local results = result_store[r$ssname]; - - if ( key !in results ) - results[key] = table(); - local result = results[key]; - - if ( id !in result ) - result[id] = init_resultval(r); - local result_val = result[id]; - - ++result_val$num; - # Continually update the $end field. - result_val$end=network_time(); - - # If a string was given, fall back to 1.0 as the value. - local val = 1.0; - if ( obs?$num ) - val = obs$num; - else if ( obs?$dbl ) - val = obs$dbl; - - for ( i in r$calc_funcs ) - calc_store[r$calc_funcs[i]](r, val, obs, result_val); - data_added(ss, key, result); - } - } - -# This function checks if a threshold has been crossed. It is also used as a method to implement -# mid-break-interval threshold crossing detection for cluster deployments. -function check_thresholds(ss: SumStat, key: Key, result: Result, modify_pct: double): bool - { - if ( ! (ss?$threshold || ss?$threshold_series || ss?$threshold_crossed) ) - return F; - - # Add in the extra ResultVals to make threshold_vals easier to write. - # This length comparison should work because we just need to make - # sure that we have the same number of reducers and results. - if ( |ss$reducers| != |result| ) - { - for ( reducer in ss$reducers ) - { - if ( reducer$stream !in result ) - result[reducer$stream] = init_resultval(reducer); - } - } - - local watch = ss$threshold_val(key, result); - - if ( modify_pct < 1.0 && modify_pct > 0.0 ) - watch = watch/modify_pct; - - local t_index = get_threshold_index(ss$name, key); - - if ( ss?$threshold && - t_index == 0 && # Check that the threshold hasn't already been crossed. - watch >= ss$threshold ) - { - # Value crossed the threshold. - return T; - } - - if ( ss?$threshold_series && - |ss$threshold_series| > t_index && # Check if there are more thresholds. - watch >= ss$threshold_series[t_index] ) - { - # A threshold series was given and the value crossed the next - # value in the series. - return T; - } - - return F; - } - -function threshold_crossed(ss: SumStat, key: Key, result: Result) - { - # If there is no callback, there is no point in any of this. - if ( ! ss?$threshold_crossed ) - return; - - increment_threshold_tracker(ss$name,key); - - # Add in the extra ResultVals to make threshold_crossed callbacks easier to write. - if ( |ss$reducers| != |result| ) - { - for ( reducer in ss$reducers ) - { - if ( reducer$stream !in result ) - result[reducer$stream] = init_resultval(reducer); - } - } - - ss$threshold_crossed(key, result); - } - diff --git a/scripts/base/frameworks/sumstats/main.zeek b/scripts/base/frameworks/sumstats/main.zeek new file mode 100644 index 0000000000..3f73d278e5 --- /dev/null +++ b/scripts/base/frameworks/sumstats/main.zeek @@ -0,0 +1,534 @@ +##! The summary statistics framework provides a way to +##! summarize large streams of data into simple reduced +##! measurements. + +module SumStats; + +export { + ## Type to represent the calculations that are available. The calculations + ## are all defined as plugins. + type Calculation: enum { + PLACEHOLDER + }; + + ## Represents a thing which is having summarization + ## results collected for it. 
+ type Key: record { + ## A non-address related summarization or a sub-key for + ## an address based summarization. An example might be + ## successful SSH connections by client IP address + ## where the client string would be the key value. + ## Another example might be number of HTTP requests to + ## a particular value in a Host header. This is an + ## example of a non-host based metric since multiple + ## IP addresses could respond for the same Host + ## header value. + str: string &optional; + + ## Host is the value to which this metric applies. + host: addr &optional; + }; + + ## Represents data being added for a single observation. + ## Only supply a single field at a time! + type Observation: record { + ## Count value. + num: count &optional; + ## Double value. + dbl: double &optional; + ## String value. + str: string &optional; + }; + + ## Represents a reducer. + type Reducer: record { + ## Observation stream identifier for the reducer + ## to attach to. + stream: string; + + ## The calculations to perform on the data points. + apply: set[Calculation]; + + ## A predicate so that you can decide per key if you + ## would like to accept the data being inserted. + pred: function(key: SumStats::Key, obs: SumStats::Observation): bool &optional; + + ## A function to normalize the key. This can be used to + ## aggregate or normalize the entire key. + normalize_key: function(key: SumStats::Key): Key &optional; + }; + + ## Result calculated for an observation stream fed into a reducer. + ## Most of the fields are added by plugins. + type ResultVal: record { + ## The time when the first observation was added to + ## this result value. + begin: time; + + ## The time when the last observation was added to + ## this result value. + end: time; + + ## The number of observations received. + num: count &default=0; + }; + + ## Type to store a table of results for multiple reducers indexed by + ## observation stream identifier. + type Result: table[string] of ResultVal; + + ## Type to store a table of sumstats results indexed by keys. + type ResultTable: table[Key] of Result; + + ## Represents a SumStat, which consists of an aggregation of reducers along + ## with mechanisms to handle various situations like the epoch ending + ## or thresholds being crossed. + ## + ## It's best to not access any global state outside + ## of the variables given to the callbacks because there + ## is no assurance provided as to where the callbacks + ## will be executed on clusters. + type SumStat: record { + ## An arbitrary name for the sumstat so that it can + ## be referred to later. + name: string; + + ## The interval at which this filter should be "broken" + ## and the *epoch_result* callback called. The + ## results are also reset at this time so any threshold + ## based detection needs to be set to a + ## value that should be expected to happen within + ## this epoch. + epoch: interval; + + ## The reducers for the SumStat. + reducers: set[Reducer]; + + ## A function that will be called once for each observation in order + ## to calculate a value from the :zeek:see:`SumStats::Result` structure + ## which will be used for thresholding. + ## This function is required if a *threshold* value or + ## a *threshold_series* is given. + threshold_val: function(key: SumStats::Key, result: SumStats::Result): double &optional; + + ## The threshold value for calling the *threshold_crossed* callback. + ## If you need more than one threshold value, then use + ## *threshold_series* instead. 
+ threshold: double &optional; + + ## A series of thresholds for calling the *threshold_crossed* + ## callback. These thresholds must be listed in ascending order, + ## because a threshold is not checked until the preceding one has + ## been crossed. + threshold_series: vector of double &optional; + + ## A callback that is called when a threshold is crossed. + ## A threshold is crossed when the value returned from *threshold_val* + ## is greater than or equal to the threshold value, but only the first + ## time this happens within an epoch. + threshold_crossed: function(key: SumStats::Key, result: SumStats::Result) &optional; + + ## A callback that receives each of the results at the + ## end of the analysis epoch. The function will be + ## called once for each key. + epoch_result: function(ts: time, key: SumStats::Key, result: SumStats::Result) &optional; + + ## A callback that will be called when a single collection + ## interval is completed. The *ts* value will be the time of + ## when the collection started. + epoch_finished: function(ts:time) &optional; + }; + + ## Create a summary statistic. + ## + ## ss: The SumStat to create. + global create: function(ss: SumStats::SumStat); + + ## Add data into an observation stream. This should be + ## called when a script has measured some point value. + ## + ## id: The observation stream identifier that the data + ## point represents. + ## + ## key: The key that the value is related to. + ## + ## obs: The data point to send into the stream. + global observe: function(id: string, key: SumStats::Key, obs: SumStats::Observation); + + ## Dynamically request a sumstat key. This function should be + ## used sparingly and not as a replacement for the callbacks + ## from the :zeek:see:`SumStats::SumStat` record. The function is only + ## available for use within "when" statements as an asynchronous + ## function. + ## + ## ss_name: SumStat name. + ## + ## key: The SumStat key being requested. + ## + ## Returns: The result for the requested sumstat key. + global request_key: function(ss_name: string, key: Key): Result; + + ## Helper function to represent a :zeek:type:`SumStats::Key` value as + ## a simple string. + ## + ## key: The metric key that is to be converted into a string. + ## + ## Returns: A string representation of the metric key. + global key2str: function(key: SumStats::Key): string; +} + +# The function prototype for plugins to do calculations. +type ObserveFunc: function(r: Reducer, val: double, data: Observation, rv: ResultVal); + +redef record Reducer += { + # Internal use only. Provides a reference back to the related SumStats by its name. + ssname: string &optional; + + calc_funcs: vector of Calculation &optional; +}; + +# Internal use only. For tracking thresholds per sumstat and key. +# In the case of a single threshold, 0 means the threshold isn't crossed. +# In the case of a threshold series, the number tracks the threshold offset. 
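For orientation, a minimal usage sketch of the exported API above, assuming the framework's bundled SUM calculation plugin is loaded (it provides SumStats::SUM and the $sum field of ResultVal); the "conn.attempt" stream and "conn-attempts" SumStat names are hypothetical:

event zeek_init()
	{
	# One reducer summing observations on the hypothetical "conn.attempt" stream.
	local r1: SumStats::Reducer = [$stream="conn.attempt", $apply=set(SumStats::SUM)];

	SumStats::create([$name="conn-attempts",
	                  $epoch=5mins,
	                  $reducers=set(r1),
	                  $threshold=100.0,
	                  $threshold_val(key: SumStats::Key, result: SumStats::Result) =
	                  	{
	                  	# The value compared against $threshold.
	                  	return result["conn.attempt"]$sum;
	                  	},
	                  $threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
	                  	{
	                  	# Fires at most once per key and epoch, per the docs above.
	                  	print fmt("%s made 100+ connection attempts this epoch", key$host);
	                  	}]);
	}

event new_connection(c: connection)
	{
	# Keyed by originator address; each observation contributes 1 to the sum.
	SumStats::observe("conn.attempt", [$host=c$id$orig_h], [$num=1]);
	}

As documented above, SumStats::request_key is only usable asynchronously from inside a when statement, so on-demand reads of a key's Result should be wrapped accordingly.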
+global threshold_tracker: table[string] of table[Key] of count; + +function increment_threshold_tracker(ss_name: string, key: Key) + { + if ( ss_name !in threshold_tracker ) + threshold_tracker[ss_name] = table(); + if ( key !in threshold_tracker[ss_name] ) + threshold_tracker[ss_name][key] = 0; + + ++threshold_tracker[ss_name][key]; + } + +function get_threshold_index(ss_name: string, key: Key): count + { + if ( ss_name !in threshold_tracker ) + return 0; + if ( key !in threshold_tracker[ss_name] ) + return 0; + + return threshold_tracker[ss_name][key]; + } + +# Prototype the hook point for plugins to initialize any result values. +global init_resultval_hook: hook(r: Reducer, rv: ResultVal); + +# Prototype the hook point for plugins to merge Results. +global compose_resultvals_hook: hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal); + +# Store of sumstats indexed on the sumstat id. +global stats_store: table[string] of SumStat = table(); + +# Store of reducers indexed on the data point stream id. +global reducer_store: table[string] of set[Reducer] = table(); + +# Store of results indexed on the measurement id. +global result_store: table[string] of ResultTable = table(); + +# Store of threshold information. +global thresholds_store: table[string, Key] of bool = table(); + +# Store the calculations. +global calc_store: table[Calculation] of ObserveFunc = table(); + +# Store the dependencies for Calculations. +global calc_deps: table[Calculation] of vector of Calculation = table(); + +# Hook for registering observation calculation plugins. +global register_observe_plugins: hook(); + +# This is called whenever key values are updated and the new val is given as the +# `val` argument. It's only prototyped here because cluster and non-cluster have +# separate implementations. +global data_added: function(ss: SumStat, key: Key, result: Result); + +# Event that is used to "finish" measurements and adapt the measurement +# framework for clustered or non-clustered usage. +global finish_epoch: event(ss: SumStat); + +function key2str(key: Key): string + { + local out = ""; + if ( key?$host ) + out = fmt("%shost=%s", out, key$host); + if ( key?$str ) + out = fmt("%s%sstr=%s", out, |out|==0 ? "" : ", ", key$str); + return fmt("sumstats_key(%s)", out); + } + +function register_observe_plugin(calc: Calculation, func: ObserveFunc) + { + calc_store[calc] = func; + } + +function add_observe_plugin_dependency(calc: Calculation, depends_on: Calculation) + { + if ( calc !in calc_deps ) + calc_deps[calc] = vector(); + calc_deps[calc] += depends_on; + } + +event zeek_init() &priority=100000 + { + # Call all of the plugin registration hooks + hook register_observe_plugins(); + } + +function init_resultval(r: Reducer): ResultVal + { + local rv: ResultVal = [$begin=network_time(), $end=network_time()]; + hook init_resultval_hook(r, rv); + return rv; + } + +function compose_resultvals(rv1: ResultVal, rv2: ResultVal): ResultVal + { + local result: ResultVal; + + result$begin = (rv1$begin < rv2$begin) ? rv1$begin : rv2$begin; + result$end = (rv1$end > rv2$end) ? rv1$end : rv2$end; + result$num = rv1$num + rv2$num; + + # Run the plugin composition hooks. 
+ hook compose_resultvals_hook(result, rv1, rv2); + return result; + } + +function compose_results(r1: Result, r2: Result): Result + { + local result: Result = table(); + + for ( id, rv in r1 ) + { + result[id] = rv; + } + + for ( id, rv in r2 ) + { + if ( id in r1 ) + result[id] = compose_resultvals(r1[id], rv); + else + result[id] = rv; + } + + return result; + } + + +function reset(ss: SumStat) + { + if ( ss$name in result_store ) + delete result_store[ss$name]; + + result_store[ss$name] = table(); + + if ( ss$name in threshold_tracker ) + { + delete threshold_tracker[ss$name]; + threshold_tracker[ss$name] = table(); + } + } + +# This could potentially recurse forever, but plugin authors +# should be making sure they aren't causing reflexive dependencies. +function add_calc_deps(calcs: vector of Calculation, c: Calculation) + { + #print fmt("Checking for deps for %s", c); + for ( i in calc_deps[c] ) + { + local skip_calc=F; + for ( j in calcs ) + { + if ( calcs[j] == calc_deps[c][i] ) + skip_calc=T; + } + if ( ! skip_calc ) + { + if ( calc_deps[c][i] in calc_deps ) + add_calc_deps(calcs, calc_deps[c][i]); + calcs += calc_deps[c][i]; + #print fmt("add dep for %s [%s] ", c, calc_deps[c][i]); + } + } + + } + +function create(ss: SumStat) + { + if ( (ss?$threshold || ss?$threshold_series) && ! ss?$threshold_val ) + { + Reporter::error("SumStats given a threshold with no $threshold_val function"); + } + + stats_store[ss$name] = ss; + + if ( ss?$threshold || ss?$threshold_series ) + threshold_tracker[ss$name] = table(); + + for ( reducer in ss$reducers ) + { + reducer$ssname = ss$name; + reducer$calc_funcs = vector(); + for ( calc in reducer$apply ) + { + # Add in dependencies recursively. + if ( calc in calc_deps ) + add_calc_deps(reducer$calc_funcs, calc); + + # Don't add this calculation to the vector if + # it was already added by something else as a + # dependency. + local skip_calc=F; + for ( j in reducer$calc_funcs ) + { + if ( calc == reducer$calc_funcs[j] ) + skip_calc=T; + } + if ( ! skip_calc ) + reducer$calc_funcs += calc; + } + + if ( reducer$stream !in reducer_store ) + reducer_store[reducer$stream] = set(); + add reducer_store[reducer$stream][reducer]; + } + + reset(ss); + schedule ss$epoch { SumStats::finish_epoch(ss) }; + } + +function observe(id: string, orig_key: Key, obs: Observation) + { + if ( id !in reducer_store ) + return; + + # Try to add the data to all of the defined reducers. + for ( r in reducer_store[id] ) + { + local key = r?$normalize_key ? r$normalize_key(copy(orig_key)) : orig_key; + + # If this reducer has a predicate, run the predicate + # and skip this key if the predicate return false. + if ( r?$pred && ! r$pred(key, obs) ) + next; + + local ss = stats_store[r$ssname]; + + # If there is a threshold and no epoch_result callback + # we don't need to continue counting since the data will + # never be accessed. This was leading + # to some state management issues when measuring + # uniqueness. + # NOTE: this optimization could need removed in the + # future if on demand access is provided to the + # SumStats results. + if ( ! 
ss?$epoch_result && + r$ssname in threshold_tracker && + ( ss?$threshold && + key in threshold_tracker[r$ssname] && + threshold_tracker[r$ssname][key] != 0 ) || + ( ss?$threshold_series && + key in threshold_tracker[r$ssname] && + threshold_tracker[r$ssname][key] == |ss$threshold_series| ) ) + { + next; + } + + if ( r$ssname !in result_store ) + result_store[r$ssname] = table(); + local results = result_store[r$ssname]; + + if ( key !in results ) + results[key] = table(); + local result = results[key]; + + if ( id !in result ) + result[id] = init_resultval(r); + local result_val = result[id]; + + ++result_val$num; + # Continually update the $end field. + result_val$end=network_time(); + + # If a string was given, fall back to 1.0 as the value. + local val = 1.0; + if ( obs?$num ) + val = obs$num; + else if ( obs?$dbl ) + val = obs$dbl; + + for ( i in r$calc_funcs ) + calc_store[r$calc_funcs[i]](r, val, obs, result_val); + data_added(ss, key, result); + } + } + +# This function checks if a threshold has been crossed. It is also used as a method to implement +# mid-break-interval threshold crossing detection for cluster deployments. +function check_thresholds(ss: SumStat, key: Key, result: Result, modify_pct: double): bool + { + if ( ! (ss?$threshold || ss?$threshold_series || ss?$threshold_crossed) ) + return F; + + # Add in the extra ResultVals to make threshold_vals easier to write. + # This length comparison should work because we just need to make + # sure that we have the same number of reducers and results. + if ( |ss$reducers| != |result| ) + { + for ( reducer in ss$reducers ) + { + if ( reducer$stream !in result ) + result[reducer$stream] = init_resultval(reducer); + } + } + + local watch = ss$threshold_val(key, result); + + if ( modify_pct < 1.0 && modify_pct > 0.0 ) + watch = watch/modify_pct; + + local t_index = get_threshold_index(ss$name, key); + + if ( ss?$threshold && + t_index == 0 && # Check that the threshold hasn't already been crossed. + watch >= ss$threshold ) + { + # Value crossed the threshold. + return T; + } + + if ( ss?$threshold_series && + |ss$threshold_series| > t_index && # Check if there are more thresholds. + watch >= ss$threshold_series[t_index] ) + { + # A threshold series was given and the value crossed the next + # value in the series. + return T; + } + + return F; + } + +function threshold_crossed(ss: SumStat, key: Key, result: Result) + { + # If there is no callback, there is no point in any of this. + if ( ! ss?$threshold_crossed ) + return; + + increment_threshold_tracker(ss$name,key); + + # Add in the extra ResultVals to make threshold_crossed callbacks easier to write. + if ( |ss$reducers| != |result| ) + { + for ( reducer in ss$reducers ) + { + if ( reducer$stream !in result ) + result[reducer$stream] = init_resultval(reducer); + } + } + + ss$threshold_crossed(key, result); + } + diff --git a/scripts/base/frameworks/sumstats/non-cluster.bro b/scripts/base/frameworks/sumstats/non-cluster.bro deleted file mode 100644 index b4292431c5..0000000000 --- a/scripts/base/frameworks/sumstats/non-cluster.bro +++ /dev/null @@ -1,89 +0,0 @@ -@load ./main - -module SumStats; - -event SumStats::process_epoch_result(ss: SumStat, now: time, data: ResultTable) - { - # TODO: is this the right processing group size? 
- local i = 50; - local keys_to_delete: vector of SumStats::Key = vector(); - - for ( key, res in data ) - { - ss$epoch_result(now, key, res); - keys_to_delete += key; - - if ( --i == 0 ) - break; - } - - for ( idx in keys_to_delete ) - delete data[keys_to_delete[idx]]; - - if ( |data| > 0 ) - # TODO: is this the right interval? - schedule 0.01 secs { SumStats::process_epoch_result(ss, now, data) }; - else if ( ss?$epoch_finished ) - ss$epoch_finished(now); - } - -event SumStats::finish_epoch(ss: SumStat) - { - if ( ss$name in result_store ) - { - if ( ss?$epoch_result ) - { - local data = result_store[ss$name]; - local now = network_time(); - if ( bro_is_terminating() ) - { - for ( key, val in data ) - ss$epoch_result(now, key, val); - - if ( ss?$epoch_finished ) - ss$epoch_finished(now); - } - else if ( |data| > 0 ) - { - event SumStats::process_epoch_result(ss, now, copy(data)); - } - } - - # We can reset here because we know that the reference - # to the data will be maintained by the process_epoch_result - # event. - reset(ss); - } - - schedule ss$epoch { SumStats::finish_epoch(ss) }; - } - -function data_added(ss: SumStat, key: Key, result: Result) - { - if ( check_thresholds(ss, key, result, 1.0) ) - threshold_crossed(ss, key, result); - } - -function request(ss_name: string): ResultTable - { - # This only needs to be implemented this way for cluster compatibility. - return when ( T ) - { - if ( ss_name in result_store ) - return result_store[ss_name]; - else - return table(); - } - } - -function request_key(ss_name: string, key: Key): Result - { - # This only needs to be implemented this way for cluster compatibility. - return when ( T ) - { - if ( ss_name in result_store && key in result_store[ss_name] ) - return result_store[ss_name][key]; - else - return table(); - } - } diff --git a/scripts/base/frameworks/sumstats/non-cluster.zeek b/scripts/base/frameworks/sumstats/non-cluster.zeek new file mode 100644 index 0000000000..630f36bbcd --- /dev/null +++ b/scripts/base/frameworks/sumstats/non-cluster.zeek @@ -0,0 +1,89 @@ +@load ./main + +module SumStats; + +event SumStats::process_epoch_result(ss: SumStat, now: time, data: ResultTable) + { + # TODO: is this the right processing group size? + local i = 50; + local keys_to_delete: vector of SumStats::Key = vector(); + + for ( key, res in data ) + { + ss$epoch_result(now, key, res); + keys_to_delete += key; + + if ( --i == 0 ) + break; + } + + for ( idx in keys_to_delete ) + delete data[keys_to_delete[idx]]; + + if ( |data| > 0 ) + # TODO: is this the right interval? + schedule 0.01 secs { SumStats::process_epoch_result(ss, now, data) }; + else if ( ss?$epoch_finished ) + ss$epoch_finished(now); + } + +event SumStats::finish_epoch(ss: SumStat) + { + if ( ss$name in result_store ) + { + if ( ss?$epoch_result ) + { + local data = result_store[ss$name]; + local now = network_time(); + if ( zeek_is_terminating() ) + { + for ( key, val in data ) + ss$epoch_result(now, key, val); + + if ( ss?$epoch_finished ) + ss$epoch_finished(now); + } + else if ( |data| > 0 ) + { + event SumStats::process_epoch_result(ss, now, copy(data)); + } + } + + # We can reset here because we know that the reference + # to the data will be maintained by the process_epoch_result + # event. 
+ reset(ss); + } + + schedule ss$epoch { SumStats::finish_epoch(ss) }; + } + +function data_added(ss: SumStat, key: Key, result: Result) + { + if ( check_thresholds(ss, key, result, 1.0) ) + threshold_crossed(ss, key, result); + } + +function request(ss_name: string): ResultTable + { + # This only needs to be implemented this way for cluster compatibility. + return when ( T ) + { + if ( ss_name in result_store ) + return result_store[ss_name]; + else + return table(); + } + } + +function request_key(ss_name: string, key: Key): Result + { + # This only needs to be implemented this way for cluster compatibility. + return when ( T ) + { + if ( ss_name in result_store && key in result_store[ss_name] ) + return result_store[ss_name][key]; + else + return table(); + } + } diff --git a/scripts/base/frameworks/sumstats/plugins/__load__.bro b/scripts/base/frameworks/sumstats/plugins/__load__.zeek similarity index 100% rename from scripts/base/frameworks/sumstats/plugins/__load__.bro rename to scripts/base/frameworks/sumstats/plugins/__load__.zeek diff --git a/scripts/base/frameworks/sumstats/plugins/average.bro b/scripts/base/frameworks/sumstats/plugins/average.zeek similarity index 100% rename from scripts/base/frameworks/sumstats/plugins/average.bro rename to scripts/base/frameworks/sumstats/plugins/average.zeek diff --git a/scripts/base/frameworks/sumstats/plugins/hll_unique.bro b/scripts/base/frameworks/sumstats/plugins/hll_unique.zeek similarity index 100% rename from scripts/base/frameworks/sumstats/plugins/hll_unique.bro rename to scripts/base/frameworks/sumstats/plugins/hll_unique.zeek diff --git a/scripts/base/frameworks/sumstats/plugins/last.bro b/scripts/base/frameworks/sumstats/plugins/last.bro deleted file mode 100644 index b12d854bbb..0000000000 --- a/scripts/base/frameworks/sumstats/plugins/last.bro +++ /dev/null @@ -1,73 +0,0 @@ -##! Keep the last X observations. - -@load base/frameworks/sumstats -@load base/utils/queue - -module SumStats; - -export { - redef enum Calculation += { - ## Keep last X observations in a queue. - LAST - }; - - redef record Reducer += { - ## Number of elements to keep. - num_last_elements: count &default=0; - }; - - redef record ResultVal += { - ## This is the queue where elements are maintained. - ## Don't access this value directly, instead use the - ## :bro:see:`SumStats::get_last` function to get a vector of - ## the current element values. - last_elements: Queue::Queue &optional; - }; - - ## Get a vector of element values from a ResultVal. - global get_last: function(rv: ResultVal): vector of Observation; -} - -function get_last(rv: ResultVal): vector of Observation - { - local s: vector of any = vector(); - - if ( rv?$last_elements ) - Queue::get_vector(rv$last_elements, s); - - local rval: vector of Observation = vector(); - - for ( i in s ) - # When using the cluster-ized version of SumStats, Queue's - # internal table storage uses "any" type for values, so we need - # to cast them here or else they may be left as Broker::Data from - # the unserialization process. - rval += s[i] as Observation; - - return rval; - } - -hook register_observe_plugins() - { - register_observe_plugin(LAST, function(r: Reducer, val: double, obs: Observation, rv: ResultVal) - { - if ( r$num_last_elements > 0 ) - { - if ( ! 
rv?$last_elements ) - rv$last_elements = Queue::init([$max_len=r$num_last_elements]); - Queue::put(rv$last_elements, obs); - } - }); - } - - -hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal) - { - # Merge $samples - if ( rv1?$last_elements && rv2?$last_elements ) - result$last_elements = Queue::merge(rv1$last_elements, rv2$last_elements); - else if ( rv1?$last_elements ) - result$last_elements = rv1$last_elements; - else if ( rv2?$last_elements ) - result$last_elements = rv2$last_elements; - } diff --git a/scripts/base/frameworks/sumstats/plugins/last.zeek b/scripts/base/frameworks/sumstats/plugins/last.zeek new file mode 100644 index 0000000000..a2c19f3f51 --- /dev/null +++ b/scripts/base/frameworks/sumstats/plugins/last.zeek @@ -0,0 +1,73 @@ +##! Keep the last X observations. + +@load base/frameworks/sumstats +@load base/utils/queue + +module SumStats; + +export { + redef enum Calculation += { + ## Keep last X observations in a queue. + LAST + }; + + redef record Reducer += { + ## Number of elements to keep. + num_last_elements: count &default=0; + }; + + redef record ResultVal += { + ## This is the queue where elements are maintained. + ## Don't access this value directly, instead use the + ## :zeek:see:`SumStats::get_last` function to get a vector of + ## the current element values. + last_elements: Queue::Queue &optional; + }; + + ## Get a vector of element values from a ResultVal. + global get_last: function(rv: ResultVal): vector of Observation; +} + +function get_last(rv: ResultVal): vector of Observation + { + local s: vector of any = vector(); + + if ( rv?$last_elements ) + Queue::get_vector(rv$last_elements, s); + + local rval: vector of Observation = vector(); + + for ( i in s ) + # When using the cluster-ized version of SumStats, Queue's + # internal table storage uses "any" type for values, so we need + # to cast them here or else they may be left as Broker::Data from + # the unserialization process. + rval += s[i] as Observation; + + return rval; + } + +hook register_observe_plugins() + { + register_observe_plugin(LAST, function(r: Reducer, val: double, obs: Observation, rv: ResultVal) + { + if ( r$num_last_elements > 0 ) + { + if ( ! 
rv?$last_elements ) + rv$last_elements = Queue::init([$max_len=r$num_last_elements]); + Queue::put(rv$last_elements, obs); + } + }); + } + + +hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal) + { + # Merge $samples + if ( rv1?$last_elements && rv2?$last_elements ) + result$last_elements = Queue::merge(rv1$last_elements, rv2$last_elements); + else if ( rv1?$last_elements ) + result$last_elements = rv1$last_elements; + else if ( rv2?$last_elements ) + result$last_elements = rv2$last_elements; + } diff --git a/scripts/base/frameworks/sumstats/plugins/max.bro b/scripts/base/frameworks/sumstats/plugins/max.zeek similarity index 100% rename from scripts/base/frameworks/sumstats/plugins/max.bro rename to scripts/base/frameworks/sumstats/plugins/max.zeek diff --git a/scripts/base/frameworks/sumstats/plugins/min.bro b/scripts/base/frameworks/sumstats/plugins/min.zeek similarity index 100% rename from scripts/base/frameworks/sumstats/plugins/min.bro rename to scripts/base/frameworks/sumstats/plugins/min.zeek diff --git a/scripts/base/frameworks/sumstats/plugins/sample.bro b/scripts/base/frameworks/sumstats/plugins/sample.zeek similarity index 100% rename from scripts/base/frameworks/sumstats/plugins/sample.bro rename to scripts/base/frameworks/sumstats/plugins/sample.zeek diff --git a/scripts/base/frameworks/sumstats/plugins/std-dev.bro b/scripts/base/frameworks/sumstats/plugins/std-dev.zeek similarity index 100% rename from scripts/base/frameworks/sumstats/plugins/std-dev.bro rename to scripts/base/frameworks/sumstats/plugins/std-dev.zeek diff --git a/scripts/base/frameworks/sumstats/plugins/sum.bro b/scripts/base/frameworks/sumstats/plugins/sum.zeek similarity index 100% rename from scripts/base/frameworks/sumstats/plugins/sum.bro rename to scripts/base/frameworks/sumstats/plugins/sum.zeek diff --git a/scripts/base/frameworks/sumstats/plugins/topk.bro b/scripts/base/frameworks/sumstats/plugins/topk.zeek similarity index 100% rename from scripts/base/frameworks/sumstats/plugins/topk.bro rename to scripts/base/frameworks/sumstats/plugins/topk.zeek diff --git a/scripts/base/frameworks/sumstats/plugins/unique.bro b/scripts/base/frameworks/sumstats/plugins/unique.zeek similarity index 100% rename from scripts/base/frameworks/sumstats/plugins/unique.bro rename to scripts/base/frameworks/sumstats/plugins/unique.zeek diff --git a/scripts/base/frameworks/sumstats/plugins/variance.bro b/scripts/base/frameworks/sumstats/plugins/variance.zeek similarity index 100% rename from scripts/base/frameworks/sumstats/plugins/variance.bro rename to scripts/base/frameworks/sumstats/plugins/variance.zeek diff --git a/scripts/base/frameworks/reporter/__load__.bro b/scripts/base/frameworks/tunnels/__load__.zeek similarity index 100% rename from scripts/base/frameworks/reporter/__load__.bro rename to scripts/base/frameworks/tunnels/__load__.zeek diff --git a/scripts/base/frameworks/tunnels/main.bro b/scripts/base/frameworks/tunnels/main.bro deleted file mode 100644 index f90616e38e..0000000000 --- a/scripts/base/frameworks/tunnels/main.bro +++ /dev/null @@ -1,151 +0,0 @@ -##! This script handles the tracking/logging of tunnels (e.g. Teredo, -##! AYIYA, or IP-in-IP such as 6to4 where "IP" is either IPv4 or IPv6). -##! -##! For any connection that occurs over a tunnel, information about its -##! encapsulating tunnels is also found in the *tunnel* field of -##! :bro:type:`connection`. - -module Tunnel; - -export { - ## The tunnel logging stream identifier. 
- redef enum Log::ID += { LOG }; - - ## Types of interesting activity that can occur with a tunnel. - type Action: enum { - ## A new tunnel (encapsulating "connection") has been seen. - DISCOVER, - ## A tunnel connection has closed. - CLOSE, - ## No new connections over a tunnel happened in the amount of - ## time indicated by :bro:see:`Tunnel::expiration_interval`. - EXPIRE, - }; - - ## The record type which contains column fields of the tunnel log. - type Info: record { - ## Time at which some tunnel activity occurred. - ts: time &log; - ## The unique identifier for the tunnel, which may correspond - ## to a :bro:type:`connection`'s *uid* field for non-IP-in-IP tunnels. - ## This is optional because there could be numerous connections - ## for payload proxies like SOCKS but we should treat it as a - ## single tunnel. - uid: string &log &optional; - ## The tunnel "connection" 4-tuple of endpoint addresses/ports. - ## For an IP tunnel, the ports will be 0. - id: conn_id &log; - ## The type of tunnel. - tunnel_type: Tunnel::Type &log; - ## The type of activity that occurred. - action: Action &log; - }; - - ## Logs all tunnels in an encapsulation chain with action - ## :bro:see:`Tunnel::DISCOVER` that aren't already in the - ## :bro:id:`Tunnel::active` table and adds them if not. - global register_all: function(ecv: EncapsulatingConnVector); - - ## Logs a single tunnel "connection" with action - ## :bro:see:`Tunnel::DISCOVER` if it's not already in the - ## :bro:id:`Tunnel::active` table and adds it if not. - global register: function(ec: EncapsulatingConn); - - ## Logs a single tunnel "connection" with action - ## :bro:see:`Tunnel::EXPIRE` and removes it from the - ## :bro:id:`Tunnel::active` table. - ## - ## t: A table of tunnels. - ## - ## idx: The index of the tunnel table corresponding to the tunnel to expire. - ## - ## Returns: 0secs, which when this function is used as an - ## :bro:attr:`&expire_func`, indicates to remove the element at - ## *idx* immediately. - global expire: function(t: table[conn_id] of Info, idx: conn_id): interval; - - ## Removes a single tunnel from the :bro:id:`Tunnel::active` table - ## and logs the closing/expiration of the tunnel. - ## - ## tunnel: The tunnel which has closed or expired. - ## - ## action: The specific reason for the tunnel ending. - global close: function(tunnel: Info, action: Action); - - ## The amount of time a tunnel is not used in establishment of new - ## connections before it is considered inactive/expired. - const expiration_interval = 1hrs &redef; - - ## Currently active tunnels. That is, tunnels for which new, - ## encapsulated connections have been seen in the interval indicated by - ## :bro:see:`Tunnel::expiration_interval`. 
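The Tunnel::active table declared just below leans on Zeek's table expiration attributes: &read_expire restarts an entry's timer on each read access, and the interval returned by the &expire_func postpones expiry by that amount, so returning 0secs lets the entry go right away. A minimal sketch of that pattern in isolation, using hypothetical names (pending, drop_entry):

# Called once an entry has gone 30 seconds without being read.
function drop_entry(t: table[conn_id] of count, idx: conn_id): interval
	{
	print fmt("expiring idle entry for %s", idx);
	return 0secs;  # a positive interval here would postpone the removal
	}

global pending: table[conn_id] of count = table()
	&read_expire=30secs &expire_func=drop_entry;

Tunnel::expire below follows the same shape: it logs the EXPIRE action via close() and returns 0secs so an idle tunnel is dropped from Tunnel::active immediately.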
- global active: table[conn_id] of Info = table() &read_expire=expiration_interval &expire_func=expire; -} - -const ayiya_ports = { 5072/udp }; -const teredo_ports = { 3544/udp }; -const gtpv1_ports = { 2152/udp, 2123/udp }; -redef likely_server_ports += { ayiya_ports, teredo_ports, gtpv1_ports, vxlan_ports }; - -event bro_init() &priority=5 - { - Log::create_stream(Tunnel::LOG, [$columns=Info, $path="tunnel"]); - - Analyzer::register_for_ports(Analyzer::ANALYZER_AYIYA, ayiya_ports); - Analyzer::register_for_ports(Analyzer::ANALYZER_TEREDO, teredo_ports); - Analyzer::register_for_ports(Analyzer::ANALYZER_GTPV1, gtpv1_ports); - Analyzer::register_for_ports(Analyzer::ANALYZER_VXLAN, vxlan_ports); - } - -function register_all(ecv: EncapsulatingConnVector) - { - for ( i in ecv ) - register(ecv[i]); - } - -function register(ec: EncapsulatingConn) - { - if ( ec$cid !in active ) - { - local tunnel: Info; - tunnel$ts = network_time(); - if ( ec?$uid ) - tunnel$uid = ec$uid; - tunnel$id = ec$cid; - tunnel$action = DISCOVER; - tunnel$tunnel_type = ec$tunnel_type; - active[ec$cid] = tunnel; - Log::write(LOG, tunnel); - } - } - -function close(tunnel: Info, action: Action) - { - tunnel$action = action; - tunnel$ts = network_time(); - Log::write(LOG, tunnel); - delete active[tunnel$id]; - } - -function expire(t: table[conn_id] of Info, idx: conn_id): interval - { - close(t[idx], EXPIRE); - return 0secs; - } - -event new_connection(c: connection) &priority=5 - { - if ( c?$tunnel ) - register_all(c$tunnel); - } - -event tunnel_changed(c: connection, e: EncapsulatingConnVector) &priority=5 - { - register_all(e); - } - -event connection_state_remove(c: connection) &priority=-5 - { - if ( c$id in active ) - close(active[c$id], CLOSE); - } diff --git a/scripts/base/frameworks/tunnels/main.zeek b/scripts/base/frameworks/tunnels/main.zeek new file mode 100644 index 0000000000..09441c177c --- /dev/null +++ b/scripts/base/frameworks/tunnels/main.zeek @@ -0,0 +1,151 @@ +##! This script handles the tracking/logging of tunnels (e.g. Teredo, +##! AYIYA, or IP-in-IP such as 6to4 where "IP" is either IPv4 or IPv6). +##! +##! For any connection that occurs over a tunnel, information about its +##! encapsulating tunnels is also found in the *tunnel* field of +##! :zeek:type:`connection`. + +module Tunnel; + +export { + ## The tunnel logging stream identifier. + redef enum Log::ID += { LOG }; + + ## Types of interesting activity that can occur with a tunnel. + type Action: enum { + ## A new tunnel (encapsulating "connection") has been seen. + DISCOVER, + ## A tunnel connection has closed. + CLOSE, + ## No new connections over a tunnel happened in the amount of + ## time indicated by :zeek:see:`Tunnel::expiration_interval`. + EXPIRE, + }; + + ## The record type which contains column fields of the tunnel log. + type Info: record { + ## Time at which some tunnel activity occurred. + ts: time &log; + ## The unique identifier for the tunnel, which may correspond + ## to a :zeek:type:`connection`'s *uid* field for non-IP-in-IP tunnels. + ## This is optional because there could be numerous connections + ## for payload proxies like SOCKS but we should treat it as a + ## single tunnel. + uid: string &log &optional; + ## The tunnel "connection" 4-tuple of endpoint addresses/ports. + ## For an IP tunnel, the ports will be 0. + id: conn_id &log; + ## The type of tunnel. + tunnel_type: Tunnel::Type &log; + ## The type of activity that occurred. 
+ action: Action &log; + }; + + ## Logs all tunnels in an encapsulation chain with action + ## :zeek:see:`Tunnel::DISCOVER` that aren't already in the + ## :zeek:id:`Tunnel::active` table and adds them if not. + global register_all: function(ecv: EncapsulatingConnVector); + + ## Logs a single tunnel "connection" with action + ## :zeek:see:`Tunnel::DISCOVER` if it's not already in the + ## :zeek:id:`Tunnel::active` table and adds it if not. + global register: function(ec: EncapsulatingConn); + + ## Logs a single tunnel "connection" with action + ## :zeek:see:`Tunnel::EXPIRE` and removes it from the + ## :zeek:id:`Tunnel::active` table. + ## + ## t: A table of tunnels. + ## + ## idx: The index of the tunnel table corresponding to the tunnel to expire. + ## + ## Returns: 0secs, which when this function is used as an + ## :zeek:attr:`&expire_func`, indicates to remove the element at + ## *idx* immediately. + global expire: function(t: table[conn_id] of Info, idx: conn_id): interval; + + ## Removes a single tunnel from the :zeek:id:`Tunnel::active` table + ## and logs the closing/expiration of the tunnel. + ## + ## tunnel: The tunnel which has closed or expired. + ## + ## action: The specific reason for the tunnel ending. + global close: function(tunnel: Info, action: Action); + + ## The amount of time a tunnel is not used in establishment of new + ## connections before it is considered inactive/expired. + const expiration_interval = 1hrs &redef; + + ## Currently active tunnels. That is, tunnels for which new, + ## encapsulated connections have been seen in the interval indicated by + ## :zeek:see:`Tunnel::expiration_interval`. + global active: table[conn_id] of Info = table() &read_expire=expiration_interval &expire_func=expire; +} + +const ayiya_ports = { 5072/udp }; +const teredo_ports = { 3544/udp }; +const gtpv1_ports = { 2152/udp, 2123/udp }; +redef likely_server_ports += { ayiya_ports, teredo_ports, gtpv1_ports, vxlan_ports }; + +event zeek_init() &priority=5 + { + Log::create_stream(Tunnel::LOG, [$columns=Info, $path="tunnel"]); + + Analyzer::register_for_ports(Analyzer::ANALYZER_AYIYA, ayiya_ports); + Analyzer::register_for_ports(Analyzer::ANALYZER_TEREDO, teredo_ports); + Analyzer::register_for_ports(Analyzer::ANALYZER_GTPV1, gtpv1_ports); + Analyzer::register_for_ports(Analyzer::ANALYZER_VXLAN, vxlan_ports); + } + +function register_all(ecv: EncapsulatingConnVector) + { + for ( i in ecv ) + register(ecv[i]); + } + +function register(ec: EncapsulatingConn) + { + if ( ec$cid !in active ) + { + local tunnel: Info; + tunnel$ts = network_time(); + if ( ec?$uid ) + tunnel$uid = ec$uid; + tunnel$id = ec$cid; + tunnel$action = DISCOVER; + tunnel$tunnel_type = ec$tunnel_type; + active[ec$cid] = tunnel; + Log::write(LOG, tunnel); + } + } + +function close(tunnel: Info, action: Action) + { + tunnel$action = action; + tunnel$ts = network_time(); + Log::write(LOG, tunnel); + delete active[tunnel$id]; + } + +function expire(t: table[conn_id] of Info, idx: conn_id): interval + { + close(t[idx], EXPIRE); + return 0secs; + } + +event new_connection(c: connection) &priority=5 + { + if ( c?$tunnel ) + register_all(c$tunnel); + } + +event tunnel_changed(c: connection, e: EncapsulatingConnVector) &priority=5 + { + register_all(e); + } + +event connection_state_remove(c: connection) &priority=-5 + { + if ( c$id in active ) + close(active[c$id], CLOSE); + } diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro deleted file mode 100644 index 0c32cebcc5..0000000000 --- 
a/scripts/base/init-bare.bro +++ /dev/null @@ -1,5060 +0,0 @@ -@load base/bif/const.bif -@load base/bif/types.bif - -# Type declarations - -## An ordered array of strings. The entries are indexed by successive numbers. -## Note that it depends on the usage whether the first index is zero or one. -## -## .. todo:: We need this type definition only for declaring builtin functions -## via ``bifcl``. We should extend ``bifcl`` to understand composite types -## directly and then remove this alias. -type string_array: table[count] of string; - -## A set of strings. -## -## .. todo:: We need this type definition only for declaring builtin functions -## via ``bifcl``. We should extend ``bifcl`` to understand composite types -## directly and then remove this alias. -type string_set: set[string]; - -## A set of addresses. -## -## .. todo:: We need this type definition only for declaring builtin functions -## via ``bifcl``. We should extend ``bifcl`` to understand composite types -## directly and then remove this alias. -type addr_set: set[addr]; - -## A set of counts. -## -## .. todo:: We need this type definition only for declaring builtin functions -## via ``bifcl``. We should extend ``bifcl`` to understand composite types -## directly and then remove this alias. -type count_set: set[count]; - -## A vector of counts, used by some builtin functions to store a list of indices. -## -## .. todo:: We need this type definition only for declaring builtin functions -## via ``bifcl``. We should extend ``bifcl`` to understand composite types -## directly and then remove this alias. -type index_vec: vector of count; - -## A vector of subnets. -## -## .. todo:: We need this type definition only for declaring builtin functions -## via ``bifcl``. We should extend ``bifcl`` to understand composite types -## directly and then remove this alias. -type subnet_vec: vector of subnet; - -## A vector of any, used by some builtin functions to store a list of varying -## types. -## -## .. todo:: We need this type definition only for declaring builtin functions -## via ``bifcl``. We should extend ``bifcl`` to understand composite types -## directly and then remove this alias. -type any_vec: vector of any; - -## A vector of strings. -## -## .. todo:: We need this type definition only for declaring builtin functions -## via ``bifcl``. We should extend ``bifcl`` to understand composite types -## directly and then remove this alias. -type string_vec: vector of string; - -## A vector of x509 opaques. -## -## .. todo:: We need this type definition only for declaring builtin functions -## via ``bifcl``. We should extend ``bifcl`` to understand composite types -## directly and then remove this alias. -type x509_opaque_vector: vector of opaque of x509; - -## A vector of addresses. -## -## .. todo:: We need this type definition only for declaring builtin functions -## via ``bifcl``. We should extend ``bifcl`` to understand composite types -## directly and then remove this alias. -type addr_vec: vector of addr; - -## A table of strings indexed by strings. -## -## .. todo:: We need this type definition only for declaring builtin functions -## via ``bifcl``. We should extend ``bifcl`` to understand composite types -## directly and then remove this alias. -type table_string_of_string: table[string] of string; - -## A table of counts indexed by strings. -## -## .. todo:: We need this type definition only for declaring builtin functions -## via ``bifcl``. 
We should extend ``bifcl`` to understand composite types -## directly and then remove this alias. -type table_string_of_count: table[string] of count; - -## A set of file analyzer tags. -## -## .. todo:: We need this type definition only for declaring builtin functions -## via ``bifcl``. We should extend ``bifcl`` to understand composite types -## directly and then remove this alias. -type files_tag_set: set[Files::Tag]; - -## A structure indicating a MIME type and strength of a match against -## file magic signatures. -## -## :bro:see:`file_magic` -type mime_match: record { - strength: int; ##< How strongly the signature matched. Used for - ##< prioritization when multiple file magic signatures - ##< match. - mime: string; ##< The MIME type of the file magic signature match. -}; - -## A vector of file magic signature matches, ordered by strength of -## the signature, strongest first. -## -## :bro:see:`file_magic` -type mime_matches: vector of mime_match; - -## A connection's transport-layer protocol. Note that Bro uses the term -## "connection" broadly, using flow semantics for ICMP and UDP. -type transport_proto: enum { - unknown_transport, ##< An unknown transport-layer protocol. - tcp, ##< TCP. - udp, ##< UDP. - icmp ##< ICMP. -}; - -## A connection's identifying 4-tuple of endpoints and ports. -## -## .. note:: It's actually a 5-tuple: the transport-layer protocol is stored as -## part of the port values, `orig_p` and `resp_p`, and can be extracted from -## them with :bro:id:`get_port_transport_proto`. -type conn_id: record { - orig_h: addr; ##< The originator's IP address. - orig_p: port; ##< The originator's port number. - resp_h: addr; ##< The responder's IP address. - resp_p: port; ##< The responder's port number. -} &log; - -## The identifying 4-tuple of a uni-directional flow. -## -## .. note:: It's actually a 5-tuple: the transport-layer protocol is stored as -## part of the port values, `src_p` and `dst_p`, and can be extracted from -## them with :bro:id:`get_port_transport_proto`. -type flow_id : record { - src_h: addr; ##< The source IP address. - src_p: port; ##< The source port number. - dst_h: addr; ##< The destination IP address. - dst_p: port; ##< The desintation port number. -} &log; - -## Specifics about an ICMP conversation. ICMP events typically pass this in -## addition to :bro:type:`conn_id`. -## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_redirect icmp_sent -## icmp_time_exceeded icmp_unreachable -type icmp_conn: record { - orig_h: addr; ##< The originator's IP address. - resp_h: addr; ##< The responder's IP address. - itype: count; ##< The ICMP type of the packet that triggered the instantiation of the record. - icode: count; ##< The ICMP code of the packet that triggered the instantiation of the record. - len: count; ##< The length of the ICMP payload of the packet that triggered the instantiation of the record. - hlim: count; ##< The encapsulating IP header's Hop Limit value. - v6: bool; ##< True if it's an ICMPv6 packet. -}; - -## Packet context part of an ICMP message. The fields of this record reflect the -## packet that is described by the context. -## -## .. bro:see:: icmp_time_exceeded icmp_unreachable -type icmp_context: record { - id: conn_id; ##< The packet's 4-tuple. - len: count; ##< The length of the IP packet (headers + payload). - proto: count; ##< The packet's transport-layer protocol. - frag_offset: count; ##< The packet's fragmentation offset. 
- ## True if the packet's IP header is not fully included in the context - ## or if there is not enough of the transport header to determine source - ## and destination ports. If that is the case, the appropriate fields - ## of this record will be set to null values. - bad_hdr_len: bool; - bad_checksum: bool; ##< True if the packet's IP checksum is not correct. - MF: bool; ##< True if the packet's *more fragments* flag is set. - DF: bool; ##< True if the packet's *don't fragment* flag is set. -}; - -## Values extracted from a Prefix Information option in an ICMPv6 neighbor -## discovery message as specified by :rfc:`4861`. -## -## .. bro:see:: icmp6_nd_option -type icmp6_nd_prefix_info: record { - ## Number of leading bits of the *prefix* that are valid. - prefix_len: count; - ## Flag indicating the prefix can be used for on-link determination. - L_flag: bool; - ## Autonomous address-configuration flag. - A_flag: bool; - ## Length of time in seconds that the prefix is valid for purpose of - ## on-link determination (0xffffffff represents infinity). - valid_lifetime: interval; - ## Length of time in seconds that the addresses generated from the - ## prefix via stateless address autoconfiguration remain preferred - ## (0xffffffff represents infinity). - preferred_lifetime: interval; - ## An IP address or prefix of an IP address. Use the *prefix_len* field - ## to convert this into a :bro:type:`subnet`. - prefix: addr; -}; - -## Options extracted from ICMPv6 neighbor discovery messages as specified -## by :rfc:`4861`. -## -## .. bro:see:: icmp_router_solicitation icmp_router_advertisement -## icmp_neighbor_advertisement icmp_neighbor_solicitation icmp_redirect -## icmp6_nd_options -type icmp6_nd_option: record { - ## 8-bit identifier of the type of option. - otype: count; - ## 8-bit integer representing the length of the option (including the - ## type and length fields) in units of 8 octets. - len: count; - ## Source Link-Layer Address (Type 1) or Target Link-Layer Address (Type 2). - ## Byte ordering of this is dependent on the actual link-layer. - link_address: string &optional; - ## Prefix Information (Type 3). - prefix: icmp6_nd_prefix_info &optional; - ## Redirected header (Type 4). This field contains the context of the - ## original, redirected packet. - redirect: icmp_context &optional; - ## Recommended MTU for the link (Type 5). - mtu: count &optional; - ## The raw data of the option (everything after type & length fields), - ## useful for unknown option types or when the full option payload is - ## truncated in the captured packet. In those cases, option fields - ## won't be pre-extracted into the fields above. - payload: string &optional; -}; - -## A type alias for a vector of ICMPv6 neighbor discovery message options. -type icmp6_nd_options: vector of icmp6_nd_option; - -# A DNS mapping between IP address and hostname resolved by Bro's internal -# resolver. -# -# .. bro:see:: dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name -# dns_mapping_unverified dns_mapping_valid -type dns_mapping: record { - ## The time when the mapping was created, which corresponds to when - ## the DNS query was sent out. - creation_time: time; - ## If the mapping is the result of a name lookup, the queried host name; - ## otherwise empty. - req_host: string; - ## If the mapping is the result of a pointer lookup, the queried - ## address; otherwise null. - req_addr: addr; - ## True if the lookup returned success. Only then are the result fields - ## valid. 
- valid: bool; - ## If the mapping is the result of a pointer lookup, the resolved - ## hostname; otherwise empty. - hostname: string; - ## If the mapping is the result of an address lookup, the resolved - ## address(es); otherwise empty. - addrs: addr_set; -}; - -## A parsed host/port combination describing server endpoint for an upcoming -## data transfer. -## -## .. bro:see:: fmt_ftp_port parse_eftp_port parse_ftp_epsv parse_ftp_pasv -## parse_ftp_port -type ftp_port: record { - h: addr; ##< The host's address. - p: port; ##< The host's port. - valid: bool; ##< True if format was right. Only then are *h* and *p* valid. -}; - -## Statistics about what a TCP endpoint sent. -## -## .. bro:see:: conn_stats -type endpoint_stats: record { - num_pkts: count; ##< Number of packets. - num_rxmit: count; ##< Number of retransmissions. - num_rxmit_bytes: count; ##< Number of retransmitted bytes. - num_in_order: count; ##< Number of in-order packets. - num_OO: count; ##< Number of out-of-order packets. - num_repl: count; ##< Number of replicated packets (last packet was sent again). - ## Endian type used by the endpoint, if it could be determined from - ## the sequence numbers used. This is one of :bro:see:`ENDIAN_UNKNOWN`, - ## :bro:see:`ENDIAN_BIG`, :bro:see:`ENDIAN_LITTLE`, and - ## :bro:see:`ENDIAN_CONFUSED`. - endian_type: count; -}; - -module Tunnel; -export { - ## Records the identity of an encapsulating parent of a tunneled connection. - type EncapsulatingConn: record { - ## The 4-tuple of the encapsulating "connection". In case of an - ## IP-in-IP tunnel the ports will be set to 0. The direction - ## (i.e., orig and resp) are set according to the first tunneled - ## packet seen and not according to the side that established - ## the tunnel. - cid: conn_id; - ## The type of tunnel. - tunnel_type: Tunnel::Type; - ## A globally unique identifier that, for non-IP-in-IP tunnels, - ## cross-references the *uid* field of :bro:type:`connection`. - uid: string &optional; - } &log; -} # end export -module GLOBAL; - -## A type alias for a vector of encapsulating "connections", i.e. for when -## there are tunnels within tunnels. -## -## .. todo:: We need this type definition only for declaring builtin functions -## via ``bifcl``. We should extend ``bifcl`` to understand composite types -## directly and then remove this alias. -type EncapsulatingConnVector: vector of Tunnel::EncapsulatingConn; - -## Statistics about a :bro:type:`connection` endpoint. -## -## .. bro:see:: connection -type endpoint: record { - size: count; ##< Logical size of data sent (for TCP: derived from sequence numbers). - ## Endpoint state. For a TCP connection, one of the constants: - ## :bro:see:`TCP_INACTIVE` :bro:see:`TCP_SYN_SENT` - ## :bro:see:`TCP_SYN_ACK_SENT` :bro:see:`TCP_PARTIAL` - ## :bro:see:`TCP_ESTABLISHED` :bro:see:`TCP_CLOSED` :bro:see:`TCP_RESET`. - ## For UDP, one of :bro:see:`UDP_ACTIVE` and :bro:see:`UDP_INACTIVE`. - state: count; - ## Number of packets sent. Only set if :bro:id:`use_conn_size_analyzer` - ## is true. - num_pkts: count &optional; - ## Number of IP-level bytes sent. Only set if - ## :bro:id:`use_conn_size_analyzer` is true. - num_bytes_ip: count &optional; - ## The current IPv6 flow label that the connection endpoint is using. - ## Always 0 if the connection is over IPv4. - flow_label: count; - ## The link-layer address seen in the first packet (if available). - l2_addr: string &optional; -}; - -## A connection. 
This is Bro's basic connection type describing IP- and -## transport-layer information about the conversation. Note that Bro uses a -## liberal interpretation of "connection" and associates instances of this type -## also with UDP and ICMP flows. -type connection: record { - id: conn_id; ##< The connection's identifying 4-tuple. - orig: endpoint; ##< Statistics about originator side. - resp: endpoint; ##< Statistics about responder side. - start_time: time; ##< The timestamp of the connection's first packet. - ## The duration of the conversation. Roughly speaking, this is the - ## interval between first and last data packet (low-level TCP details - ## may adjust it somewhat in ambiguous cases). - duration: interval; - ## The set of services the connection is using as determined by Bro's - ## dynamic protocol detection. Each entry is the label of an analyzer - ## that confirmed that it could parse the connection payload. While - ## typically, there will be at most one entry for each connection, in - ## principle it is possible that more than one protocol analyzer is able - ## to parse the same data. If so, all will be recorded. Also note that - ## the recorded services are independent of any transport-level protocols. - service: set[string]; - history: string; ##< State history of connections. See *history* in :bro:see:`Conn::Info`. - ## A globally unique connection identifier. For each connection, Bro - ## creates an ID that is very likely unique across independent Bro runs. - ## These IDs can thus be used to tag and locate information associated - ## with that connection. - uid: string; - ## If the connection is tunneled, this field contains information about - ## the encapsulating "connection(s)" with the outermost one starting - ## at index zero. It's also always the first such encapsulation seen - ## for the connection unless the :bro:id:`tunnel_changed` event is - ## handled and reassigns this field to the new encapsulation. - tunnel: EncapsulatingConnVector &optional; - - ## The outer VLAN, if applicable for this connection. - vlan: int &optional; - - ## The inner VLAN, if applicable for this connection. - inner_vlan: int &optional; -}; - -## Default amount of time a file can be inactive before the file analysis -## gives up and discards any internal state related to the file. -option default_file_timeout_interval: interval = 2 mins; - -## Default amount of bytes that file analysis will buffer in order to use -## for mime type matching. File analyzers attached at the time of mime type -## matching or later, will receive a copy of this buffer. -option default_file_bof_buffer_size: count = 4096; - -## A file that Bro is analyzing. This is Bro's type for describing the basic -## internal metadata collected about a "file", which is essentially just a -## byte stream that is e.g. pulled from a network connection or possibly -## some other input source. -type fa_file: record { - ## An identifier associated with a single file. - id: string; - - ## Identifier associated with a container file from which this one was - ## extracted as part of the file analysis. - parent_id: string &optional; - - ## An identification of the source of the file data. E.g. it may be - ## a network protocol over which it was transferred, or a local file - ## path which was read, or some other input source. - ## Examples are: "HTTP", "SMTP", "IRC_DATA", or the file path. - source: string; - - ## If the source of this file is a network connection, this field - ## may be set to indicate the directionality. 
- is_orig: bool &optional; - - ## The set of connections over which the file was transferred. - conns: table[conn_id] of connection &optional; - - ## The time at which the last activity for the file was seen. - last_active: time; - - ## Number of bytes provided to the file analysis engine for the file. - seen_bytes: count &default=0; - - ## Total number of bytes that are supposed to comprise the full file. - total_bytes: count &optional; - - ## The number of bytes in the file stream that were completely missed - ## during the process of analysis e.g. due to dropped packets. - missing_bytes: count &default=0; - - ## The number of bytes in the file stream that were not delivered to - ## stream file analyzers. Generally, this consists of bytes that - ## couldn't be reassembled, either because reassembly simply isn't - ## enabled, or due to size limitations of the reassembly buffer. - overflow_bytes: count &default=0; - - ## The amount of time between receiving new data for this file that - ## the analysis engine will wait before giving up on it. - timeout_interval: interval &default=default_file_timeout_interval; - - ## The number of bytes at the beginning of a file to save for later - ## inspection in the *bof_buffer* field. - bof_buffer_size: count &default=default_file_bof_buffer_size; - - ## The content of the beginning of a file up to *bof_buffer_size* bytes. - ## This is also the buffer that's used for file/mime type detection. - bof_buffer: string &optional; -} &redef; - -## Metadata that's been inferred about a particular file. -type fa_metadata: record { - ## The strongest matching MIME type if one was discovered. - mime_type: string &optional; - ## All matching MIME types if any were discovered. - mime_types: mime_matches &optional; - ## Specifies whether the MIME type was inferred using signatures, - ## or provided directly by the protocol the file appeared in. - inferred: bool &default=T; -}; - -## Fields of a SYN packet. -## -## .. bro:see:: connection_SYN_packet -type SYN_packet: record { - is_orig: bool; ##< True if the packet was sent the connection's originator. - DF: bool; ##< True if the *don't fragment* is set in the IP header. - ttl: count; ##< The IP header's time-to-live. - size: count; ##< The size of the packet's payload as specified in the IP header. - win_size: count; ##< The window size from the TCP header. - win_scale: int; ##< The window scale option if present, or -1 if not. - MSS: count; ##< The maximum segment size if present, or 0 if not. - SACK_OK: bool; ##< True if the *SACK* option is present. -}; - -## Packet capture statistics. All counts are cumulative. -## -## .. bro:see:: get_net_stats -type NetStats: record { - pkts_recvd: count &default=0; ##< Packets received by Bro. - pkts_dropped: count &default=0; ##< Packets reported dropped by the system. - ## Packets seen on the link. Note that this may differ - ## from *pkts_recvd* because of a potential capture_filter. See - ## :doc:`/scripts/base/frameworks/packet-filter/main.bro`. Depending on the - ## packet capture system, this value may not be available and will then - ## be always set to zero. - pkts_link: count &default=0; - bytes_recvd: count &default=0; ##< Bytes received by Bro. -}; - -type ConnStats: record { - total_conns: count; ##< - current_conns: count; ##< - current_conns_extern: count; ##< - sess_current_conns: count; ##< - - num_packets: count; - num_fragments: count; - max_fragments: count; - - num_tcp_conns: count; ##< Current number of TCP connections in memory. 
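A minimal sketch of how the SYN_packet record above is typically consumed, via the connection_SYN_packet event its documentation points to; the handler body and the printed fields are illustrative only:

    event connection_SYN_packet(c: connection, pkt: SYN_packet)
        {
        # Log a few passive-fingerprinting fields carried in the initial SYN.
        print fmt("SYN from %s: ttl=%d, window=%d, MSS=%d, SACK_OK=%s",
                  c$id$orig_h, pkt$ttl, pkt$win_size, pkt$MSS, pkt$SACK_OK);
        }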
- max_tcp_conns: count; ##< Maximum number of concurrent TCP connections so far. - cumulative_tcp_conns: count; ##< Total number of TCP connections so far. - - num_udp_conns: count; ##< Current number of UDP flows in memory. - max_udp_conns: count; ##< Maximum number of concurrent UDP flows so far. - cumulative_udp_conns: count; ##< Total number of UDP flows so far. - - num_icmp_conns: count; ##< Current number of ICMP flows in memory. - max_icmp_conns: count; ##< Maximum number of concurrent ICMP flows so far. - cumulative_icmp_conns: count; ##< Total number of ICMP flows so far. - - killed_by_inactivity: count; -}; - -## Statistics about Bro's process. -## -## .. bro:see:: get_proc_stats -## -## .. note:: All process-level values refer to Bro's main process only, not to -## the child process it spawns for doing communication. -type ProcStats: record { - debug: bool; ##< True if compiled with --enable-debug. - start_time: time; ##< Start time of process. - real_time: interval; ##< Elapsed real time since Bro started running. - user_time: interval; ##< User CPU seconds. - system_time: interval; ##< System CPU seconds. - mem: count; ##< Maximum memory consumed, in KB. - minor_faults: count; ##< Page faults not requiring actual I/O. - major_faults: count; ##< Page faults requiring actual I/O. - num_swap: count; ##< Times swapped out. - blocking_input: count; ##< Blocking input operations. - blocking_output: count; ##< Blocking output operations. - num_context: count; ##< Number of involuntary context switches. -}; - -type EventStats: record { - queued: count; ##< Total number of events queued so far. - dispatched: count; ##< Total number of events dispatched so far. -}; - -## Holds statistics for all types of reassembly. -## -## .. bro:see:: get_reassembler_stats -type ReassemblerStats: record { - file_size: count; ##< Byte size of File reassembly tracking. - frag_size: count; ##< Byte size of Fragment reassembly tracking. - tcp_size: count; ##< Byte size of TCP reassembly tracking. - unknown_size: count; ##< Byte size of reassembly tracking for unknown purposes. -}; - -## Statistics of all regular expression matchers. -## -## .. bro:see:: get_matcher_stats -type MatcherStats: record { - matchers: count; ##< Number of distinct RE matchers. - nfa_states: count; ##< Number of NFA states across all matchers. - dfa_states: count; ##< Number of DFA states across all matchers. - computed: count; ##< Number of computed DFA state transitions. - mem: count; ##< Number of bytes used by DFA states. - hits: count; ##< Number of cache hits. - misses: count; ##< Number of cache misses. -}; - -## Statistics of timers. -## -## .. bro:see:: get_timer_stats -type TimerStats: record { - current: count; ##< Current number of pending timers. - max: count; ##< Maximum number of concurrent timers pending so far. - cumulative: count; ##< Cumulative number of timers scheduled. -}; - -## Statistics of file analysis. -## -## .. bro:see:: get_file_analysis_stats -type FileAnalysisStats: record { - current: count; ##< Current number of files being analyzed. - max: count; ##< Maximum number of concurrent files so far. - cumulative: count; ##< Cumulative number of files analyzed. -}; - -## Statistics related to Bro's active use of DNS. These numbers are -## about Bro performing DNS queries on it's own, not traffic -## being seen. -## -## .. bro:see:: get_dns_stats -type DNSStats: record { - requests: count; ##< Number of DNS requests made - successful: count; ##< Number of successful DNS replies. 
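The *Stats records in this block are returned by the get_*_stats() built-ins named in their documentation. A small sketch, assuming only that those functions exist as documented; the choice of fields to print is arbitrary:

    event bro_done()
        {
        local ps = get_proc_stats();
        local ts = get_timer_stats();

        print fmt("CPU: %s user / %s system, peak memory %d KB",
                  ps$user_time, ps$system_time, ps$mem);
        print fmt("timers: %d pending, %d max concurrent, %d scheduled in total",
                  ts$current, ts$max, ts$cumulative);
        }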
- failed: count; ##< Number of DNS reply failures. - pending: count; ##< Current pending queries. - cached_hosts: count; ##< Number of cached hosts. - cached_addresses: count; ##< Number of cached addresses. -}; - -## Statistics about number of gaps in TCP connections. -## -## .. bro:see:: get_gap_stats -type GapStats: record { - ack_events: count; ##< How many ack events *could* have had gaps. - ack_bytes: count; ##< How many bytes those covered. - gap_events: count; ##< How many *did* have gaps. - gap_bytes: count; ##< How many bytes were missing in the gaps. -}; - -## Statistics about threads. -## -## .. bro:see:: get_thread_stats -type ThreadStats: record { - num_threads: count; -}; - -## Statistics about Broker communication. -## -## .. bro:see:: get_broker_stats -type BrokerStats: record { - num_peers: count; - ## Number of active data stores. - num_stores: count; - ## Number of pending data store queries. - num_pending_queries: count; - ## Number of total log messages received. - num_events_incoming: count; - ## Number of total log messages sent. - num_events_outgoing: count; - ## Number of total log records received. - num_logs_incoming: count; - ## Number of total log records sent. - num_logs_outgoing: count; - ## Number of total identifiers received. - num_ids_incoming: count; - ## Number of total identifiers sent. - num_ids_outgoing: count; -}; - -## Statistics about reporter messages and weirds. -## -## .. bro:see:: get_reporter_stats -type ReporterStats: record { - ## Number of total weirds encountered, before any rate-limiting. - weirds: count; - ## Number of times each individual weird is encountered, before any - ## rate-limiting is applied. - weirds_by_type: table[string] of count; -}; - -## Deprecated. -## -## .. todo:: Remove. It's still declared internally but doesn't seem used anywhere -## else. -type packet: record { - conn: connection; - is_orig: bool; - seq: count; ##< seq=k => it is the kth *packet* of the connection - timestamp: time; -}; - -## Table type used to map variable names to their memory allocation. -## -## .. bro:see:: global_sizes -## -## .. todo:: We need this type definition only for declaring builtin functions -## via ``bifcl``. We should extend ``bifcl`` to understand composite types -## directly and then remove this alias. -type var_sizes: table[string] of count; - -## Meta-information about a script-level identifier. -## -## .. bro:see:: global_ids id_table -type script_id: record { - type_name: string; ##< The name of the identifier's type. - exported: bool; ##< True if the identifier is exported. - constant: bool; ##< True if the identifier is a constant. - enum_constant: bool; ##< True if the identifier is an enum value. - option_value: bool; ##< True if the identifier is an option. - redefinable: bool; ##< True if the identifier is declared with the :bro:attr:`&redef` attribute. - value: any &optional; ##< The current value of the identifier. -}; - -## Table type used to map script-level identifiers to meta-information -## describing them. -## -## .. bro:see:: global_ids script_id -## -## .. todo:: We need this type definition only for declaring builtin functions -## via ``bifcl``. We should extend ``bifcl`` to understand composite types -## directly and then remove this alias. -type id_table: table[string] of script_id; - -## Meta-information about a record field. -## -## .. bro:see:: record_fields record_field_table -type record_field: record { - type_name: string; ##< The name of the field's type. 
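The script_id and id_table types back the global_ids() introspection built-in referenced above. A sketch of one possible use, listing exported constants at startup; purely illustrative, and the output can be large:

    event bro_init()
        {
        local ids = global_ids();

        for ( name in ids )
            {
            local info = ids[name];

            if ( info$exported && info$constant )
                print fmt("%s: %s", name, info$type_name);
            }
        }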
- log: bool; ##< True if the field is declared with :bro:attr:`&log` attribute. - ## The current value of the field in the record instance passed into - ## :bro:see:`record_fields` (if it has one). - value: any &optional; - default_val: any &optional; ##< The value of the :bro:attr:`&default` attribute if defined. -}; - -## Table type used to map record field declarations to meta-information -## describing them. -## -## .. bro:see:: record_fields record_field -## -## .. todo:: We need this type definition only for declaring builtin functions -## via ``bifcl``. We should extend ``bifcl`` to understand composite types -## directly and then remove this alias. -type record_field_table: table[string] of record_field; - -## Meta-information about a parameter to a function/event. -## -## .. bro:see:: call_argument_vector new_event -type call_argument: record { - name: string; ##< The name of the parameter. - type_name: string; ##< The name of the parameters's type. - default_val: any &optional; ##< The value of the :bro:attr:`&default` attribute if defined. - - ## The value of the parameter as passed into a given call instance. - ## Might be unset in the case a :bro:attr:`&default` attribute is - ## defined. - value: any &optional; -}; - -## Vector type used to capture parameters of a function/event call. -## -## .. bro:see:: call_argument new_event -type call_argument_vector: vector of call_argument; - -# todo:: Do we still need these here? Can they move into the packet filter -# framework? -# -# The following two variables are defined here until the core is not -# dependent on the names remaining as they are now. - -## Set of BPF capture filters to use for capturing, indexed by a user-definable -## ID (which must be unique). If Bro is *not* configured with -## :bro:id:`PacketFilter::enable_auto_protocol_capture_filters`, -## all packets matching at least one of the filters in this table (and all in -## :bro:id:`restrict_filters`) will be analyzed. -## -## .. bro:see:: PacketFilter PacketFilter::enable_auto_protocol_capture_filters -## PacketFilter::unrestricted_filter restrict_filters -global capture_filters: table[string] of string &redef; - -## Set of BPF filters to restrict capturing, indexed by a user-definable ID -## (which must be unique). -## -## .. bro:see:: PacketFilter PacketFilter::enable_auto_protocol_capture_filters -## PacketFilter::unrestricted_filter capture_filters -global restrict_filters: table[string] of string &redef; - -## Enum type identifying dynamic BPF filters. These are used by -## :bro:see:`Pcap::precompile_pcap_filter` and :bro:see:`Pcap::precompile_pcap_filter`. -type PcapFilterID: enum { None }; - -## Deprecated. -## -## .. bro:see:: anonymize_addr -type IPAddrAnonymization: enum { - KEEP_ORIG_ADDR, - SEQUENTIALLY_NUMBERED, - RANDOM_MD5, - PREFIX_PRESERVING_A50, - PREFIX_PRESERVING_MD5, -}; - -## Deprecated. -## -## .. bro:see:: anonymize_addr -type IPAddrAnonymizationClass: enum { - ORIG_ADDR, - RESP_ADDR, - OTHER_ADDR, -}; - -## A locally unique ID identifying a communication peer. The ID is returned by -## :bro:id:`connect`. -## -## .. bro:see:: connect -type peer_id: count; - -## A communication peer. -## -## .. 
bro:see:: complete_handshake disconnect finished_send_state -## get_event_peer get_local_event_peer remote_capture_filter -## remote_connection_closed remote_connection_error -## remote_connection_established remote_connection_handshake_done -## remote_event_registered remote_log_peer remote_pong -## request_remote_events request_remote_logs request_remote_sync -## send_capture_filter send_current_packet send_id send_ping send_state -## set_accept_state set_compression_level -## -## .. todo::The type's name is too narrow these days, should rename. -type event_peer: record { - id: peer_id; ##< Locally unique ID of peer (returned by :bro:id:`connect`). - host: addr; ##< The IP address of the peer. - ## Either the port we connected to at the peer; or our port the peer - ## connected to if the session is remotely initiated. - p: port; - is_local: bool; ##< True if this record describes the local process. - descr: string; ##< The peer's :bro:see:`peer_description`. - class: string &optional; ##< The self-assigned *class* of the peer. -}; - -## Deprecated. -## -## .. bro:see:: rotate_file rotate_file_by_name rotate_interval -type rotate_info: record { - old_name: string; ##< Original filename. - new_name: string; ##< File name after rotation. - open: time; ##< Time when opened. - close: time; ##< Time when closed. -}; - -### The following aren't presently used, though they should be. -# # Structures needed for subsequence computations (str_smith_waterman): -# # -# type sw_variant: enum { -# SW_SINGLE, -# SW_MULTIPLE, -# }; - -## Parameters for the Smith-Waterman algorithm. -## -## .. bro:see:: str_smith_waterman -type sw_params: record { - ## Minimum size of a substring, minimum "granularity". - min_strlen: count &default = 3; - - ## Smith-Waterman flavor to use. - sw_variant: count &default = 0; -}; - -## Helper type for return value of Smith-Waterman algorithm. -## -## .. bro:see:: str_smith_waterman sw_substring_vec sw_substring sw_align_vec sw_params -type sw_align: record { - str: string; ##< String a substring is part of. - index: count; ##< Offset substring is located. -}; - -## Helper type for return value of Smith-Waterman algorithm. -## -## .. bro:see:: str_smith_waterman sw_substring_vec sw_substring sw_align sw_params -type sw_align_vec: vector of sw_align; - -## Helper type for return value of Smith-Waterman algorithm. -## -## .. bro:see:: str_smith_waterman sw_substring_vec sw_align_vec sw_align sw_params -## -type sw_substring: record { - str: string; ##< A substring. - aligns: sw_align_vec; ##< All strings of which it's a substring. - new: bool; ##< True if start of new alignment. -}; - -## Return type for Smith-Waterman algorithm. -## -## .. bro:see:: str_smith_waterman sw_substring sw_align_vec sw_align sw_params -## -## .. todo:: We need this type definition only for declaring builtin functions -## via ``bifcl``. We should extend ``bifcl`` to understand composite types -## directly and then remove this alias. -type sw_substring_vec: vector of sw_substring; - -## Policy-level representation of a packet passed on by libpcap. The data -## includes the complete packet as returned by libpcap, including the link-layer -## header. -## -## .. bro:see:: dump_packet get_current_packet -type pcap_packet: record { - ts_sec: count; ##< The non-fractional part of the packet's timestamp (i.e., full seconds since the epoch). - ts_usec: count; ##< The fractional part of the packet's timestamp. - caplen: count; ##< The number of bytes captured (<= *len*). 
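A sketch of how the sw_params and sw_substring types above pair with the str_smith_waterman built-in they document; the helper name and threshold are hypothetical:

    # Hypothetical helper: true if two strings share a common substring of at
    # least n characters, relying on the sw_params defaults for everything else.
    function share_substring(a: string, b: string, n: count): bool
        {
        local subs = str_smith_waterman(a, b, [$min_strlen = n]);
        return |subs| > 0;
        }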
- len: count; ##< The length of the packet in bytes, including link-level header. - data: string; ##< The payload of the packet, including link-level header. - link_type: link_encap; ##< Layer 2 link encapsulation type. -}; - -## GeoIP location information. -## -## .. bro:see:: lookup_location -type geo_location: record { - country_code: string &optional; ##< The country code. - region: string &optional; ##< The region. - city: string &optional; ##< The city. - latitude: double &optional; ##< Latitude. - longitude: double &optional; ##< Longitude. -} &log; - -## The directory containing MaxMind DB (.mmdb) files to use for GeoIP support. -const mmdb_dir: string = "" &redef; - -## Computed entropy values. The record captures a number of measures that are -## computed in parallel. See `A Pseudorandom Number Sequence Test Program -## `_ for more information, Bro uses the same -## code. -## -## .. bro:see:: entropy_test_add entropy_test_finish entropy_test_init find_entropy -type entropy_test_result: record { - entropy: double; ##< Information density. - chi_square: double; ##< Chi-Square value. - mean: double; ##< Arithmetic Mean. - monte_carlo_pi: double; ##< Monte-carlo value for pi. - serial_correlation: double; ##< Serial correlation coefficient. -}; - -# TCP values for :bro:see:`endpoint` *state* field. -# todo:: these should go into an enum to make them autodoc'able. -const TCP_INACTIVE = 0; ##< Endpoint is still inactive. -const TCP_SYN_SENT = 1; ##< Endpoint has sent SYN. -const TCP_SYN_ACK_SENT = 2; ##< Endpoint has sent SYN/ACK. -const TCP_PARTIAL = 3; ##< Endpoint has sent data but no initial SYN. -const TCP_ESTABLISHED = 4; ##< Endpoint has finished initial handshake regularly. -const TCP_CLOSED = 5; ##< Endpoint has closed connection. -const TCP_RESET = 6; ##< Endpoint has sent RST. - -# UDP values for :bro:see:`endpoint` *state* field. -# todo:: these should go into an enum to make them autodoc'able. -const UDP_INACTIVE = 0; ##< Endpoint is still inactive. -const UDP_ACTIVE = 1; ##< Endpoint has sent something. - -## If true, don't verify checksums. Useful for running on altered trace -## files, and for saving a few cycles, but at the risk of analyzing invalid -## data. Note that the ``-C`` command-line option overrides the setting of this -## variable. -const ignore_checksums = F &redef; - -## If true, instantiate connection state when a partial connection -## (one missing its initial establishment negotiation) is seen. -const partial_connection_ok = T &redef; - -## If true, instantiate connection state when a SYN/ACK is seen but not the -## initial SYN (even if :bro:see:`partial_connection_ok` is false). -const tcp_SYN_ack_ok = T &redef; - -## If true, pass any undelivered to the signature engine before flushing the state. -## If a connection state is removed, there may still be some data waiting in the -## reassembler. -const tcp_match_undelivered = T &redef; - -## Check up on the result of an initial SYN after this much time. -const tcp_SYN_timeout = 5 secs &redef; - -## After a connection has closed, wait this long for further activity -## before checking whether to time out its state. -const tcp_session_timer = 6 secs &redef; - -## When checking a closed connection for further activity, consider it -## inactive if there hasn't been any for this long. Complain if the -## connection is reused before this much time has elapsed. -const tcp_connection_linger = 5 secs &redef; - -## Wait this long upon seeing an initial SYN before timing out the -## connection attempt. 
-const tcp_attempt_delay = 5 secs &redef; - -## Upon seeing a normal connection close, flush state after this much time. -const tcp_close_delay = 5 secs &redef; - -## Upon seeing a RST, flush state after this much time. -const tcp_reset_delay = 5 secs &redef; - -## Generate a :bro:id:`connection_partial_close` event this much time after one -## half of a partial connection closes, assuming there has been no subsequent -## activity. -const tcp_partial_close_delay = 3 secs &redef; - -## If a connection belongs to an application that we don't analyze, -## time it out after this interval. If 0 secs, then don't time it out (but -## :bro:see:`tcp_inactivity_timeout`, :bro:see:`udp_inactivity_timeout`, and -## :bro:see:`icmp_inactivity_timeout` still apply). -const non_analyzed_lifetime = 0 secs &redef; - -## If a TCP connection is inactive, time it out after this interval. If 0 secs, -## then don't time it out. -## -## .. bro:see:: udp_inactivity_timeout icmp_inactivity_timeout set_inactivity_timeout -const tcp_inactivity_timeout = 5 min &redef; - -## If a UDP flow is inactive, time it out after this interval. If 0 secs, then -## don't time it out. -## -## .. bro:see:: tcp_inactivity_timeout icmp_inactivity_timeout set_inactivity_timeout -const udp_inactivity_timeout = 1 min &redef; - -## If an ICMP flow is inactive, time it out after this interval. If 0 secs, then -## don't time it out. -## -## .. bro:see:: tcp_inactivity_timeout udp_inactivity_timeout set_inactivity_timeout -const icmp_inactivity_timeout = 1 min &redef; - -## Number of FINs/RSTs in a row that constitute a "storm". Storms are reported -## as ``weird`` via the notice framework, and they must also come within -## intervals of at most :bro:see:`tcp_storm_interarrival_thresh`. -## -## .. bro:see:: tcp_storm_interarrival_thresh -const tcp_storm_thresh = 1000 &redef; - -## FINs/RSTs must come with this much time or less between them to be -## considered a "storm". -## -## .. bro:see:: tcp_storm_thresh -const tcp_storm_interarrival_thresh = 1 sec &redef; - -## Maximum amount of data that might plausibly be sent in an initial flight -## (prior to receiving any acks). Used to determine whether we must not be -## seeing our peer's ACKs. Set to zero to turn off this determination. -## -## .. bro:see:: tcp_max_above_hole_without_any_acks tcp_excessive_data_without_further_acks -const tcp_max_initial_window = 16384 &redef; - -## If we're not seeing our peer's ACKs, the maximum volume of data above a -## sequence hole that we'll tolerate before assuming that there's been a packet -## drop and we should give up on tracking a connection. If set to zero, then we -## don't ever give up. -## -## .. bro:see:: tcp_max_initial_window tcp_excessive_data_without_further_acks -const tcp_max_above_hole_without_any_acks = 16384 &redef; - -## If we've seen this much data without any of it being acked, we give up -## on that connection to avoid memory exhaustion due to buffering all that -## stuff. If set to zero, then we don't ever give up. Ideally, Bro would -## track the current window on a connection and use it to infer that data -## has in fact gone too far, but for now we just make this quite beefy. -## -## .. bro:see:: tcp_max_initial_window tcp_max_above_hole_without_any_acks -const tcp_excessive_data_without_further_acks = 10 * 1024 * 1024 &redef; - -## Number of TCP segments to buffer beyond what's been acknowledged already -## to detect retransmission inconsistencies. Zero disables any additonal -## buffering. 
-const tcp_max_old_segments = 0 &redef; - -## For services without a handler, these sets define originator-side ports -## that still trigger reassembly. -## -## .. bro:see:: tcp_reassembler_ports_resp -const tcp_reassembler_ports_orig: set[port] = {} &redef; - -## For services without a handler, these sets define responder-side ports -## that still trigger reassembly. -## -## .. bro:see:: tcp_reassembler_ports_orig -const tcp_reassembler_ports_resp: set[port] = {} &redef; - -## Defines destination TCP ports for which the contents of the originator stream -## should be delivered via :bro:see:`tcp_contents`. -## -## .. bro:see:: tcp_content_delivery_ports_resp tcp_content_deliver_all_orig -## tcp_content_deliver_all_resp udp_content_delivery_ports_orig -## udp_content_delivery_ports_resp udp_content_deliver_all_orig -## udp_content_deliver_all_resp tcp_contents -const tcp_content_delivery_ports_orig: table[port] of bool = {} &redef; - -## Defines destination TCP ports for which the contents of the responder stream -## should be delivered via :bro:see:`tcp_contents`. -## -## .. bro:see:: tcp_content_delivery_ports_orig tcp_content_deliver_all_orig -## tcp_content_deliver_all_resp udp_content_delivery_ports_orig -## udp_content_delivery_ports_resp udp_content_deliver_all_orig -## udp_content_deliver_all_resp tcp_contents -const tcp_content_delivery_ports_resp: table[port] of bool = {} &redef; - -## If true, all TCP originator-side traffic is reported via -## :bro:see:`tcp_contents`. -## -## .. bro:see:: tcp_content_delivery_ports_orig tcp_content_delivery_ports_resp -## tcp_content_deliver_all_resp udp_content_delivery_ports_orig -## udp_content_delivery_ports_resp udp_content_deliver_all_orig -## udp_content_deliver_all_resp tcp_contents -const tcp_content_deliver_all_orig = F &redef; - -## If true, all TCP responder-side traffic is reported via -## :bro:see:`tcp_contents`. -## -## .. bro:see:: tcp_content_delivery_ports_orig -## tcp_content_delivery_ports_resp -## tcp_content_deliver_all_orig udp_content_delivery_ports_orig -## udp_content_delivery_ports_resp udp_content_deliver_all_orig -## udp_content_deliver_all_resp tcp_contents -const tcp_content_deliver_all_resp = F &redef; - -## Defines UDP destination ports for which the contents of the originator stream -## should be delivered via :bro:see:`udp_contents`. -## -## .. bro:see:: tcp_content_delivery_ports_orig -## tcp_content_delivery_ports_resp -## tcp_content_deliver_all_orig tcp_content_deliver_all_resp -## udp_content_delivery_ports_resp udp_content_deliver_all_orig -## udp_content_deliver_all_resp udp_contents -const udp_content_delivery_ports_orig: table[port] of bool = {} &redef; - -## Defines UDP destination ports for which the contents of the responder stream -## should be delivered via :bro:see:`udp_contents`. -## -## .. bro:see:: tcp_content_delivery_ports_orig -## tcp_content_delivery_ports_resp tcp_content_deliver_all_orig -## tcp_content_deliver_all_resp udp_content_delivery_ports_orig -## udp_content_deliver_all_orig udp_content_deliver_all_resp udp_contents -const udp_content_delivery_ports_resp: table[port] of bool = {} &redef; - -## If true, all UDP originator-side traffic is reported via -## :bro:see:`udp_contents`. -## -## .. 
bro:see:: tcp_content_delivery_ports_orig -## tcp_content_delivery_ports_resp tcp_content_deliver_all_resp -## tcp_content_delivery_ports_orig udp_content_delivery_ports_orig -## udp_content_delivery_ports_resp udp_content_deliver_all_resp -## udp_contents -const udp_content_deliver_all_orig = F &redef; - -## If true, all UDP responder-side traffic is reported via -## :bro:see:`udp_contents`. -## -## .. bro:see:: tcp_content_delivery_ports_orig -## tcp_content_delivery_ports_resp tcp_content_deliver_all_resp -## tcp_content_delivery_ports_orig udp_content_delivery_ports_orig -## udp_content_delivery_ports_resp udp_content_deliver_all_orig -## udp_contents -const udp_content_deliver_all_resp = F &redef; - -## Check for expired table entries after this amount of time. -## -## .. bro:see:: table_incremental_step table_expire_delay -const table_expire_interval = 10 secs &redef; - -## When expiring/serializing table entries, don't work on more than this many -## table entries at a time. -## -## .. bro:see:: table_expire_interval table_expire_delay -const table_incremental_step = 5000 &redef; - -## When expiring table entries, wait this amount of time before checking the -## next chunk of entries. -## -## .. bro:see:: table_expire_interval table_incremental_step -const table_expire_delay = 0.01 secs &redef; - -## Time to wait before timing out a DNS request. -const dns_session_timeout = 10 sec &redef; - -## Time to wait before timing out an NTP request. -const ntp_session_timeout = 300 sec &redef; - -## Time to wait before timing out an RPC request. -const rpc_timeout = 24 sec &redef; - -## How long to hold onto fragments for possible reassembly. A value of 0.0 -## means "forever", which resists evasion, but can lead to state accrual. -const frag_timeout = 0.0 sec &redef; - -## If positive, indicates the encapsulation header size that should -## be skipped. This applies to all packets. -const encap_hdr_size = 0 &redef; - -## Whether to use the ``ConnSize`` analyzer to count the number of packets and -## IP-level bytes transferred by each endpoint. If true, these values are -## returned in the connection's :bro:see:`endpoint` record value. -const use_conn_size_analyzer = T &redef; - -# todo:: these should go into an enum to make them autodoc'able. -const ENDIAN_UNKNOWN = 0; ##< Endian not yet determined. -const ENDIAN_LITTLE = 1; ##< Little endian. -const ENDIAN_BIG = 2; ##< Big endian. -const ENDIAN_CONFUSED = 3; ##< Tried to determine endian, but failed. - -# Values for :bro:see:`set_contents_file` *direction* argument. -# todo:: these should go into an enum to make them autodoc'able -const CONTENTS_NONE = 0; ##< Turn off recording of contents. -const CONTENTS_ORIG = 1; ##< Record originator contents. -const CONTENTS_RESP = 2; ##< Record responder contents. -const CONTENTS_BOTH = 3; ##< Record both originator and responder contents. - -# Values for code of ICMP *unreachable* messages. The list is not exhaustive. -# todo:: these should go into an enum to make them autodoc'able -# -# .. bro:see:: icmp_unreachable -const ICMP_UNREACH_NET = 0; ##< Network unreachable. -const ICMP_UNREACH_HOST = 1; ##< Host unreachable. -const ICMP_UNREACH_PROTOCOL = 2; ##< Protocol unreachable. -const ICMP_UNREACH_PORT = 3; ##< Port unreachable. -const ICMP_UNREACH_NEEDFRAG = 4; ##< Fragment needed. -const ICMP_UNREACH_ADMIN_PROHIB = 13; ##< Administratively prohibited. - -# Definitions for access to packet headers. Currently only used for -# discarders. 
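The content-delivery tables above gate the tcp_contents/udp_contents events. As a sketch, assuming a service on 25/tcp is of interest (the port choice and handler body are illustrative):

    # Ask for originator-side payload of connections to port 25 to be
    # delivered via tcp_contents.
    redef tcp_content_delivery_ports_orig += { [25/tcp] = T };

    event tcp_contents(c: connection, is_orig: bool, seq: count, contents: string)
        {
        if ( is_orig )
            print fmt("%d bytes of client payload on %s at seq %d",
                      |contents|, c$uid, seq);
        }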
-# todo:: these should go into an enum to make them autodoc'able -const IPPROTO_IP = 0; ##< Dummy for IP. -const IPPROTO_ICMP = 1; ##< Control message protocol. -const IPPROTO_IGMP = 2; ##< Group management protocol. -const IPPROTO_IPIP = 4; ##< IP encapsulation in IP. -const IPPROTO_TCP = 6; ##< TCP. -const IPPROTO_UDP = 17; ##< User datagram protocol. -const IPPROTO_IPV6 = 41; ##< IPv6 header. -const IPPROTO_ICMPV6 = 58; ##< ICMP for IPv6. -const IPPROTO_RAW = 255; ##< Raw IP packet. - -# Definitions for IPv6 extension headers. -const IPPROTO_HOPOPTS = 0; ##< IPv6 hop-by-hop-options header. -const IPPROTO_ROUTING = 43; ##< IPv6 routing header. -const IPPROTO_FRAGMENT = 44; ##< IPv6 fragment header. -const IPPROTO_ESP = 50; ##< IPv6 encapsulating security payload header. -const IPPROTO_AH = 51; ##< IPv6 authentication header. -const IPPROTO_NONE = 59; ##< IPv6 no next header. -const IPPROTO_DSTOPTS = 60; ##< IPv6 destination options header. -const IPPROTO_MOBILITY = 135; ##< IPv6 mobility header. - -## Values extracted from an IPv6 extension header's (e.g. hop-by-hop or -## destination option headers) option field. -## -## .. bro:see:: ip6_hdr ip6_ext_hdr ip6_hopopts ip6_dstopts -type ip6_option: record { - otype: count; ##< Option type. - len: count; ##< Option data length. - data: string; ##< Option data. -}; - -## A type alias for a vector of IPv6 options. -type ip6_options: vector of ip6_option; - -## Values extracted from an IPv6 Hop-by-Hop options extension header. -## -## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr ip6_option -type ip6_hopopts: record { - ## Protocol number of the next header (RFC 1700 et seq., IANA assigned - ## number), e.g. :bro:id:`IPPROTO_ICMP`. - nxt: count; - ## Length of header in 8-octet units, excluding first unit. - len: count; - ## The TLV encoded options; - options: ip6_options; -}; - -## Values extracted from an IPv6 Destination options extension header. -## -## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr ip6_option -type ip6_dstopts: record { - ## Protocol number of the next header (RFC 1700 et seq., IANA assigned - ## number), e.g. :bro:id:`IPPROTO_ICMP`. - nxt: count; - ## Length of header in 8-octet units, excluding first unit. - len: count; - ## The TLV encoded options; - options: ip6_options; -}; - -## Values extracted from an IPv6 Routing extension header. -## -## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr -type ip6_routing: record { - ## Protocol number of the next header (RFC 1700 et seq., IANA assigned - ## number), e.g. :bro:id:`IPPROTO_ICMP`. - nxt: count; - ## Length of header in 8-octet units, excluding first unit. - len: count; - ## Routing type. - rtype: count; - ## Segments left. - segleft: count; - ## Type-specific data. - data: string; -}; - -## Values extracted from an IPv6 Fragment extension header. -## -## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr -type ip6_fragment: record { - ## Protocol number of the next header (RFC 1700 et seq., IANA assigned - ## number), e.g. :bro:id:`IPPROTO_ICMP`. - nxt: count; - ## 8-bit reserved field. - rsv1: count; - ## Fragmentation offset. - offset: count; - ## 2-bit reserved field. - rsv2: count; - ## More fragments. - more: bool; - ## Fragment identification. - id: count; -}; - -## Values extracted from an IPv6 Authentication extension header. -## -## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr -type ip6_ah: record { - ## Protocol number of the next header (RFC 1700 et seq., IANA assigned - ## number), e.g. :bro:id:`IPPROTO_ICMP`. 
- nxt: count; - ## Length of header in 4-octet units, excluding first two units. - len: count; - ## Reserved field. - rsv: count; - ## Security Parameter Index. - spi: count; - ## Sequence number, unset in the case that *len* field is zero. - seq: count &optional; - ## Authentication data, unset in the case that *len* field is zero. - data: string &optional; -}; - -## Values extracted from an IPv6 ESP extension header. -## -## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr -type ip6_esp: record { - ## Security Parameters Index. - spi: count; - ## Sequence number. - seq: count; -}; - -## Values extracted from an IPv6 Mobility Binding Refresh Request message. -## -## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg -type ip6_mobility_brr: record { - ## Reserved. - rsv: count; - ## Mobility Options. - options: vector of ip6_option; -}; - -## Values extracted from an IPv6 Mobility Home Test Init message. -## -## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg -type ip6_mobility_hoti: record { - ## Reserved. - rsv: count; - ## Home Init Cookie. - cookie: count; - ## Mobility Options. - options: vector of ip6_option; -}; - -## Values extracted from an IPv6 Mobility Care-of Test Init message. -## -## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg -type ip6_mobility_coti: record { - ## Reserved. - rsv: count; - ## Care-of Init Cookie. - cookie: count; - ## Mobility Options. - options: vector of ip6_option; -}; - -## Values extracted from an IPv6 Mobility Home Test message. -## -## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg -type ip6_mobility_hot: record { - ## Home Nonce Index. - nonce_idx: count; - ## Home Init Cookie. - cookie: count; - ## Home Keygen Token. - token: count; - ## Mobility Options. - options: vector of ip6_option; -}; - -## Values extracted from an IPv6 Mobility Care-of Test message. -## -## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg -type ip6_mobility_cot: record { - ## Care-of Nonce Index. - nonce_idx: count; - ## Care-of Init Cookie. - cookie: count; - ## Care-of Keygen Token. - token: count; - ## Mobility Options. - options: vector of ip6_option; -}; - -## Values extracted from an IPv6 Mobility Binding Update message. -## -## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg -type ip6_mobility_bu: record { - ## Sequence number. - seq: count; - ## Acknowledge bit. - a: bool; - ## Home Registration bit. - h: bool; - ## Link-Local Address Compatibility bit. - l: bool; - ## Key Management Mobility Capability bit. - k: bool; - ## Lifetime. - life: count; - ## Mobility Options. - options: vector of ip6_option; -}; - -## Values extracted from an IPv6 Mobility Binding Acknowledgement message. -## -## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg -type ip6_mobility_back: record { - ## Status. - status: count; - ## Key Management Mobility Capability. - k: bool; - ## Sequence number. - seq: count; - ## Lifetime. - life: count; - ## Mobility Options. - options: vector of ip6_option; -}; - -## Values extracted from an IPv6 Mobility Binding Error message. -## -## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg -type ip6_mobility_be: record { - ## Status. - status: count; - ## Home Address. - hoa: addr; - ## Mobility Options. - options: vector of ip6_option; -}; - -## Values extracted from an IPv6 Mobility header's message data. -## -## .. 
bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr -type ip6_mobility_msg: record { - ## The type of message from the header's MH Type field. - id: count; - ## Binding Refresh Request. - brr: ip6_mobility_brr &optional; - ## Home Test Init. - hoti: ip6_mobility_hoti &optional; - ## Care-of Test Init. - coti: ip6_mobility_coti &optional; - ## Home Test. - hot: ip6_mobility_hot &optional; - ## Care-of Test. - cot: ip6_mobility_cot &optional; - ## Binding Update. - bu: ip6_mobility_bu &optional; - ## Binding Acknowledgement. - back: ip6_mobility_back &optional; - ## Binding Error. - be: ip6_mobility_be &optional; -}; - -## Values extracted from an IPv6 Mobility header. -## -## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr -type ip6_mobility_hdr: record { - ## Protocol number of the next header (RFC 1700 et seq., IANA assigned - ## number), e.g. :bro:id:`IPPROTO_ICMP`. - nxt: count; - ## Length of header in 8-octet units, excluding first unit. - len: count; - ## Mobility header type used to identify header's the message. - mh_type: count; - ## Reserved field. - rsv: count; - ## Mobility header checksum. - chksum: count; - ## Mobility header message - msg: ip6_mobility_msg; -}; - -## A general container for a more specific IPv6 extension header. -## -## .. bro:see:: pkt_hdr ip4_hdr ip6_hopopts ip6_dstopts ip6_routing ip6_fragment -## ip6_ah ip6_esp -type ip6_ext_hdr: record { - ## The RFC 1700 et seq. IANA assigned number identifying the type of - ## the extension header. - id: count; - ## Hop-by-hop option extension header. - hopopts: ip6_hopopts &optional; - ## Destination option extension header. - dstopts: ip6_dstopts &optional; - ## Routing extension header. - routing: ip6_routing &optional; - ## Fragment header. - fragment: ip6_fragment &optional; - ## Authentication extension header. - ah: ip6_ah &optional; - ## Encapsulating security payload header. - esp: ip6_esp &optional; - ## Mobility header. - mobility: ip6_mobility_hdr &optional; -}; - -## A type alias for a vector of IPv6 extension headers. -type ip6_ext_hdr_chain: vector of ip6_ext_hdr; - -## Values extracted from an IPv6 header. -## -## .. bro:see:: pkt_hdr ip4_hdr ip6_ext_hdr ip6_hopopts ip6_dstopts -## ip6_routing ip6_fragment ip6_ah ip6_esp -type ip6_hdr: record { - class: count; ##< Traffic class. - flow: count; ##< Flow label. - len: count; ##< Payload length. - nxt: count; ##< Protocol number of the next header - ##< (RFC 1700 et seq., IANA assigned number) - ##< e.g. :bro:id:`IPPROTO_ICMP`. - hlim: count; ##< Hop limit. - src: addr; ##< Source address. - dst: addr; ##< Destination address. - exts: ip6_ext_hdr_chain; ##< Extension header chain. -}; - -## Values extracted from an IPv4 header. -## -## .. bro:see:: pkt_hdr ip6_hdr discarder_check_ip -type ip4_hdr: record { - hl: count; ##< Header length in bytes. - tos: count; ##< Type of service. - len: count; ##< Total length. - id: count; ##< Identification. - ttl: count; ##< Time to live. - p: count; ##< Protocol. - src: addr; ##< Source address. - dst: addr; ##< Destination address. -}; - -# TCP flags. -# -# todo:: these should go into an enum to make them autodoc'able -const TH_FIN = 1; ##< FIN. -const TH_SYN = 2; ##< SYN. -const TH_RST = 4; ##< RST. -const TH_PUSH = 8; ##< PUSH. -const TH_ACK = 16; ##< ACK. -const TH_URG = 32; ##< URG. -const TH_FLAGS = 63; ##< Mask combining all flags. - -## Values extracted from a TCP header. -## -## .. bro:see:: pkt_hdr discarder_check_tcp -type tcp_hdr: record { - sport: port; ##< source port. 
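A sketch of walking the ip6_ext_hdr_chain defined above; the helper is hypothetical and only assumes the ip6_hdr/ip6_ext_hdr fields and the IPPROTO_FRAGMENT constant shown in this file:

    # Hypothetical helper: true if an IPv6 header carries a Fragment
    # extension header somewhere in its extension-header chain.
    function has_ip6_fragment(h: ip6_hdr): bool
        {
        for ( i in h$exts )
            {
            if ( h$exts[i]$id == IPPROTO_FRAGMENT )
                return T;
            }

        return F;
        }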
- dport: port; ##< destination port - seq: count; ##< sequence number - ack: count; ##< acknowledgement number - hl: count; ##< header length (in bytes) - dl: count; ##< data length (xxx: not in original tcphdr!) - flags: count; ##< flags - win: count; ##< window -}; - -## Values extracted from a UDP header. -## -## .. bro:see:: pkt_hdr discarder_check_udp -type udp_hdr: record { - sport: port; ##< source port - dport: port; ##< destination port - ulen: count; ##< udp length -}; - -## Values extracted from an ICMP header. -## -## .. bro:see:: pkt_hdr discarder_check_icmp -type icmp_hdr: record { - icmp_type: count; ##< type of message -}; - -## A packet header, consisting of an IP header and transport-layer header. -## -## .. bro:see:: new_packet -type pkt_hdr: record { - ip: ip4_hdr &optional; ##< The IPv4 header if an IPv4 packet. - ip6: ip6_hdr &optional; ##< The IPv6 header if an IPv6 packet. - tcp: tcp_hdr &optional; ##< The TCP header if a TCP packet. - udp: udp_hdr &optional; ##< The UDP header if a UDP packet. - icmp: icmp_hdr &optional; ##< The ICMP header if an ICMP packet. -}; - -## Values extracted from the layer 2 header. -## -## .. bro:see:: pkt_hdr -type l2_hdr: record { - encap: link_encap; ##< L2 link encapsulation. - len: count; ##< Total frame length on wire. - cap_len: count; ##< Captured length. - src: string &optional; ##< L2 source (if Ethernet). - dst: string &optional; ##< L2 destination (if Ethernet). - vlan: count &optional; ##< Outermost VLAN tag if any (and Ethernet). - inner_vlan: count &optional; ##< Innermost VLAN tag if any (and Ethernet). - eth_type: count &optional; ##< Innermost Ethertype (if Ethernet). - proto: layer3_proto; ##< L3 protocol. -}; - -## A raw packet header, consisting of L2 header and everything in -## :bro:see:`pkt_hdr`. . -## -## .. bro:see:: raw_packet pkt_hdr -type raw_pkt_hdr: record { - l2: l2_hdr; ##< The layer 2 header. - ip: ip4_hdr &optional; ##< The IPv4 header if an IPv4 packet. - ip6: ip6_hdr &optional; ##< The IPv6 header if an IPv6 packet. - tcp: tcp_hdr &optional; ##< The TCP header if a TCP packet. - udp: udp_hdr &optional; ##< The UDP header if a UDP packet. - icmp: icmp_hdr &optional; ##< The ICMP header if an ICMP packet. -}; - -## A Teredo origin indication header. See :rfc:`4380` for more information -## about the Teredo protocol. -## -## .. bro:see:: teredo_bubble teredo_origin_indication teredo_authentication -## teredo_hdr -type teredo_auth: record { - id: string; ##< Teredo client identifier. - value: string; ##< HMAC-SHA1 over shared secret key between client and - ##< server, nonce, confirmation byte, origin indication - ##< (if present), and the IPv6 packet. - nonce: count; ##< Nonce chosen by Teredo client to be repeated by - ##< Teredo server. - confirm: count; ##< Confirmation byte to be set to 0 by Teredo client - ##< and non-zero by server if client needs new key. -}; - -## A Teredo authentication header. See :rfc:`4380` for more information -## about the Teredo protocol. -## -## .. bro:see:: teredo_bubble teredo_origin_indication teredo_authentication -## teredo_hdr -type teredo_origin: record { - p: port; ##< Unobfuscated UDP port of Teredo client. - a: addr; ##< Unobfuscated IPv4 address of Teredo client. -}; - -## A Teredo packet header. See :rfc:`4380` for more information about the -## Teredo protocol. -## -## .. bro:see:: teredo_bubble teredo_origin_indication teredo_authentication -type teredo_hdr: record { - auth: teredo_auth &optional; ##< Teredo authentication header. 
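The pkt_hdr record above is what the new_packet event referenced in its documentation hands to script land. A minimal sketch; note that merely defining such a handler is costly, since it forces the event to be raised for every packet:

    event new_packet(c: connection, p: pkt_hdr)
        {
        # The transport-layer fields are optional; check before use.
        if ( p?$tcp )
            print fmt("TCP %s -> %s on %s, flags=%d",
                      p$tcp$sport, p$tcp$dport, c$uid, p$tcp$flags);
        }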
- origin: teredo_origin &optional; ##< Teredo origin indication header. - hdr: pkt_hdr; ##< IPv6 and transport protocol headers. -}; - -## A GTPv1 (GPRS Tunneling Protocol) header. -type gtpv1_hdr: record { - ## The 3-bit version field, which for GTPv1 should be 1. - version: count; - ## Protocol Type value differentiates GTP (value 1) from GTP' (value 0). - pt_flag: bool; - ## Reserved field, should be 0. - rsv: bool; - ## Extension Header flag. When 0, the *next_type* field may or may not - ## be present, but shouldn't be meaningful. When 1, *next_type* is - ## present and meaningful. - e_flag: bool; - ## Sequence Number flag. When 0, the *seq* field may or may not - ## be present, but shouldn't be meaningful. When 1, *seq* is - ## present and meaningful. - s_flag: bool; - ## N-PDU flag. When 0, the *n_pdu* field may or may not - ## be present, but shouldn't be meaningful. When 1, *n_pdu* is - ## present and meaningful. - pn_flag: bool; - ## Message Type. A value of 255 indicates user-plane data is encapsulated. - msg_type: count; - ## Length of the GTP packet payload (the rest of the packet following - ## the mandatory 8-byte GTP header). - length: count; - ## Tunnel Endpoint Identifier. Unambiguously identifies a tunnel - ## endpoint in receiving GTP-U or GTP-C protocol entity. - teid: count; - ## Sequence Number. Set if any *e_flag*, *s_flag*, or *pn_flag* field - ## is set. - seq: count &optional; - ## N-PDU Number. Set if any *e_flag*, *s_flag*, or *pn_flag* field is set. - n_pdu: count &optional; - ## Next Extension Header Type. Set if any *e_flag*, *s_flag*, or - ## *pn_flag* field is set. - next_type: count &optional; -}; - -type gtp_cause: count; -type gtp_imsi: count; -type gtp_teardown_ind: bool; -type gtp_nsapi: count; -type gtp_recovery: count; -type gtp_teid1: count; -type gtp_teid_control_plane: count; -type gtp_charging_id: count; -type gtp_charging_gateway_addr: addr; -type gtp_trace_reference: count; -type gtp_trace_type: count; -type gtp_tft: string; -type gtp_trigger_id: string; -type gtp_omc_id: string; -type gtp_reordering_required: bool; -type gtp_proto_config_options: string; -type gtp_charging_characteristics: count; -type gtp_selection_mode: count; -type gtp_access_point_name: string; -type gtp_msisdn: string; - -type gtp_gsn_addr: record { - ## If the GSN Address information element has length 4 or 16, then this - ## field is set to be the informational element's value interpreted as - ## an IPv4 or IPv6 address, respectively. - ip: addr &optional; - ## This field is set if it's not an IPv4 or IPv6 address. - other: string &optional; -}; - -type gtp_end_user_addr: record { - pdp_type_org: count; - pdp_type_num: count; - ## Set if the End User Address information element is IPv4/IPv6. - pdp_ip: addr &optional; - ## Set if the End User Address information element isn't IPv4/IPv6. 
- pdp_other_addr: string &optional; -}; - -type gtp_rai: record { - mcc: count; - mnc: count; - lac: count; - rac: count; -}; - -type gtp_qos_profile: record { - priority: count; - data: string; -}; - -type gtp_private_extension: record { - id: count; - value: string; -}; - -type gtp_create_pdp_ctx_request_elements: record { - imsi: gtp_imsi &optional; - rai: gtp_rai &optional; - recovery: gtp_recovery &optional; - select_mode: gtp_selection_mode &optional; - data1: gtp_teid1; - cp: gtp_teid_control_plane &optional; - nsapi: gtp_nsapi; - linked_nsapi: gtp_nsapi &optional; - charge_character: gtp_charging_characteristics &optional; - trace_ref: gtp_trace_reference &optional; - trace_type: gtp_trace_type &optional; - end_user_addr: gtp_end_user_addr &optional; - ap_name: gtp_access_point_name &optional; - opts: gtp_proto_config_options &optional; - signal_addr: gtp_gsn_addr; - user_addr: gtp_gsn_addr; - msisdn: gtp_msisdn &optional; - qos_prof: gtp_qos_profile; - tft: gtp_tft &optional; - trigger_id: gtp_trigger_id &optional; - omc_id: gtp_omc_id &optional; - ext: gtp_private_extension &optional; -}; - -type gtp_create_pdp_ctx_response_elements: record { - cause: gtp_cause; - reorder_req: gtp_reordering_required &optional; - recovery: gtp_recovery &optional; - data1: gtp_teid1 &optional; - cp: gtp_teid_control_plane &optional; - charging_id: gtp_charging_id &optional; - end_user_addr: gtp_end_user_addr &optional; - opts: gtp_proto_config_options &optional; - cp_addr: gtp_gsn_addr &optional; - user_addr: gtp_gsn_addr &optional; - qos_prof: gtp_qos_profile &optional; - charge_gateway: gtp_charging_gateway_addr &optional; - ext: gtp_private_extension &optional; -}; - -type gtp_update_pdp_ctx_request_elements: record { - imsi: gtp_imsi &optional; - rai: gtp_rai &optional; - recovery: gtp_recovery &optional; - data1: gtp_teid1; - cp: gtp_teid_control_plane &optional; - nsapi: gtp_nsapi; - trace_ref: gtp_trace_reference &optional; - trace_type: gtp_trace_type &optional; - cp_addr: gtp_gsn_addr; - user_addr: gtp_gsn_addr; - qos_prof: gtp_qos_profile; - tft: gtp_tft &optional; - trigger_id: gtp_trigger_id &optional; - omc_id: gtp_omc_id &optional; - ext: gtp_private_extension &optional; - end_user_addr: gtp_end_user_addr &optional; -}; - -type gtp_update_pdp_ctx_response_elements: record { - cause: gtp_cause; - recovery: gtp_recovery &optional; - data1: gtp_teid1 &optional; - cp: gtp_teid_control_plane &optional; - charging_id: gtp_charging_id &optional; - cp_addr: gtp_gsn_addr &optional; - user_addr: gtp_gsn_addr &optional; - qos_prof: gtp_qos_profile &optional; - charge_gateway: gtp_charging_gateway_addr &optional; - ext: gtp_private_extension &optional; -}; - -type gtp_delete_pdp_ctx_request_elements: record { - teardown_ind: gtp_teardown_ind &optional; - nsapi: gtp_nsapi; - ext: gtp_private_extension &optional; -}; - -type gtp_delete_pdp_ctx_response_elements: record { - cause: gtp_cause; - ext: gtp_private_extension &optional; -}; - -# Prototypes of Bro built-in functions. -@load base/bif/bro.bif -@load base/bif/stats.bif -@load base/bif/reporter.bif -@load base/bif/strings.bif -@load base/bif/option.bif - -## Deprecated. This is superseded by the new logging framework. -global log_file_name: function(tag: string): string &redef; - -## Deprecated. This is superseded by the new logging framework. -global open_log_file: function(tag: string): file &redef; - -## Specifies a directory for Bro to store its persistent state. 
All globals can -## be declared persistent via the :bro:attr:`&persistent` attribute. -const state_dir = ".state" &redef; - -## Length of the delays inserted when storing state incrementally. To avoid -## dropping packets when serializing larger volumes of persistent state to -## disk, Bro interleaves the operation with continued packet processing. -const state_write_delay = 0.01 secs &redef; - -global done_with_network = F; -event net_done(t: time) { done_with_network = T; } - -function log_file_name(tag: string): string - { - local suffix = getenv("BRO_LOG_SUFFIX") == "" ? "log" : getenv("BRO_LOG_SUFFIX"); - return fmt("%s.%s", tag, suffix); - } - -function open_log_file(tag: string): file - { - return open(log_file_name(tag)); - } - -## Internal function. -function add_interface(iold: string, inew: string): string - { - if ( iold == "" ) - return inew; - else - return fmt("%s %s", iold, inew); - } - -## Network interfaces to listen on. Use ``redef interfaces += "eth0"`` to -## extend. -global interfaces = "" &add_func = add_interface; - -## Internal function. -function add_signature_file(sold: string, snew: string): string - { - if ( sold == "" ) - return snew; - else - return cat(sold, " ", snew); - } - -## Signature files to read. Use ``redef signature_files += "foo.sig"`` to -## extend. Signature files added this way will be searched relative to -## ``BROPATH``. Using the ``@load-sigs`` directive instead is preferred -## since that can search paths relative to the current script. -global signature_files = "" &add_func = add_signature_file; - -## ``p0f`` fingerprint file to use. Will be searched relative to ``BROPATH``. -const passive_fingerprint_file = "base/misc/p0f.fp" &redef; - -## Definition of "secondary filters". A secondary filter is a BPF filter given -## as index in this table. For each such filter, the corresponding event is -## raised for all matching packets. -global secondary_filters: table[string] of event(filter: string, pkt: pkt_hdr) - &redef; - -## Maximum length of payload passed to discarder functions. -## -## .. bro:see:: discarder_check_tcp discarder_check_udp discarder_check_icmp -## discarder_check_ip -global discarder_maxlen = 128 &redef; - -## Function for skipping packets based on their IP header. If defined, this -## function will be called for all IP packets before Bro performs any further -## analysis. If the function signals to discard a packet, no further processing -## will be performed on it. -## -## p: The IP header of the considered packet. -## -## Returns: True if the packet should not be analyzed any further. -## -## .. bro:see:: discarder_check_tcp discarder_check_udp discarder_check_icmp -## discarder_maxlen -## -## .. note:: This is very low-level functionality and potentially expensive. -## Avoid using it. -global discarder_check_ip: function(p: pkt_hdr): bool; - -## Function for skipping packets based on their TCP header. If defined, this -## function will be called for all TCP packets before Bro performs any further -## analysis. If the function signals to discard a packet, no further processing -## will be performed on it. -## -## p: The IP and TCP headers of the considered packet. -## -## d: Up to :bro:see:`discarder_maxlen` bytes of the TCP payload. -## -## Returns: True if the packet should not be analyzed any further. -## -## .. bro:see:: discarder_check_ip discarder_check_udp discarder_check_icmp -## discarder_maxlen -## -## .. note:: This is very low-level functionality and potentially expensive. -## Avoid using it. 
-global discarder_check_tcp: function(p: pkt_hdr, d: string): bool; - -## Function for skipping packets based on their UDP header. If defined, this -## function will be called for all UDP packets before Bro performs any further -## analysis. If the function signals to discard a packet, no further processing -## will be performed on it. -## -## p: The IP and UDP headers of the considered packet. -## -## d: Up to :bro:see:`discarder_maxlen` bytes of the UDP payload. -## -## Returns: True if the packet should not be analyzed any further. -## -## .. bro:see:: discarder_check_ip discarder_check_tcp discarder_check_icmp -## discarder_maxlen -## -## .. note:: This is very low-level functionality and potentially expensive. -## Avoid using it. -global discarder_check_udp: function(p: pkt_hdr, d: string): bool; - -## Function for skipping packets based on their ICMP header. If defined, this -## function will be called for all ICMP packets before Bro performs any further -## analysis. If the function signals to discard a packet, no further processing -## will be performed on it. -## -## p: The IP and ICMP headers of the considered packet. -## -## Returns: True if the packet should not be analyzed any further. -## -## .. bro:see:: discarder_check_ip discarder_check_tcp discarder_check_udp -## discarder_maxlen -## -## .. note:: This is very low-level functionality and potentially expensive. -## Avoid using it. -global discarder_check_icmp: function(p: pkt_hdr): bool; - -## Bro's watchdog interval. -const watchdog_interval = 10 sec &redef; - -## The maximum number of timers to expire after processing each new -## packet. The value trades off spreading out the timer expiration load -## with possibly having to hold state longer. A value of 0 means -## "process all expired timers with each new packet". -const max_timer_expires = 300 &redef; - -## With a similar trade-off, this gives the number of remote events -## to process in a batch before interleaving other activity. -const max_remote_events_processed = 10 &redef; - -# These need to match the definitions in Login.h. -# -# .. bro:see:: get_login_state -# -# todo:: use enum to make them autodoc'able -const LOGIN_STATE_AUTHENTICATE = 0; # Trying to authenticate. -const LOGIN_STATE_LOGGED_IN = 1; # Successful authentication. -const LOGIN_STATE_SKIP = 2; # Skip any further processing. -const LOGIN_STATE_CONFUSED = 3; # We're confused. - -# It would be nice to replace these function definitions with some -# form of parameterized types. - -## Returns minimum of two ``double`` values. -## -## a: First value. -## b: Second value. -## -## Returns: The minimum of *a* and *b*. -function min_double(a: double, b: double): double { return a < b ? a : b; } - -## Returns maximum of two ``double`` values. -## -## a: First value. -## b: Second value. -## -## Returns: The maximum of *a* and *b*. -function max_double(a: double, b: double): double { return a > b ? a : b; } - -## Returns minimum of two ``interval`` values. -## -## a: First value. -## b: Second value. -## -## Returns: The minimum of *a* and *b*. -function min_interval(a: interval, b: interval): interval { return a < b ? a : b; } - -## Returns maximum of two ``interval`` values. -## -## a: First value. -## b: Second value. -## -## Returns: The maximum of *a* and *b*. -function max_interval(a: interval, b: interval): interval { return a > b ? a : b; } - -## Returns minimum of two ``count`` values. -## -## a: First value. -## b: Second value. -## -## Returns: The minimum of *a* and *b*. 
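The watchdog and timer tunables above are all &redef-able, so a local policy script could shift the trade-off roughly as follows (values purely illustrative):

    redef watchdog_interval = 30 sec;
    # Expire every pending timer with each new packet instead of capping
    # the per-packet expiration work at 300 timers.
    redef max_timer_expires = 0;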
-function min_count(a: count, b: count): count { return a < b ? a : b; } - -## Returns maximum of two ``count`` values. -## -## a: First value. -## b: Second value. -## -## Returns: The maximum of *a* and *b*. -function max_count(a: count, b: count): count { return a > b ? a : b; } - -## TODO. -global skip_authentication: set[string] &redef; - -## TODO. -global direct_login_prompts: set[string] &redef; - -## TODO. -global login_prompts: set[string] &redef; - -## TODO. -global login_non_failure_msgs: set[string] &redef; - -## TODO. -global login_failure_msgs: set[string] &redef; - -## TODO. -global login_success_msgs: set[string] &redef; - -## TODO. -global login_timeouts: set[string] &redef; - -## A MIME header key/value pair. -## -## .. bro:see:: mime_header_list http_all_headers mime_all_headers mime_one_header -type mime_header_rec: record { - name: string; ##< The header name. - value: string; ##< The header value. -}; - -## A list of MIME headers. -## -## .. bro:see:: mime_header_rec http_all_headers mime_all_headers -type mime_header_list: table[count] of mime_header_rec; - -## The length of MIME data segments delivered to handlers of -## :bro:see:`mime_segment_data`. -## -## .. bro:see:: mime_segment_data mime_segment_overlap_length -global mime_segment_length = 1024 &redef; - -## The number of bytes of overlap between successive segments passed to -## :bro:see:`mime_segment_data`. -global mime_segment_overlap_length = 0 &redef; - -## An RPC portmapper mapping. -## -## .. bro:see:: pm_mappings -type pm_mapping: record { - program: count; ##< The RPC program. - version: count; ##< The program version. - p: port; ##< The port. -}; - -## Table of RPC portmapper mappings. -## -## .. bro:see:: pm_request_dump -type pm_mappings: table[count] of pm_mapping; - -## An RPC portmapper request. -## -## .. bro:see:: pm_attempt_getport pm_request_getport -type pm_port_request: record { - program: count; ##< The RPC program. - version: count; ##< The program version. - is_tcp: bool; ##< True if using TCP. -}; - -## An RPC portmapper *callit* request. -## -## .. bro:see:: pm_attempt_callit pm_request_callit -type pm_callit_request: record { - program: count; ##< The RPC program. - version: count; ##< The program version. - proc: count; ##< The procedure being called. - arg_size: count; ##< The size of the argument. -}; - -# See const.bif -# const RPC_SUCCESS = 0; -# const RPC_PROG_UNAVAIL = 1; -# const RPC_PROG_MISMATCH = 2; -# const RPC_PROC_UNAVAIL = 3; -# const RPC_GARBAGE_ARGS = 4; -# const RPC_SYSTEM_ERR = 5; -# const RPC_TIMEOUT = 6; -# const RPC_AUTH_ERROR = 7; -# const RPC_UNKNOWN_ERROR = 8; - -## Mapping of numerical RPC status codes to readable messages. -## -## .. bro:see:: pm_attempt_callit pm_attempt_dump pm_attempt_getport -## pm_attempt_null pm_attempt_set pm_attempt_unset rpc_dialogue rpc_reply -const RPC_status = { - [RPC_SUCCESS] = "ok", - [RPC_PROG_UNAVAIL] = "prog unavail", - [RPC_PROG_MISMATCH] = "mismatch", - [RPC_PROC_UNAVAIL] = "proc unavail", - [RPC_GARBAGE_ARGS] = "garbage args", - [RPC_SYSTEM_ERR] = "system err", - [RPC_TIMEOUT] = "timeout", - [RPC_AUTH_ERROR] = "auth error", - [RPC_UNKNOWN_ERROR] = "unknown" -}; - -module NFS3; - -export { - ## If true, :bro:see:`nfs_proc_read` and :bro:see:`nfs_proc_write` - ## events return the file data that has been read/written. - ## - ## .. bro:see:: NFS3::return_data_max NFS3::return_data_first_only - const return_data = F &redef; - - ## If :bro:id:`NFS3::return_data` is true, how much data should be - ## returned at most. 
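Together with the two companion options declared just below, a capture-heavy configuration might look roughly like this (sizes illustrative):

    redef NFS3::return_data = T;
    redef NFS3::return_data_max = 4096;
    redef NFS3::return_data_first_only = F;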
- const return_data_max = 512 &redef; - - ## If :bro:id:`NFS3::return_data` is true, whether to *only* return data - ## if the read or write offset is 0, i.e., only return data for the - ## beginning of the file. - const return_data_first_only = T &redef; - - ## Record summarizing the general results and status of NFSv3 - ## request/reply pairs. - ## - ## Note that when *rpc_stat* or *nfs_stat* indicates not successful, - ## the reply record passed to the corresponding event will be empty and - ## contain uninitialized fields, so don't use it. Also note that time - ## and duration values might not be fully accurate. For TCP, we record - ## times when the corresponding chunk of data is delivered to the - ## analyzer. Depending on the reassembler, this might be well after the - ## first packet of the request was received. - ## - ## .. bro:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup - ## nfs_proc_mkdir nfs_proc_not_implemented nfs_proc_null - ## nfs_proc_read nfs_proc_readdir nfs_proc_readlink nfs_proc_remove - ## nfs_proc_rmdir nfs_proc_write nfs_reply_status - type info_t: record { - ## The RPC status. - rpc_stat: rpc_status; - ## The NFS status. - nfs_stat: status_t; - ## The start time of the request. - req_start: time; - ## The duration of the request. - req_dur: interval; - ## The length in bytes of the request. - req_len: count; - ## The start time of the reply. - rep_start: time; - ## The duration of the reply. - rep_dur: interval; - ## The length in bytes of the reply. - rep_len: count; - ## The user id of the reply. - rpc_uid: count; - ## The group id of the reply. - rpc_gid: count; - ## The stamp of the reply. - rpc_stamp: count; - ## The machine name of the reply. - rpc_machine_name: string; - ## The auxiliary ids of the reply. - rpc_auxgids: index_vec; - }; - - ## NFS file attributes. Field names are based on RFC 1813. - ## - ## .. bro:see:: nfs_proc_sattr - type sattr_t: record { - mode: count &optional; ##< Mode - uid: count &optional; ##< User ID. - gid: count &optional; ##< Group ID. - size: count &optional; ##< Size. - atime: time_how_t &optional; ##< Time of last access. - mtime: time_how_t &optional; ##< Time of last modification. - }; - - ## NFS file attributes. Field names are based on RFC 1813. - ## - ## .. bro:see:: nfs_proc_getattr - type fattr_t: record { - ftype: file_type_t; ##< File type. - mode: count; ##< Mode - nlink: count; ##< Number of links. - uid: count; ##< User ID. - gid: count; ##< Group ID. - size: count; ##< Size. - used: count; ##< TODO. - rdev1: count; ##< TODO. - rdev2: count; ##< TODO. - fsid: count; ##< TODO. - fileid: count; ##< TODO. - atime: time; ##< Time of last access. - mtime: time; ##< Time of last modification. - ctime: time; ##< Time of creation. - }; - - ## NFS symlinkdata attributes. Field names are based on RFC 1813 - ## - ## .. bro:see:: nfs_proc_symlink - type symlinkdata_t: record { - symlink_attributes: sattr_t; ##< The initial attributes for the symbolic link - nfspath: string &optional; ##< The string containing the symbolic link data. - }; - - ## NFS *readdir* arguments. - ## - ## .. bro:see:: nfs_proc_readdir - type diropargs_t : record { - dirfh: string; ##< The file handle of the directory. - fname: string; ##< The name of the file we are interested in. - }; - - ## NFS *rename* arguments. - ## - ## .. bro:see:: nfs_proc_rename - type renameopargs_t : record { - src_dirfh : string; - src_fname : string; - dst_dirfh : string; - dst_fname : string; - }; - - ## NFS *symlink* arguments. - ## - ## .. 
bro:see:: nfs_proc_symlink - type symlinkargs_t: record { - link : diropargs_t; ##< The location of the link to be created. - symlinkdata: symlinkdata_t; ##< The symbolic link to be created. - }; - - ## NFS *link* arguments. - ## - ## .. bro:see:: nfs_proc_link - type linkargs_t: record { - fh : string; ##< The file handle for the existing file system object. - link : diropargs_t; ##< The location of the link to be created. - }; - - ## NFS *sattr* arguments. - ## - ## .. bro:see:: nfs_proc_sattr - type sattrargs_t: record { - fh : string; ##< The file handle for the existing file system object. - new_attributes: sattr_t; ##< The new attributes for the file. - }; - - ## NFS lookup reply. If the lookup failed, *dir_attr* may be set. If the - ## lookup succeeded, *fh* is always set and *obj_attr* and *dir_attr* - ## may be set. - ## - ## .. bro:see:: nfs_proc_lookup - type lookup_reply_t: record { - fh: string &optional; ##< File handle of object looked up. - obj_attr: fattr_t &optional; ##< Optional attributes associated w/ file - dir_attr: fattr_t &optional; ##< Optional attributes associated w/ dir. - }; - - ## NFS *read* arguments. - ## - ## .. bro:see:: nfs_proc_read - type readargs_t: record { - fh: string; ##< File handle to read from. - offset: count; ##< Offset in file. - size: count; ##< Number of bytes to read. - }; - - ## NFS *read* reply. If the lookup fails, *attr* may be set. If the - ## lookup succeeds, *attr* may be set and all other fields are set. - type read_reply_t: record { - attr: fattr_t &optional; ##< Attributes. - size: count &optional; ##< Number of bytes read. - eof: bool &optional; ##< Sid the read end at EOF. - data: string &optional; ##< The actual data; not yet implemented. - }; - - ## NFS *readline* reply. If the request fails, *attr* may be set. If the - ## request succeeds, *attr* may be set and all other fields are set. - ## - ## .. bro:see:: nfs_proc_readlink - type readlink_reply_t: record { - attr: fattr_t &optional; ##< Attributes. - nfspath: string &optional; ##< Contents of the symlink; in general a pathname as text. - }; - - ## NFS *write* arguments. - ## - ## .. bro:see:: nfs_proc_write - type writeargs_t: record { - fh: string; ##< File handle to write to. - offset: count; ##< Offset in file. - size: count; ##< Number of bytes to write. - stable: stable_how_t; ##< How and when data is commited. - data: string &optional; ##< The actual data; not implemented yet. - }; - - ## NFS *wcc* attributes. - ## - ## .. bro:see:: NFS3::write_reply_t - type wcc_attr_t: record { - size: count; ##< The size. - atime: time; ##< Access time. - mtime: time; ##< Modification time. - }; - - ## NFS *link* reply. - ## - ## .. bro:see:: nfs_proc_link - type link_reply_t: record { - post_attr: fattr_t &optional; ##< Optional post-operation attributes of the file system object identified by file - preattr: wcc_attr_t &optional; ##< Optional attributes associated w/ file. - postattr: fattr_t &optional; ##< Optional attributes associated w/ file. - }; - - ## NFS *sattr* reply. If the request fails, *pre|post* attr may be set. - ## If the request succeeds, *pre|post* attr are set. - ## - type sattr_reply_t: record { - dir_pre_attr: wcc_attr_t &optional; ##< Optional attributes associated w/ dir. - dir_post_attr: fattr_t &optional; ##< Optional attributes associated w/ dir. - }; - - ## NFS *write* reply. If the request fails, *pre|post* attr may be set. - ## If the request succeeds, *pre|post* attr may be set and all other - ## fields are set. - ## - ## .. 
bro:see:: nfs_proc_write - type write_reply_t: record { - preattr: wcc_attr_t &optional; ##< Pre operation attributes. - postattr: fattr_t &optional; ##< Post operation attributes. - size: count &optional; ##< Size. - commited: stable_how_t &optional; ##< TODO. - verf: count &optional; ##< Write verifier cookie. - }; - - ## NFS reply for *create*, *mkdir*, and *symlink*. If the proc - ## failed, *dir_\*_attr* may be set. If the proc succeeded, *fh* and the - ## *attr*'s may be set. Note: no guarantee that *fh* is set after - ## success. - ## - ## .. bro:see:: nfs_proc_create nfs_proc_mkdir - type newobj_reply_t: record { - fh: string &optional; ##< File handle of object created. - obj_attr: fattr_t &optional; ##< Optional attributes associated w/ new object. - dir_pre_attr: wcc_attr_t &optional; ##< Optional attributes associated w/ dir. - dir_post_attr: fattr_t &optional; ##< Optional attributes associated w/ dir. - }; - - ## NFS reply for *remove*, *rmdir*. Corresponds to *wcc_data* in the spec. - ## - ## .. bro:see:: nfs_proc_remove nfs_proc_rmdir - type delobj_reply_t: record { - dir_pre_attr: wcc_attr_t &optional; ##< Optional attributes associated w/ dir. - dir_post_attr: fattr_t &optional; ##< Optional attributes associated w/ dir. - }; - - ## NFS reply for *rename*. Corresponds to *wcc_data* in the spec. - ## - ## .. bro:see:: nfs_proc_rename - type renameobj_reply_t: record { - src_dir_pre_attr: wcc_attr_t; - src_dir_post_attr: fattr_t; - dst_dir_pre_attr: wcc_attr_t; - dst_dir_post_attr: fattr_t; - }; - - ## NFS *readdir* arguments. Used for both *readdir* and *readdirplus*. - ## - ## .. bro:see:: nfs_proc_readdir - type readdirargs_t: record { - isplus: bool; ##< Is this a readdirplus request? - dirfh: string; ##< The directory filehandle. - cookie: count; ##< Cookie / pos in dir; 0 for first call. - cookieverf: count; ##< The cookie verifier. - dircount: count; ##< "count" field for readdir; maxcount otherwise (in bytes). - maxcount: count &optional; ##< Only used for readdirplus. in bytes. - }; - - ## NFS *direntry*. *fh* and *attr* are used for *readdirplus*. However, - ## even for *readdirplus* they may not be filled out. - ## - ## .. bro:see:: NFS3::direntry_vec_t NFS3::readdir_reply_t - type direntry_t: record { - fileid: count; ##< E.g., inode number. - fname: string; ##< Filename. - cookie: count; ##< Cookie value. - attr: fattr_t &optional; ##< *readdirplus*: the *fh* attributes for the entry. - fh: string &optional; ##< *readdirplus*: the *fh* for the entry - }; - - ## Vector of NFS *direntry*. - ## - ## .. bro:see:: NFS3::readdir_reply_t - type direntry_vec_t: vector of direntry_t; - - ## NFS *readdir* reply. Used for *readdir* and *readdirplus*. If an is - ## returned, *dir_attr* might be set. On success, *dir_attr* may be set, - ## all others must be set. - type readdir_reply_t: record { - isplus: bool; ##< True if the reply for a *readdirplus* request. - dir_attr: fattr_t &optional; ##< Directory attributes. - cookieverf: count &optional; ##< TODO. - entries: direntry_vec_t &optional; ##< Returned directory entries. - eof: bool; ##< If true, no more entries in directory. - }; - - ## NFS *fsstat*. - type fsstat_t: record { - attrs: fattr_t &optional; ##< Attributes. - tbytes: double; ##< TODO. - fbytes: double; ##< TODO. - abytes: double; ##< TODO. - tfiles: double; ##< TODO. - ffiles: double; ##< TODO. - afiles: double; ##< TODO. - invarsec: interval; ##< TODO. 
- }; -} # end export - - -module MOUNT3; -export { - - ## Record summarizing the general results and status of MOUNT3 - ## request/reply pairs. - ## - ## Note that when *rpc_stat* or *mount_stat* indicates not successful, - ## the reply record passed to the corresponding event will be empty and - ## contain uninitialized fields, so don't use it. Also note that time - # and duration values might not be fully accurate. For TCP, we record - # times when the corresponding chunk of data is delivered to the - # analyzer. Depending on the reassembler, this might be well after the - # first packet of the request was received. - # - # .. bro:see:: mount_proc_mnt mount_proc_dump mount_proc_umnt - # mount_proc_umntall mount_proc_export mount_proc_not_implemented - type info_t: record { - ## The RPC status. - rpc_stat: rpc_status; - ## The MOUNT status. - mnt_stat: status_t; - ## The start time of the request. - req_start: time; - ## The duration of the request. - req_dur: interval; - ## The length in bytes of the request. - req_len: count; - ## The start time of the reply. - rep_start: time; - ## The duration of the reply. - rep_dur: interval; - ## The length in bytes of the reply. - rep_len: count; - ## The user id of the reply. - rpc_uid: count; - ## The group id of the reply. - rpc_gid: count; - ## The stamp of the reply. - rpc_stamp: count; - ## The machine name of the reply. - rpc_machine_name: string; - ## The auxiliary ids of the reply. - rpc_auxgids: index_vec; - }; - - ## MOUNT *mnt* arguments. - ## - ## .. bro:see:: mount_proc_mnt - type dirmntargs_t : record { - dirname: string; ##< Name of directory to mount - }; - - ## MOUNT lookup reply. If the mount failed, *dir_attr* may be set. If the - ## mount succeeded, *fh* is always set. - ## - ## .. bro:see:: mount_proc_mnt - type mnt_reply_t: record { - dirfh: string &optional; ##< Dir handle - auth_flavors: vector of auth_flavor_t &optional; ##< Returned authentication flavors - }; - -} # end export - - -module Threading; - -export { - ## The heartbeat interval used by the threading framework. - ## Changing this should usually not be necessary and will break - ## several tests. - const heartbeat_interval = 1.0 secs &redef; -} - -module SSH; - -export { - ## The client and server each have some preferences for the algorithms used - ## in each direction. - type Algorithm_Prefs: record { - ## The algorithm preferences for client to server communication - client_to_server: vector of string &optional; - ## The algorithm preferences for server to client communication - server_to_client: vector of string &optional; - }; - - ## This record lists the preferences of an SSH endpoint for - ## algorithm selection. During the initial :abbr:`SSH (Secure Shell)` - ## key exchange, each endpoint lists the algorithms - ## that it supports, in order of preference. See - ## :rfc:`4253#section-7.1` for details. - type Capabilities: record { - ## Key exchange algorithms - kex_algorithms: string_vec; - ## The algorithms supported for the server host key - server_host_key_algorithms: string_vec; - ## Symmetric encryption algorithm preferences - encryption_algorithms: Algorithm_Prefs; - ## Symmetric MAC algorithm preferences - mac_algorithms: Algorithm_Prefs; - ## Compression algorithm preferences - compression_algorithms: Algorithm_Prefs; - ## Language preferences - languages: Algorithm_Prefs &optional; - ## Are these the capabilities of the server? - is_server: bool; - }; -} - -module GLOBAL; - -## An NTP message. -## -## .. 
bro:see:: ntp_message -type ntp_msg: record { - id: count; ##< Message ID. - code: count; ##< Message code. - stratum: count; ##< Stratum. - poll: count; ##< Poll. - precision: int; ##< Precision. - distance: interval; ##< Distance. - dispersion: interval; ##< Dispersion. - ref_t: time; ##< Reference time. - originate_t: time; ##< Originating time. - receive_t: time; ##< Receive time. - xmit_t: time; ##< Send time. -}; - - -module NTLM; - -export { - type NTLM::Version: record { - ## The major version of the Windows operating system in use - major : count; - ## The minor version of the Windows operating system in use - minor : count; - ## The build number of the Windows operating system in use - build : count; - ## The current revision of NTLMSSP in use - ntlmssp : count; - }; - - type NTLM::NegotiateFlags: record { - ## If set, requires 56-bit encryption - negotiate_56 : bool; - ## If set, requests an explicit key exchange - negotiate_key_exch : bool; - ## If set, requests 128-bit session key negotiation - negotiate_128 : bool; - ## If set, requests the protocol version number - negotiate_version : bool; - ## If set, indicates that the TargetInfo fields in the - ## CHALLENGE_MESSAGE are populated - negotiate_target_info : bool; - ## If set, requests the usage of the LMOWF function - request_non_nt_session_key : bool; - ## If set, requests and identify level token - negotiate_identify : bool; - ## If set, requests usage of NTLM v2 session security - ## Note: NTML v2 session security is actually NTLM v1 - negotiate_extended_sessionsecurity : bool; - ## If set, TargetName must be a server name - target_type_server : bool; - ## If set, TargetName must be a domain name - target_type_domain : bool; - - ## If set, requests the presence of a signature block - ## on all messages - negotiate_always_sign : bool; - ## If set, the workstation name is provided - negotiate_oem_workstation_supplied : bool; - ## If set, the domain name is provided - negotiate_oem_domain_supplied : bool; - ## If set, the connection should be anonymous - negotiate_anonymous_connection : bool; - ## If set, requests usage of NTLM v1 - negotiate_ntlm : bool; - - ## If set, requests LAN Manager session key computation - negotiate_lm_key : bool; - ## If set, requests connectionless authentication - negotiate_datagram : bool; - ## If set, requests session key negotiation for message - ## confidentiality - negotiate_seal : bool; - ## If set, requests session key negotiation for message - ## signatures - negotiate_sign : bool; - ## If set, the TargetName field is present - request_target : bool; - - ## If set, requests OEM character set encoding - negotiate_oem : bool; - ## If set, requests Unicode character set encoding - negotiate_unicode : bool; - }; - - type NTLM::Negotiate: record { - ## The negotiate flags - flags : NTLM::NegotiateFlags; - ## The domain name of the client, if known - domain_name : string &optional; - ## The machine name of the client, if known - workstation : string &optional; - ## The Windows version information, if supplied - version : NTLM::Version &optional; - }; - - type NTLM::AVs: record { - ## The server's NetBIOS computer name - nb_computer_name : string; - ## The server's NetBIOS domain name - nb_domain_name : string; - ## The FQDN of the computer - dns_computer_name : string &optional; - ## The FQDN of the domain - dns_domain_name : string &optional; - ## The FQDN of the forest - dns_tree_name : string &optional; - - ## Indicates to the client that the account - ## authentication is constrained - 
constrained_auth : bool &optional; - ## The associated timestamp, if present - timestamp : time &optional; - ## Indicates that the client is providing - ## a machine ID created at computer startup to - ## identify the calling machine - single_host_id : count &optional; - - ## The SPN of the target server - target_name : string &optional; - }; - - type NTLM::Challenge: record { - ## The negotiate flags - flags : NTLM::NegotiateFlags; - ## The server authentication realm. If the server is - ## domain-joined, the name of the domain. Otherwise - ## the server name. See flags.target_type_domain - ## and flags.target_type_server - target_name : string &optional; - ## The Windows version information, if supplied - version : NTLM::Version &optional; - ## Attribute-value pairs specified by the server - target_info : NTLM::AVs &optional; - }; - - type NTLM::Authenticate: record { - ## The negotiate flags - flags : NTLM::NegotiateFlags; - ## The domain or computer name hosting the account - domain_name : string &optional; - ## The name of the user to be authenticated. - user_name : string &optional; - ## The name of the computer to which the user was logged on. - workstation : string &optional; - ## The session key - session_key : string &optional; - ## The Windows version information, if supplied - version : NTLM::Version &optional; - }; -} - -module SMB; - -export { - ## MAC times for a file. - ## - ## For more information, see MS-SMB2:2.2.16 - ## - ## .. bro:see:: smb1_nt_create_andx_response smb2_create_response - type SMB::MACTimes: record { - ## The time when data was last written to the file. - modified : time &log; - ## The time when the file was last accessed. - accessed : time &log; - ## The time the file was created. - created : time &log; - ## The time when the file was last modified. - changed : time &log; - } &log; - - ## A set of file names used as named pipes over SMB. This - ## only comes into play as a heuristic to identify named - ## pipes when the drive mapping wasn't seen by Bro. - ## - ## .. bro:see:: smb_pipe_connect_heuristic - const SMB::pipe_filenames: set[string] &redef; -} - -module SMB1; - -export { - ## An SMB1 header. - ## - ## .. 
bro:see:: smb1_message smb1_empty_response smb1_error - ## smb1_check_directory_request smb1_check_directory_response - ## smb1_close_request smb1_create_directory_request - ## smb1_create_directory_response smb1_echo_request - ## smb1_echo_response smb1_negotiate_request - ## smb1_negotiate_response smb1_nt_cancel_request - ## smb1_nt_create_andx_request smb1_nt_create_andx_response - ## smb1_query_information_request smb1_read_andx_request - ## smb1_read_andx_response smb1_session_setup_andx_request - ## smb1_session_setup_andx_response smb1_transaction_request - ## smb1_transaction2_request smb1_trans2_find_first2_request - ## smb1_trans2_query_path_info_request - ## smb1_trans2_get_dfs_referral_request - ## smb1_tree_connect_andx_request smb1_tree_connect_andx_response - ## smb1_tree_disconnect smb1_write_andx_request - ## smb1_write_andx_response - type SMB1::Header : record { - command : count; ##< The command number - status : count; ##< The status code - flags : count; ##< Flag set 1 - flags2 : count; ##< Flag set 2 - tid : count; ##< Tree ID - pid : count; ##< Process ID - uid : count; ##< User ID - mid : count; ##< Multiplex ID - }; - - type SMB1::NegotiateRawMode: record { - ## Read raw supported - read_raw : bool; - ## Write raw supported - write_raw : bool; - }; - - type SMB1::NegotiateCapabilities: record { - ## The server supports SMB_COM_READ_RAW and SMB_COM_WRITE_RAW - raw_mode : bool; - ## The server supports SMB_COM_READ_MPX and SMB_COM_WRITE_MPX - mpx_mode : bool; - ## The server supports unicode strings - unicode : bool; - ## The server supports large files with 64 bit offsets - large_files : bool; - ## The server supports the SMBs particilar to the NT LM 0.12 dialect. Implies nt_find. - nt_smbs : bool; - - ## The server supports remote admin API requests via DCE-RPC - rpc_remote_apis : bool; - ## The server can respond with 32 bit status codes in Status.Status - status32 : bool; - ## The server supports level 2 oplocks - level_2_oplocks : bool; - ## The server supports SMB_COM_LOCK_AND_READ - lock_and_read : bool; - ## Reserved - nt_find : bool; - - ## The server is DFS aware - dfs : bool; - ## The server supports NT information level requests passing through - infolevel_passthru : bool; - ## The server supports large SMB_COM_READ_ANDX (up to 64k) - large_readx : bool; - ## The server supports large SMB_COM_WRITE_ANDX (up to 64k) - large_writex : bool; - ## The server supports CIFS Extensions for UNIX - unix : bool; - - ## The server supports SMB_BULK_READ, SMB_BULK_WRITE - ## Note: No known implementations support this - bulk_transfer : bool; - ## The server supports compressed data transfer. Requires bulk_transfer. - ## Note: No known implementations support this - compressed_data : bool; - ## The server supports extended security exchanges - extended_security : bool; - }; - - type SMB1::NegotiateResponseSecurity: record { - ## This indicates whether the server, as a whole, is operating under - ## Share Level or User Level security. - user_level : bool; - ## This indicates whether or not the server supports Challenge/Response - ## authentication. If the bit is false, then plaintext passwords must - ## be used. - challenge_response: bool; - ## This indicates if the server is capable of performing MAC message - ## signing. Note: Requires NT LM 0.12 or later. - signatures_enabled: bool &optional; - ## This indicates if the server is requiring the use of a MAC in each - ## packet. If false, message signing is optional. Note: Requires NT LM 0.12 - ## or later. 
- signatures_required: bool &optional; - }; - - type SMB1::NegotiateResponseCore: record { - ## Index of selected dialect - dialect_index : count; - }; - - type SMB1::NegotiateResponseLANMAN: record { - ## Count of parameter words (should be 13) - word_count : count; - ## Index of selected dialect - dialect_index : count; - ## Security mode - security_mode : SMB1::NegotiateResponseSecurity; - ## Max transmit buffer size (>= 1024) - max_buffer_size : count; - ## Max pending multiplexed requests - max_mpx_count : count; - - ## Max number of virtual circuits (VCs - transport-layer connections) - ## between client and server - max_number_vcs : count; - ## Raw mode - raw_mode : SMB1::NegotiateRawMode; - ## Unique token identifying this session - session_key : count; - ## Current date and time at server - server_time : time; - ## The challenge encryption key - encryption_key : string; - - ## The server's primary domain - primary_domain : string; - }; - - type SMB1::NegotiateResponseNTLM: record { - ## Count of parameter words (should be 17) - word_count : count; - ## Index of selected dialect - dialect_index : count; - ## Security mode - security_mode : SMB1::NegotiateResponseSecurity; - ## Max transmit buffer size - max_buffer_size : count; - ## Max pending multiplexed requests - max_mpx_count : count; - - ## Max number of virtual circuits (VCs - transport-layer connections) - ## between client and server - max_number_vcs : count; - ## Max raw buffer size - max_raw_size : count; - ## Unique token identifying this session - session_key : count; - ## Server capabilities - capabilities : SMB1::NegotiateCapabilities; - ## Current date and time at server - server_time : time; - - ## The challenge encryption key. - ## Present only for non-extended security (i.e. capabilities$extended_security = F) - encryption_key : string &optional; - ## The name of the domain. - ## Present only for non-extended security (i.e. capabilities$extended_security = F) - domain_name : string &optional; - ## A globally unique identifier assigned to the server. - ## Present only for extended security (i.e. capabilities$extended_security = T) - guid : string &optional; - ## Opaque security blob associated with the security package if capabilities$extended_security = T - ## Otherwise, the challenge for challenge/response authentication. - security_blob : string; - }; - - type SMB1::NegotiateResponse: record { - ## If the server does not understand any of the dialect strings, or if - ## PC NETWORK PROGRAM 1.0 is the chosen dialect. - core : SMB1::NegotiateResponseCore &optional; - ## If the chosen dialect is greater than core up to and including - ## LANMAN 2.1. - lanman : SMB1::NegotiateResponseLANMAN &optional; - ## If the chosen dialect is NT LM 0.12. - ntlm : SMB1::NegotiateResponseNTLM &optional; - }; - - type SMB1::SessionSetupAndXCapabilities: record { - ## The client can use unicode strings - unicode : bool; - ## The client can deal with files having 64 bit offsets - large_files : bool; - ## The client understands the SMBs introduced with NT LM 0.12 - ## Implies nt_find - nt_smbs : bool; - ## The client can receive 32 bit errors encoded in Status.Status - status32 : bool; - ## The client understands Level II oplocks - level_2_oplocks : bool; - ## Reserved. Implied by nt_smbs. 
- nt_find : bool; - }; - - type SMB1::SessionSetupAndXRequest: record { - ## Count of parameter words - ## - 10 for pre NT LM 0.12 - ## - 12 for NT LM 0.12 with extended security - ## - 13 for NT LM 0.12 without extended security - word_count : count; - ## Client maximum buffer size - max_buffer_size : count; - ## Actual maximum multiplexed pending request - max_mpx_count : count; - ## Virtual circuit number. First VC == 0 - vc_number : count; - ## Session key (valid iff vc_number > 0) - session_key : count; - - ## Client's native operating system - native_os : string; - ## Client's native LAN Manager type - native_lanman : string; - ## Account name - ## Note: not set for NT LM 0.12 with extended security - account_name : string &optional; - ## If challenge/response auth is not being used, this is the password. - ## Otherwise, it's the response to the server's challenge. - ## Note: Only set for pre NT LM 0.12 - account_password : string &optional; - ## Client's primary domain, if known - ## Note: not set for NT LM 0.12 with extended security - primary_domain : string &optional; - - ## Case insensitive password - ## Note: only set for NT LM 0.12 without extended security - case_insensitive_password : string &optional; - ## Case sensitive password - ## Note: only set for NT LM 0.12 without extended security - case_sensitive_password : string &optional; - ## Security blob - ## Note: only set for NT LM 0.12 with extended security - security_blob : string &optional; - ## Client capabilities - ## Note: only set for NT LM 0.12 - capabilities : SMB1::SessionSetupAndXCapabilities &optional; - }; - - type SMB1::SessionSetupAndXResponse: record { - ## Count of parameter words (should be 3 for pre NT LM 0.12 and 4 for NT LM 0.12) - word_count : count; - ## Were we logged in as a guest user? 
- is_guest : bool &optional; - ## Server's native operating system - native_os : string &optional; - ## Server's native LAN Manager type - native_lanman : string &optional; - ## Server's primary domain - primary_domain : string &optional; - ## Security blob if NTLM - security_blob : string &optional; - }; - - type SMB1::Trans2_Args: record { - ## Total parameter count - total_param_count: count; - ## Total data count - total_data_count: count; - ## Max parameter count - max_param_count: count; - ## Max data count - max_data_count: count; - ## Max setup count - max_setup_count: count; - ## Flags - flags: count; - ## Timeout - trans_timeout: count; - ## Parameter count - param_count: count; - ## Parameter offset - param_offset: count; - ## Data count - data_count: count; - ## Data offset - data_offset: count; - ## Setup count - setup_count: count; - }; - - type SMB1::Trans_Sec_Args: record { - ## Total parameter count - total_param_count: count; - ## Total data count - total_data_count: count; - ## Parameter count - param_count: count; - ## Parameter offset - param_offset: count; - ## Parameter displacement - param_displacement: count; - ## Data count - data_count: count; - ## Data offset - data_offset: count; - ## Data displacement - data_displacement: count; - }; - - type SMB1::Trans2_Sec_Args: record { - ## Total parameter count - total_param_count: count; - ## Total data count - total_data_count: count; - ## Parameter count - param_count: count; - ## Parameter offset - param_offset: count; - ## Parameter displacement - param_displacement: count; - ## Data count - data_count: count; - ## Data offset - data_offset: count; - ## Data displacement - data_displacement: count; - ## File ID - FID: count; - }; - - type SMB1::Find_First2_Request_Args: record { - ## File attributes to apply as a constraint to the search - search_attrs : count; - ## Max search results - search_count : count; - ## Misc. flags for how the server should manage the transaction - ## once results are returned - flags : count; - ## How detailed the information returned in the results should be - info_level : count; - ## Specify whether to search for directories or files - search_storage_type : count; - ## The string to serch for (note: may contain wildcards) - file_name : string; - }; - - type SMB1::Find_First2_Response_Args: record { - ## The server generated search identifier - sid : count; - ## Number of results returned by the search - search_count : count; - ## Whether or not the search can be continued using - ## the TRANS2_FIND_NEXT2 transaction - end_of_search : bool; - ## An extended attribute name that couldn't be retrieved - ext_attr_error : string &optional; - }; - - -} - -module SMB2; - -export { - ## An SMB2 header. - ## - ## For more information, see MS-SMB2:2.2.1.1 and MS-SMB2:2.2.1.2 - ## - ## .. bro:see:: smb2_message smb2_close_request smb2_close_response - ## smb2_create_request smb2_create_response smb2_negotiate_request - ## smb2_negotiate_response smb2_read_request - ## smb2_session_setup_request smb2_session_setup_response - ## smb2_file_rename smb2_file_delete - ## smb2_tree_connect_request smb2_tree_connect_response - ## smb2_write_request - type SMB2::Header: record { - ## The number of credits that this request consumes - credit_charge : count; - ## In a request, this is an indication to the server about the client's channel - ## change. 
In a response, this is the status field - status : count; - ## The command code of the packet - command : count; - ## The number of credits the client is requesting, or the number of credits - ## granted to the client in a response. - credits : count; - ## A flags field, which indicates how to process the operation (e.g. asynchronously) - flags : count; - ## A value that uniquely identifies the message request/response pair across all - ## messages that are sent on the same transport protocol connection - message_id : count; - ## A value that uniquely identifies the process that generated the event. - process_id : count; - ## A value that uniquely identifies the tree connect for the command. - tree_id : count; - ## A value that uniquely identifies the established session for the command. - session_id : count; - ## The 16-byte signature of the message, if SMB2_FLAGS_SIGNED is set in the ``flags`` - ## field. - signature : string; - }; - - ## An SMB2 globally unique identifier which identifies a file. - ## - ## For more information, see MS-SMB2:2.2.14.1 - ## - ## .. bro:see:: smb2_close_request smb2_create_response smb2_read_request - ## smb2_file_rename smb2_file_delete smb2_write_request - type SMB2::GUID: record { - ## A file handle that remains persistent when reconnected after a disconnect - persistent: count; - ## A file handle that can be changed when reconnected after a disconnect - volatile: count; - }; - - ## A series of boolean flags describing basic and extended file attributes for SMB2. - ## - ## For more information, see MS-CIFS:2.2.1.2.3 and MS-FSCC:2.6 - ## - ## .. bro:see:: smb2_create_response - type SMB2::FileAttrs: record { - ## The file is read only. Applications can read the file but cannot - ## write to it or delete it. - read_only: bool; - ## The file is hidden. It is not to be included in an ordinary directory listing. - hidden: bool; - ## The file is part of or is used exclusively by the operating system. - system: bool; - ## The file is a directory. - directory: bool; - ## The file has not been archived since it was last modified. Applications use - ## this attribute to mark files for backup or removal. - archive: bool; - ## The file has no other attributes set. This attribute is valid only if used alone. - normal: bool; - ## The file is temporary. This is a hint to the cache manager that it does not need - ## to flush the file to backing storage. - temporary: bool; - ## A file that is a sparse file. - sparse_file: bool; - ## A file or directory that has an associated reparse point. - reparse_point: bool; - ## The file or directory is compressed. For a file, this means that all of the data - ## in the file is compressed. For a directory, this means that compression is the - ## default for newly created files and subdirectories. - compressed: bool; - ## The data in this file is not available immediately. This attribute indicates that - ## the file data is physically moved to offline storage. This attribute is used by - ## Remote Storage, which is hierarchical storage management software. - offline: bool; - ## A file or directory that is not indexed by the content indexing service. - not_content_indexed: bool; - ## A file or directory that is encrypted. For a file, all data streams in the file - ## are encrypted. For a directory, encryption is the default for newly created files - ## and subdirectories. - encrypted: bool; - ## A file or directory that is configured with integrity support. For a file, all - ## data streams in the file have integrity support. 
For a directory, integrity support - ## is the default for newly created files and subdirectories, unless the caller - ## specifies otherwise. - integrity_stream: bool; - ## A file or directory that is configured to be excluded from the data integrity scan. - no_scrub_data: bool; - }; - - ## The response to an SMB2 *close* request, which is used by the client to close an instance - ## of a file that was opened previously. - ## - ## For more information, see MS-SMB2:2.2.16 - ## - ## .. bro:see:: smb2_close_response - type SMB2::CloseResponse: record { - ## The size, in bytes of the data that is allocated to the file. - alloc_size : count; - ## The size, in bytes, of the file. - eof : count; - ## The creation, last access, last write, and change times. - times : SMB::MACTimes; - ## The attributes of the file. - attrs : SMB2::FileAttrs; - }; - - ## Preauthentication information as defined in SMB v. 3.1.1 - ## - ## For more information, see MS-SMB2:2.3.1.1 - ## - type SMB2::PreAuthIntegrityCapabilities: record { - ## The number of hash algorithms. - hash_alg_count : count; - ## The salt length. - salt_length : count; - ## An array of hash algorithms (counts). - hash_alg : vector of count; - ## The salt. - salt : string; - }; - - ## Encryption information as defined in SMB v. 3.1.1 - ## - ## For more information, see MS-SMB2:2.3.1.2 - ## - type SMB2::EncryptionCapabilities: record { - ## The number of ciphers. - cipher_count : count; - ## An array of ciphers. - ciphers : vector of count; - }; - - ## Compression information as defined in SMB v. 3.1.1 - ## - ## For more information, see MS-SMB2:2.3.1.3 - ## - type SMB2::CompressionCapabilities: record { - ## The number of algorithms. - alg_count : count; - ## An array of compression algorithms. - algs : vector of count; - }; - - ## The context type information as defined in SMB v. 3.1.1 - ## - ## For more information, see MS-SMB2:2.3.1 - ## - type SMB2::NegotiateContextValue: record { - ## Specifies the type of context (preauth or encryption). - context_type : count; - ## The length in byte of the data field. - data_length : count; - ## The preauthentication information. - preauth_info : SMB2::PreAuthIntegrityCapabilities &optional; - ## The encryption information. - encryption_info : SMB2::EncryptionCapabilities &optional; - ## The compression information. - compression_info : SMB2::CompressionCapabilities &optional; - ## Indicates the server name the client must connect to. - netname: string &optional; - }; - - type SMB2::NegotiateContextValues: vector of SMB2::NegotiateContextValue; - - ## The response to an SMB2 *negotiate* request, which is used by tghe client to notify the server - ## what dialects of the SMB2 protocol the client understands. - ## - ## For more information, see MS-SMB2:2.2.4 - ## - ## .. bro:see:: smb2_negotiate_response - type SMB2::NegotiateResponse: record { - ## The preferred common SMB2 Protocol dialect number from the array that was sent in the SMB2 - ## NEGOTIATE Request. - dialect_revision : count; - ## The security mode field specifies whether SMB signing is enabled, required at the server, or both. - security_mode : count; - ## A globally unique identifier that is generate by the server to uniquely identify the server. - server_guid : string; - ## The system time of the SMB2 server when the SMB2 NEGOTIATE Request was processed. - system_time : time; - ## The SMB2 server start time. - server_start_time : time; - - ## The number of negotiate context values in SMB v. 3.1.1, otherwise reserved to 0. 
- negotiate_context_count : count; - ## An array of context values in SMB v. 3.1.1. - negotiate_context_values : SMB2::NegotiateContextValues; - }; - - ## The request sent by the client to request a new authenticated session - ## within a new or existing SMB 2 Protocol transport connection to the server. - ## - ## For more information, see MS-SMB2:2.2.5 - ## - ## .. bro:see:: smb2_session_setup_request - type SMB2::SessionSetupRequest: record { - ## The security mode field specifies whether SMB signing is enabled or required at the client. - security_mode: count; - }; - - ## A flags field that indicates additional information about the session that's sent in the - ## *session_setup* response. - ## - ## For more information, see MS-SMB2:2.2.6 - ## - ## .. bro:see:: smb2_session_setup_response - type SMB2::SessionSetupFlags: record { - ## If set, the client has been authenticated as a guest user. - guest: bool; - ## If set, the client has been authenticated as an anonymous user. - anonymous: bool; - ## If set, the server requires encryption of messages on this session. - encrypt: bool; - }; - - ## The response to an SMB2 *session_setup* request, which is sent by the client to request a - ## new authenticated session within a new or existing SMB 2 Protocol transport connection - ## to the server. - ## - ## For more information, see MS-SMB2:2.2.6 - ## - ## .. bro:see:: smb2_session_setup_response - type SMB2::SessionSetupResponse: record { - ## Additional information about the session - flags: SMB2::SessionSetupFlags; - }; - - ## The response to an SMB2 *tree_connect* request, which is sent by the client to request - ## access to a particular share on the server. - ## - ## For more information, see MS-SMB2:2.2.9 - ## - ## .. bro:see:: smb2_tree_connect_response - type SMB2::TreeConnectResponse: record { - ## The type of share being accessed. Physical disk, named pipe, or printer. - share_type: count; - }; - - ## The request sent by the client to request either creation of or access to a file. - ## - ## For more information, see MS-SMB2:2.2.13 - ## - ## .. bro:see:: smb2_create_request - type SMB2::CreateRequest: record { - ## Name of the file - filename : string; - ## Defines the action the server MUST take if the file that is specified already exists. - disposition : count; - ## Specifies the options to be applied when creating or opening the file. - create_options : count; - }; - - ## The response to an SMB2 *create_request* request, which is sent by the client to request - ## either creation of or access to a file. - ## - ## For more information, see MS-SMB2:2.2.14 - ## - ## .. bro:see:: smb2_create_response - type SMB2::CreateResponse: record { - ## The SMB2 GUID for the file. - file_id : SMB2::GUID; - ## Size of the file. - size : count; - ## Timestamps associated with the file in question. - times : SMB::MACTimes; - ## File attributes. - attrs : SMB2::FileAttrs; - ## The action taken in establishing the open. - create_action : count; - }; - - ## An SMB2 transform header (for SMB 3.x dialects with encryption enabled). - ## - ## For more information, see MS-SMB2:2.2.41 - ## - ## .. 
bro:see:: smb2_header smb2_message smb2_close_request smb2_close_response - ## smb2_create_request smb2_create_response smb2_negotiate_request - ## smb2_negotiate_response smb2_read_request - ## smb2_session_setup_request smb2_session_setup_response - ## smb2_file_rename smb2_file_delete - ## smb2_tree_connect_request smb2_tree_connect_response - ## smb2_write_request - type SMB2::Transform_header: record { - ## The 16-byte signature of the encrypted message, generated by using Session.EncryptionKey. - signature : string; - ## An implementation specific value assigned for every encrypted message. - nonce : string; - ## The size, in bytes, of the SMB2 message. - orig_msg_size : count; - ## A flags field, interpreted in different ways depending of the SMB2 dialect. - flags : count; - ## A value that uniquely identifies the established session for the command. - session_id : count; - }; -} - -module GLOBAL; - -module DHCP; - -export { - ## A list of addresses offered by a DHCP server. Could be routers, - ## DNS servers, or other. - ## - ## .. bro:see:: dhcp_message - type DHCP::Addrs: vector of addr; - - ## A DHCP message. - ## .. bro:see:: dhcp_message - type DHCP::Msg: record { - op: count; ##< Message OP code. 1 = BOOTREQUEST, 2 = BOOTREPLY - m_type: count; ##< The type of DHCP message. - xid: count; ##< Transaction ID of a DHCP session. - ## Number of seconds since client began address acquisition - ## or renewal process - secs: interval; - flags: count; - ciaddr: addr; ##< Original IP address of the client. - yiaddr: addr; ##< IP address assigned to the client. - siaddr: addr; ##< IP address of the server. - giaddr: addr; ##< IP address of the relaying gateway. - chaddr: string; ##< Client hardware address. - sname: string &default=""; ##< Server host name. - file_n: string &default=""; ##< Boot file name. - }; - - ## DHCP Client Identifier (Option 61) - ## .. bro:see:: dhcp_message - type DHCP::ClientID: record { - hwtype: count; - hwaddr: string; - }; - - ## DHCP Client FQDN Option information (Option 81) - type DHCP::ClientFQDN: record { - ## An unparsed bitfield of flags (refer to RFC 4702). - flags: count; - ## This field is deprecated in the standard. - rcode1: count; - ## This field is deprecated in the standard. - rcode2: count; - ## The Domain Name part of the option carries all or part of the FQDN - ## of a DHCP client. - domain_name: string; - }; - - ## DHCP Relay Agent Information Option (Option 82) - ## .. bro:see:: dhcp_message - type DHCP::SubOpt: record { - code: count; - value: string; - }; - - type DHCP::SubOpts: vector of DHCP::SubOpt; - - type DHCP::Options: record { - ## The ordered list of all DHCP option numbers. - options: index_vec &optional; - - ## Subnet Mask Value (option 1) - subnet_mask: addr &optional; - - ## Router addresses (option 3) - routers: DHCP::Addrs &optional; - - ## DNS Server addresses (option 6) - dns_servers: DHCP::Addrs &optional; - - ## The Hostname of the client (option 12) - host_name: string &optional; - - ## The DNS domain name of the client (option 15) - domain_name: string &optional; - - ## Enable/Disable IP Forwarding (option 19) - forwarding: bool &optional; - - ## Broadcast Address (option 28) - broadcast: addr &optional; - - ## Vendor specific data. This can frequently - ## be unparsed binary data. 
(option 43) - vendor: string &optional; - - ## NETBIOS name server list (option 44) - nbns: DHCP::Addrs &optional; - - ## Address requested by the client (option 50) - addr_request: addr &optional; - - ## Lease time offered by the server. (option 51) - lease: interval &optional; - - ## Server address to allow clients to distinguish - ## between lease offers. (option 54) - serv_addr: addr &optional; - - ## DHCP Parameter Request list (option 55) - param_list: index_vec &optional; - - ## Textual error message (option 56) - message: string &optional; - - ## Maximum Message Size (option 57) - max_msg_size: count &optional; - - ## This option specifies the time interval from address - ## assignment until the client transitions to the - ## RENEWING state. (option 58) - renewal_time: interval &optional; - - ## This option specifies the time interval from address - ## assignment until the client transitions to the - ## REBINDING state. (option 59) - rebinding_time: interval &optional; - - ## This option is used by DHCP clients to optionally - ## identify the vendor type and configuration of a DHCP - ## client. (option 60) - vendor_class: string &optional; - - ## DHCP Client Identifier (Option 61) - client_id: DHCP::ClientID &optional; - - ## User Class opaque value (Option 77) - user_class: string &optional; - - ## DHCP Client FQDN (Option 81) - client_fqdn: DHCP::ClientFQDN &optional; - - ## DHCP Relay Agent Information Option (Option 82) - sub_opt: DHCP::SubOpts &optional; - - ## Auto Config option to let host know if it's allowed to - ## auto assign an IP address. (Option 116) - auto_config: bool &optional; - - ## URL to find a proxy.pac for auto proxy config (Option 252) - auto_proxy_config: string &optional; - }; -} - -module GLOBAL; -## A DNS message. -## -## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl -## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply -## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end -## dns_message dns_query_reply dns_rejected dns_request -type dns_msg: record { - id: count; ##< Transaction ID. - - opcode: count; ##< Operation code. - rcode: count; ##< Return code. - - QR: bool; ##< Query response flag. - AA: bool; ##< Authoritative answer flag. - TC: bool; ##< Truncated packet flag. - RD: bool; ##< Recursion desired flag. - RA: bool; ##< Recursion available flag. - Z: count; ##< TODO. - - num_queries: count; ##< Number of query records. - num_answers: count; ##< Number of answer records. - num_auth: count; ##< Number of authoritative records. - num_addl: count; ##< Number of additional records. -}; - -## A DNS SOA record. -## -## .. bro:see:: dns_SOA_reply -type dns_soa: record { - mname: string; ##< Primary source of data for zone. - rname: string; ##< Mailbox for responsible person. - serial: count; ##< Version number of zone. - refresh: interval; ##< Seconds before refreshing. - retry: interval; ##< How long before retrying failed refresh. - expire: interval; ##< When zone no longer authoritative. - minimum: interval; ##< Minimum TTL to use when exporting. -}; - -## An additional DNS EDNS record. -## -## .. bro:see:: dns_EDNS_addl -type dns_edns_additional: record { - query: string; ##< Query. - qtype: count; ##< Query type. - t: count; ##< TODO. - payload_size: count; ##< TODO. - extended_rcode: count; ##< Extended return code. - version: count; ##< Version. - z_field: count; ##< TODO. - TTL: interval; ##< Time-to-live. - is_query: count; ##< TODO. -}; - -## An additional DNS TSIG record. -## -## .. 
bro:see:: dns_TSIG_addl -type dns_tsig_additional: record { - query: string; ##< Query. - qtype: count; ##< Query type. - alg_name: string; ##< Algorithm name. - sig: string; ##< Signature. - time_signed: time; ##< Time when signed. - fudge: time; ##< TODO. - orig_id: count; ##< TODO. - rr_error: count; ##< TODO. - is_query: count; ##< TODO. -}; - -## A DNSSEC RRSIG record. -## -## .. bro:see:: dns_RRSIG -type dns_rrsig_rr: record { - query: string; ##< Query. - answer_type: count; ##< Ans type. - type_covered: count; ##< qtype covered by RRSIG RR. - algorithm: count; ##< Algorithm. - labels: count; ##< Labels in the owner's name. - orig_ttl: interval; ##< Original TTL. - sig_exp: time; ##< Time when signed RR expires. - sig_incep: time; ##< Time when signed. - key_tag: count; ##< Key tag value. - signer_name: string; ##< Signature. - signature: string; ##< Hash of the RRDATA. - is_query: count; ##< The RR is a query/Response. -}; - -## A DNSSEC DNSKEY record. -## -## .. bro:see:: dns_DNSKEY -type dns_dnskey_rr: record { - query: string; ##< Query. - answer_type: count; ##< Ans type. - flags: count; ##< flags filed. - protocol: count; ##< Protocol, should be always 3 for DNSSEC. - algorithm: count; ##< Algorithm for Public Key. - public_key: string; ##< Public Key - is_query: count; ##< The RR is a query/Response. -}; - -## A DNSSEC NSEC3 record. -## -## .. bro:see:: dns_NSEC3 -type dns_nsec3_rr: record { - query: string; ##< Query. - answer_type: count; ##< Ans type. - nsec_flags: count; ##< flags field. - nsec_hash_algo: count; ##< Hash algorithm. - nsec_iter: count; ##< Iterations. - nsec_salt_len: count; ##< Salt length. - nsec_salt: string; ##< Salt value - nsec_hlen: count; ##< Hash length. - nsec_hash: string; ##< Hash value. - bitmaps: string_vec; ##< Type Bit Maps. - is_query: count; ##< The RR is a query/Response. -}; - -## A DNSSEC DS record. -## -## .. bro:see:: dns_DS -type dns_ds_rr: record { - query: string; ##< Query. - answer_type: count; ##< Ans type. - key_tag: count; ##< flags filed. - algorithm: count; ##< Algorithm for Public Key. - digest_type: count; ##< Digest Type. - digest_val: string; ##< Digest Value. - is_query: count; ##< The RR is a query/Response. -}; - -# DNS answer types. -# -# .. bro:see:: dns_answerr -# -# todo:: use enum to make them autodoc'able -const DNS_QUERY = 0; ##< A query. This shouldn't occur, just for completeness. -const DNS_ANS = 1; ##< An answer record. -const DNS_AUTH = 2; ##< An authoritative record. -const DNS_ADDL = 3; ##< An additional record. - -## The general part of a DNS reply. -## -## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_HINFO_reply -## dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply dns_SRV_reply -## dns_TXT_reply dns_WKS_reply -type dns_answer: record { - ## Answer type. One of :bro:see:`DNS_QUERY`, :bro:see:`DNS_ANS`, - ## :bro:see:`DNS_AUTH` and :bro:see:`DNS_ADDL`. - answer_type: count; - query: string; ##< Query. - qtype: count; ##< Query type. - qclass: count; ##< Query class. - TTL: interval; ##< Time-to-live. -}; - -## For DNS servers in these sets, omit processing the AUTH records they include -## in their replies. -## -## .. bro:see:: dns_skip_all_auth dns_skip_addl -global dns_skip_auth: set[addr] &redef; - -## For DNS servers in these sets, omit processing the ADDL records they include -## in their replies. -## -## .. bro:see:: dns_skip_all_addl dns_skip_auth -global dns_skip_addl: set[addr] &redef; - -## If true, all DNS AUTH records are skipped. -## -## .. 
bro:see:: dns_skip_all_addl dns_skip_auth -global dns_skip_all_auth = T &redef; - -## If true, all DNS ADDL records are skipped. -## -## .. bro:see:: dns_skip_all_auth dns_skip_addl -global dns_skip_all_addl = T &redef; - -## If a DNS request includes more than this many queries, assume it's non-DNS -## traffic and do not process it. Set to 0 to turn off this functionality. -global dns_max_queries = 25 &redef; - -## The address of the DNS resolver to use. If not changed from the -## unspecified address, ``[::]``, the first nameserver from /etc/resolv.conf -## gets used (IPv6 is currently only supported if set via this option, not -## when parsed from the file). -const dns_resolver = [::] &redef; - -## HTTP session statistics. -## -## .. bro:see:: http_stats -type http_stats_rec: record { - num_requests: count; ##< Number of requests. - num_replies: count; ##< Number of replies. - request_version: double; ##< HTTP version of the requests. - reply_version: double; ##< HTTP Version of the replies. -}; - -## HTTP message statistics. -## -## .. bro:see:: http_message_done -type http_message_stat: record { - ## When the request/reply line was complete. - start: time; - ## Whether the message was interrupted. - interrupted: bool; - ## Reason phrase if interrupted. - finish_msg: string; - ## Length of body processed (before finished/interrupted). - body_length: count; - ## Total length of gaps within *body_length*. - content_gap_length: count; - ## Length of headers (including the req/reply line, but not CR/LF's). - header_length: count; -}; - -## Maximum number of HTTP entity data delivered to events. -## -## .. bro:see:: http_entity_data skip_http_entity_data skip_http_data -global http_entity_data_delivery_size = 1500 &redef; - -## Skip HTTP data for performance considerations. The skipped -## portion will not go through TCP reassembly. -## -## .. bro:see:: http_entity_data skip_http_entity_data http_entity_data_delivery_size -const skip_http_data = F &redef; - -## Maximum length of HTTP URIs passed to events. Longer ones will be truncated -## to prevent over-long URIs (usually sent by worms) from slowing down event -## processing. A value of -1 means "do not truncate". -## -## .. bro:see:: http_request -const truncate_http_URI = -1 &redef; - -## IRC join information. -## -## .. bro:see:: irc_join_list -type irc_join_info: record { - nick: string; - channel: string; - password: string; - usermode: string; -}; - -## Set of IRC join information. -## -## .. bro:see:: irc_join_message -type irc_join_list: set[irc_join_info]; - -module PE; -export { -type PE::DOSHeader: record { - ## The magic number of a portable executable file ("MZ"). - signature : string; - ## The number of bytes in the last page that are used. - used_bytes_in_last_page : count; - ## The number of pages in the file that are part of the PE file itself. - file_in_pages : count; - ## Number of relocation entries stored after the header. - num_reloc_items : count; - ## Number of paragraphs in the header. - header_in_paragraphs : count; - ## Number of paragraps of additional memory that the program will need. - min_extra_paragraphs : count; - ## Maximum number of paragraphs of additional memory. - max_extra_paragraphs : count; - ## Relative value of the stack segment. - init_relative_ss : count; - ## Initial value of the SP register. - init_sp : count; - ## Checksum. The 16-bit sum of all words in the file should be 0. Normally not set. - checksum : count; - ## Initial value of the IP register. 
- init_ip : count; - ## Initial value of the CS register (relative to the initial segment). - init_relative_cs : count; - ## Offset of the first relocation table. - addr_of_reloc_table : count; - ## Overlays allow you to append data to the end of the file. If this is the main program, - ## this will be 0. - overlay_num : count; - ## OEM identifier. - oem_id : count; - ## Additional OEM info, specific to oem_id. - oem_info : count; - ## Address of the new EXE header. - addr_of_new_exe_header : count; -}; - -type PE::FileHeader: record { - ## The target machine that the file was compiled for. - machine : count; - ## The time that the file was created at. - ts : time; - ## Pointer to the symbol table. - sym_table_ptr : count; - ## Number of symbols. - num_syms : count; - ## The size of the optional header. - optional_header_size : count; - ## Bit flags that determine if this file is executable, non-relocatable, and/or a DLL. - characteristics : set[count]; -}; - -type PE::OptionalHeader: record { - ## PE32 or PE32+ indicator. - magic : count; - ## The major version of the linker used to create the PE. - major_linker_version : count; - ## The minor version of the linker used to create the PE. - minor_linker_version : count; - ## Size of the .text section. - size_of_code : count; - ## Size of the .data section. - size_of_init_data : count; - ## Size of the .bss section. - size_of_uninit_data : count; - ## The relative virtual address (RVA) of the entry point. - addr_of_entry_point : count; - ## The relative virtual address (RVA) of the .text section. - base_of_code : count; - ## The relative virtual address (RVA) of the .data section. - base_of_data : count &optional; - ## Preferred memory location for the image to be based at. - image_base : count; - ## The alignment (in bytes) of sections when they're loaded in memory. - section_alignment : count; - ## The alignment (in bytes) of the raw data of sections. - file_alignment : count; - ## The major version of the required OS. - os_version_major : count; - ## The minor version of the required OS. - os_version_minor : count; - ## The major version of this image. - major_image_version : count; - ## The minor version of this image. - minor_image_version : count; - ## The major version of the subsystem required to run this file. - major_subsys_version : count; - ## The minor version of the subsystem required to run this file. - minor_subsys_version : count; - ## The size (in bytes) of the iamge as the image is loaded in memory. - size_of_image : count; - ## The size (in bytes) of the headers, rounded up to file_alignment. - size_of_headers : count; - ## The image file checksum. - checksum : count; - ## The subsystem that's required to run this image. - subsystem : count; - ## Bit flags that determine how to execute or load this file. - dll_characteristics : set[count]; - ## A vector with the sizes of various tables and strings that are - ## defined in the optional header data directories. Examples include - ## the import table, the resource table, and debug information. - table_sizes : vector of count; - -}; - -## Record for Portable Executable (PE) section headers. -type PE::SectionHeader: record { - ## The name of the section - name : string; - ## The total size of the section when loaded into memory. - virtual_size : count; - ## The relative virtual address (RVA) of the section. - virtual_addr : count; - ## The size of the initialized data for the section, as it is - ## in the file on disk. 
- size_of_raw_data : count; - ## The virtual address of the initialized dat for the section, - ## as it is in the file on disk. - ptr_to_raw_data : count; - ## The file pointer to the beginning of relocation entries for - ## the section. - ptr_to_relocs : count; - ## The file pointer to the beginning of line-number entries for - ## the section. - ptr_to_line_nums : count; - ## The number of relocation entries for the section. - num_of_relocs : count; - ## The number of line-number entrie for the section. - num_of_line_nums : count; - ## Bit-flags that describe the characteristics of the section. - characteristics : set[count]; -}; -} -module GLOBAL; - -## Deprecated. -## -## .. todo:: Remove. It's still declared internally but doesn't seem used anywhere -## else. -global irc_servers : set[addr] &redef; - -## Internal to the stepping stone detector. -const stp_delta: interval &redef; - -## Internal to the stepping stone detector. -const stp_idle_min: interval &redef; - -## Internal to the stepping stone detector. -global stp_skip_src: set[addr] &redef; - -## Deprecated. -const interconn_min_interarrival: interval &redef; - -## Deprecated. -const interconn_max_interarrival: interval &redef; - -## Deprecated. -const interconn_max_keystroke_pkt_size: count &redef; - -## Deprecated. -const interconn_default_pkt_size: count &redef; - -## Deprecated. -const interconn_stat_period: interval &redef; - -## Deprecated. -const interconn_stat_backoff: double &redef; - -## Deprecated. -type interconn_endp_stats: record { - num_pkts: count; - num_keystrokes_two_in_row: count; - num_normal_interarrivals: count; - num_8k0_pkts: count; - num_8k4_pkts: count; - is_partial: bool; - num_bytes: count; - num_7bit_ascii: count; - num_lines: count; - num_normal_lines: count; -}; - -## Deprecated. -const backdoor_stat_period: interval &redef; - -## Deprecated. -const backdoor_stat_backoff: double &redef; - -## Deprecated. -type backdoor_endp_stats: record { - is_partial: bool; - num_pkts: count; - num_8k0_pkts: count; - num_8k4_pkts: count; - num_lines: count; - num_normal_lines: count; - num_bytes: count; - num_7bit_ascii: count; -}; - -## Description of a signature match. -## -## .. bro:see:: signature_match -type signature_state: record { - sig_id: string; ##< ID of the matching signature. - conn: connection; ##< Matching connection. - is_orig: bool; ##< True if matching endpoint is originator. - payload_size: count; ##< Payload size of the first matching packet of current endpoint. -}; - -# Deprecated. -# -# .. todo:: This type is no longer used. Remove any reference of this from the -# core. -type software_version: record { - major: int; - minor: int; - minor2: int; - addl: string; -}; - -# Deprecated. -# -# .. todo:: This type is no longer used. Remove any reference of this from the -# core. -type software: record { - name: string; - version: software_version; -}; - -## Quality of passive fingerprinting matches. -## -## .. bro:see:: OS_version -type OS_version_inference: enum { - direct_inference, ##< TODO. - generic_inference, ##< TODO. - fuzzy_inference, ##< TODO. -}; - -## Passive fingerprinting match. -## -## .. bro:see:: OS_version_found -type OS_version: record { - genre: string; ##< Linux, Windows, AIX, ... - detail: string; ##< Kernel version or such. - dist: count; ##< How far is the host away from the sensor (TTL)?. - match_type: OS_version_inference; ##< Quality of the match. -}; - -## Defines for which subnets we should do passive fingerprinting. -## -## .. 
bro:see:: OS_version_found -global generate_OS_version_event: set[subnet] &redef; - -# Type used to report load samples via :bro:see:`load_sample`. For now, it's a -# set of names (event names, source file names, and perhaps ````), which were seen during the sample. -type load_sample_info: set[string]; - -## A BitTorrent peer. -## -## .. bro:see:: bittorrent_peer_set -type bittorrent_peer: record { - h: addr; ##< The peer's address. - p: port; ##< The peer's port. -}; - -## A set of BitTorrent peers. -## -## .. bro:see:: bt_tracker_response -type bittorrent_peer_set: set[bittorrent_peer]; - -## BitTorrent "benc" value. Note that "benc" = Bencode ("Bee-Encode"), per -## http://en.wikipedia.org/wiki/Bencode. -## -## .. bro:see:: bittorrent_benc_dir -type bittorrent_benc_value: record { - i: int &optional; ##< TODO. - s: string &optional; ##< TODO. - d: string &optional; ##< TODO. - l: string &optional; ##< TODO. -}; - -## A table of BitTorrent "benc" values. -## -## .. bro:see:: bt_tracker_response -type bittorrent_benc_dir: table[string] of bittorrent_benc_value; - -## Header table type used by BitTorrent analyzer. -## -## .. bro:see:: bt_tracker_request bt_tracker_response -## bt_tracker_response_not_ok -type bt_tracker_headers: table[string] of string; - -## A vector of boolean values that indicate the setting -## for a range of modbus coils. -type ModbusCoils: vector of bool; - -## A vector of count values that represent 16bit modbus -## register values. -type ModbusRegisters: vector of count; - -type ModbusHeaders: record { - ## Transaction identifier - tid: count; - ## Protocol identifier - pid: count; - ## Unit identifier (previously 'slave address') - uid: count; - ## MODBUS function code - function_code: count; -}; - -module Unified2; -export { - type Unified2::IDSEvent: record { - sensor_id: count; - event_id: count; - ts: time; - signature_id: count; - generator_id: count; - signature_revision: count; - classification_id: count; - priority_id: count; - src_ip: addr; - dst_ip: addr; - src_p: port; - dst_p: port; - impact_flag: count; - impact: count; - blocked: count; - ## Not available in "legacy" IDS events. - mpls_label: count &optional; - ## Not available in "legacy" IDS events. - vlan_id: count &optional; - ## Only available in "legacy" IDS events. - packet_action: count &optional; - }; - - type Unified2::Packet: record { - sensor_id: count; - event_id: count; - event_second: count; - packet_ts: time; - link_type: count; - data: string; - }; -} - -module SSL; -export { - type SignatureAndHashAlgorithm: record { - HashAlgorithm: count; ##< Hash algorithm number - SignatureAlgorithm: count; ##< Signature algorithm number - }; -} - -module GLOBAL; - -## A vector of Signature and Hash Algorithms. -## -## .. todo:: We need this type definition only for declaring builtin functions -## via ``bifcl``. We should extend ``bifcl`` to understand composite types -## directly and then remove this alias. -type signature_and_hashalgorithm_vec: vector of SSL::SignatureAndHashAlgorithm; - -module X509; -export { - type Certificate: record { - version: count &log; ##< Version number. - serial: string &log; ##< Serial number. - subject: string &log; ##< Subject. - issuer: string &log; ##< Issuer. - cn: string &optional; ##< Last (most specific) common name. - not_valid_before: time &log; ##< Timestamp before when certificate is not valid. - not_valid_after: time &log; ##< Timestamp after when certificate is not valid. 
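# Illustrative sketch only: one way a script might consume the X509::Certificate
# record shown above. The x509_certificate event signature and the network_time()
# BiF are assumed from the stock Zeek distribution; they are not defined in this patch.
event x509_certificate(f: fa_file, cert_ref: opaque of x509, cert: X509::Certificate)
    {
    if ( cert$not_valid_after < network_time() )
        print fmt("expired certificate: %s", cert$subject);
    }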
- key_alg: string &log; ##< Name of the key algorithm - sig_alg: string &log; ##< Name of the signature algorithm - key_type: string &optional &log; ##< Key type, if key parseable by openssl (either rsa, dsa or ec) - key_length: count &optional &log; ##< Key length in bits - exponent: string &optional &log; ##< Exponent, if RSA-certificate - curve: string &optional &log; ##< Curve, if EC-certificate - }; - - type Extension: record { - name: string; ##< Long name of extension. oid if name not known - short_name: string &optional; ##< Short name of extension if known - oid: string; ##< Oid of extension - critical: bool; ##< True if extension is critical - value: string; ##< Extension content parsed to string for known extensions. Raw data otherwise. - }; - - type BasicConstraints: record { - ca: bool; ##< CA flag set? - path_len: count &optional; ##< Maximum path length - } &log; - - type SubjectAlternativeName: record { - dns: string_vec &optional &log; ##< List of DNS entries in SAN - uri: string_vec &optional &log; ##< List of URI entries in SAN - email: string_vec &optional &log; ##< List of email entries in SAN - ip: addr_vec &optional &log; ##< List of IP entries in SAN - other_fields: bool; ##< True if the certificate contained other, not recognized or parsed name fields - }; - - ## Result of an X509 certificate chain verification - type Result: record { - ## OpenSSL result code - result: int; - ## Result as string - result_string: string; - ## References to the final certificate chain, if verification successful. End-host certificate is first. - chain_certs: vector of opaque of x509 &optional; - }; -} - -module SOCKS; -export { - ## This record is for a SOCKS client or server to provide either a - ## name or an address to represent a desired or established connection. - type Address: record { - host: addr &optional; - name: string &optional; - } &log; -} - -module RADIUS; - -export { - type RADIUS::AttributeList: vector of string; - type RADIUS::Attributes: table[count] of RADIUS::AttributeList; - - type RADIUS::Message: record { - ## The type of message (Access-Request, Access-Accept, etc.). - code : count; - ## The transaction ID. - trans_id : count; - ## The "authenticator" string. - authenticator : string; - ## Any attributes. - attributes : RADIUS::Attributes &optional; - }; -} - -module RDP; -export { - type RDP::EarlyCapabilityFlags: record { - support_err_info_pdu: bool; - want_32bpp_session: bool; - support_statusinfo_pdu: bool; - strong_asymmetric_keys: bool; - support_monitor_layout_pdu: bool; - support_netchar_autodetect: bool; - support_dynvc_gfx_protocol: bool; - support_dynamic_time_zone: bool; - support_heartbeat_pdu: bool; - }; - - type RDP::ClientCoreData: record { - version_major: count; - version_minor: count; - desktop_width: count; - desktop_height: count; - color_depth: count; - sas_sequence: count; - keyboard_layout: count; - client_build: count; - client_name: string; - keyboard_type: count; - keyboard_sub: count; - keyboard_function_key: count; - ime_file_name: string; - post_beta2_color_depth: count &optional; - client_product_id: string &optional; - serial_number: count &optional; - high_color_depth: count &optional; - supported_color_depths: count &optional; - ec_flags: RDP::EarlyCapabilityFlags &optional; - dig_product_id: string &optional; - }; -} - -@load base/bif/plugins/Bro_SNMP.types.bif - -module SNMP; -export { - ## The top-level message data structure of an SNMPv1 datagram, not - ## including the PDU data. See :rfc:`1157`. 
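# Illustrative sketch only: consuming the RDP::ClientCoreData record defined above.
# The rdp_client_core_data event signature is assumed from the stock RDP analyzer
# and is not part of this patch.
event rdp_client_core_data(c: connection, data: RDP::ClientCoreData)
    {
    print fmt("RDP client '%s', build %d, keyboard layout %d",
              data$client_name, data$client_build, data$keyboard_layout);
    }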
- type SNMP::HeaderV1: record { - community: string; - }; - - ## The top-level message data structure of an SNMPv2 datagram, not - ## including the PDU data. See :rfc:`1901`. - type SNMP::HeaderV2: record { - community: string; - }; - - ## The ``ScopedPduData`` data structure of an SNMPv3 datagram, not - ## including the PDU data (i.e. just the "context" fields). - ## See :rfc:`3412`. - type SNMP::ScopedPDU_Context: record { - engine_id: string; - name: string; - }; - - ## The top-level message data structure of an SNMPv3 datagram, not - ## including the PDU data. See :rfc:`3412`. - type SNMP::HeaderV3: record { - id: count; - max_size: count; - flags: count; - auth_flag: bool; - priv_flag: bool; - reportable_flag: bool; - security_model: count; - security_params: string; - pdu_context: SNMP::ScopedPDU_Context &optional; - }; - - ## A generic SNMP header data structure that may include data from - ## any version of SNMP. The value of the ``version`` field - ## determines what header field is initialized. - type SNMP::Header: record { - version: count; - v1: SNMP::HeaderV1 &optional; ##< Set when ``version`` is 0. - v2: SNMP::HeaderV2 &optional; ##< Set when ``version`` is 1. - v3: SNMP::HeaderV3 &optional; ##< Set when ``version`` is 3. - }; - - ## A generic SNMP object value, that may include any of the - ## valid ``ObjectSyntax`` values from :rfc:`1155` or :rfc:`3416`. - ## The value is decoded whenever possible and assigned to - ## the appropriate field, which can be determined from the value - ## of the ``tag`` field. For tags that can't be mapped to an - ## appropriate type, the ``octets`` field holds the BER encoded - ## ASN.1 content if there is any (though, ``octets`` is may also - ## be used for other tags such as OCTET STRINGS or Opaque). Null - ## values will only have their corresponding tag value set. - type SNMP::ObjectValue: record { - tag: count; - oid: string &optional; - signed: int &optional; - unsigned: count &optional; - address: addr &optional; - octets: string &optional; - }; - - # These aren't an enum because it's easier to type fields as count. - # That way don't have to deal with type conversion, plus doesn't - # mislead that these are the only valid tag values (it's just the set - # of known tags). - const SNMP::OBJ_INTEGER_TAG : count = 0x02; ##< Signed 64-bit integer. - const SNMP::OBJ_OCTETSTRING_TAG : count = 0x04; ##< An octet string. - const SNMP::OBJ_UNSPECIFIED_TAG : count = 0x05; ##< A NULL value. - const SNMP::OBJ_OID_TAG : count = 0x06; ##< An Object Identifier. - const SNMP::OBJ_IPADDRESS_TAG : count = 0x40; ##< An IP address. - const SNMP::OBJ_COUNTER32_TAG : count = 0x41; ##< Unsigned 32-bit integer. - const SNMP::OBJ_UNSIGNED32_TAG : count = 0x42; ##< Unsigned 32-bit integer. - const SNMP::OBJ_TIMETICKS_TAG : count = 0x43; ##< Unsigned 32-bit integer. - const SNMP::OBJ_OPAQUE_TAG : count = 0x44; ##< An octet string. - const SNMP::OBJ_COUNTER64_TAG : count = 0x46; ##< Unsigned 64-bit integer. - const SNMP::OBJ_NOSUCHOBJECT_TAG : count = 0x80; ##< A NULL value. - const SNMP::OBJ_NOSUCHINSTANCE_TAG: count = 0x81; ##< A NULL value. - const SNMP::OBJ_ENDOFMIBVIEW_TAG : count = 0x82; ##< A NULL value. - - ## The ``VarBind`` data structure from either :rfc:`1157` or - ## :rfc:`3416`, which maps an Object Identifier to a value. - type SNMP::Binding: record { - oid: string; - value: SNMP::ObjectValue; - }; - - ## A ``VarBindList`` data structure from either :rfc:`1157` or :rfc:`3416`. 
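# Illustrative sketch only: distinguishing SNMP versions via the SNMP::Header
# record above. The snmp_get_request event signature (including SNMP::PDU) is
# assumed from the stock SNMP analyzer; it is not defined in this patch.
event snmp_get_request(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU)
    {
    if ( header$version == 3 && header?$v3 )
        print fmt("SNMPv3 request, security model %d", header$v3$security_model);
    }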
- ## A sequences of :bro:see:`SNMP::Binding`, which maps an OIDs to values. - type SNMP::Bindings: vector of SNMP::Binding; - - ## A ``PDU`` data structure from either :rfc:`1157` or :rfc:`3416`. - type SNMP::PDU: record { - request_id: int; - error_status: int; - error_index: int; - bindings: SNMP::Bindings; - }; - - ## A ``Trap-PDU`` data structure from :rfc:`1157`. - type SNMP::TrapPDU: record { - enterprise: string; - agent: addr; - generic_trap: int; - specific_trap: int; - time_stamp: count; - bindings: SNMP::Bindings; - }; - - ## A ``BulkPDU`` data structure from :rfc:`3416`. - type SNMP::BulkPDU: record { - request_id: int; - non_repeaters: count; - max_repititions: count; - bindings: SNMP::Bindings; - }; -} - -@load base/bif/plugins/Bro_KRB.types.bif - -module KRB; -export { - ## Kerberos keytab file name. Used to decrypt tickets encountered on the wire. - const keytab = "" &redef; - ## KDC Options. See :rfc:`4120` - type KRB::KDC_Options: record { - ## The ticket to be issued should have its forwardable flag set. - forwardable : bool; - ## A (TGT) request for forwarding. - forwarded : bool; - ## The ticket to be issued should have its proxiable flag set. - proxiable : bool; - ## A request for a proxy. - proxy : bool; - ## The ticket to be issued should have its may-postdate flag set. - allow_postdate : bool; - ## A request for a postdated ticket. - postdated : bool; - ## The ticket to be issued should have its renewable flag set. - renewable : bool; - ## Reserved for opt_hardware_auth - opt_hardware_auth : bool; - ## Request that the KDC not check the transited field of a TGT against - ## the policy of the local realm before it will issue derivative tickets - ## based on the TGT. - disable_transited_check : bool; - ## If a ticket with the requested lifetime cannot be issued, a renewable - ## ticket is acceptable - renewable_ok : bool; - ## The ticket for the end server is to be encrypted in the session key - ## from the additional TGT provided - enc_tkt_in_skey : bool; - ## The request is for a renewal - renew : bool; - ## The request is to validate a postdated ticket. - validate : bool; - }; - - ## AP Options. See :rfc:`4120` - type KRB::AP_Options: record { - ## Indicates that user-to-user-authentication is in use - use_session_key : bool; - ## Mutual authentication is required - mutual_required : bool; - }; - - ## Used in a few places in the Kerberos analyzer for elements - ## that have a type and a string value. - type KRB::Type_Value: record { - ## The data type - data_type : count; - ## The data value - val : string; - }; - - type KRB::Type_Value_Vector: vector of KRB::Type_Value; - - ## A Kerberos host address See :rfc:`4120`. - type KRB::Host_Address: record { - ## IPv4 or IPv6 address - ip : addr &log &optional; - ## NetBIOS address - netbios : string &log &optional; - ## Some other type that we don't support yet - unknown : KRB::Type_Value &optional; - }; - - type KRB::Host_Address_Vector: vector of KRB::Host_Address; - - ## The data from the SAFE message. See :rfc:`4120`. 
- type KRB::SAFE_Msg: record { - ## Protocol version number (5 for KRB5) - pvno : count; - ## The message type (20 for SAFE_MSG) - msg_type : count; - ## The application-specific data that is being passed - ## from the sender to the reciever - data : string; - ## Current time from the sender of the message - timestamp : time &optional; - ## Sequence number used to detect replays - seq : count &optional; - ## Sender address - sender : Host_Address &optional; - ## Recipient address - recipient : Host_Address &optional; - }; - - ## The data from the ERROR_MSG message. See :rfc:`4120`. - type KRB::Error_Msg: record { - ## Protocol version number (5 for KRB5) - pvno : count; - ## The message type (30 for ERROR_MSG) - msg_type : count; - ## Current time on the client - client_time : time &optional; - ## Current time on the server - server_time : time; - ## The specific error code - error_code : count; - ## Realm of the ticket - client_realm : string &optional; - ## Name on the ticket - client_name : string &optional; - ## Realm of the service - service_realm : string; - ## Name of the service - service_name : string; - ## Additional text to explain the error - error_text : string &optional; - ## Optional pre-authentication data - pa_data : vector of KRB::Type_Value &optional; - }; - - ## A Kerberos ticket. See :rfc:`4120`. - type KRB::Ticket: record { - ## Protocol version number (5 for KRB5) - pvno : count; - ## Realm - realm : string; - ## Name of the service - service_name : string; - ## Cipher the ticket was encrypted with - cipher : count; - ## Cipher text of the ticket - ciphertext : string &optional; - ## Authentication info - authenticationinfo: string &optional; - }; - - type KRB::Ticket_Vector: vector of KRB::Ticket; - - ## The data from the AS_REQ and TGS_REQ messages. See :rfc:`4120`. - type KRB::KDC_Request: record { - ## Protocol version number (5 for KRB5) - pvno : count; - ## The message type (10 for AS_REQ, 12 for TGS_REQ) - msg_type : count; - ## Optional pre-authentication data - pa_data : vector of KRB::Type_Value &optional; - ## Options specified in the request - kdc_options : KRB::KDC_Options; - ## Name on the ticket - client_name : string &optional; - - ## Realm of the service - service_realm : string; - ## Name of the service - service_name : string &optional; - ## Time the ticket is good from - from : time &optional; - ## Time the ticket is good till - till : time; - ## The requested renew-till time - rtime : time &optional; - - ## A random nonce generated by the client - nonce : count; - ## The desired encryption algorithms, in order of preference - encryption_types : vector of count; - ## Any additional addresses the ticket should be valid for - host_addrs : vector of KRB::Host_Address &optional; - ## Additional tickets may be included for certain transactions - additional_tickets : vector of KRB::Ticket &optional; - }; - - ## The data from the AS_REQ and TGS_REQ messages. See :rfc:`4120`. - type KRB::KDC_Response: record { - ## Protocol version number (5 for KRB5) - pvno : count; - ## The message type (11 for AS_REP, 13 for TGS_REP) - msg_type : count; - ## Optional pre-authentication data - pa_data : vector of KRB::Type_Value &optional; - ## Realm on the ticket - client_realm : string &optional; - ## Name on the service - client_name : string; - - ## The ticket that was issued - ticket : KRB::Ticket; - }; -} - -module GLOBAL; - -@load base/bif/event.bif - -## BPF filter the user has set via the -f command line options. Empty if none. 
-const cmd_line_bpf_filter = "" &redef; - -## The maximum number of open files to keep cached at a given time. -## If set to zero, this is automatically determined by inspecting -## the current/maximum limit on open files for the process. -const max_files_in_cache = 0 &redef; - -## Deprecated. -const log_rotate_interval = 0 sec &redef; - -## Deprecated. -const log_rotate_base_time = "0:00" &redef; - -## Deprecated. -const log_max_size = 0.0 &redef; - -## Deprecated. -const log_encryption_key = "" &redef; - -## Write profiling info into this file in regular intervals. The easiest way to -## activate profiling is loading :doc:`/scripts/policy/misc/profiling.bro`. -## -## .. bro:see:: profiling_interval expensive_profiling_multiple segment_profiling -global profiling_file: file &redef; - -## Update interval for profiling (0 disables). The easiest way to activate -## profiling is loading :doc:`/scripts/policy/misc/profiling.bro`. -## -## .. bro:see:: profiling_file expensive_profiling_multiple segment_profiling -const profiling_interval = 0 secs &redef; - -## Multiples of :bro:see:`profiling_interval` at which (more expensive) memory -## profiling is done (0 disables). -## -## .. bro:see:: profiling_interval profiling_file segment_profiling -const expensive_profiling_multiple = 0 &redef; - -## If true, then write segment profiling information (very high volume!) -## in addition to profiling statistics. -## -## .. bro:see:: profiling_interval expensive_profiling_multiple profiling_file -const segment_profiling = F &redef; - -## Output modes for packet profiling information. -## -## .. bro:see:: pkt_profile_mode pkt_profile_freq pkt_profile_file -type pkt_profile_modes: enum { - PKT_PROFILE_MODE_NONE, ##< No output. - PKT_PROFILE_MODE_SECS, ##< Output every :bro:see:`pkt_profile_freq` seconds. - PKT_PROFILE_MODE_PKTS, ##< Output every :bro:see:`pkt_profile_freq` packets. - PKT_PROFILE_MODE_BYTES, ##< Output every :bro:see:`pkt_profile_freq` bytes. -}; - -## Output mode for packet profiling information. -## -## .. bro:see:: pkt_profile_modes pkt_profile_freq pkt_profile_file -const pkt_profile_mode = PKT_PROFILE_MODE_NONE &redef; - -## Frequency associated with packet profiling. -## -## .. bro:see:: pkt_profile_modes pkt_profile_mode pkt_profile_file -const pkt_profile_freq = 0.0 &redef; - -## File where packet profiles are logged. -## -## .. bro:see:: pkt_profile_modes pkt_profile_freq pkt_profile_mode -global pkt_profile_file: file &redef; - -## Rate at which to generate :bro:see:`load_sample` events. As all -## events, the event is only generated if you've also defined a -## :bro:see:`load_sample` handler. Units are inverse number of packets; e.g., -## a value of 20 means "roughly one in every 20 packets". -## -## .. bro:see:: load_sample -global load_sample_freq = 20 &redef; - -## Whether to attempt to automatically detect SYN/FIN/RST-filtered trace -## and not report missing segments for such connections. -## If this is enabled, then missing data at the end of connections may not -## be reported via :bro:see:`content_gap`. -const detect_filtered_trace = F &redef; - -## Whether we want :bro:see:`content_gap` for partial -## connections. A connection is partial if it is missing a full handshake. Note -## that gap reports for partial connections might not be reliable. -## -## .. bro:see:: content_gap partial_connection -const report_gaps_for_partial = F &redef; - -## Flag to prevent Bro from exiting automatically when input is exhausted. 
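# Illustrative sketch only: typical redefs of the tuning options above, e.g.
# from a site policy script. The values are arbitrary examples, not defaults
# recommended by this patch.
redef profiling_interval = 30 secs;
redef expensive_profiling_multiple = 20;
redef detect_filtered_trace = T;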
-## Normally Bro terminates when all packet sources have gone dry -## and communication isn't enabled. If this flag is set, Bro's main loop will -## instead keep idling until :bro:see:`terminate` is explicitly called. -## -## This is mainly for testing purposes when termination behaviour needs to be -## controlled for reproducing results. -const exit_only_after_terminate = F &redef; - -## The CA certificate file to authorize remote Bros/Broccolis. -## -## .. bro:see:: ssl_private_key ssl_passphrase -const ssl_ca_certificate = "" &redef; - -## File containing our private key and our certificate. -## -## .. bro:see:: ssl_ca_certificate ssl_passphrase -const ssl_private_key = "" &redef; - -## The passphrase for our private key. Keeping this undefined -## causes Bro to prompt for the passphrase. -## -## .. bro:see:: ssl_private_key ssl_ca_certificate -const ssl_passphrase = "" &redef; - -## Default mode for Bro's user-space dynamic packet filter. If true, packets -## that aren't explicitly allowed through, are dropped from any further -## processing. -## -## .. note:: This is not the BPF packet filter but an additional dynamic filter -## that Bro optionally applies just before normal processing starts. -## -## .. bro:see:: install_dst_addr_filter install_dst_net_filter -## install_src_addr_filter install_src_net_filter uninstall_dst_addr_filter -## uninstall_dst_net_filter uninstall_src_addr_filter uninstall_src_net_filter -const packet_filter_default = F &redef; - -## Maximum size of regular expression groups for signature matching. -const sig_max_group_size = 50 &redef; - -## Deprecated. No longer functional. -const enable_syslog = F &redef; - -## Description transmitted to remote communication peers for identification. -const peer_description = "bro" &redef; - -## If true, broadcast events received from one peer to all other peers. -## -## .. bro:see:: forward_remote_state_changes -## -## .. note:: This option is only temporary and will disappear once we get a -## more sophisticated script-level communication framework. -const forward_remote_events = F &redef; - -## If true, broadcast state updates received from one peer to all other peers. -## -## .. bro:see:: forward_remote_events -## -## .. note:: This option is only temporary and will disappear once we get a -## more sophisticated script-level communication framework. -const forward_remote_state_changes = F &redef; - -## The number of IO chunks allowed to be buffered between the child -## and parent process of remote communication before Bro starts dropping -## connections to remote peers in an attempt to catch up. -const chunked_io_buffer_soft_cap = 800000 &redef; - -## Place-holder constant indicating "no peer". -const PEER_ID_NONE = 0; - -# Signature payload pattern types. -# todo:: use enum to help autodoc -# todo:: Still used? -#const SIG_PATTERN_PAYLOAD = 0; -#const SIG_PATTERN_HTTP = 1; -#const SIG_PATTERN_FTP = 2; -#const SIG_PATTERN_FINGER = 3; - -# Deprecated. -# todo::Should use the new logging framework directly. -const REMOTE_LOG_INFO = 1; ##< Deprecated. -const REMOTE_LOG_ERROR = 2; ##< Deprecated. - -# Source of logging messages from the communication framework. -# todo:: these should go into an enum to make them autodoc'able. -const REMOTE_SRC_CHILD = 1; ##< Message from the child process. -const REMOTE_SRC_PARENT = 2; ##< Message from the parent process. -const REMOTE_SRC_SCRIPT = 3; ##< Message from a policy script. - -## Synchronize trace processing at a regular basis in pseudo-realtime mode. -## -## .. 
bro:see:: remote_trace_sync_peers -const remote_trace_sync_interval = 0 secs &redef; - -## Number of peers across which to synchronize trace processing in -## pseudo-realtime mode. -## -## .. bro:see:: remote_trace_sync_interval -const remote_trace_sync_peers = 0 &redef; - -## Whether for :bro:attr:`&synchronized` state to send the old value as a -## consistency check. -const remote_check_sync_consistency = F &redef; - -## Reassemble the beginning of all TCP connections before doing -## signature matching. Enabling this provides more accurate matching at the -## expense of CPU cycles. -## -## .. bro:see:: dpd_buffer_size -## dpd_match_only_beginning dpd_ignore_ports -## -## .. note:: Despite the name, this option affects *all* signature matching, not -## only signatures used for dynamic protocol detection. -const dpd_reassemble_first_packets = T &redef; - -## Size of per-connection buffer used for dynamic protocol detection. For each -## connection, Bro buffers this initial amount of payload in memory so that -## complete protocol analysis can start even after the initial packets have -## already passed through (i.e., when a DPD signature matches only later). -## However, once the buffer is full, data is deleted and lost to analyzers that -## are activated afterwards. Then only analyzers that can deal with partial -## connections will be able to analyze the session. -## -## .. bro:see:: dpd_reassemble_first_packets dpd_match_only_beginning -## dpd_ignore_ports -const dpd_buffer_size = 1024 &redef; - -## If true, stops signature matching if :bro:see:`dpd_buffer_size` has been -## reached. -## -## .. bro:see:: dpd_reassemble_first_packets dpd_buffer_size -## dpd_ignore_ports -## -## .. note:: Despite the name, this option affects *all* signature matching, not -## only signatures used for dynamic protocol detection. -const dpd_match_only_beginning = T &redef; - -## If true, don't consider any ports for deciding which protocol analyzer to -## use. -## -## .. bro:see:: dpd_reassemble_first_packets dpd_buffer_size -## dpd_match_only_beginning -const dpd_ignore_ports = F &redef; - -## Ports which the core considers being likely used by servers. For ports in -## this set, it may heuristically decide to flip the direction of the -## connection if it misses the initial handshake. -const likely_server_ports: set[port] &redef; - -## Per-incident timer managers are drained after this amount of inactivity. -const timer_mgr_inactivity_timeout = 1 min &redef; - -## If true, output profiling for Time-Machine queries. -const time_machine_profiling = F &redef; - -## If true, warns about unused event handlers at startup. -const check_for_unused_event_handlers = F &redef; - -# If true, dumps all invoked event handlers at startup. -# todo::Still used? -# const dump_used_event_handlers = F &redef; - -## Deprecated. -const suppress_local_output = F &redef; - -## Holds the filename of the trace file given with ``-w`` (empty if none). -## -## .. bro:see:: record_all_packets -const trace_output_file = ""; - -## If a trace file is given with ``-w``, dump *all* packets seen by Bro into it. -## By default, Bro applies (very few) heuristics to reduce the volume. A side -## effect of setting this to true is that we can write the packets out before we -## actually process them, which can be helpful for debugging in case the -## analysis triggers a crash. -## -## .. bro:see:: trace_output_file -const record_all_packets = F &redef; - -## Ignore certain TCP retransmissions for :bro:see:`conn_stats`. 
Some -## connections (e.g., SSH) retransmit the acknowledged last byte to keep the -## connection alive. If *ignore_keep_alive_rexmit* is set to true, such -## retransmissions will be excluded in the rexmit counter in -## :bro:see:`conn_stats`. -## -## .. bro:see:: conn_stats -const ignore_keep_alive_rexmit = F &redef; - -module JSON; -export { - type TimestampFormat: enum { - ## Timestamps will be formatted as UNIX epoch doubles. This is - ## the format that Bro typically writes out timestamps. - TS_EPOCH, - ## Timestamps will be formatted as unsigned integers that - ## represent the number of milliseconds since the UNIX - ## epoch. - TS_MILLIS, - ## Timestamps will be formatted in the ISO8601 DateTime format. - ## Subseconds are also included which isn't actually part of the - ## standard but most consumers that parse ISO8601 seem to be able - ## to cope with that. - TS_ISO8601, - }; -} - -module Tunnel; -export { - ## The maximum depth of a tunnel to decapsulate until giving up. - ## Setting this to zero will disable all types of tunnel decapsulation. - const max_depth: count = 2 &redef; - - ## Toggle whether to do IPv{4,6}-in-IPv{4,6} decapsulation. - const enable_ip = T &redef; - - ## Toggle whether to do IPv{4,6}-in-AYIYA decapsulation. - const enable_ayiya = T &redef; - - ## Toggle whether to do IPv6-in-Teredo decapsulation. - const enable_teredo = T &redef; - - ## Toggle whether to do GTPv1 decapsulation. - const enable_gtpv1 = T &redef; - - ## Toggle whether to do GRE decapsulation. - const enable_gre = T &redef; - - ## With this set, the Teredo analyzer waits until it sees both sides - ## of a connection using a valid Teredo encapsulation before issuing - ## a :bro:see:`protocol_confirmation`. If it's false, the first - ## occurrence of a packet with valid Teredo encapsulation causes a - ## confirmation. - const delay_teredo_confirmation = T &redef; - - ## With this set, the GTP analyzer waits until the most-recent upflow - ## and downflow packets are a valid GTPv1 encapsulation before - ## issuing :bro:see:`protocol_confirmation`. If it's false, the - ## first occurrence of a packet with valid GTPv1 encapsulation causes - ## confirmation. Since the same inner connection can be carried - ## differing outer upflow/downflow connections, setting to false - ## may work better. - const delay_gtp_confirmation = F &redef; - - ## How often to cleanup internal state for inactive IP tunnels - ## (includes GRE tunnels). - const ip_tunnel_timeout = 24hrs &redef; - - ## Whether to validate the checksum supplied in the outer UDP header - ## of a VXLAN encapsulation. The spec says the checksum should be - ## transmitted as zero, but if not, then the decapsulating destination - ## may choose whether to perform the validation. - const validate_vxlan_checksums = T &redef; - - ## The set of UDP ports used for VXLAN traffic. Traffic using this - ## UDP destination port will attempt to be decapsulated. Note that if - ## if you customize this, you may still want to manually ensure that - ## :bro:see:`likely_server_ports` also gets populated accordingly. - const vxlan_ports: set[port] = { 4789/udp } &redef; -} # end export - -module Reporter; -export { - ## Tunable for sending reporter info messages to STDERR. The option to - ## turn it off is presented here in case Bro is being run by some - ## external harness and shouldn't output anything to the console. - const info_to_stderr = T &redef; - - ## Tunable for sending reporter warning messages to STDERR. 
The option - ## to turn it off is presented here in case Bro is being run by some - ## external harness and shouldn't output anything to the console. - const warnings_to_stderr = T &redef; - - ## Tunable for sending reporter error messages to STDERR. The option to - ## turn it off is presented here in case Bro is being run by some - ## external harness and shouldn't output anything to the console. - const errors_to_stderr = T &redef; -} - -module Pcap; -export { - ## Number of bytes per packet to capture from live interfaces. - const snaplen = 9216 &redef; - - ## Number of Mbytes to provide as buffer space when capturing from live - ## interfaces. - const bufsize = 128 &redef; -} # end export - -module DCE_RPC; -export { - ## The maximum number of simultaneous fragmented commands that - ## the DCE_RPC analyzer will tolerate before the it will generate - ## a weird and skip further input. - const max_cmd_reassembly = 20 &redef; - - ## The maximum number of fragmented bytes that the DCE_RPC analyzer - ## will tolerate on a command before the analyzer will generate a weird - ## and skip further input. - const max_frag_data = 30000 &redef; -} - -module NCP; -export { - ## The maximum number of bytes to allocate when parsing NCP frames. - const max_frame_size = 65536 &redef; -} - -module Cluster; -export { - type Cluster::Pool: record {}; -} - -module Weird; -export { - ## Prevents rate-limiting sampling of any weirds named in the table. - option sampling_whitelist: set[string] = {}; - - ## How many weirds of a given type to tolerate before sampling begins. - ## I.e. this many consecutive weirds of a given type will be allowed to - ## raise events for script-layer handling before being rate-limited. - option sampling_threshold : count = 25; - - ## The rate-limiting sampling rate. One out of every of this number of - ## rate-limited weirds of a given type will be allowed to raise events - ## for further script-layer handling. Setting the sampling rate to 0 - ## will disable all output of rate-limited weirds. - option sampling_rate : count = 1000; - - ## How long a weird of a given type is allowed to keep state/counters in - ## memory. For "net" weirds an expiration timer starts per weird name when - ## first initializing its counter. For "flow" weirds an expiration timer - ## starts once per src/dst IP pair for the first weird of any name. For - ## "conn" weirds, counters and expiration timers are kept for the duration - ## of the connection for each named weird and reset when necessary. E.g. - ## if a "conn" weird by the name of "foo" is seen more than - ## :bro:see:`Weird::sampling_threshold` times, then an expiration timer - ## begins for "foo" and upon triggering will reset the counter for "foo" - ## and unthrottle its rate-limiting until it once again exceeds the - ## threshold. - option sampling_duration = 10min; -} - -module GLOBAL; - -## Seed for hashes computed internally for probabilistic data structures. Using -## the same value here will make the hashes compatible between independent Bro -## instances. If left unset, Bro will use a temporary local seed. -const global_hash_seed: string = "" &redef; - -## Number of bits in UIDs that are generated to identify connections and -## files. The larger the value, the more confidence in UID uniqueness. -## The maximum is currently 128 bits. -const bits_per_uid: count = 96 &redef; - -## Whether usage of the old communication system is considered an error or -## not. 
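# Illustrative sketch only: making UIDs and probabilistic data structures
# comparable across independent instances using the constants declared above.
# The seed string is a placeholder.
redef global_hash_seed = "example-shared-seed";
redef bits_per_uid = 128;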
The default Bro configuration no longer works with the non-Broker -## communication system unless you have manually taken action to initialize -## and set up the old comm. system. Deprecation warnings are still emitted -## when setting this flag, but they will not result in a fatal error. -const old_comm_usage_is_ok: bool = F &redef; diff --git a/scripts/base/init-bare.zeek b/scripts/base/init-bare.zeek new file mode 100644 index 0000000000..8bc02f379d --- /dev/null +++ b/scripts/base/init-bare.zeek @@ -0,0 +1,5184 @@ +@load base/bif/const.bif +@load base/bif/types.bif + +# Type declarations + +## An ordered array of strings. The entries are indexed by successive numbers. +## Note that it depends on the usage whether the first index is zero or one. +## +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. +type string_array: table[count] of string; + +## A set of strings. +## +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. +type string_set: set[string]; + +## A set of addresses. +## +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. +type addr_set: set[addr]; + +## A set of counts. +## +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. +type count_set: set[count]; + +## A vector of counts, used by some builtin functions to store a list of indices. +## +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. +type index_vec: vector of count; + +## A vector of subnets. +## +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. +type subnet_vec: vector of subnet; + +## A vector of any, used by some builtin functions to store a list of varying +## types. +## +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. +type any_vec: vector of any; + +## A vector of strings. +## +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. +type string_vec: vector of string; + +## A vector of x509 opaques. +## +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. +type x509_opaque_vector: vector of opaque of x509; + +## A vector of addresses. +## +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. +type addr_vec: vector of addr; + +## A table of strings indexed by strings. +## +## .. 
todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. +type table_string_of_string: table[string] of string; + +## A table of counts indexed by strings. +## +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. +type table_string_of_count: table[string] of count; + +## A set of file analyzer tags. +## +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. +type files_tag_set: set[Files::Tag]; + +## A structure indicating a MIME type and strength of a match against +## file magic signatures. +## +## :zeek:see:`file_magic` +type mime_match: record { + strength: int; ##< How strongly the signature matched. Used for + ##< prioritization when multiple file magic signatures + ##< match. + mime: string; ##< The MIME type of the file magic signature match. +}; + +## A vector of file magic signature matches, ordered by strength of +## the signature, strongest first. +## +## :zeek:see:`file_magic` +type mime_matches: vector of mime_match; + +## A connection's transport-layer protocol. Note that Zeek uses the term +## "connection" broadly, using flow semantics for ICMP and UDP. +type transport_proto: enum { + unknown_transport, ##< An unknown transport-layer protocol. + tcp, ##< TCP. + udp, ##< UDP. + icmp ##< ICMP. +}; + +## A connection's identifying 4-tuple of endpoints and ports. +## +## .. note:: It's actually a 5-tuple: the transport-layer protocol is stored as +## part of the port values, `orig_p` and `resp_p`, and can be extracted from +## them with :zeek:id:`get_port_transport_proto`. +type conn_id: record { + orig_h: addr; ##< The originator's IP address. + orig_p: port; ##< The originator's port number. + resp_h: addr; ##< The responder's IP address. + resp_p: port; ##< The responder's port number. +} &log; + +## The identifying 4-tuple of a uni-directional flow. +## +## .. note:: It's actually a 5-tuple: the transport-layer protocol is stored as +## part of the port values, `src_p` and `dst_p`, and can be extracted from +## them with :zeek:id:`get_port_transport_proto`. +type flow_id : record { + src_h: addr; ##< The source IP address. + src_p: port; ##< The source port number. + dst_h: addr; ##< The destination IP address. + dst_p: port; ##< The destination port number. +} &log; + +## Specifics about an ICMP conversation. ICMP events typically pass this in +## addition to :zeek:type:`conn_id`. +## +## .. zeek:see:: icmp_echo_reply icmp_echo_request icmp_redirect icmp_sent +## icmp_time_exceeded icmp_unreachable +type icmp_conn: record { + orig_h: addr; ##< The originator's IP address. + resp_h: addr; ##< The responder's IP address. + itype: count; ##< The ICMP type of the packet that triggered the instantiation of the record. + icode: count; ##< The ICMP code of the packet that triggered the instantiation of the record. + len: count; ##< The length of the ICMP payload of the packet that triggered the instantiation of the record. + hlim: count; ##< The encapsulating IP header's Hop Limit value. + v6: bool; ##< True if it's an ICMPv6 packet. +}; + +## Packet context part of an ICMP message. The fields of this record reflect the +## packet that is described by the context. 
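# Illustrative sketch only: constructing a conn_id value by hand, e.g. for use
# as a table key. The addresses and ports are placeholders, and the bro_init
# event is assumed from the stock distribution rather than from this patch.
event bro_init()
    {
    local cid = conn_id($orig_h=192.168.1.10, $orig_p=49152/tcp,
                        $resp_h=10.0.0.1, $resp_p=443/tcp);
    print cid;
    }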
+## +## .. zeek:see:: icmp_time_exceeded icmp_unreachable +type icmp_context: record { + id: conn_id; ##< The packet's 4-tuple. + len: count; ##< The length of the IP packet (headers + payload). + proto: count; ##< The packet's transport-layer protocol. + frag_offset: count; ##< The packet's fragmentation offset. + ## True if the packet's IP header is not fully included in the context + ## or if there is not enough of the transport header to determine source + ## and destination ports. If that is the case, the appropriate fields + ## of this record will be set to null values. + bad_hdr_len: bool; + bad_checksum: bool; ##< True if the packet's IP checksum is not correct. + MF: bool; ##< True if the packet's *more fragments* flag is set. + DF: bool; ##< True if the packet's *don't fragment* flag is set. +}; + +## Values extracted from a Prefix Information option in an ICMPv6 neighbor +## discovery message as specified by :rfc:`4861`. +## +## .. zeek:see:: icmp6_nd_option +type icmp6_nd_prefix_info: record { + ## Number of leading bits of the *prefix* that are valid. + prefix_len: count; + ## Flag indicating the prefix can be used for on-link determination. + L_flag: bool; + ## Autonomous address-configuration flag. + A_flag: bool; + ## Length of time in seconds that the prefix is valid for purpose of + ## on-link determination (0xffffffff represents infinity). + valid_lifetime: interval; + ## Length of time in seconds that the addresses generated from the + ## prefix via stateless address autoconfiguration remain preferred + ## (0xffffffff represents infinity). + preferred_lifetime: interval; + ## An IP address or prefix of an IP address. Use the *prefix_len* field + ## to convert this into a :zeek:type:`subnet`. + prefix: addr; +}; + +## Options extracted from ICMPv6 neighbor discovery messages as specified +## by :rfc:`4861`. +## +## .. zeek:see:: icmp_router_solicitation icmp_router_advertisement +## icmp_neighbor_advertisement icmp_neighbor_solicitation icmp_redirect +## icmp6_nd_options +type icmp6_nd_option: record { + ## 8-bit identifier of the type of option. + otype: count; + ## 8-bit integer representing the length of the option (including the + ## type and length fields) in units of 8 octets. + len: count; + ## Source Link-Layer Address (Type 1) or Target Link-Layer Address (Type 2). + ## Byte ordering of this is dependent on the actual link-layer. + link_address: string &optional; + ## Prefix Information (Type 3). + prefix: icmp6_nd_prefix_info &optional; + ## Redirected header (Type 4). This field contains the context of the + ## original, redirected packet. + redirect: icmp_context &optional; + ## Recommended MTU for the link (Type 5). + mtu: count &optional; + ## The raw data of the option (everything after type & length fields), + ## useful for unknown option types or when the full option payload is + ## truncated in the captured packet. In those cases, option fields + ## won't be pre-extracted into the fields above. + payload: string &optional; +}; + +## A type alias for a vector of ICMPv6 neighbor discovery message options. +type icmp6_nd_options: vector of icmp6_nd_option; + +# A DNS mapping between IP address and hostname resolved by Zeek's internal +# resolver. +# +# .. zeek:see:: dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name +# dns_mapping_unverified dns_mapping_valid +type dns_mapping: record { + ## The time when the mapping was created, which corresponds to when + ## the DNS query was sent out. 
+ creation_time: time; + ## If the mapping is the result of a name lookup, the queried host name; + ## otherwise empty. + req_host: string; + ## If the mapping is the result of a pointer lookup, the queried + ## address; otherwise null. + req_addr: addr; + ## True if the lookup returned success. Only then are the result fields + ## valid. + valid: bool; + ## If the mapping is the result of a pointer lookup, the resolved + ## hostname; otherwise empty. + hostname: string; + ## If the mapping is the result of an address lookup, the resolved + ## address(es); otherwise empty. + addrs: addr_set; +}; + +## A parsed host/port combination describing server endpoint for an upcoming +## data transfer. +## +## .. zeek:see:: fmt_ftp_port parse_eftp_port parse_ftp_epsv parse_ftp_pasv +## parse_ftp_port +type ftp_port: record { + h: addr; ##< The host's address. + p: port; ##< The host's port. + valid: bool; ##< True if format was right. Only then are *h* and *p* valid. +}; + +## Statistics about what a TCP endpoint sent. +## +## .. zeek:see:: conn_stats +type endpoint_stats: record { + num_pkts: count; ##< Number of packets. + num_rxmit: count; ##< Number of retransmissions. + num_rxmit_bytes: count; ##< Number of retransmitted bytes. + num_in_order: count; ##< Number of in-order packets. + num_OO: count; ##< Number of out-of-order packets. + num_repl: count; ##< Number of replicated packets (last packet was sent again). + ## Endian type used by the endpoint, if it could be determined from + ## the sequence numbers used. This is one of :zeek:see:`ENDIAN_UNKNOWN`, + ## :zeek:see:`ENDIAN_BIG`, :zeek:see:`ENDIAN_LITTLE`, and + ## :zeek:see:`ENDIAN_CONFUSED`. + endian_type: count; +}; + +module Tunnel; +export { + ## Records the identity of an encapsulating parent of a tunneled connection. + type EncapsulatingConn: record { + ## The 4-tuple of the encapsulating "connection". In case of an + ## IP-in-IP tunnel the ports will be set to 0. The direction + ## (i.e., orig and resp) are set according to the first tunneled + ## packet seen and not according to the side that established + ## the tunnel. + cid: conn_id; + ## The type of tunnel. + tunnel_type: Tunnel::Type; + ## A globally unique identifier that, for non-IP-in-IP tunnels, + ## cross-references the *uid* field of :zeek:type:`connection`. + uid: string &optional; + } &log; +} # end export +module GLOBAL; + +## A type alias for a vector of encapsulating "connections", i.e. for when +## there are tunnels within tunnels. +## +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. +type EncapsulatingConnVector: vector of Tunnel::EncapsulatingConn; + +## Statistics about a :zeek:type:`connection` endpoint. +## +## .. zeek:see:: connection +type endpoint: record { + size: count; ##< Logical size of data sent (for TCP: derived from sequence numbers). + ## Endpoint state. For a TCP connection, one of the constants: + ## :zeek:see:`TCP_INACTIVE` :zeek:see:`TCP_SYN_SENT` + ## :zeek:see:`TCP_SYN_ACK_SENT` :zeek:see:`TCP_PARTIAL` + ## :zeek:see:`TCP_ESTABLISHED` :zeek:see:`TCP_CLOSED` :zeek:see:`TCP_RESET`. + ## For UDP, one of :zeek:see:`UDP_ACTIVE` and :zeek:see:`UDP_INACTIVE`. + state: count; + ## Number of packets sent. Only set if :zeek:id:`use_conn_size_analyzer` + ## is true. + num_pkts: count &optional; + ## Number of IP-level bytes sent. Only set if + ## :zeek:id:`use_conn_size_analyzer` is true. 
+ num_bytes_ip: count &optional; + ## The current IPv6 flow label that the connection endpoint is using. + ## Always 0 if the connection is over IPv4. + flow_label: count; + ## The link-layer address seen in the first packet (if available). + l2_addr: string &optional; +}; + +## A connection. This is Zeek's basic connection type describing IP- and +## transport-layer information about the conversation. Note that Zeek uses a +## liberal interpretation of "connection" and associates instances of this type +## also with UDP and ICMP flows. +type connection: record { + id: conn_id; ##< The connection's identifying 4-tuple. + orig: endpoint; ##< Statistics about originator side. + resp: endpoint; ##< Statistics about responder side. + start_time: time; ##< The timestamp of the connection's first packet. + ## The duration of the conversation. Roughly speaking, this is the + ## interval between first and last data packet (low-level TCP details + ## may adjust it somewhat in ambiguous cases). + duration: interval; + ## The set of services the connection is using as determined by Zeek's + ## dynamic protocol detection. Each entry is the label of an analyzer + ## that confirmed that it could parse the connection payload. While + ## typically, there will be at most one entry for each connection, in + ## principle it is possible that more than one protocol analyzer is able + ## to parse the same data. If so, all will be recorded. Also note that + ## the recorded services are independent of any transport-level protocols. + service: set[string]; + history: string; ##< State history of connections. See *history* in :zeek:see:`Conn::Info`. + ## A globally unique connection identifier. For each connection, Zeek + ## creates an ID that is very likely unique across independent Zeek runs. + ## These IDs can thus be used to tag and locate information associated + ## with that connection. + uid: string; + ## If the connection is tunneled, this field contains information about + ## the encapsulating "connection(s)" with the outermost one starting + ## at index zero. It's also always the first such encapsulation seen + ## for the connection unless the :zeek:id:`tunnel_changed` event is + ## handled and reassigns this field to the new encapsulation. + tunnel: EncapsulatingConnVector &optional; + + ## The outer VLAN, if applicable for this connection. + vlan: int &optional; + + ## The inner VLAN, if applicable for this connection. + inner_vlan: int &optional; +}; + +## Default amount of time a file can be inactive before the file analysis +## gives up and discards any internal state related to the file. +option default_file_timeout_interval: interval = 2 mins; + +## Default amount of bytes that file analysis will buffer in order to use +## for mime type matching. File analyzers attached at the time of mime type +## matching or later, will receive a copy of this buffer. +option default_file_bof_buffer_size: count = 4096; + +## A file that Zeek is analyzing. This is Zeek's type for describing the basic +## internal metadata collected about a "file", which is essentially just a +## byte stream that is e.g. pulled from a network connection or possibly +## some other input source. +type fa_file: record { + ## An identifier associated with a single file. + id: string; + + ## Identifier associated with a container file from which this one was + ## extracted as part of the file analysis. + parent_id: string &optional; + + ## An identification of the source of the file data. E.g. 
it may be
+ ## a network protocol over which it was transferred, or a local file
+ ## path which was read, or some other input source.
+ ## Examples are: "HTTP", "SMTP", "IRC_DATA", or the file path.
+ source: string;
+
+ ## If the source of this file is a network connection, this field
+ ## may be set to indicate the directionality.
+ is_orig: bool &optional;
+
+ ## The set of connections over which the file was transferred.
+ conns: table[conn_id] of connection &optional;
+
+ ## The time at which the last activity for the file was seen.
+ last_active: time;
+
+ ## Number of bytes provided to the file analysis engine for the file.
+ seen_bytes: count &default=0;
+
+ ## Total number of bytes that are supposed to comprise the full file.
+ total_bytes: count &optional;
+
+ ## The number of bytes in the file stream that were completely missed
+ ## during the process of analysis, e.g. due to dropped packets.
+ missing_bytes: count &default=0;
+
+ ## The number of bytes in the file stream that were not delivered to
+ ## stream file analyzers. Generally, this consists of bytes that
+ ## couldn't be reassembled, either because reassembly simply isn't
+ ## enabled, or due to size limitations of the reassembly buffer.
+ overflow_bytes: count &default=0;
+
+ ## The amount of time between receiving new data for this file that
+ ## the analysis engine will wait before giving up on it.
+ timeout_interval: interval &default=default_file_timeout_interval;
+
+ ## The number of bytes at the beginning of a file to save for later
+ ## inspection in the *bof_buffer* field.
+ bof_buffer_size: count &default=default_file_bof_buffer_size;
+
+ ## The content of the beginning of a file up to *bof_buffer_size* bytes.
+ ## This is also the buffer that's used for file/mime type detection.
+ bof_buffer: string &optional;
+} &redef;
+
+## Metadata that's been inferred about a particular file.
+type fa_metadata: record {
+ ## The strongest matching MIME type if one was discovered.
+ mime_type: string &optional;
+ ## All matching MIME types if any were discovered.
+ mime_types: mime_matches &optional;
+ ## Specifies whether the MIME type was inferred using signatures,
+ ## or provided directly by the protocol the file appeared in.
+ inferred: bool &default=T;
+};
+
+## Fields of a SYN packet.
+##
+## .. zeek:see:: connection_SYN_packet
+type SYN_packet: record {
+ is_orig: bool; ##< True if the packet was sent by the connection's originator.
+ DF: bool; ##< True if the *don't fragment* flag is set in the IP header.
+ ttl: count; ##< The IP header's time-to-live.
+ size: count; ##< The size of the packet's payload as specified in the IP header.
+ win_size: count; ##< The window size from the TCP header.
+ win_scale: int; ##< The window scale option if present, or -1 if not.
+ MSS: count; ##< The maximum segment size if present, or 0 if not.
+ SACK_OK: bool; ##< True if the *SACK* option is present.
+};
+
+## Packet capture statistics. All counts are cumulative.
+##
+## .. zeek:see:: get_net_stats
+type NetStats: record {
+ pkts_recvd: count &default=0; ##< Packets received by Zeek.
+ pkts_dropped: count &default=0; ##< Packets reported dropped by the system.
+ ## Packets seen on the link. Note that this may differ
+ ## from *pkts_recvd* because of a potential capture_filter. See
+ ## :doc:`/scripts/base/frameworks/packet-filter/main.zeek`. Depending on the
+ ## packet capture system, this value may not be available and will then
+ ## always be set to zero.
+ pkts_link: count &default=0; + bytes_recvd: count &default=0; ##< Bytes received by Zeek. +}; + +type ConnStats: record { + total_conns: count; ##< + current_conns: count; ##< + current_conns_extern: count; ##< + sess_current_conns: count; ##< + + num_packets: count; + num_fragments: count; + max_fragments: count; + + num_tcp_conns: count; ##< Current number of TCP connections in memory. + max_tcp_conns: count; ##< Maximum number of concurrent TCP connections so far. + cumulative_tcp_conns: count; ##< Total number of TCP connections so far. + + num_udp_conns: count; ##< Current number of UDP flows in memory. + max_udp_conns: count; ##< Maximum number of concurrent UDP flows so far. + cumulative_udp_conns: count; ##< Total number of UDP flows so far. + + num_icmp_conns: count; ##< Current number of ICMP flows in memory. + max_icmp_conns: count; ##< Maximum number of concurrent ICMP flows so far. + cumulative_icmp_conns: count; ##< Total number of ICMP flows so far. + + killed_by_inactivity: count; +}; + +## Statistics about Zeek's process. +## +## .. zeek:see:: get_proc_stats +## +## .. note:: All process-level values refer to Zeek's main process only, not to +## the child process it spawns for doing communication. +type ProcStats: record { + debug: bool; ##< True if compiled with --enable-debug. + start_time: time; ##< Start time of process. + real_time: interval; ##< Elapsed real time since Zeek started running. + user_time: interval; ##< User CPU seconds. + system_time: interval; ##< System CPU seconds. + mem: count; ##< Maximum memory consumed, in KB. + minor_faults: count; ##< Page faults not requiring actual I/O. + major_faults: count; ##< Page faults requiring actual I/O. + num_swap: count; ##< Times swapped out. + blocking_input: count; ##< Blocking input operations. + blocking_output: count; ##< Blocking output operations. + num_context: count; ##< Number of involuntary context switches. +}; + +type EventStats: record { + queued: count; ##< Total number of events queued so far. + dispatched: count; ##< Total number of events dispatched so far. +}; + +## Holds statistics for all types of reassembly. +## +## .. zeek:see:: get_reassembler_stats +type ReassemblerStats: record { + file_size: count; ##< Byte size of File reassembly tracking. + frag_size: count; ##< Byte size of Fragment reassembly tracking. + tcp_size: count; ##< Byte size of TCP reassembly tracking. + unknown_size: count; ##< Byte size of reassembly tracking for unknown purposes. +}; + +## Statistics of all regular expression matchers. +## +## .. zeek:see:: get_matcher_stats +type MatcherStats: record { + matchers: count; ##< Number of distinct RE matchers. + nfa_states: count; ##< Number of NFA states across all matchers. + dfa_states: count; ##< Number of DFA states across all matchers. + computed: count; ##< Number of computed DFA state transitions. + mem: count; ##< Number of bytes used by DFA states. + hits: count; ##< Number of cache hits. + misses: count; ##< Number of cache misses. +}; + +## Statistics of timers. +## +## .. zeek:see:: get_timer_stats +type TimerStats: record { + current: count; ##< Current number of pending timers. + max: count; ##< Maximum number of concurrent timers pending so far. + cumulative: count; ##< Cumulative number of timers scheduled. +}; + +## Statistics of file analysis. +## +## .. zeek:see:: get_file_analysis_stats +type FileAnalysisStats: record { + current: count; ##< Current number of files being analyzed. + max: count; ##< Maximum number of concurrent files so far. 
+ cumulative: count; ##< Cumulative number of files analyzed.
+};
+
+## Statistics related to Zeek's active use of DNS. These numbers are
+## about Zeek performing DNS queries on its own, not traffic
+## being seen.
+##
+## .. zeek:see:: get_dns_stats
+type DNSStats: record {
+ requests: count; ##< Number of DNS requests made.
+ successful: count; ##< Number of successful DNS replies.
+ failed: count; ##< Number of DNS reply failures.
+ pending: count; ##< Current pending queries.
+ cached_hosts: count; ##< Number of cached hosts.
+ cached_addresses: count; ##< Number of cached addresses.
+};
+
+## Statistics about number of gaps in TCP connections.
+##
+## .. zeek:see:: get_gap_stats
+type GapStats: record {
+ ack_events: count; ##< How many ack events *could* have had gaps.
+ ack_bytes: count; ##< How many bytes those covered.
+ gap_events: count; ##< How many *did* have gaps.
+ gap_bytes: count; ##< How many bytes were missing in the gaps.
+};
+
+## Statistics about threads.
+##
+## .. zeek:see:: get_thread_stats
+type ThreadStats: record {
+ num_threads: count;
+};
+
+## Statistics about Broker communication.
+##
+## .. zeek:see:: get_broker_stats
+type BrokerStats: record {
+ ## Number of active peers.
+ num_peers: count;
+ ## Number of active data stores.
+ num_stores: count;
+ ## Number of pending data store queries.
+ num_pending_queries: count;
+ ## Number of total events received.
+ num_events_incoming: count;
+ ## Number of total events sent.
+ num_events_outgoing: count;
+ ## Number of total log records received.
+ num_logs_incoming: count;
+ ## Number of total log records sent.
+ num_logs_outgoing: count;
+ ## Number of total identifiers received.
+ num_ids_incoming: count;
+ ## Number of total identifiers sent.
+ num_ids_outgoing: count;
+};
+
+## Statistics about reporter messages and weirds.
+##
+## .. zeek:see:: get_reporter_stats
+type ReporterStats: record {
+ ## Number of total weirds encountered, before any rate-limiting.
+ weirds: count;
+ ## Number of times each individual weird is encountered, before any
+ ## rate-limiting is applied.
+ weirds_by_type: table[string] of count;
+};
+
+## Deprecated.
+##
+## .. todo:: Remove. It's still declared internally but doesn't seem used anywhere
+## else.
+type packet: record {
+ conn: connection;
+ is_orig: bool;
+ seq: count; ##< seq=k => it is the kth *packet* of the connection
+ timestamp: time;
+};
+
+## Table type used to map variable names to their memory allocation.
+##
+## .. zeek:see:: global_sizes
+##
+## .. todo:: We need this type definition only for declaring builtin functions
+## via ``bifcl``. We should extend ``bifcl`` to understand composite types
+## directly and then remove this alias.
+type var_sizes: table[string] of count;
+
+## Meta-information about a script-level identifier.
+##
+## .. zeek:see:: global_ids id_table
+type script_id: record {
+ type_name: string; ##< The name of the identifier's type.
+ exported: bool; ##< True if the identifier is exported.
+ constant: bool; ##< True if the identifier is a constant.
+ enum_constant: bool; ##< True if the identifier is an enum value.
+ option_value: bool; ##< True if the identifier is an option.
+ redefinable: bool; ##< True if the identifier is declared with the :zeek:attr:`&redef` attribute.
+ value: any &optional; ##< The current value of the identifier.
+};
+
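A minimal sketch of how a script can consume the introspection types above, assuming the standard ``global_ids()`` built-in function (which returns the ``id_table`` alias defined below) and the renamed ``zeek_init`` event available in this tree; the output format is purely illustrative:

    event zeek_init()
        {
        # global_ids() returns a table[string] of script_id.
        local ids = global_ids();

        for ( name in ids )
            {
            local info = ids[name];

            # Only report identifiers that scripts exported.
            if ( info$exported )
                print fmt("%s: %s", name, info$type_name);
            }
        }

+## Table type used to map script-level identifiers to meta-information
+## describing them.
+##
+## .. zeek:see:: global_ids script_id
+##
+## ..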
todo:: We need this type definition only for declaring builtin functions
+## via ``bifcl``. We should extend ``bifcl`` to understand composite types
+## directly and then remove this alias.
+type id_table: table[string] of script_id;
+
+## Meta-information about a record field.
+##
+## .. zeek:see:: record_fields record_field_table
+type record_field: record {
+ type_name: string; ##< The name of the field's type.
+ log: bool; ##< True if the field is declared with the :zeek:attr:`&log` attribute.
+ ## The current value of the field in the record instance passed into
+ ## :zeek:see:`record_fields` (if it has one).
+ value: any &optional;
+ default_val: any &optional; ##< The value of the :zeek:attr:`&default` attribute if defined.
+};
+
+## Table type used to map record field declarations to meta-information
+## describing them.
+##
+## .. zeek:see:: record_fields record_field
+##
+## .. todo:: We need this type definition only for declaring builtin functions
+## via ``bifcl``. We should extend ``bifcl`` to understand composite types
+## directly and then remove this alias.
+type record_field_table: table[string] of record_field;
+
+## Meta-information about a parameter to a function/event.
+##
+## .. zeek:see:: call_argument_vector new_event
+type call_argument: record {
+ name: string; ##< The name of the parameter.
+ type_name: string; ##< The name of the parameter's type.
+ default_val: any &optional; ##< The value of the :zeek:attr:`&default` attribute if defined.
+
+ ## The value of the parameter as passed into a given call instance.
+ ## Might be unset in the case that a :zeek:attr:`&default` attribute is
+ ## defined.
+ value: any &optional;
+};
+
+## Vector type used to capture parameters of a function/event call.
+##
+## .. zeek:see:: call_argument new_event
+type call_argument_vector: vector of call_argument;
+
+# todo:: Do we still need these here? Can they move into the packet filter
+# framework?
+#
+# The following two variables are defined here until the core no longer
+# depends on the names remaining as they are now.
+
+## Set of BPF capture filters to use for capturing, indexed by a user-definable
+## ID (which must be unique). If Zeek is *not* configured with
+## :zeek:id:`PacketFilter::enable_auto_protocol_capture_filters`,
+## all packets matching at least one of the filters in this table (and all in
+## :zeek:id:`restrict_filters`) will be analyzed.
+##
+## .. zeek:see:: PacketFilter PacketFilter::enable_auto_protocol_capture_filters
+## PacketFilter::unrestricted_filter restrict_filters
+global capture_filters: table[string] of string &redef;
+
+## Set of BPF filters to restrict capturing, indexed by a user-definable ID
+## (which must be unique).
+##
+## .. zeek:see:: PacketFilter PacketFilter::enable_auto_protocol_capture_filters
+## PacketFilter::unrestricted_filter capture_filters
+global restrict_filters: table[string] of string &redef;
+
+## Enum type identifying dynamic BPF filters. These are used by
+## :zeek:see:`Pcap::precompile_pcap_filter` and :zeek:see:`Pcap::install_pcap_filter`.
+type PcapFilterID: enum { None };
+
+## Deprecated.
+##
+## .. zeek:see:: anonymize_addr
+type IPAddrAnonymization: enum {
+ KEEP_ORIG_ADDR,
+ SEQUENTIALLY_NUMBERED,
+ RANDOM_MD5,
+ PREFIX_PRESERVING_A50,
+ PREFIX_PRESERVING_MD5,
+};
+
+## Deprecated.
+##
+## .. zeek:see:: anonymize_addr
+type IPAddrAnonymizationClass: enum {
+ ORIG_ADDR,
+ RESP_ADDR,
+ OTHER_ADDR,
+};
+
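For context on the two BPF tables declared above, site scripts typically extend them via ``redef``; a hypothetical sketch (the filter IDs, ports, and the 10.0.0.99 address are made-up examples):

    # Only capture DNS and HTTP traffic ...
    redef capture_filters += { ["dns"] = "port 53", ["http"] = "tcp port 80" };

    # ... and never capture traffic involving a (hypothetical) monitoring host.
    redef restrict_filters += { ["skip-monitor"] = "not host 10.0.0.99" };

+## Deprecated.
+##
+## ..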
zeek:see:: rotate_file rotate_file_by_name +type rotate_info: record { + old_name: string; ##< Original filename. + new_name: string; ##< File name after rotation. + open: time; ##< Time when opened. + close: time; ##< Time when closed. +}; + +### The following aren't presently used, though they should be. +# # Structures needed for subsequence computations (str_smith_waterman): +# # +# type sw_variant: enum { +# SW_SINGLE, +# SW_MULTIPLE, +# }; + +## Parameters for the Smith-Waterman algorithm. +## +## .. zeek:see:: str_smith_waterman +type sw_params: record { + ## Minimum size of a substring, minimum "granularity". + min_strlen: count &default = 3; + + ## Smith-Waterman flavor to use. + sw_variant: count &default = 0; +}; + +## Helper type for return value of Smith-Waterman algorithm. +## +## .. zeek:see:: str_smith_waterman sw_substring_vec sw_substring sw_align_vec sw_params +type sw_align: record { + str: string; ##< String a substring is part of. + index: count; ##< Offset substring is located. +}; + +## Helper type for return value of Smith-Waterman algorithm. +## +## .. zeek:see:: str_smith_waterman sw_substring_vec sw_substring sw_align sw_params +type sw_align_vec: vector of sw_align; + +## Helper type for return value of Smith-Waterman algorithm. +## +## .. zeek:see:: str_smith_waterman sw_substring_vec sw_align_vec sw_align sw_params +## +type sw_substring: record { + str: string; ##< A substring. + aligns: sw_align_vec; ##< All strings of which it's a substring. + new: bool; ##< True if start of new alignment. +}; + +## Return type for Smith-Waterman algorithm. +## +## .. zeek:see:: str_smith_waterman sw_substring sw_align_vec sw_align sw_params +## +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. +type sw_substring_vec: vector of sw_substring; + +## Policy-level representation of a packet passed on by libpcap. The data +## includes the complete packet as returned by libpcap, including the link-layer +## header. +## +## .. zeek:see:: dump_packet get_current_packet +type pcap_packet: record { + ts_sec: count; ##< The non-fractional part of the packet's timestamp (i.e., full seconds since the epoch). + ts_usec: count; ##< The fractional part of the packet's timestamp. + caplen: count; ##< The number of bytes captured (<= *len*). + len: count; ##< The length of the packet in bytes, including link-level header. + data: string; ##< The payload of the packet, including link-level header. + link_type: link_encap; ##< Layer 2 link encapsulation type. +}; + +## GeoIP location information. +## +## .. zeek:see:: lookup_location +type geo_location: record { + country_code: string &optional; ##< The country code. + region: string &optional; ##< The region. + city: string &optional; ##< The city. + latitude: double &optional; ##< Latitude. + longitude: double &optional; ##< Longitude. +} &log; + +## The directory containing MaxMind DB (.mmdb) files to use for GeoIP support. +const mmdb_dir: string = "" &redef; + +## Computed entropy values. The record captures a number of measures that are +## computed in parallel. See `A Pseudorandom Number Sequence Test Program +## `_ for more information, Zeek uses the same +## code. +## +## .. zeek:see:: entropy_test_add entropy_test_finish entropy_test_init find_entropy +type entropy_test_result: record { + entropy: double; ##< Information density. + chi_square: double; ##< Chi-Square value. 
+ mean: double; ##< Arithmetic Mean. + monte_carlo_pi: double; ##< Monte-carlo value for pi. + serial_correlation: double; ##< Serial correlation coefficient. +}; + +# TCP values for :zeek:see:`endpoint` *state* field. +# todo:: these should go into an enum to make them autodoc'able. +const TCP_INACTIVE = 0; ##< Endpoint is still inactive. +const TCP_SYN_SENT = 1; ##< Endpoint has sent SYN. +const TCP_SYN_ACK_SENT = 2; ##< Endpoint has sent SYN/ACK. +const TCP_PARTIAL = 3; ##< Endpoint has sent data but no initial SYN. +const TCP_ESTABLISHED = 4; ##< Endpoint has finished initial handshake regularly. +const TCP_CLOSED = 5; ##< Endpoint has closed connection. +const TCP_RESET = 6; ##< Endpoint has sent RST. + +# UDP values for :zeek:see:`endpoint` *state* field. +# todo:: these should go into an enum to make them autodoc'able. +const UDP_INACTIVE = 0; ##< Endpoint is still inactive. +const UDP_ACTIVE = 1; ##< Endpoint has sent something. + +## If true, don't verify checksums. Useful for running on altered trace +## files, and for saving a few cycles, but at the risk of analyzing invalid +## data. Note that the ``-C`` command-line option overrides the setting of this +## variable. +const ignore_checksums = F &redef; + +## If true, instantiate connection state when a partial connection +## (one missing its initial establishment negotiation) is seen. +const partial_connection_ok = T &redef; + +## If true, instantiate connection state when a SYN/ACK is seen but not the +## initial SYN (even if :zeek:see:`partial_connection_ok` is false). +const tcp_SYN_ack_ok = T &redef; + +## If true, pass any undelivered to the signature engine before flushing the state. +## If a connection state is removed, there may still be some data waiting in the +## reassembler. +const tcp_match_undelivered = T &redef; + +## Check up on the result of an initial SYN after this much time. +const tcp_SYN_timeout = 5 secs &redef; + +## After a connection has closed, wait this long for further activity +## before checking whether to time out its state. +const tcp_session_timer = 6 secs &redef; + +## When checking a closed connection for further activity, consider it +## inactive if there hasn't been any for this long. Complain if the +## connection is reused before this much time has elapsed. +const tcp_connection_linger = 5 secs &redef; + +## Wait this long upon seeing an initial SYN before timing out the +## connection attempt. +const tcp_attempt_delay = 5 secs &redef; + +## Upon seeing a normal connection close, flush state after this much time. +const tcp_close_delay = 5 secs &redef; + +## Upon seeing a RST, flush state after this much time. +const tcp_reset_delay = 5 secs &redef; + +## Generate a :zeek:id:`connection_partial_close` event this much time after one +## half of a partial connection closes, assuming there has been no subsequent +## activity. +const tcp_partial_close_delay = 3 secs &redef; + +## If a connection belongs to an application that we don't analyze, +## time it out after this interval. If 0 secs, then don't time it out (but +## :zeek:see:`tcp_inactivity_timeout`, :zeek:see:`udp_inactivity_timeout`, and +## :zeek:see:`icmp_inactivity_timeout` still apply). +const non_analyzed_lifetime = 0 secs &redef; + +## If a TCP connection is inactive, time it out after this interval. If 0 secs, +## then don't time it out. +## +## .. 
zeek:see:: udp_inactivity_timeout icmp_inactivity_timeout set_inactivity_timeout
+const tcp_inactivity_timeout = 5 min &redef;
+
+## If a UDP flow is inactive, time it out after this interval. If 0 secs, then
+## don't time it out.
+##
+## .. zeek:see:: tcp_inactivity_timeout icmp_inactivity_timeout set_inactivity_timeout
+const udp_inactivity_timeout = 1 min &redef;
+
+## If an ICMP flow is inactive, time it out after this interval. If 0 secs, then
+## don't time it out.
+##
+## .. zeek:see:: tcp_inactivity_timeout udp_inactivity_timeout set_inactivity_timeout
+const icmp_inactivity_timeout = 1 min &redef;
+
+## Number of FINs/RSTs in a row that constitute a "storm". Storms are reported
+## as ``weird`` via the notice framework, and they must also come within
+## intervals of at most :zeek:see:`tcp_storm_interarrival_thresh`.
+##
+## .. zeek:see:: tcp_storm_interarrival_thresh
+const tcp_storm_thresh = 1000 &redef;
+
+## FINs/RSTs must come with this much time or less between them to be
+## considered a "storm".
+##
+## .. zeek:see:: tcp_storm_thresh
+const tcp_storm_interarrival_thresh = 1 sec &redef;
+
+## Maximum amount of data that might plausibly be sent in an initial flight
+## (prior to receiving any acks). Used to determine whether we must not be
+## seeing our peer's ACKs. Set to zero to turn off this determination.
+##
+## .. zeek:see:: tcp_max_above_hole_without_any_acks tcp_excessive_data_without_further_acks
+const tcp_max_initial_window = 16384 &redef;
+
+## If we're not seeing our peer's ACKs, the maximum volume of data above a
+## sequence hole that we'll tolerate before assuming that there's been a packet
+## drop and we should give up on tracking a connection. If set to zero, then we
+## don't ever give up.
+##
+## .. zeek:see:: tcp_max_initial_window tcp_excessive_data_without_further_acks
+const tcp_max_above_hole_without_any_acks = 16384 &redef;
+
+## If we've seen this much data without any of it being acked, we give up
+## on that connection to avoid memory exhaustion due to buffering all that
+## stuff. If set to zero, then we don't ever give up. Ideally, Zeek would
+## track the current window on a connection and use it to infer that data
+## has in fact gone too far, but for now we just make this quite beefy.
+##
+## .. zeek:see:: tcp_max_initial_window tcp_max_above_hole_without_any_acks
+const tcp_excessive_data_without_further_acks = 10 * 1024 * 1024 &redef;
+
+## Number of TCP segments to buffer beyond what's been acknowledged already
+## to detect retransmission inconsistencies. Zero disables any additional
+## buffering.
+const tcp_max_old_segments = 0 &redef;
+
+## For services without a handler, these sets define originator-side ports
+## that still trigger reassembly.
+##
+## .. zeek:see:: tcp_reassembler_ports_resp
+const tcp_reassembler_ports_orig: set[port] = {} &redef;
+
+## For services without a handler, these sets define responder-side ports
+## that still trigger reassembly.
+##
+## .. zeek:see:: tcp_reassembler_ports_orig
+const tcp_reassembler_ports_resp: set[port] = {} &redef;
+
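To illustrate how the tunables above are meant to be adjusted, a hypothetical site policy might contain something like the following (the 30-minute value and the 8888/tcp port are made up):

    # Keep idle TCP connection state around longer than the 5-minute default.
    redef tcp_inactivity_timeout = 30 min;

    # Force reassembly of originator-side data on a custom service port, even
    # though no protocol analyzer is attached to it.
    redef tcp_reassembler_ports_orig += { 8888/tcp };

+## Defines destination TCP ports for which the contents of the originator stream
+## should be delivered via :zeek:see:`tcp_contents`.
+##
+## ..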
zeek:see:: tcp_content_delivery_ports_resp tcp_content_deliver_all_orig +## tcp_content_deliver_all_resp udp_content_delivery_ports_orig +## udp_content_delivery_ports_resp udp_content_deliver_all_orig +## udp_content_deliver_all_resp tcp_contents +const tcp_content_delivery_ports_orig: table[port] of bool = {} &redef; + +## Defines destination TCP ports for which the contents of the responder stream +## should be delivered via :zeek:see:`tcp_contents`. +## +## .. zeek:see:: tcp_content_delivery_ports_orig tcp_content_deliver_all_orig +## tcp_content_deliver_all_resp udp_content_delivery_ports_orig +## udp_content_delivery_ports_resp udp_content_deliver_all_orig +## udp_content_deliver_all_resp tcp_contents +const tcp_content_delivery_ports_resp: table[port] of bool = {} &redef; + +## If true, all TCP originator-side traffic is reported via +## :zeek:see:`tcp_contents`. +## +## .. zeek:see:: tcp_content_delivery_ports_orig tcp_content_delivery_ports_resp +## tcp_content_deliver_all_resp udp_content_delivery_ports_orig +## udp_content_delivery_ports_resp udp_content_deliver_all_orig +## udp_content_deliver_all_resp tcp_contents +const tcp_content_deliver_all_orig = F &redef; + +## If true, all TCP responder-side traffic is reported via +## :zeek:see:`tcp_contents`. +## +## .. zeek:see:: tcp_content_delivery_ports_orig +## tcp_content_delivery_ports_resp +## tcp_content_deliver_all_orig udp_content_delivery_ports_orig +## udp_content_delivery_ports_resp udp_content_deliver_all_orig +## udp_content_deliver_all_resp tcp_contents +const tcp_content_deliver_all_resp = F &redef; + +## Defines UDP destination ports for which the contents of the originator stream +## should be delivered via :zeek:see:`udp_contents`. +## +## .. zeek:see:: tcp_content_delivery_ports_orig +## tcp_content_delivery_ports_resp +## tcp_content_deliver_all_orig tcp_content_deliver_all_resp +## udp_content_delivery_ports_resp udp_content_deliver_all_orig +## udp_content_deliver_all_resp udp_contents +const udp_content_delivery_ports_orig: table[port] of bool = {} &redef; + +## Defines UDP destination ports for which the contents of the responder stream +## should be delivered via :zeek:see:`udp_contents`. +## +## .. zeek:see:: tcp_content_delivery_ports_orig +## tcp_content_delivery_ports_resp tcp_content_deliver_all_orig +## tcp_content_deliver_all_resp udp_content_delivery_ports_orig +## udp_content_deliver_all_orig udp_content_deliver_all_resp udp_contents +const udp_content_delivery_ports_resp: table[port] of bool = {} &redef; + +## If true, all UDP originator-side traffic is reported via +## :zeek:see:`udp_contents`. +## +## .. zeek:see:: tcp_content_delivery_ports_orig +## tcp_content_delivery_ports_resp tcp_content_deliver_all_resp +## tcp_content_delivery_ports_orig udp_content_delivery_ports_orig +## udp_content_delivery_ports_resp udp_content_deliver_all_resp +## udp_contents +const udp_content_deliver_all_orig = F &redef; + +## If true, all UDP responder-side traffic is reported via +## :zeek:see:`udp_contents`. +## +## .. zeek:see:: tcp_content_delivery_ports_orig +## tcp_content_delivery_ports_resp tcp_content_deliver_all_resp +## tcp_content_delivery_ports_orig udp_content_delivery_ports_orig +## udp_content_delivery_ports_resp udp_content_deliver_all_orig +## udp_contents +const udp_content_deliver_all_resp = F &redef; + +## Check for expired table entries after this amount of time. +## +## .. 
zeek:see:: table_incremental_step table_expire_delay +const table_expire_interval = 10 secs &redef; + +## When expiring/serializing table entries, don't work on more than this many +## table entries at a time. +## +## .. zeek:see:: table_expire_interval table_expire_delay +const table_incremental_step = 5000 &redef; + +## When expiring table entries, wait this amount of time before checking the +## next chunk of entries. +## +## .. zeek:see:: table_expire_interval table_incremental_step +const table_expire_delay = 0.01 secs &redef; + +## Time to wait before timing out a DNS request. +const dns_session_timeout = 10 sec &redef; + +## Time to wait before timing out an RPC request. +const rpc_timeout = 24 sec &redef; + +## How long to hold onto fragments for possible reassembly. A value of 0.0 +## means "forever", which resists evasion, but can lead to state accrual. +const frag_timeout = 0.0 sec &redef; + +## If positive, indicates the encapsulation header size that should +## be skipped. This applies to all packets. +const encap_hdr_size = 0 &redef; + +## Whether to use the ``ConnSize`` analyzer to count the number of packets and +## IP-level bytes transferred by each endpoint. If true, these values are +## returned in the connection's :zeek:see:`endpoint` record value. +const use_conn_size_analyzer = T &redef; + +# todo:: these should go into an enum to make them autodoc'able. +const ENDIAN_UNKNOWN = 0; ##< Endian not yet determined. +const ENDIAN_LITTLE = 1; ##< Little endian. +const ENDIAN_BIG = 2; ##< Big endian. +const ENDIAN_CONFUSED = 3; ##< Tried to determine endian, but failed. + +# Values for :zeek:see:`set_contents_file` *direction* argument. +# todo:: these should go into an enum to make them autodoc'able +const CONTENTS_NONE = 0; ##< Turn off recording of contents. +const CONTENTS_ORIG = 1; ##< Record originator contents. +const CONTENTS_RESP = 2; ##< Record responder contents. +const CONTENTS_BOTH = 3; ##< Record both originator and responder contents. + +# Values for code of ICMP *unreachable* messages. The list is not exhaustive. +# todo:: these should go into an enum to make them autodoc'able +# +# .. zeek:see:: icmp_unreachable +const ICMP_UNREACH_NET = 0; ##< Network unreachable. +const ICMP_UNREACH_HOST = 1; ##< Host unreachable. +const ICMP_UNREACH_PROTOCOL = 2; ##< Protocol unreachable. +const ICMP_UNREACH_PORT = 3; ##< Port unreachable. +const ICMP_UNREACH_NEEDFRAG = 4; ##< Fragment needed. +const ICMP_UNREACH_ADMIN_PROHIB = 13; ##< Administratively prohibited. + +# Definitions for access to packet headers. Currently only used for +# discarders. +# todo:: these should go into an enum to make them autodoc'able +const IPPROTO_IP = 0; ##< Dummy for IP. +const IPPROTO_ICMP = 1; ##< Control message protocol. +const IPPROTO_IGMP = 2; ##< Group management protocol. +const IPPROTO_IPIP = 4; ##< IP encapsulation in IP. +const IPPROTO_TCP = 6; ##< TCP. +const IPPROTO_UDP = 17; ##< User datagram protocol. +const IPPROTO_IPV6 = 41; ##< IPv6 header. +const IPPROTO_ICMPV6 = 58; ##< ICMP for IPv6. +const IPPROTO_RAW = 255; ##< Raw IP packet. + +# Definitions for IPv6 extension headers. +const IPPROTO_HOPOPTS = 0; ##< IPv6 hop-by-hop-options header. +const IPPROTO_ROUTING = 43; ##< IPv6 routing header. +const IPPROTO_FRAGMENT = 44; ##< IPv6 fragment header. +const IPPROTO_ESP = 50; ##< IPv6 encapsulating security payload header. +const IPPROTO_AH = 51; ##< IPv6 authentication header. +const IPPROTO_NONE = 59; ##< IPv6 no next header. 
+const IPPROTO_DSTOPTS = 60; ##< IPv6 destination options header. +const IPPROTO_MOBILITY = 135; ##< IPv6 mobility header. + +## Values extracted from an IPv6 extension header's (e.g. hop-by-hop or +## destination option headers) option field. +## +## .. zeek:see:: ip6_hdr ip6_ext_hdr ip6_hopopts ip6_dstopts +type ip6_option: record { + otype: count; ##< Option type. + len: count; ##< Option data length. + data: string; ##< Option data. +}; + +## A type alias for a vector of IPv6 options. +type ip6_options: vector of ip6_option; + +## Values extracted from an IPv6 Hop-by-Hop options extension header. +## +## .. zeek:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr ip6_option +type ip6_hopopts: record { + ## Protocol number of the next header (RFC 1700 et seq., IANA assigned + ## number), e.g. :zeek:id:`IPPROTO_ICMP`. + nxt: count; + ## Length of header in 8-octet units, excluding first unit. + len: count; + ## The TLV encoded options; + options: ip6_options; +}; + +## Values extracted from an IPv6 Destination options extension header. +## +## .. zeek:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr ip6_option +type ip6_dstopts: record { + ## Protocol number of the next header (RFC 1700 et seq., IANA assigned + ## number), e.g. :zeek:id:`IPPROTO_ICMP`. + nxt: count; + ## Length of header in 8-octet units, excluding first unit. + len: count; + ## The TLV encoded options; + options: ip6_options; +}; + +## Values extracted from an IPv6 Routing extension header. +## +## .. zeek:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr +type ip6_routing: record { + ## Protocol number of the next header (RFC 1700 et seq., IANA assigned + ## number), e.g. :zeek:id:`IPPROTO_ICMP`. + nxt: count; + ## Length of header in 8-octet units, excluding first unit. + len: count; + ## Routing type. + rtype: count; + ## Segments left. + segleft: count; + ## Type-specific data. + data: string; +}; + +## Values extracted from an IPv6 Fragment extension header. +## +## .. zeek:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr +type ip6_fragment: record { + ## Protocol number of the next header (RFC 1700 et seq., IANA assigned + ## number), e.g. :zeek:id:`IPPROTO_ICMP`. + nxt: count; + ## 8-bit reserved field. + rsv1: count; + ## Fragmentation offset. + offset: count; + ## 2-bit reserved field. + rsv2: count; + ## More fragments. + more: bool; + ## Fragment identification. + id: count; +}; + +## Values extracted from an IPv6 Authentication extension header. +## +## .. zeek:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr +type ip6_ah: record { + ## Protocol number of the next header (RFC 1700 et seq., IANA assigned + ## number), e.g. :zeek:id:`IPPROTO_ICMP`. + nxt: count; + ## Length of header in 4-octet units, excluding first two units. + len: count; + ## Reserved field. + rsv: count; + ## Security Parameter Index. + spi: count; + ## Sequence number, unset in the case that *len* field is zero. + seq: count &optional; + ## Authentication data, unset in the case that *len* field is zero. + data: string &optional; +}; + +## Values extracted from an IPv6 ESP extension header. +## +## .. zeek:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr +type ip6_esp: record { + ## Security Parameters Index. + spi: count; + ## Sequence number. + seq: count; +}; + +## Values extracted from an IPv6 Mobility Binding Refresh Request message. +## +## .. zeek:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg +type ip6_mobility_brr: record { + ## Reserved. + rsv: count; + ## Mobility Options. 
+ options: vector of ip6_option; +}; + +## Values extracted from an IPv6 Mobility Home Test Init message. +## +## .. zeek:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg +type ip6_mobility_hoti: record { + ## Reserved. + rsv: count; + ## Home Init Cookie. + cookie: count; + ## Mobility Options. + options: vector of ip6_option; +}; + +## Values extracted from an IPv6 Mobility Care-of Test Init message. +## +## .. zeek:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg +type ip6_mobility_coti: record { + ## Reserved. + rsv: count; + ## Care-of Init Cookie. + cookie: count; + ## Mobility Options. + options: vector of ip6_option; +}; + +## Values extracted from an IPv6 Mobility Home Test message. +## +## .. zeek:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg +type ip6_mobility_hot: record { + ## Home Nonce Index. + nonce_idx: count; + ## Home Init Cookie. + cookie: count; + ## Home Keygen Token. + token: count; + ## Mobility Options. + options: vector of ip6_option; +}; + +## Values extracted from an IPv6 Mobility Care-of Test message. +## +## .. zeek:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg +type ip6_mobility_cot: record { + ## Care-of Nonce Index. + nonce_idx: count; + ## Care-of Init Cookie. + cookie: count; + ## Care-of Keygen Token. + token: count; + ## Mobility Options. + options: vector of ip6_option; +}; + +## Values extracted from an IPv6 Mobility Binding Update message. +## +## .. zeek:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg +type ip6_mobility_bu: record { + ## Sequence number. + seq: count; + ## Acknowledge bit. + a: bool; + ## Home Registration bit. + h: bool; + ## Link-Local Address Compatibility bit. + l: bool; + ## Key Management Mobility Capability bit. + k: bool; + ## Lifetime. + life: count; + ## Mobility Options. + options: vector of ip6_option; +}; + +## Values extracted from an IPv6 Mobility Binding Acknowledgement message. +## +## .. zeek:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg +type ip6_mobility_back: record { + ## Status. + status: count; + ## Key Management Mobility Capability. + k: bool; + ## Sequence number. + seq: count; + ## Lifetime. + life: count; + ## Mobility Options. + options: vector of ip6_option; +}; + +## Values extracted from an IPv6 Mobility Binding Error message. +## +## .. zeek:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg +type ip6_mobility_be: record { + ## Status. + status: count; + ## Home Address. + hoa: addr; + ## Mobility Options. + options: vector of ip6_option; +}; + +## Values extracted from an IPv6 Mobility header's message data. +## +## .. zeek:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr +type ip6_mobility_msg: record { + ## The type of message from the header's MH Type field. + id: count; + ## Binding Refresh Request. + brr: ip6_mobility_brr &optional; + ## Home Test Init. + hoti: ip6_mobility_hoti &optional; + ## Care-of Test Init. + coti: ip6_mobility_coti &optional; + ## Home Test. + hot: ip6_mobility_hot &optional; + ## Care-of Test. + cot: ip6_mobility_cot &optional; + ## Binding Update. + bu: ip6_mobility_bu &optional; + ## Binding Acknowledgement. + back: ip6_mobility_back &optional; + ## Binding Error. + be: ip6_mobility_be &optional; +}; + +## Values extracted from an IPv6 Mobility header. +## +## .. zeek:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr +type ip6_mobility_hdr: record { + ## Protocol number of the next header (RFC 1700 et seq., IANA assigned + ## number), e.g. :zeek:id:`IPPROTO_ICMP`. 
+ nxt: count;
+ ## Length of header in 8-octet units, excluding first unit.
+ len: count;
+ ## Mobility header type used to identify the header's message.
+ mh_type: count;
+ ## Reserved field.
+ rsv: count;
+ ## Mobility header checksum.
+ chksum: count;
+ ## Mobility header message.
+ msg: ip6_mobility_msg;
+};
+
+## A general container for a more specific IPv6 extension header.
+##
+## .. zeek:see:: pkt_hdr ip4_hdr ip6_hopopts ip6_dstopts ip6_routing ip6_fragment
+## ip6_ah ip6_esp
+type ip6_ext_hdr: record {
+ ## The RFC 1700 et seq. IANA assigned number identifying the type of
+ ## the extension header.
+ id: count;
+ ## Hop-by-hop option extension header.
+ hopopts: ip6_hopopts &optional;
+ ## Destination option extension header.
+ dstopts: ip6_dstopts &optional;
+ ## Routing extension header.
+ routing: ip6_routing &optional;
+ ## Fragment header.
+ fragment: ip6_fragment &optional;
+ ## Authentication extension header.
+ ah: ip6_ah &optional;
+ ## Encapsulating security payload header.
+ esp: ip6_esp &optional;
+ ## Mobility header.
+ mobility: ip6_mobility_hdr &optional;
+};
+
+## A type alias for a vector of IPv6 extension headers.
+type ip6_ext_hdr_chain: vector of ip6_ext_hdr;
+
+## Values extracted from an IPv6 header.
+##
+## .. zeek:see:: pkt_hdr ip4_hdr ip6_ext_hdr ip6_hopopts ip6_dstopts
+## ip6_routing ip6_fragment ip6_ah ip6_esp
+type ip6_hdr: record {
+ class: count; ##< Traffic class.
+ flow: count; ##< Flow label.
+ len: count; ##< Payload length.
+ nxt: count; ##< Protocol number of the next header
+ ##< (RFC 1700 et seq., IANA assigned number)
+ ##< e.g. :zeek:id:`IPPROTO_ICMP`.
+ hlim: count; ##< Hop limit.
+ src: addr; ##< Source address.
+ dst: addr; ##< Destination address.
+ exts: ip6_ext_hdr_chain; ##< Extension header chain.
+};
+
+## Values extracted from an IPv4 header.
+##
+## .. zeek:see:: pkt_hdr ip6_hdr discarder_check_ip
+type ip4_hdr: record {
+ hl: count; ##< Header length in bytes.
+ tos: count; ##< Type of service.
+ len: count; ##< Total length.
+ id: count; ##< Identification.
+ ttl: count; ##< Time to live.
+ p: count; ##< Protocol.
+ src: addr; ##< Source address.
+ dst: addr; ##< Destination address.
+};
+
+# TCP flags.
+#
+# todo:: these should go into an enum to make them autodoc'able
+const TH_FIN = 1; ##< FIN.
+const TH_SYN = 2; ##< SYN.
+const TH_RST = 4; ##< RST.
+const TH_PUSH = 8; ##< PUSH.
+const TH_ACK = 16; ##< ACK.
+const TH_URG = 32; ##< URG.
+const TH_FLAGS = 63; ##< Mask combining all flags.
+
+## Values extracted from a TCP header.
+##
+## .. zeek:see:: pkt_hdr discarder_check_tcp
+type tcp_hdr: record {
+ sport: port; ##< Source port.
+ dport: port; ##< Destination port.
+ seq: count; ##< Sequence number.
+ ack: count; ##< Acknowledgement number.
+ hl: count; ##< Header length (in bytes).
+ dl: count; ##< Data length (not part of the original TCP header).
+ flags: count; ##< Flags.
+ win: count; ##< Window.
+};
+
+## Values extracted from a UDP header.
+##
+## .. zeek:see:: pkt_hdr discarder_check_udp
+type udp_hdr: record {
+ sport: port; ##< Source port.
+ dport: port; ##< Destination port.
+ ulen: count; ##< UDP length.
+};
+
+## Values extracted from an ICMP header.
+##
+## .. zeek:see:: pkt_hdr discarder_check_icmp
+type icmp_hdr: record {
+ icmp_type: count; ##< Type of message.
+};
+
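The header records above are what handlers of the :zeek:see:`new_packet` event receive, wrapped in the ``pkt_hdr`` record defined next. A minimal, hypothetical handler is sketched below; note that handling ``new_packet`` is expensive since the event is generated for every packet:

    event new_packet(c: connection, p: pkt_hdr)
        {
        # Only IPv4/TCP packets carry both of these optional fields.
        if ( p?$ip && p?$tcp )
            print fmt("%s -> %s (TCP flags: %d)", p$ip$src, p$ip$dst, p$tcp$flags);
        }
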
+## A packet header, consisting of an IP header and transport-layer header.
+##
+## .. zeek:see:: new_packet
+type pkt_hdr: record {
+ ip: ip4_hdr &optional; ##< The IPv4 header if an IPv4 packet.
+ ip6: ip6_hdr &optional; ##< The IPv6 header if an IPv6 packet.
+ tcp: tcp_hdr &optional; ##< The TCP header if a TCP packet.
+ udp: udp_hdr &optional; ##< The UDP header if a UDP packet.
+ icmp: icmp_hdr &optional; ##< The ICMP header if an ICMP packet.
+};
+
+## Values extracted from the layer 2 header.
+##
+## .. zeek:see:: pkt_hdr
+type l2_hdr: record {
+ encap: link_encap; ##< L2 link encapsulation.
+ len: count; ##< Total frame length on wire.
+ cap_len: count; ##< Captured length.
+ src: string &optional; ##< L2 source (if Ethernet).
+ dst: string &optional; ##< L2 destination (if Ethernet).
+ vlan: count &optional; ##< Outermost VLAN tag if any (and Ethernet).
+ inner_vlan: count &optional; ##< Innermost VLAN tag if any (and Ethernet).
+ eth_type: count &optional; ##< Innermost Ethertype (if Ethernet).
+ proto: layer3_proto; ##< L3 protocol.
+};
+
+## A raw packet header, consisting of L2 header and everything in
+## :zeek:see:`pkt_hdr`.
+##
+## .. zeek:see:: raw_packet pkt_hdr
+type raw_pkt_hdr: record {
+ l2: l2_hdr; ##< The layer 2 header.
+ ip: ip4_hdr &optional; ##< The IPv4 header if an IPv4 packet.
+ ip6: ip6_hdr &optional; ##< The IPv6 header if an IPv6 packet.
+ tcp: tcp_hdr &optional; ##< The TCP header if a TCP packet.
+ udp: udp_hdr &optional; ##< The UDP header if a UDP packet.
+ icmp: icmp_hdr &optional; ##< The ICMP header if an ICMP packet.
+};
+
+## A Teredo authentication header. See :rfc:`4380` for more information
+## about the Teredo protocol.
+##
+## .. zeek:see:: teredo_bubble teredo_origin_indication teredo_authentication
+## teredo_hdr
+type teredo_auth: record {
+ id: string; ##< Teredo client identifier.
+ value: string; ##< HMAC-SHA1 over shared secret key between client and
+ ##< server, nonce, confirmation byte, origin indication
+ ##< (if present), and the IPv6 packet.
+ nonce: count; ##< Nonce chosen by Teredo client to be repeated by
+ ##< Teredo server.
+ confirm: count; ##< Confirmation byte to be set to 0 by Teredo client
+ ##< and non-zero by server if client needs new key.
+};
+
+## A Teredo origin indication header. See :rfc:`4380` for more information
+## about the Teredo protocol.
+##
+## .. zeek:see:: teredo_bubble teredo_origin_indication teredo_authentication
+## teredo_hdr
+type teredo_origin: record {
+ p: port; ##< Unobfuscated UDP port of Teredo client.
+ a: addr; ##< Unobfuscated IPv4 address of Teredo client.
+};
+
+## A Teredo packet header. See :rfc:`4380` for more information about the
+## Teredo protocol.
+##
+## .. zeek:see:: teredo_bubble teredo_origin_indication teredo_authentication
+type teredo_hdr: record {
+ auth: teredo_auth &optional; ##< Teredo authentication header.
+ origin: teredo_origin &optional; ##< Teredo origin indication header.
+ hdr: pkt_hdr; ##< IPv6 and transport protocol headers.
+};
+
+## A GTPv1 (GPRS Tunneling Protocol) header.
+type gtpv1_hdr: record {
+ ## The 3-bit version field, which for GTPv1 should be 1.
+ version: count;
+ ## Protocol Type value differentiates GTP (value 1) from GTP' (value 0).
+ pt_flag: bool;
+ ## Reserved field, should be 0.
+ rsv: bool;
+ ## Extension Header flag. When 0, the *next_type* field may or may not
+ ## be present, but shouldn't be meaningful. When 1, *next_type* is
+ ## present and meaningful.
+ e_flag: bool;
+ ## Sequence Number flag. When 0, the *seq* field may or may not
+ ## be present, but shouldn't be meaningful. When 1, *seq* is
+ ## present and meaningful.
+ s_flag: bool;
+ ## N-PDU flag.
When 0, the *n_pdu* field may or may not + ## be present, but shouldn't be meaningful. When 1, *n_pdu* is + ## present and meaningful. + pn_flag: bool; + ## Message Type. A value of 255 indicates user-plane data is encapsulated. + msg_type: count; + ## Length of the GTP packet payload (the rest of the packet following + ## the mandatory 8-byte GTP header). + length: count; + ## Tunnel Endpoint Identifier. Unambiguously identifies a tunnel + ## endpoint in receiving GTP-U or GTP-C protocol entity. + teid: count; + ## Sequence Number. Set if any *e_flag*, *s_flag*, or *pn_flag* field + ## is set. + seq: count &optional; + ## N-PDU Number. Set if any *e_flag*, *s_flag*, or *pn_flag* field is set. + n_pdu: count &optional; + ## Next Extension Header Type. Set if any *e_flag*, *s_flag*, or + ## *pn_flag* field is set. + next_type: count &optional; +}; + +type gtp_cause: count; +type gtp_imsi: count; +type gtp_teardown_ind: bool; +type gtp_nsapi: count; +type gtp_recovery: count; +type gtp_teid1: count; +type gtp_teid_control_plane: count; +type gtp_charging_id: count; +type gtp_charging_gateway_addr: addr; +type gtp_trace_reference: count; +type gtp_trace_type: count; +type gtp_tft: string; +type gtp_trigger_id: string; +type gtp_omc_id: string; +type gtp_reordering_required: bool; +type gtp_proto_config_options: string; +type gtp_charging_characteristics: count; +type gtp_selection_mode: count; +type gtp_access_point_name: string; +type gtp_msisdn: string; + +type gtp_gsn_addr: record { + ## If the GSN Address information element has length 4 or 16, then this + ## field is set to be the informational element's value interpreted as + ## an IPv4 or IPv6 address, respectively. + ip: addr &optional; + ## This field is set if it's not an IPv4 or IPv6 address. + other: string &optional; +}; + +type gtp_end_user_addr: record { + pdp_type_org: count; + pdp_type_num: count; + ## Set if the End User Address information element is IPv4/IPv6. + pdp_ip: addr &optional; + ## Set if the End User Address information element isn't IPv4/IPv6. 
+ pdp_other_addr: string &optional; +}; + +type gtp_rai: record { + mcc: count; + mnc: count; + lac: count; + rac: count; +}; + +type gtp_qos_profile: record { + priority: count; + data: string; +}; + +type gtp_private_extension: record { + id: count; + value: string; +}; + +type gtp_create_pdp_ctx_request_elements: record { + imsi: gtp_imsi &optional; + rai: gtp_rai &optional; + recovery: gtp_recovery &optional; + select_mode: gtp_selection_mode &optional; + data1: gtp_teid1; + cp: gtp_teid_control_plane &optional; + nsapi: gtp_nsapi; + linked_nsapi: gtp_nsapi &optional; + charge_character: gtp_charging_characteristics &optional; + trace_ref: gtp_trace_reference &optional; + trace_type: gtp_trace_type &optional; + end_user_addr: gtp_end_user_addr &optional; + ap_name: gtp_access_point_name &optional; + opts: gtp_proto_config_options &optional; + signal_addr: gtp_gsn_addr; + user_addr: gtp_gsn_addr; + msisdn: gtp_msisdn &optional; + qos_prof: gtp_qos_profile; + tft: gtp_tft &optional; + trigger_id: gtp_trigger_id &optional; + omc_id: gtp_omc_id &optional; + ext: gtp_private_extension &optional; +}; + +type gtp_create_pdp_ctx_response_elements: record { + cause: gtp_cause; + reorder_req: gtp_reordering_required &optional; + recovery: gtp_recovery &optional; + data1: gtp_teid1 &optional; + cp: gtp_teid_control_plane &optional; + charging_id: gtp_charging_id &optional; + end_user_addr: gtp_end_user_addr &optional; + opts: gtp_proto_config_options &optional; + cp_addr: gtp_gsn_addr &optional; + user_addr: gtp_gsn_addr &optional; + qos_prof: gtp_qos_profile &optional; + charge_gateway: gtp_charging_gateway_addr &optional; + ext: gtp_private_extension &optional; +}; + +type gtp_update_pdp_ctx_request_elements: record { + imsi: gtp_imsi &optional; + rai: gtp_rai &optional; + recovery: gtp_recovery &optional; + data1: gtp_teid1; + cp: gtp_teid_control_plane &optional; + nsapi: gtp_nsapi; + trace_ref: gtp_trace_reference &optional; + trace_type: gtp_trace_type &optional; + cp_addr: gtp_gsn_addr; + user_addr: gtp_gsn_addr; + qos_prof: gtp_qos_profile; + tft: gtp_tft &optional; + trigger_id: gtp_trigger_id &optional; + omc_id: gtp_omc_id &optional; + ext: gtp_private_extension &optional; + end_user_addr: gtp_end_user_addr &optional; +}; + +type gtp_update_pdp_ctx_response_elements: record { + cause: gtp_cause; + recovery: gtp_recovery &optional; + data1: gtp_teid1 &optional; + cp: gtp_teid_control_plane &optional; + charging_id: gtp_charging_id &optional; + cp_addr: gtp_gsn_addr &optional; + user_addr: gtp_gsn_addr &optional; + qos_prof: gtp_qos_profile &optional; + charge_gateway: gtp_charging_gateway_addr &optional; + ext: gtp_private_extension &optional; +}; + +type gtp_delete_pdp_ctx_request_elements: record { + teardown_ind: gtp_teardown_ind &optional; + nsapi: gtp_nsapi; + ext: gtp_private_extension &optional; +}; + +type gtp_delete_pdp_ctx_response_elements: record { + cause: gtp_cause; + ext: gtp_private_extension &optional; +}; + +# Prototypes of Zeek built-in functions. +@load base/bif/zeek.bif +@load base/bif/stats.bif +@load base/bif/reporter.bif +@load base/bif/strings.bif +@load base/bif/option.bif + +## Deprecated. This is superseded by the new logging framework. +global log_file_name: function(tag: string): string &redef; + +## Deprecated. This is superseded by the new logging framework. 
+global open_log_file: function(tag: string): file &redef; + +global done_with_network = F; +event net_done(t: time) { done_with_network = T; } + +function log_file_name(tag: string): string + { + local suffix = getenv("ZEEK_LOG_SUFFIX"); + + if ( suffix == "" ) + suffix = "log"; + + return fmt("%s.%s", tag, suffix); + } + +function open_log_file(tag: string): file + { + return open(log_file_name(tag)); + } + +## Internal function. +function add_interface(iold: string, inew: string): string + { + if ( iold == "" ) + return inew; + else + return fmt("%s %s", iold, inew); + } + +## Network interfaces to listen on. Use ``redef interfaces += "eth0"`` to +## extend. +global interfaces = "" &add_func = add_interface; + +## Internal function. +function add_signature_file(sold: string, snew: string): string + { + if ( sold == "" ) + return snew; + else + return cat(sold, " ", snew); + } + +## Signature files to read. Use ``redef signature_files += "foo.sig"`` to +## extend. Signature files added this way will be searched relative to +## ``ZEEKPATH``. Using the ``@load-sigs`` directive instead is preferred +## since that can search paths relative to the current script. +global signature_files = "" &add_func = add_signature_file; + +## Definition of "secondary filters". A secondary filter is a BPF filter given +## as index in this table. For each such filter, the corresponding event is +## raised for all matching packets. +global secondary_filters: table[string] of event(filter: string, pkt: pkt_hdr) + &redef; + +## Maximum length of payload passed to discarder functions. +## +## .. zeek:see:: discarder_check_tcp discarder_check_udp discarder_check_icmp +## discarder_check_ip +global discarder_maxlen = 128 &redef; + +## Function for skipping packets based on their IP header. If defined, this +## function will be called for all IP packets before Zeek performs any further +## analysis. If the function signals to discard a packet, no further processing +## will be performed on it. +## +## p: The IP header of the considered packet. +## +## Returns: True if the packet should not be analyzed any further. +## +## .. zeek:see:: discarder_check_tcp discarder_check_udp discarder_check_icmp +## discarder_maxlen +## +## .. note:: This is very low-level functionality and potentially expensive. +## Avoid using it. +global discarder_check_ip: function(p: pkt_hdr): bool; + +## Function for skipping packets based on their TCP header. If defined, this +## function will be called for all TCP packets before Zeek performs any further +## analysis. If the function signals to discard a packet, no further processing +## will be performed on it. +## +## p: The IP and TCP headers of the considered packet. +## +## d: Up to :zeek:see:`discarder_maxlen` bytes of the TCP payload. +## +## Returns: True if the packet should not be analyzed any further. +## +## .. zeek:see:: discarder_check_ip discarder_check_udp discarder_check_icmp +## discarder_maxlen +## +## .. note:: This is very low-level functionality and potentially expensive. +## Avoid using it. +global discarder_check_tcp: function(p: pkt_hdr, d: string): bool; + +## Function for skipping packets based on their UDP header. If defined, this +## function will be called for all UDP packets before Zeek performs any further +## analysis. If the function signals to discard a packet, no further processing +## will be performed on it. +## +## p: The IP and UDP headers of the considered packet. +## +## d: Up to :zeek:see:`discarder_maxlen` bytes of the UDP payload. 
+## +## Returns: True if the packet should not be analyzed any further. +## +## .. zeek:see:: discarder_check_ip discarder_check_tcp discarder_check_icmp +## discarder_maxlen +## +## .. note:: This is very low-level functionality and potentially expensive. +## Avoid using it. +global discarder_check_udp: function(p: pkt_hdr, d: string): bool; + +## Function for skipping packets based on their ICMP header. If defined, this +## function will be called for all ICMP packets before Zeek performs any further +## analysis. If the function signals to discard a packet, no further processing +## will be performed on it. +## +## p: The IP and ICMP headers of the considered packet. +## +## Returns: True if the packet should not be analyzed any further. +## +## .. zeek:see:: discarder_check_ip discarder_check_tcp discarder_check_udp +## discarder_maxlen +## +## .. note:: This is very low-level functionality and potentially expensive. +## Avoid using it. +global discarder_check_icmp: function(p: pkt_hdr): bool; + +## Zeek's watchdog interval. +const watchdog_interval = 10 sec &redef; + +## The maximum number of timers to expire after processing each new +## packet. The value trades off spreading out the timer expiration load +## with possibly having to hold state longer. A value of 0 means +## "process all expired timers with each new packet". +const max_timer_expires = 300 &redef; + +# These need to match the definitions in Login.h. +# +# .. zeek:see:: get_login_state +# +# todo:: use enum to make them autodoc'able +const LOGIN_STATE_AUTHENTICATE = 0; # Trying to authenticate. +const LOGIN_STATE_LOGGED_IN = 1; # Successful authentication. +const LOGIN_STATE_SKIP = 2; # Skip any further processing. +const LOGIN_STATE_CONFUSED = 3; # We're confused. + +# It would be nice to replace these function definitions with some +# form of parameterized types. + +## Returns minimum of two ``double`` values. +## +## a: First value. +## b: Second value. +## +## Returns: The minimum of *a* and *b*. +function min_double(a: double, b: double): double { return a < b ? a : b; } + +## Returns maximum of two ``double`` values. +## +## a: First value. +## b: Second value. +## +## Returns: The maximum of *a* and *b*. +function max_double(a: double, b: double): double { return a > b ? a : b; } + +## Returns minimum of two ``interval`` values. +## +## a: First value. +## b: Second value. +## +## Returns: The minimum of *a* and *b*. +function min_interval(a: interval, b: interval): interval { return a < b ? a : b; } + +## Returns maximum of two ``interval`` values. +## +## a: First value. +## b: Second value. +## +## Returns: The maximum of *a* and *b*. +function max_interval(a: interval, b: interval): interval { return a > b ? a : b; } + +## Returns minimum of two ``count`` values. +## +## a: First value. +## b: Second value. +## +## Returns: The minimum of *a* and *b*. +function min_count(a: count, b: count): count { return a < b ? a : b; } + +## Returns maximum of two ``count`` values. +## +## a: First value. +## b: Second value. +## +## Returns: The maximum of *a* and *b*. +function max_count(a: count, b: count): count { return a > b ? a : b; } + +## TODO. +global skip_authentication: set[string] &redef; + +## TODO. +global direct_login_prompts: set[string] &redef; + +## TODO. +global login_prompts: set[string] &redef; + +## TODO. +global login_non_failure_msgs: set[string] &redef; + +## TODO. +global login_failure_msgs: set[string] &redef; + +## TODO. +global login_success_msgs: set[string] &redef; + +## TODO. 
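+
+# The discarder hooks documented above take effect as soon as a script gives
+# one of them a function body. A minimal sketch (the prefix below is only a
+# placeholder) of what a site policy might define to skip all further analysis
+# of IPv4 traffic from one network:
+#
+#	function discarder_check_ip(p: pkt_hdr): bool
+#		{
+#		# Discard the packet if it is IPv4 and originates from 192.0.2.0/24.
+#		return p?$ip && p$ip$src in 192.0.2.0/24;
+#		}
+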
+global login_timeouts: set[string] &redef; + +## A MIME header key/value pair. +## +## .. zeek:see:: mime_header_list http_all_headers mime_all_headers mime_one_header +type mime_header_rec: record { + name: string; ##< The header name. + value: string; ##< The header value. +}; + +## A list of MIME headers. +## +## .. zeek:see:: mime_header_rec http_all_headers mime_all_headers +type mime_header_list: table[count] of mime_header_rec; + +## The length of MIME data segments delivered to handlers of +## :zeek:see:`mime_segment_data`. +## +## .. zeek:see:: mime_segment_data mime_segment_overlap_length +global mime_segment_length = 1024 &redef; + +## The number of bytes of overlap between successive segments passed to +## :zeek:see:`mime_segment_data`. +global mime_segment_overlap_length = 0 &redef; + +## An RPC portmapper mapping. +## +## .. zeek:see:: pm_mappings +type pm_mapping: record { + program: count; ##< The RPC program. + version: count; ##< The program version. + p: port; ##< The port. +}; + +## Table of RPC portmapper mappings. +## +## .. zeek:see:: pm_request_dump +type pm_mappings: table[count] of pm_mapping; + +## An RPC portmapper request. +## +## .. zeek:see:: pm_attempt_getport pm_request_getport +type pm_port_request: record { + program: count; ##< The RPC program. + version: count; ##< The program version. + is_tcp: bool; ##< True if using TCP. +}; + +## An RPC portmapper *callit* request. +## +## .. zeek:see:: pm_attempt_callit pm_request_callit +type pm_callit_request: record { + program: count; ##< The RPC program. + version: count; ##< The program version. + proc: count; ##< The procedure being called. + arg_size: count; ##< The size of the argument. +}; + +# See const.bif +# const RPC_SUCCESS = 0; +# const RPC_PROG_UNAVAIL = 1; +# const RPC_PROG_MISMATCH = 2; +# const RPC_PROC_UNAVAIL = 3; +# const RPC_GARBAGE_ARGS = 4; +# const RPC_SYSTEM_ERR = 5; +# const RPC_TIMEOUT = 6; +# const RPC_AUTH_ERROR = 7; +# const RPC_UNKNOWN_ERROR = 8; + +## Mapping of numerical RPC status codes to readable messages. +## +## .. zeek:see:: pm_attempt_callit pm_attempt_dump pm_attempt_getport +## pm_attempt_null pm_attempt_set pm_attempt_unset rpc_dialogue rpc_reply +const RPC_status = { + [RPC_SUCCESS] = "ok", + [RPC_PROG_UNAVAIL] = "prog unavail", + [RPC_PROG_MISMATCH] = "mismatch", + [RPC_PROC_UNAVAIL] = "proc unavail", + [RPC_GARBAGE_ARGS] = "garbage args", + [RPC_SYSTEM_ERR] = "system err", + [RPC_TIMEOUT] = "timeout", + [RPC_AUTH_ERROR] = "auth error", + [RPC_UNKNOWN_ERROR] = "unknown" +}; + +module NFS3; + +export { + ## If true, :zeek:see:`nfs_proc_read` and :zeek:see:`nfs_proc_write` + ## events return the file data that has been read/written. + ## + ## .. zeek:see:: NFS3::return_data_max NFS3::return_data_first_only + const return_data = F &redef; + + ## If :zeek:id:`NFS3::return_data` is true, how much data should be + ## returned at most. + const return_data_max = 512 &redef; + + ## If :zeek:id:`NFS3::return_data` is true, whether to *only* return data + ## if the read or write offset is 0, i.e., only return data for the + ## beginning of the file. + const return_data_first_only = T &redef; + + ## Record summarizing the general results and status of NFSv3 + ## request/reply pairs. + ## + ## Note that when *rpc_stat* or *nfs_stat* indicates not successful, + ## the reply record passed to the corresponding event will be empty and + ## contain uninitialized fields, so don't use it. Also note that time + ## and duration values might not be fully accurate. 
For TCP, we record + ## times when the corresponding chunk of data is delivered to the + ## analyzer. Depending on the reassembler, this might be well after the + ## first packet of the request was received. + ## + ## .. zeek:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup + ## nfs_proc_mkdir nfs_proc_not_implemented nfs_proc_null + ## nfs_proc_read nfs_proc_readdir nfs_proc_readlink nfs_proc_remove + ## nfs_proc_rmdir nfs_proc_write nfs_reply_status + type info_t: record { + ## The RPC status. + rpc_stat: rpc_status; + ## The NFS status. + nfs_stat: status_t; + ## The start time of the request. + req_start: time; + ## The duration of the request. + req_dur: interval; + ## The length in bytes of the request. + req_len: count; + ## The start time of the reply. + rep_start: time; + ## The duration of the reply. + rep_dur: interval; + ## The length in bytes of the reply. + rep_len: count; + ## The user id of the reply. + rpc_uid: count; + ## The group id of the reply. + rpc_gid: count; + ## The stamp of the reply. + rpc_stamp: count; + ## The machine name of the reply. + rpc_machine_name: string; + ## The auxiliary ids of the reply. + rpc_auxgids: index_vec; + }; + + ## NFS file attributes. Field names are based on RFC 1813. + ## + ## .. zeek:see:: nfs_proc_sattr + type sattr_t: record { + mode: count &optional; ##< Mode + uid: count &optional; ##< User ID. + gid: count &optional; ##< Group ID. + size: count &optional; ##< Size. + atime: time_how_t &optional; ##< Time of last access. + mtime: time_how_t &optional; ##< Time of last modification. + }; + + ## NFS file attributes. Field names are based on RFC 1813. + ## + ## .. zeek:see:: nfs_proc_getattr + type fattr_t: record { + ftype: file_type_t; ##< File type. + mode: count; ##< Mode + nlink: count; ##< Number of links. + uid: count; ##< User ID. + gid: count; ##< Group ID. + size: count; ##< Size. + used: count; ##< TODO. + rdev1: count; ##< TODO. + rdev2: count; ##< TODO. + fsid: count; ##< TODO. + fileid: count; ##< TODO. + atime: time; ##< Time of last access. + mtime: time; ##< Time of last modification. + ctime: time; ##< Time of creation. + }; + + ## NFS symlinkdata attributes. Field names are based on RFC 1813 + ## + ## .. zeek:see:: nfs_proc_symlink + type symlinkdata_t: record { + symlink_attributes: sattr_t; ##< The initial attributes for the symbolic link + nfspath: string &optional; ##< The string containing the symbolic link data. + }; + + ## NFS *readdir* arguments. + ## + ## .. zeek:see:: nfs_proc_readdir + type diropargs_t : record { + dirfh: string; ##< The file handle of the directory. + fname: string; ##< The name of the file we are interested in. + }; + + ## NFS *rename* arguments. + ## + ## .. zeek:see:: nfs_proc_rename + type renameopargs_t : record { + src_dirfh : string; + src_fname : string; + dst_dirfh : string; + dst_fname : string; + }; + + ## NFS *symlink* arguments. + ## + ## .. zeek:see:: nfs_proc_symlink + type symlinkargs_t: record { + link : diropargs_t; ##< The location of the link to be created. + symlinkdata: symlinkdata_t; ##< The symbolic link to be created. + }; + + ## NFS *link* arguments. + ## + ## .. zeek:see:: nfs_proc_link + type linkargs_t: record { + fh : string; ##< The file handle for the existing file system object. + link : diropargs_t; ##< The location of the link to be created. + }; + + ## NFS *sattr* arguments. + ## + ## .. zeek:see:: nfs_proc_sattr + type sattrargs_t: record { + fh : string; ##< The file handle for the existing file system object. 
+		new_attributes: sattr_t;	##< The new attributes for the file.
+	};
+
+	## NFS lookup reply. If the lookup failed, *dir_attr* may be set. If the
+	## lookup succeeded, *fh* is always set and *obj_attr* and *dir_attr*
+	## may be set.
+	##
+	## .. zeek:see:: nfs_proc_lookup
+	type lookup_reply_t: record {
+		fh: string &optional;	##< File handle of object looked up.
+		obj_attr: fattr_t &optional;	##< Optional attributes associated w/ file.
+		dir_attr: fattr_t &optional;	##< Optional attributes associated w/ dir.
+	};
+
+	## NFS *read* arguments.
+	##
+	## .. zeek:see:: nfs_proc_read
+	type readargs_t: record {
+		fh: string;	##< File handle to read from.
+		offset: count;	##< Offset in file.
+		size: count;	##< Number of bytes to read.
+	};
+
+	## NFS *read* reply. If the request fails, *attr* may be set. If the
+	## request succeeds, *attr* may be set and all other fields are set.
+	type read_reply_t: record {
+		attr: fattr_t &optional;	##< Attributes.
+		size: count &optional;	##< Number of bytes read.
+		eof: bool &optional;	##< Did the read end at EOF?
+		data: string &optional;	##< The actual data; not yet implemented.
+	};
+
+	## NFS *readlink* reply. If the request fails, *attr* may be set. If the
+	## request succeeds, *attr* may be set and all other fields are set.
+	##
+	## .. zeek:see:: nfs_proc_readlink
+	type readlink_reply_t: record {
+		attr: fattr_t &optional;	##< Attributes.
+		nfspath: string &optional;	##< Contents of the symlink; in general a pathname as text.
+	};
+
+	## NFS *write* arguments.
+	##
+	## .. zeek:see:: nfs_proc_write
+	type writeargs_t: record {
+		fh: string;	##< File handle to write to.
+		offset: count;	##< Offset in file.
+		size: count;	##< Number of bytes to write.
+		stable: stable_how_t;	##< How and when data is committed.
+		data: string &optional;	##< The actual data; not implemented yet.
+	};
+
+	## NFS *wcc* attributes.
+	##
+	## .. zeek:see:: NFS3::write_reply_t
+	type wcc_attr_t: record {
+		size: count;	##< The size.
+		atime: time;	##< Access time.
+		mtime: time;	##< Modification time.
+	};
+
+	## NFS *link* reply.
+	##
+	## .. zeek:see:: nfs_proc_link
+	type link_reply_t: record {
+		post_attr: fattr_t &optional;	##< Optional post-operation attributes of the file system object identified by the file handle.
+		preattr: wcc_attr_t &optional;	##< Optional attributes associated w/ file.
+		postattr: fattr_t &optional;	##< Optional attributes associated w/ file.
+	};
+
+	## NFS *sattr* reply. If the request fails, *pre|post* attr may be set.
+	## If the request succeeds, *pre|post* attr are set.
+	##
+	type sattr_reply_t: record {
+		dir_pre_attr: wcc_attr_t &optional;	##< Optional attributes associated w/ dir.
+		dir_post_attr: fattr_t &optional;	##< Optional attributes associated w/ dir.
+	};
+
+	## NFS *write* reply. If the request fails, *pre|post* attr may be set.
+	## If the request succeeds, *pre|post* attr may be set and all other
+	## fields are set.
+	##
+	## .. zeek:see:: nfs_proc_write
+	type write_reply_t: record {
+		preattr: wcc_attr_t &optional;	##< Pre-operation attributes.
+		postattr: fattr_t &optional;	##< Post-operation attributes.
+		size: count &optional;	##< Size.
+		commited: stable_how_t &optional;	##< How the data was committed.
+		verf: count &optional;	##< Write verifier cookie.
+	};
+
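+	# A usage sketch for the records above, assuming the nfs_proc_read event
+	# is raised with the connection, an info_t, the readargs_t, and the
+	# read_reply_t. With NFS3::return_data redefined to T, rep$data also
+	# carries (a prefix of) the bytes read. A policy script could do, e.g.:
+	#
+	#	redef NFS3::return_data = T;
+	#
+	#	event nfs_proc_read(c: connection, info: NFS3::info_t,
+	#	                    req: NFS3::readargs_t, rep: NFS3::read_reply_t)
+	#		{
+	#		if ( rep?$size )
+	#			print fmt("read %d bytes at offset %d from %s",
+	#			          rep$size, req$offset, c$id$resp_h);
+	#		}
+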
+	## NFS reply for *create*, *mkdir*, and *symlink*. If the proc
+	## failed, *dir_\*_attr* may be set. If the proc succeeded, *fh* and the
+	## *attr*'s may be set. Note: no guarantee that *fh* is set after
+	## success.
+	##
+	## .. zeek:see:: nfs_proc_create nfs_proc_mkdir
+	type newobj_reply_t: record {
+		fh: string &optional;	##< File handle of object created.
+		obj_attr: fattr_t &optional;	##< Optional attributes associated w/ new object.
+		dir_pre_attr: wcc_attr_t &optional;	##< Optional attributes associated w/ dir.
+		dir_post_attr: fattr_t &optional;	##< Optional attributes associated w/ dir.
+	};
+
+	## NFS reply for *remove*, *rmdir*. Corresponds to *wcc_data* in the spec.
+	##
+	## .. zeek:see:: nfs_proc_remove nfs_proc_rmdir
+	type delobj_reply_t: record {
+		dir_pre_attr: wcc_attr_t &optional;	##< Optional attributes associated w/ dir.
+		dir_post_attr: fattr_t &optional;	##< Optional attributes associated w/ dir.
+	};
+
+	## NFS reply for *rename*. Corresponds to *wcc_data* in the spec.
+	##
+	## .. zeek:see:: nfs_proc_rename
+	type renameobj_reply_t: record {
+		src_dir_pre_attr: wcc_attr_t;
+		src_dir_post_attr: fattr_t;
+		dst_dir_pre_attr: wcc_attr_t;
+		dst_dir_post_attr: fattr_t;
+	};
+
+	## NFS *readdir* arguments. Used for both *readdir* and *readdirplus*.
+	##
+	## .. zeek:see:: nfs_proc_readdir
+	type readdirargs_t: record {
+		isplus: bool;	##< Is this a readdirplus request?
+		dirfh: string;	##< The directory filehandle.
+		cookie: count;	##< Cookie / pos in dir; 0 for first call.
+		cookieverf: count;	##< The cookie verifier.
+		dircount: count;	##< "count" field for readdir; maxcount otherwise (in bytes).
+		maxcount: count &optional;	##< Only used for readdirplus, in bytes.
+	};
+
+	## NFS *direntry*. *fh* and *attr* are used for *readdirplus*. However,
+	## even for *readdirplus* they may not be filled out.
+	##
+	## .. zeek:see:: NFS3::direntry_vec_t NFS3::readdir_reply_t
+	type direntry_t: record {
+		fileid: count;	##< E.g., inode number.
+		fname: string;	##< Filename.
+		cookie: count;	##< Cookie value.
+		attr: fattr_t &optional;	##< *readdirplus*: the *fh* attributes for the entry.
+		fh: string &optional;	##< *readdirplus*: the *fh* for the entry.
+	};
+
+	## Vector of NFS *direntry*.
+	##
+	## .. zeek:see:: NFS3::readdir_reply_t
+	type direntry_vec_t: vector of direntry_t;
+
+	## NFS *readdir* reply. Used for *readdir* and *readdirplus*. If an error is
+	## returned, *dir_attr* might be set. On success, *dir_attr* may be set,
+	## all others must be set.
+	type readdir_reply_t: record {
+		isplus: bool;	##< True if this is the reply for a *readdirplus* request.
+		dir_attr: fattr_t &optional;	##< Directory attributes.
+		cookieverf: count &optional;	##< The cookie verifier.
+		entries: direntry_vec_t &optional;	##< Returned directory entries.
+		eof: bool;	##< If true, no more entries in directory.
+	};
+
+	## NFS *fsstat*.
+	type fsstat_t: record {
+		attrs: fattr_t &optional;	##< Attributes.
+		tbytes: double;	##< Total size of the file system, in bytes.
+		fbytes: double;	##< Free bytes in the file system.
+		abytes: double;	##< Free bytes available to the user.
+		tfiles: double;	##< Total number of file slots.
+		ffiles: double;	##< Free file slots.
+		afiles: double;	##< Free file slots available to the user.
+		invarsec: interval;	##< Time for which the file system is not expected to change.
+	};
+} # end export
+
+
+module MOUNT3;
+export {
+
+	## Record summarizing the general results and status of MOUNT3
+	## request/reply pairs.
+	##
+	## Note that when *rpc_stat* or *mount_stat* indicates not successful,
+	## the reply record passed to the corresponding event will be empty and
+	## contain uninitialized fields, so don't use it. Also note that time
+	# and duration values might not be fully accurate. For TCP, we record
+	# times when the corresponding chunk of data is delivered to the
+	# analyzer. Depending on the reassembler, this might be well after the
+	# first packet of the request was received.
+	#
+	# .. 
zeek:see:: mount_proc_mnt mount_proc_dump mount_proc_umnt + # mount_proc_umntall mount_proc_export mount_proc_not_implemented + type info_t: record { + ## The RPC status. + rpc_stat: rpc_status; + ## The MOUNT status. + mnt_stat: status_t; + ## The start time of the request. + req_start: time; + ## The duration of the request. + req_dur: interval; + ## The length in bytes of the request. + req_len: count; + ## The start time of the reply. + rep_start: time; + ## The duration of the reply. + rep_dur: interval; + ## The length in bytes of the reply. + rep_len: count; + ## The user id of the reply. + rpc_uid: count; + ## The group id of the reply. + rpc_gid: count; + ## The stamp of the reply. + rpc_stamp: count; + ## The machine name of the reply. + rpc_machine_name: string; + ## The auxiliary ids of the reply. + rpc_auxgids: index_vec; + }; + + ## MOUNT *mnt* arguments. + ## + ## .. zeek:see:: mount_proc_mnt + type dirmntargs_t : record { + dirname: string; ##< Name of directory to mount + }; + + ## MOUNT lookup reply. If the mount failed, *dir_attr* may be set. If the + ## mount succeeded, *fh* is always set. + ## + ## .. zeek:see:: mount_proc_mnt + type mnt_reply_t: record { + dirfh: string &optional; ##< Dir handle + auth_flavors: vector of auth_flavor_t &optional; ##< Returned authentication flavors + }; + +} # end export + + +module Threading; + +export { + ## The heartbeat interval used by the threading framework. + ## Changing this should usually not be necessary and will break + ## several tests. + const heartbeat_interval = 1.0 secs &redef; +} + +module SSH; + +export { + ## The client and server each have some preferences for the algorithms used + ## in each direction. + type Algorithm_Prefs: record { + ## The algorithm preferences for client to server communication + client_to_server: vector of string &optional; + ## The algorithm preferences for server to client communication + server_to_client: vector of string &optional; + }; + + ## This record lists the preferences of an SSH endpoint for + ## algorithm selection. During the initial :abbr:`SSH (Secure Shell)` + ## key exchange, each endpoint lists the algorithms + ## that it supports, in order of preference. See + ## :rfc:`4253#section-7.1` for details. + type Capabilities: record { + ## Key exchange algorithms + kex_algorithms: string_vec; + ## The algorithms supported for the server host key + server_host_key_algorithms: string_vec; + ## Symmetric encryption algorithm preferences + encryption_algorithms: Algorithm_Prefs; + ## Symmetric MAC algorithm preferences + mac_algorithms: Algorithm_Prefs; + ## Compression algorithm preferences + compression_algorithms: Algorithm_Prefs; + ## Language preferences + languages: Algorithm_Prefs &optional; + ## Are these the capabilities of the server? 
+ is_server: bool; + }; +} + +module NTLM; + +export { + type NTLM::Version: record { + ## The major version of the Windows operating system in use + major : count; + ## The minor version of the Windows operating system in use + minor : count; + ## The build number of the Windows operating system in use + build : count; + ## The current revision of NTLMSSP in use + ntlmssp : count; + }; + + type NTLM::NegotiateFlags: record { + ## If set, requires 56-bit encryption + negotiate_56 : bool; + ## If set, requests an explicit key exchange + negotiate_key_exch : bool; + ## If set, requests 128-bit session key negotiation + negotiate_128 : bool; + ## If set, requests the protocol version number + negotiate_version : bool; + ## If set, indicates that the TargetInfo fields in the + ## CHALLENGE_MESSAGE are populated + negotiate_target_info : bool; + ## If set, requests the usage of the LMOWF function + request_non_nt_session_key : bool; + ## If set, requests and identify level token + negotiate_identify : bool; + ## If set, requests usage of NTLM v2 session security + ## Note: NTML v2 session security is actually NTLM v1 + negotiate_extended_sessionsecurity : bool; + ## If set, TargetName must be a server name + target_type_server : bool; + ## If set, TargetName must be a domain name + target_type_domain : bool; + + ## If set, requests the presence of a signature block + ## on all messages + negotiate_always_sign : bool; + ## If set, the workstation name is provided + negotiate_oem_workstation_supplied : bool; + ## If set, the domain name is provided + negotiate_oem_domain_supplied : bool; + ## If set, the connection should be anonymous + negotiate_anonymous_connection : bool; + ## If set, requests usage of NTLM v1 + negotiate_ntlm : bool; + + ## If set, requests LAN Manager session key computation + negotiate_lm_key : bool; + ## If set, requests connectionless authentication + negotiate_datagram : bool; + ## If set, requests session key negotiation for message + ## confidentiality + negotiate_seal : bool; + ## If set, requests session key negotiation for message + ## signatures + negotiate_sign : bool; + ## If set, the TargetName field is present + request_target : bool; + + ## If set, requests OEM character set encoding + negotiate_oem : bool; + ## If set, requests Unicode character set encoding + negotiate_unicode : bool; + }; + + type NTLM::Negotiate: record { + ## The negotiate flags + flags : NTLM::NegotiateFlags; + ## The domain name of the client, if known + domain_name : string &optional; + ## The machine name of the client, if known + workstation : string &optional; + ## The Windows version information, if supplied + version : NTLM::Version &optional; + }; + + type NTLM::AVs: record { + ## The server's NetBIOS computer name + nb_computer_name : string; + ## The server's NetBIOS domain name + nb_domain_name : string; + ## The FQDN of the computer + dns_computer_name : string &optional; + ## The FQDN of the domain + dns_domain_name : string &optional; + ## The FQDN of the forest + dns_tree_name : string &optional; + + ## Indicates to the client that the account + ## authentication is constrained + constrained_auth : bool &optional; + ## The associated timestamp, if present + timestamp : time &optional; + ## Indicates that the client is providing + ## a machine ID created at computer startup to + ## identify the calling machine + single_host_id : count &optional; + + ## The SPN of the target server + target_name : string &optional; + }; + + type NTLM::Challenge: record { + ## The negotiate 
flags + flags : NTLM::NegotiateFlags; + ## The server authentication realm. If the server is + ## domain-joined, the name of the domain. Otherwise + ## the server name. See flags.target_type_domain + ## and flags.target_type_server + target_name : string &optional; + ## The Windows version information, if supplied + version : NTLM::Version &optional; + ## Attribute-value pairs specified by the server + target_info : NTLM::AVs &optional; + }; + + type NTLM::Authenticate: record { + ## The negotiate flags + flags : NTLM::NegotiateFlags; + ## The domain or computer name hosting the account + domain_name : string &optional; + ## The name of the user to be authenticated. + user_name : string &optional; + ## The name of the computer to which the user was logged on. + workstation : string &optional; + ## The session key + session_key : string &optional; + ## The Windows version information, if supplied + version : NTLM::Version &optional; + }; +} + +module SMB; + +export { + ## MAC times for a file. + ## + ## For more information, see MS-SMB2:2.2.16 + ## + ## .. zeek:see:: smb1_nt_create_andx_response smb2_create_response + type SMB::MACTimes: record { + ## The time when data was last written to the file. + modified : time &log; + ## The time when the file was last accessed. + accessed : time &log; + ## The time the file was created. + created : time &log; + ## The time when the file was last modified. + changed : time &log; + } &log; + + ## A set of file names used as named pipes over SMB. This + ## only comes into play as a heuristic to identify named + ## pipes when the drive mapping wasn't seen by Zeek. + ## + ## .. zeek:see:: smb_pipe_connect_heuristic + const SMB::pipe_filenames: set[string] &redef; +} + +module SMB1; + +export { + ## An SMB1 header. + ## + ## .. zeek:see:: smb1_message smb1_empty_response smb1_error + ## smb1_check_directory_request smb1_check_directory_response + ## smb1_close_request smb1_create_directory_request + ## smb1_create_directory_response smb1_echo_request + ## smb1_echo_response smb1_negotiate_request + ## smb1_negotiate_response smb1_nt_cancel_request + ## smb1_nt_create_andx_request smb1_nt_create_andx_response + ## smb1_query_information_request smb1_read_andx_request + ## smb1_read_andx_response smb1_session_setup_andx_request + ## smb1_session_setup_andx_response smb1_transaction_request + ## smb1_transaction2_request smb1_trans2_find_first2_request + ## smb1_trans2_query_path_info_request + ## smb1_trans2_get_dfs_referral_request + ## smb1_tree_connect_andx_request smb1_tree_connect_andx_response + ## smb1_tree_disconnect smb1_write_andx_request + ## smb1_write_andx_response + type SMB1::Header : record { + command : count; ##< The command number + status : count; ##< The status code + flags : count; ##< Flag set 1 + flags2 : count; ##< Flag set 2 + tid : count; ##< Tree ID + pid : count; ##< Process ID + uid : count; ##< User ID + mid : count; ##< Multiplex ID + }; + + type SMB1::NegotiateRawMode: record { + ## Read raw supported + read_raw : bool; + ## Write raw supported + write_raw : bool; + }; + + type SMB1::NegotiateCapabilities: record { + ## The server supports SMB_COM_READ_RAW and SMB_COM_WRITE_RAW + raw_mode : bool; + ## The server supports SMB_COM_READ_MPX and SMB_COM_WRITE_MPX + mpx_mode : bool; + ## The server supports unicode strings + unicode : bool; + ## The server supports large files with 64 bit offsets + large_files : bool; + ## The server supports the SMBs particilar to the NT LM 0.12 dialect. Implies nt_find. 
+ nt_smbs : bool; + + ## The server supports remote admin API requests via DCE-RPC + rpc_remote_apis : bool; + ## The server can respond with 32 bit status codes in Status.Status + status32 : bool; + ## The server supports level 2 oplocks + level_2_oplocks : bool; + ## The server supports SMB_COM_LOCK_AND_READ + lock_and_read : bool; + ## Reserved + nt_find : bool; + + ## The server is DFS aware + dfs : bool; + ## The server supports NT information level requests passing through + infolevel_passthru : bool; + ## The server supports large SMB_COM_READ_ANDX (up to 64k) + large_readx : bool; + ## The server supports large SMB_COM_WRITE_ANDX (up to 64k) + large_writex : bool; + ## The server supports CIFS Extensions for UNIX + unix : bool; + + ## The server supports SMB_BULK_READ, SMB_BULK_WRITE + ## Note: No known implementations support this + bulk_transfer : bool; + ## The server supports compressed data transfer. Requires bulk_transfer. + ## Note: No known implementations support this + compressed_data : bool; + ## The server supports extended security exchanges + extended_security : bool; + }; + + type SMB1::NegotiateResponseSecurity: record { + ## This indicates whether the server, as a whole, is operating under + ## Share Level or User Level security. + user_level : bool; + ## This indicates whether or not the server supports Challenge/Response + ## authentication. If the bit is false, then plaintext passwords must + ## be used. + challenge_response: bool; + ## This indicates if the server is capable of performing MAC message + ## signing. Note: Requires NT LM 0.12 or later. + signatures_enabled: bool &optional; + ## This indicates if the server is requiring the use of a MAC in each + ## packet. If false, message signing is optional. Note: Requires NT LM 0.12 + ## or later. + signatures_required: bool &optional; + }; + + type SMB1::NegotiateResponseCore: record { + ## Index of selected dialect + dialect_index : count; + }; + + type SMB1::NegotiateResponseLANMAN: record { + ## Count of parameter words (should be 13) + word_count : count; + ## Index of selected dialect + dialect_index : count; + ## Security mode + security_mode : SMB1::NegotiateResponseSecurity; + ## Max transmit buffer size (>= 1024) + max_buffer_size : count; + ## Max pending multiplexed requests + max_mpx_count : count; + + ## Max number of virtual circuits (VCs - transport-layer connections) + ## between client and server + max_number_vcs : count; + ## Raw mode + raw_mode : SMB1::NegotiateRawMode; + ## Unique token identifying this session + session_key : count; + ## Current date and time at server + server_time : time; + ## The challenge encryption key + encryption_key : string; + + ## The server's primary domain + primary_domain : string; + }; + + type SMB1::NegotiateResponseNTLM: record { + ## Count of parameter words (should be 17) + word_count : count; + ## Index of selected dialect + dialect_index : count; + ## Security mode + security_mode : SMB1::NegotiateResponseSecurity; + ## Max transmit buffer size + max_buffer_size : count; + ## Max pending multiplexed requests + max_mpx_count : count; + + ## Max number of virtual circuits (VCs - transport-layer connections) + ## between client and server + max_number_vcs : count; + ## Max raw buffer size + max_raw_size : count; + ## Unique token identifying this session + session_key : count; + ## Server capabilities + capabilities : SMB1::NegotiateCapabilities; + ## Current date and time at server + server_time : time; + + ## The challenge encryption key. 
+ ## Present only for non-extended security (i.e. capabilities$extended_security = F) + encryption_key : string &optional; + ## The name of the domain. + ## Present only for non-extended security (i.e. capabilities$extended_security = F) + domain_name : string &optional; + ## A globally unique identifier assigned to the server. + ## Present only for extended security (i.e. capabilities$extended_security = T) + guid : string &optional; + ## Opaque security blob associated with the security package if capabilities$extended_security = T + ## Otherwise, the challenge for challenge/response authentication. + security_blob : string; + }; + + type SMB1::NegotiateResponse: record { + ## If the server does not understand any of the dialect strings, or if + ## PC NETWORK PROGRAM 1.0 is the chosen dialect. + core : SMB1::NegotiateResponseCore &optional; + ## If the chosen dialect is greater than core up to and including + ## LANMAN 2.1. + lanman : SMB1::NegotiateResponseLANMAN &optional; + ## If the chosen dialect is NT LM 0.12. + ntlm : SMB1::NegotiateResponseNTLM &optional; + }; + + type SMB1::SessionSetupAndXCapabilities: record { + ## The client can use unicode strings + unicode : bool; + ## The client can deal with files having 64 bit offsets + large_files : bool; + ## The client understands the SMBs introduced with NT LM 0.12 + ## Implies nt_find + nt_smbs : bool; + ## The client can receive 32 bit errors encoded in Status.Status + status32 : bool; + ## The client understands Level II oplocks + level_2_oplocks : bool; + ## Reserved. Implied by nt_smbs. + nt_find : bool; + }; + + type SMB1::SessionSetupAndXRequest: record { + ## Count of parameter words + ## - 10 for pre NT LM 0.12 + ## - 12 for NT LM 0.12 with extended security + ## - 13 for NT LM 0.12 without extended security + word_count : count; + ## Client maximum buffer size + max_buffer_size : count; + ## Actual maximum multiplexed pending request + max_mpx_count : count; + ## Virtual circuit number. First VC == 0 + vc_number : count; + ## Session key (valid iff vc_number > 0) + session_key : count; + + ## Client's native operating system + native_os : string; + ## Client's native LAN Manager type + native_lanman : string; + ## Account name + ## Note: not set for NT LM 0.12 with extended security + account_name : string &optional; + ## If challenge/response auth is not being used, this is the password. + ## Otherwise, it's the response to the server's challenge. + ## Note: Only set for pre NT LM 0.12 + account_password : string &optional; + ## Client's primary domain, if known + ## Note: not set for NT LM 0.12 with extended security + primary_domain : string &optional; + + ## Case insensitive password + ## Note: only set for NT LM 0.12 without extended security + case_insensitive_password : string &optional; + ## Case sensitive password + ## Note: only set for NT LM 0.12 without extended security + case_sensitive_password : string &optional; + ## Security blob + ## Note: only set for NT LM 0.12 with extended security + security_blob : string &optional; + ## Client capabilities + ## Note: only set for NT LM 0.12 + capabilities : SMB1::SessionSetupAndXCapabilities &optional; + }; + + type SMB1::SessionSetupAndXResponse: record { + ## Count of parameter words (should be 3 for pre NT LM 0.12 and 4 for NT LM 0.12) + word_count : count; + ## Were we logged in as a guest user? 
+ is_guest : bool &optional; + ## Server's native operating system + native_os : string &optional; + ## Server's native LAN Manager type + native_lanman : string &optional; + ## Server's primary domain + primary_domain : string &optional; + ## Security blob if NTLM + security_blob : string &optional; + }; + + type SMB1::Trans2_Args: record { + ## Total parameter count + total_param_count: count; + ## Total data count + total_data_count: count; + ## Max parameter count + max_param_count: count; + ## Max data count + max_data_count: count; + ## Max setup count + max_setup_count: count; + ## Flags + flags: count; + ## Timeout + trans_timeout: count; + ## Parameter count + param_count: count; + ## Parameter offset + param_offset: count; + ## Data count + data_count: count; + ## Data offset + data_offset: count; + ## Setup count + setup_count: count; + }; + + type SMB1::Trans_Sec_Args: record { + ## Total parameter count + total_param_count: count; + ## Total data count + total_data_count: count; + ## Parameter count + param_count: count; + ## Parameter offset + param_offset: count; + ## Parameter displacement + param_displacement: count; + ## Data count + data_count: count; + ## Data offset + data_offset: count; + ## Data displacement + data_displacement: count; + }; + + type SMB1::Trans2_Sec_Args: record { + ## Total parameter count + total_param_count: count; + ## Total data count + total_data_count: count; + ## Parameter count + param_count: count; + ## Parameter offset + param_offset: count; + ## Parameter displacement + param_displacement: count; + ## Data count + data_count: count; + ## Data offset + data_offset: count; + ## Data displacement + data_displacement: count; + ## File ID + FID: count; + }; + + type SMB1::Find_First2_Request_Args: record { + ## File attributes to apply as a constraint to the search + search_attrs : count; + ## Max search results + search_count : count; + ## Misc. flags for how the server should manage the transaction + ## once results are returned + flags : count; + ## How detailed the information returned in the results should be + info_level : count; + ## Specify whether to search for directories or files + search_storage_type : count; + ## The string to serch for (note: may contain wildcards) + file_name : string; + }; + + type SMB1::Find_First2_Response_Args: record { + ## The server generated search identifier + sid : count; + ## Number of results returned by the search + search_count : count; + ## Whether or not the search can be continued using + ## the TRANS2_FIND_NEXT2 transaction + end_of_search : bool; + ## An extended attribute name that couldn't be retrieved + ext_attr_error : string &optional; + }; + + +} + +module SMB2; + +export { + ## An SMB2 header. + ## + ## For more information, see MS-SMB2:2.2.1.1 and MS-SMB2:2.2.1.2 + ## + ## .. zeek:see:: smb2_message smb2_close_request smb2_close_response + ## smb2_create_request smb2_create_response smb2_negotiate_request + ## smb2_negotiate_response smb2_read_request + ## smb2_session_setup_request smb2_session_setup_response + ## smb2_file_rename smb2_file_delete + ## smb2_tree_connect_request smb2_tree_connect_response + ## smb2_write_request + type SMB2::Header: record { + ## The number of credits that this request consumes + credit_charge : count; + ## In a request, this is an indication to the server about the client's channel + ## change. 
In a response, this is the status field + status : count; + ## The command code of the packet + command : count; + ## The number of credits the client is requesting, or the number of credits + ## granted to the client in a response. + credits : count; + ## A flags field, which indicates how to process the operation (e.g. asynchronously) + flags : count; + ## A value that uniquely identifies the message request/response pair across all + ## messages that are sent on the same transport protocol connection + message_id : count; + ## A value that uniquely identifies the process that generated the event. + process_id : count; + ## A value that uniquely identifies the tree connect for the command. + tree_id : count; + ## A value that uniquely identifies the established session for the command. + session_id : count; + ## The 16-byte signature of the message, if SMB2_FLAGS_SIGNED is set in the ``flags`` + ## field. + signature : string; + }; + + ## An SMB2 globally unique identifier which identifies a file. + ## + ## For more information, see MS-SMB2:2.2.14.1 + ## + ## .. zeek:see:: smb2_close_request smb2_create_response smb2_read_request + ## smb2_file_rename smb2_file_delete smb2_write_request + type SMB2::GUID: record { + ## A file handle that remains persistent when reconnected after a disconnect + persistent: count; + ## A file handle that can be changed when reconnected after a disconnect + volatile: count; + }; + + ## A series of boolean flags describing basic and extended file attributes for SMB2. + ## + ## For more information, see MS-CIFS:2.2.1.2.3 and MS-FSCC:2.6 + ## + ## .. zeek:see:: smb2_create_response + type SMB2::FileAttrs: record { + ## The file is read only. Applications can read the file but cannot + ## write to it or delete it. + read_only: bool; + ## The file is hidden. It is not to be included in an ordinary directory listing. + hidden: bool; + ## The file is part of or is used exclusively by the operating system. + system: bool; + ## The file is a directory. + directory: bool; + ## The file has not been archived since it was last modified. Applications use + ## this attribute to mark files for backup or removal. + archive: bool; + ## The file has no other attributes set. This attribute is valid only if used alone. + normal: bool; + ## The file is temporary. This is a hint to the cache manager that it does not need + ## to flush the file to backing storage. + temporary: bool; + ## A file that is a sparse file. + sparse_file: bool; + ## A file or directory that has an associated reparse point. + reparse_point: bool; + ## The file or directory is compressed. For a file, this means that all of the data + ## in the file is compressed. For a directory, this means that compression is the + ## default for newly created files and subdirectories. + compressed: bool; + ## The data in this file is not available immediately. This attribute indicates that + ## the file data is physically moved to offline storage. This attribute is used by + ## Remote Storage, which is hierarchical storage management software. + offline: bool; + ## A file or directory that is not indexed by the content indexing service. + not_content_indexed: bool; + ## A file or directory that is encrypted. For a file, all data streams in the file + ## are encrypted. For a directory, encryption is the default for newly created files + ## and subdirectories. + encrypted: bool; + ## A file or directory that is configured with integrity support. For a file, all + ## data streams in the file have integrity support. 
For a directory, integrity support
+		## is the default for newly created files and subdirectories, unless the caller
+		## specifies otherwise.
+		integrity_stream: bool;
+		## A file or directory that is configured to be excluded from the data integrity scan.
+		no_scrub_data: bool;
+	};
+
+	## The response to an SMB2 *close* request, which is used by the client to close an instance
+	## of a file that was opened previously.
+	##
+	## For more information, see MS-SMB2:2.2.16
+	##
+	## .. zeek:see:: smb2_close_response
+	type SMB2::CloseResponse: record {
+		## The size, in bytes, of the data that is allocated to the file.
+		alloc_size : count;
+		## The size, in bytes, of the file.
+		eof : count;
+		## The creation, last access, last write, and change times.
+		times : SMB::MACTimes;
+		## The attributes of the file.
+		attrs : SMB2::FileAttrs;
+	};
+
+	## Preauthentication information as defined in SMB v. 3.1.1.
+	##
+	## For more information, see MS-SMB2:2.3.1.1
+	##
+	type SMB2::PreAuthIntegrityCapabilities: record {
+		## The number of hash algorithms.
+		hash_alg_count : count;
+		## The salt length.
+		salt_length : count;
+		## An array of hash algorithms (counts).
+		hash_alg : vector of count;
+		## The salt.
+		salt : string;
+	};
+
+	## Encryption information as defined in SMB v. 3.1.1.
+	##
+	## For more information, see MS-SMB2:2.3.1.2
+	##
+	type SMB2::EncryptionCapabilities: record {
+		## The number of ciphers.
+		cipher_count : count;
+		## An array of ciphers.
+		ciphers : vector of count;
+	};
+
+	## Compression information as defined in SMB v. 3.1.1.
+	##
+	## For more information, see MS-SMB2:2.3.1.3
+	##
+	type SMB2::CompressionCapabilities: record {
+		## The number of algorithms.
+		alg_count : count;
+		## An array of compression algorithms.
+		algs : vector of count;
+	};
+
+	## The context type information as defined in SMB v. 3.1.1.
+	##
+	## For more information, see MS-SMB2:2.3.1
+	##
+	type SMB2::NegotiateContextValue: record {
+		## Specifies the type of context (e.g., preauth or encryption).
+		context_type : count;
+		## The length in bytes of the data field.
+		data_length : count;
+		## The preauthentication information.
+		preauth_info : SMB2::PreAuthIntegrityCapabilities &optional;
+		## The encryption information.
+		encryption_info : SMB2::EncryptionCapabilities &optional;
+		## The compression information.
+		compression_info : SMB2::CompressionCapabilities &optional;
+		## Indicates the server name the client must connect to.
+		netname: string &optional;
+	};
+
+	type SMB2::NegotiateContextValues: vector of SMB2::NegotiateContextValue;
+
+	## The response to an SMB2 *negotiate* request, which is used by the client to notify the server
+	## what dialects of the SMB2 protocol the client understands.
+	##
+	## For more information, see MS-SMB2:2.2.4
+	##
+	## .. zeek:see:: smb2_negotiate_response
+	type SMB2::NegotiateResponse: record {
+		## The preferred common SMB2 Protocol dialect number from the array that was sent in the SMB2
+		## NEGOTIATE Request.
+		dialect_revision : count;
+		## The security mode field specifies whether SMB signing is enabled, required at the server, or both.
+		security_mode : count;
+		## A globally unique identifier that is generated by the server to uniquely identify the server.
+		server_guid : string;
+		## The system time of the SMB2 server when the SMB2 NEGOTIATE Request was processed.
+		system_time : time;
+		## The SMB2 server start time.
+		server_start_time : time;
+
+		## The number of negotiate context values in SMB v. 3.1.1, otherwise reserved to 0.
+ negotiate_context_count : count; + ## An array of context values in SMB v. 3.1.1. + negotiate_context_values : SMB2::NegotiateContextValues; + }; + + ## The request sent by the client to request a new authenticated session + ## within a new or existing SMB 2 Protocol transport connection to the server. + ## + ## For more information, see MS-SMB2:2.2.5 + ## + ## .. zeek:see:: smb2_session_setup_request + type SMB2::SessionSetupRequest: record { + ## The security mode field specifies whether SMB signing is enabled or required at the client. + security_mode: count; + }; + + ## A flags field that indicates additional information about the session that's sent in the + ## *session_setup* response. + ## + ## For more information, see MS-SMB2:2.2.6 + ## + ## .. zeek:see:: smb2_session_setup_response + type SMB2::SessionSetupFlags: record { + ## If set, the client has been authenticated as a guest user. + guest: bool; + ## If set, the client has been authenticated as an anonymous user. + anonymous: bool; + ## If set, the server requires encryption of messages on this session. + encrypt: bool; + }; + + ## The response to an SMB2 *session_setup* request, which is sent by the client to request a + ## new authenticated session within a new or existing SMB 2 Protocol transport connection + ## to the server. + ## + ## For more information, see MS-SMB2:2.2.6 + ## + ## .. zeek:see:: smb2_session_setup_response + type SMB2::SessionSetupResponse: record { + ## Additional information about the session + flags: SMB2::SessionSetupFlags; + }; + + ## The response to an SMB2 *tree_connect* request, which is sent by the client to request + ## access to a particular share on the server. + ## + ## For more information, see MS-SMB2:2.2.9 + ## + ## .. zeek:see:: smb2_tree_connect_response + type SMB2::TreeConnectResponse: record { + ## The type of share being accessed. Physical disk, named pipe, or printer. + share_type: count; + }; + + ## The request sent by the client to request either creation of or access to a file. + ## + ## For more information, see MS-SMB2:2.2.13 + ## + ## .. zeek:see:: smb2_create_request + type SMB2::CreateRequest: record { + ## Name of the file + filename : string; + ## Defines the action the server MUST take if the file that is specified already exists. + disposition : count; + ## Specifies the options to be applied when creating or opening the file. + create_options : count; + }; + + ## The response to an SMB2 *create_request* request, which is sent by the client to request + ## either creation of or access to a file. + ## + ## For more information, see MS-SMB2:2.2.14 + ## + ## .. zeek:see:: smb2_create_response + type SMB2::CreateResponse: record { + ## The SMB2 GUID for the file. + file_id : SMB2::GUID; + ## Size of the file. + size : count; + ## Timestamps associated with the file in question. + times : SMB::MACTimes; + ## File attributes. + attrs : SMB2::FileAttrs; + ## The action taken in establishing the open. + create_action : count; + }; + + ## An SMB2 transform header (for SMB 3.x dialects with encryption enabled). + ## + ## For more information, see MS-SMB2:2.2.41 + ## + ## .. 
zeek:see:: smb2_transform_header smb2_message smb2_close_request smb2_close_response + ## smb2_create_request smb2_create_response smb2_negotiate_request + ## smb2_negotiate_response smb2_read_request + ## smb2_session_setup_request smb2_session_setup_response + ## smb2_file_rename smb2_file_delete + ## smb2_tree_connect_request smb2_tree_connect_response + ## smb2_write_request + type SMB2::Transform_header: record { + ## The 16-byte signature of the encrypted message, generated by using Session.EncryptionKey. + signature : string; + ## An implementation specific value assigned for every encrypted message. + nonce : string; + ## The size, in bytes, of the SMB2 message. + orig_msg_size : count; + ## A flags field, interpreted in different ways depending of the SMB2 dialect. + flags : count; + ## A value that uniquely identifies the established session for the command. + session_id : count; + }; +} + +module GLOBAL; + +module DHCP; + +export { + ## A list of addresses offered by a DHCP server. Could be routers, + ## DNS servers, or other. + ## + ## .. zeek:see:: dhcp_message + type DHCP::Addrs: vector of addr; + + ## A DHCP message. + ## .. zeek:see:: dhcp_message + type DHCP::Msg: record { + op: count; ##< Message OP code. 1 = BOOTREQUEST, 2 = BOOTREPLY + m_type: count; ##< The type of DHCP message. + xid: count; ##< Transaction ID of a DHCP session. + ## Number of seconds since client began address acquisition + ## or renewal process + secs: interval; + flags: count; + ciaddr: addr; ##< Original IP address of the client. + yiaddr: addr; ##< IP address assigned to the client. + siaddr: addr; ##< IP address of the server. + giaddr: addr; ##< IP address of the relaying gateway. + chaddr: string; ##< Client hardware address. + sname: string &default=""; ##< Server host name. + file_n: string &default=""; ##< Boot file name. + }; + + ## DHCP Client Identifier (Option 61) + ## .. zeek:see:: dhcp_message + type DHCP::ClientID: record { + hwtype: count; + hwaddr: string; + }; + + ## DHCP Client FQDN Option information (Option 81) + type DHCP::ClientFQDN: record { + ## An unparsed bitfield of flags (refer to RFC 4702). + flags: count; + ## This field is deprecated in the standard. + rcode1: count; + ## This field is deprecated in the standard. + rcode2: count; + ## The Domain Name part of the option carries all or part of the FQDN + ## of a DHCP client. + domain_name: string; + }; + + ## DHCP Relay Agent Information Option (Option 82) + ## .. zeek:see:: dhcp_message + type DHCP::SubOpt: record { + code: count; + value: string; + }; + + type DHCP::SubOpts: vector of DHCP::SubOpt; + + type DHCP::Options: record { + ## The ordered list of all DHCP option numbers. + options: index_vec &optional; + + ## Subnet Mask Value (option 1) + subnet_mask: addr &optional; + + ## Router addresses (option 3) + routers: DHCP::Addrs &optional; + + ## DNS Server addresses (option 6) + dns_servers: DHCP::Addrs &optional; + + ## The Hostname of the client (option 12) + host_name: string &optional; + + ## The DNS domain name of the client (option 15) + domain_name: string &optional; + + ## Enable/Disable IP Forwarding (option 19) + forwarding: bool &optional; + + ## Broadcast Address (option 28) + broadcast: addr &optional; + + ## Vendor specific data. This can frequently + ## be unparsed binary data. 
(option 43) + vendor: string &optional; + + ## NETBIOS name server list (option 44) + nbns: DHCP::Addrs &optional; + + ## Address requested by the client (option 50) + addr_request: addr &optional; + + ## Lease time offered by the server. (option 51) + lease: interval &optional; + + ## Server address to allow clients to distinguish + ## between lease offers. (option 54) + serv_addr: addr &optional; + + ## DHCP Parameter Request list (option 55) + param_list: index_vec &optional; + + ## Textual error message (option 56) + message: string &optional; + + ## Maximum Message Size (option 57) + max_msg_size: count &optional; + + ## This option specifies the time interval from address + ## assignment until the client transitions to the + ## RENEWING state. (option 58) + renewal_time: interval &optional; + + ## This option specifies the time interval from address + ## assignment until the client transitions to the + ## REBINDING state. (option 59) + rebinding_time: interval &optional; + + ## This option is used by DHCP clients to optionally + ## identify the vendor type and configuration of a DHCP + ## client. (option 60) + vendor_class: string &optional; + + ## DHCP Client Identifier (Option 61) + client_id: DHCP::ClientID &optional; + + ## User Class opaque value (Option 77) + user_class: string &optional; + + ## DHCP Client FQDN (Option 81) + client_fqdn: DHCP::ClientFQDN &optional; + + ## DHCP Relay Agent Information Option (Option 82) + sub_opt: DHCP::SubOpts &optional; + + ## Auto Config option to let host know if it's allowed to + ## auto assign an IP address. (Option 116) + auto_config: bool &optional; + + ## URL to find a proxy.pac for auto proxy config (Option 252) + auto_proxy_config: string &optional; + + ## The offset of the client's subnet in seconds from UTC. (Option 2) + time_offset: int &optional; + + ## A list of :rfc:`868` time servers available to the client. + ## (Option 4) + time_servers: DHCP::Addrs &optional; + + ## A list of IEN 116 name servers available to the client. (Option 5) + name_servers: DHCP::Addrs &optional; + + ## A list of IP addresses indicating NTP servers available to the + ## client. (Option 42) + ntp_servers: DHCP::Addrs &optional; + }; +} + +module GLOBAL; +## A DNS message. +## +## .. zeek:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl +## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply +## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end +## dns_message dns_query_reply dns_rejected dns_request +type dns_msg: record { + id: count; ##< Transaction ID. + + opcode: count; ##< Operation code. + rcode: count; ##< Return code. + + QR: bool; ##< Query response flag. + AA: bool; ##< Authoritative answer flag. + TC: bool; ##< Truncated packet flag. + RD: bool; ##< Recursion desired flag. + RA: bool; ##< Recursion available flag. + Z: count; ##< TODO. + + num_queries: count; ##< Number of query records. + num_answers: count; ##< Number of answer records. + num_auth: count; ##< Number of authoritative records. + num_addl: count; ##< Number of additional records. +}; + +## A DNS SOA record. +## +## .. zeek:see:: dns_SOA_reply +type dns_soa: record { + mname: string; ##< Primary source of data for zone. + rname: string; ##< Mailbox for responsible person. + serial: count; ##< Version number of zone. + refresh: interval; ##< Seconds before refreshing. + retry: interval; ##< How long before retrying failed refresh. + expire: interval; ##< When zone no longer authoritative. 
+ minimum: interval; ##< Minimum TTL to use when exporting. +}; + +## An additional DNS EDNS record. +## +## .. zeek:see:: dns_EDNS_addl +type dns_edns_additional: record { + query: string; ##< Query. + qtype: count; ##< Query type. + t: count; ##< TODO. + payload_size: count; ##< TODO. + extended_rcode: count; ##< Extended return code. + version: count; ##< Version. + z_field: count; ##< TODO. + TTL: interval; ##< Time-to-live. + is_query: count; ##< TODO. +}; + +## An additional DNS TSIG record. +## +## .. zeek:see:: dns_TSIG_addl +type dns_tsig_additional: record { + query: string; ##< Query. + qtype: count; ##< Query type. + alg_name: string; ##< Algorithm name. + sig: string; ##< Signature. + time_signed: time; ##< Time when signed. + fudge: time; ##< TODO. + orig_id: count; ##< TODO. + rr_error: count; ##< TODO. + is_query: count; ##< TODO. +}; + +## A DNSSEC RRSIG record. +## +## .. zeek:see:: dns_RRSIG +type dns_rrsig_rr: record { + query: string; ##< Query. + answer_type: count; ##< Ans type. + type_covered: count; ##< qtype covered by RRSIG RR. + algorithm: count; ##< Algorithm. + labels: count; ##< Labels in the owner's name. + orig_ttl: interval; ##< Original TTL. + sig_exp: time; ##< Time when signed RR expires. + sig_incep: time; ##< Time when signed. + key_tag: count; ##< Key tag value. + signer_name: string; ##< Signature. + signature: string; ##< Hash of the RRDATA. + is_query: count; ##< The RR is a query/Response. +}; + +## A DNSSEC DNSKEY record. +## +## .. zeek:see:: dns_DNSKEY +type dns_dnskey_rr: record { + query: string; ##< Query. + answer_type: count; ##< Ans type. + flags: count; ##< flags filed. + protocol: count; ##< Protocol, should be always 3 for DNSSEC. + algorithm: count; ##< Algorithm for Public Key. + public_key: string; ##< Public Key + is_query: count; ##< The RR is a query/Response. +}; + +## A DNSSEC NSEC3 record. +## +## .. zeek:see:: dns_NSEC3 +type dns_nsec3_rr: record { + query: string; ##< Query. + answer_type: count; ##< Ans type. + nsec_flags: count; ##< flags field. + nsec_hash_algo: count; ##< Hash algorithm. + nsec_iter: count; ##< Iterations. + nsec_salt_len: count; ##< Salt length. + nsec_salt: string; ##< Salt value + nsec_hlen: count; ##< Hash length. + nsec_hash: string; ##< Hash value. + bitmaps: string_vec; ##< Type Bit Maps. + is_query: count; ##< The RR is a query/Response. +}; + +## A DNSSEC DS record. +## +## .. zeek:see:: dns_DS +type dns_ds_rr: record { + query: string; ##< Query. + answer_type: count; ##< Ans type. + key_tag: count; ##< flags filed. + algorithm: count; ##< Algorithm for Public Key. + digest_type: count; ##< Digest Type. + digest_val: string; ##< Digest Value. + is_query: count; ##< The RR is a query/Response. +}; + +# DNS answer types. +# +# .. zeek:see:: dns_answerr +# +# todo:: use enum to make them autodoc'able +const DNS_QUERY = 0; ##< A query. This shouldn't occur, just for completeness. +const DNS_ANS = 1; ##< An answer record. +const DNS_AUTH = 2; ##< An authoritative record. +const DNS_ADDL = 3; ##< An additional record. + +## The general part of a DNS reply. +## +## .. zeek:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_HINFO_reply +## dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply dns_SRV_reply +## dns_TXT_reply dns_WKS_reply +type dns_answer: record { + ## Answer type. One of :zeek:see:`DNS_QUERY`, :zeek:see:`DNS_ANS`, + ## :zeek:see:`DNS_AUTH` and :zeek:see:`DNS_ADDL`. + answer_type: count; + query: string; ##< Query. + qtype: count; ##< Query type. + qclass: count; ##< Query class. 
+ TTL: interval; ##< Time-to-live. +}; + +## For DNS servers in these sets, omit processing the AUTH records they include +## in their replies. +## +## .. zeek:see:: dns_skip_all_auth dns_skip_addl +global dns_skip_auth: set[addr] &redef; + +## For DNS servers in these sets, omit processing the ADDL records they include +## in their replies. +## +## .. zeek:see:: dns_skip_all_addl dns_skip_auth +global dns_skip_addl: set[addr] &redef; + +## If true, all DNS AUTH records are skipped. +## +## .. zeek:see:: dns_skip_all_addl dns_skip_auth +global dns_skip_all_auth = T &redef; + +## If true, all DNS ADDL records are skipped. +## +## .. zeek:see:: dns_skip_all_auth dns_skip_addl +global dns_skip_all_addl = T &redef; + +## If a DNS request includes more than this many queries, assume it's non-DNS +## traffic and do not process it. Set to 0 to turn off this functionality. +global dns_max_queries = 25 &redef; + +## HTTP session statistics. +## +## .. zeek:see:: http_stats +type http_stats_rec: record { + num_requests: count; ##< Number of requests. + num_replies: count; ##< Number of replies. + request_version: double; ##< HTTP version of the requests. + reply_version: double; ##< HTTP Version of the replies. +}; + +## HTTP message statistics. +## +## .. zeek:see:: http_message_done +type http_message_stat: record { + ## When the request/reply line was complete. + start: time; + ## Whether the message was interrupted. + interrupted: bool; + ## Reason phrase if interrupted. + finish_msg: string; + ## Length of body processed (before finished/interrupted). + body_length: count; + ## Total length of gaps within *body_length*. + content_gap_length: count; + ## Length of headers (including the req/reply line, but not CR/LF's). + header_length: count; +}; + +## Maximum number of HTTP entity data delivered to events. +## +## .. zeek:see:: http_entity_data skip_http_entity_data skip_http_data +global http_entity_data_delivery_size = 1500 &redef; + +## Skip HTTP data for performance considerations. The skipped +## portion will not go through TCP reassembly. +## +## .. zeek:see:: http_entity_data skip_http_entity_data http_entity_data_delivery_size +const skip_http_data = F &redef; + +## Maximum length of HTTP URIs passed to events. Longer ones will be truncated +## to prevent over-long URIs (usually sent by worms) from slowing down event +## processing. A value of -1 means "do not truncate". +## +## .. zeek:see:: http_request +const truncate_http_URI = -1 &redef; + +## IRC join information. +## +## .. zeek:see:: irc_join_list +type irc_join_info: record { + nick: string; + channel: string; + password: string; + usermode: string; +}; + +## Set of IRC join information. +## +## .. zeek:see:: irc_join_message +type irc_join_list: set[irc_join_info]; + +module PE; +export { +type PE::DOSHeader: record { + ## The magic number of a portable executable file ("MZ"). + signature : string; + ## The number of bytes in the last page that are used. + used_bytes_in_last_page : count; + ## The number of pages in the file that are part of the PE file itself. + file_in_pages : count; + ## Number of relocation entries stored after the header. + num_reloc_items : count; + ## Number of paragraphs in the header. + header_in_paragraphs : count; + ## Number of paragraps of additional memory that the program will need. + min_extra_paragraphs : count; + ## Maximum number of paragraphs of additional memory. + max_extra_paragraphs : count; + ## Relative value of the stack segment. 
+ init_relative_ss : count; + ## Initial value of the SP register. + init_sp : count; + ## Checksum. The 16-bit sum of all words in the file should be 0. Normally not set. + checksum : count; + ## Initial value of the IP register. + init_ip : count; + ## Initial value of the CS register (relative to the initial segment). + init_relative_cs : count; + ## Offset of the first relocation table. + addr_of_reloc_table : count; + ## Overlays allow you to append data to the end of the file. If this is the main program, + ## this will be 0. + overlay_num : count; + ## OEM identifier. + oem_id : count; + ## Additional OEM info, specific to oem_id. + oem_info : count; + ## Address of the new EXE header. + addr_of_new_exe_header : count; +}; + +type PE::FileHeader: record { + ## The target machine that the file was compiled for. + machine : count; + ## The time that the file was created at. + ts : time; + ## Pointer to the symbol table. + sym_table_ptr : count; + ## Number of symbols. + num_syms : count; + ## The size of the optional header. + optional_header_size : count; + ## Bit flags that determine if this file is executable, non-relocatable, and/or a DLL. + characteristics : set[count]; +}; + +type PE::OptionalHeader: record { + ## PE32 or PE32+ indicator. + magic : count; + ## The major version of the linker used to create the PE. + major_linker_version : count; + ## The minor version of the linker used to create the PE. + minor_linker_version : count; + ## Size of the .text section. + size_of_code : count; + ## Size of the .data section. + size_of_init_data : count; + ## Size of the .bss section. + size_of_uninit_data : count; + ## The relative virtual address (RVA) of the entry point. + addr_of_entry_point : count; + ## The relative virtual address (RVA) of the .text section. + base_of_code : count; + ## The relative virtual address (RVA) of the .data section. + base_of_data : count &optional; + ## Preferred memory location for the image to be based at. + image_base : count; + ## The alignment (in bytes) of sections when they're loaded in memory. + section_alignment : count; + ## The alignment (in bytes) of the raw data of sections. + file_alignment : count; + ## The major version of the required OS. + os_version_major : count; + ## The minor version of the required OS. + os_version_minor : count; + ## The major version of this image. + major_image_version : count; + ## The minor version of this image. + minor_image_version : count; + ## The major version of the subsystem required to run this file. + major_subsys_version : count; + ## The minor version of the subsystem required to run this file. + minor_subsys_version : count; + ## The size (in bytes) of the iamge as the image is loaded in memory. + size_of_image : count; + ## The size (in bytes) of the headers, rounded up to file_alignment. + size_of_headers : count; + ## The image file checksum. + checksum : count; + ## The subsystem that's required to run this image. + subsystem : count; + ## Bit flags that determine how to execute or load this file. + dll_characteristics : set[count]; + ## A vector with the sizes of various tables and strings that are + ## defined in the optional header data directories. Examples include + ## the import table, the resource table, and debug information. + table_sizes : vector of count; + +}; + +## Record for Portable Executable (PE) section headers. +type PE::SectionHeader: record { + ## The name of the section + name : string; + ## The total size of the section when loaded into memory. 
+ virtual_size : count; + ## The relative virtual address (RVA) of the section. + virtual_addr : count; + ## The size of the initialized data for the section, as it is + ## in the file on disk. + size_of_raw_data : count; + ## The virtual address of the initialized dat for the section, + ## as it is in the file on disk. + ptr_to_raw_data : count; + ## The file pointer to the beginning of relocation entries for + ## the section. + ptr_to_relocs : count; + ## The file pointer to the beginning of line-number entries for + ## the section. + ptr_to_line_nums : count; + ## The number of relocation entries for the section. + num_of_relocs : count; + ## The number of line-number entrie for the section. + num_of_line_nums : count; + ## Bit-flags that describe the characteristics of the section. + characteristics : set[count]; +}; +} +module GLOBAL; + +## Deprecated. +## +## .. todo:: Remove. It's still declared internally but doesn't seem used anywhere +## else. +global irc_servers : set[addr] &redef; + +## Internal to the stepping stone detector. +const stp_delta: interval &redef; + +## Internal to the stepping stone detector. +const stp_idle_min: interval &redef; + +## Internal to the stepping stone detector. +global stp_skip_src: set[addr] &redef; + +## Deprecated. +const interconn_min_interarrival: interval &redef; + +## Deprecated. +const interconn_max_interarrival: interval &redef; + +## Deprecated. +const interconn_max_keystroke_pkt_size: count &redef; + +## Deprecated. +const interconn_default_pkt_size: count &redef; + +## Deprecated. +const interconn_stat_period: interval &redef; + +## Deprecated. +const interconn_stat_backoff: double &redef; + +## Deprecated. +type interconn_endp_stats: record { + num_pkts: count; + num_keystrokes_two_in_row: count; + num_normal_interarrivals: count; + num_8k0_pkts: count; + num_8k4_pkts: count; + is_partial: bool; + num_bytes: count; + num_7bit_ascii: count; + num_lines: count; + num_normal_lines: count; +}; + +## Deprecated. +const backdoor_stat_period: interval &redef; + +## Deprecated. +const backdoor_stat_backoff: double &redef; + +## Deprecated. +type backdoor_endp_stats: record { + is_partial: bool; + num_pkts: count; + num_8k0_pkts: count; + num_8k4_pkts: count; + num_lines: count; + num_normal_lines: count; + num_bytes: count; + num_7bit_ascii: count; +}; + +## Description of a signature match. +## +## .. zeek:see:: signature_match +type signature_state: record { + sig_id: string; ##< ID of the matching signature. + conn: connection; ##< Matching connection. + is_orig: bool; ##< True if matching endpoint is originator. + payload_size: count; ##< Payload size of the first matching packet of current endpoint. +}; + +# Deprecated. +# +# .. todo:: This type is no longer used. Remove any reference of this from the +# core. +type software_version: record { + major: int; + minor: int; + minor2: int; + addl: string; +}; + +# Deprecated. +# +# .. todo:: This type is no longer used. Remove any reference of this from the +# core. +type software: record { + name: string; + version: software_version; +}; + +# Type used to report load samples via :zeek:see:`load_sample`. For now, it's a +# set of names (event names, source file names, and perhaps ````), which were seen during the sample. +type load_sample_info: set[string]; + +## A BitTorrent peer. +## +## .. zeek:see:: bittorrent_peer_set +type bittorrent_peer: record { + h: addr; ##< The peer's address. + p: port; ##< The peer's port. +}; + +## A set of BitTorrent peers. +## +## .. 
zeek:see:: bt_tracker_response +type bittorrent_peer_set: set[bittorrent_peer]; + +## BitTorrent "benc" value. Note that "benc" = Bencode ("Bee-Encode"), per +## http://en.wikipedia.org/wiki/Bencode. +## +## .. zeek:see:: bittorrent_benc_dir +type bittorrent_benc_value: record { + i: int &optional; ##< TODO. + s: string &optional; ##< TODO. + d: string &optional; ##< TODO. + l: string &optional; ##< TODO. +}; + +## A table of BitTorrent "benc" values. +## +## .. zeek:see:: bt_tracker_response +type bittorrent_benc_dir: table[string] of bittorrent_benc_value; + +## Header table type used by BitTorrent analyzer. +## +## .. zeek:see:: bt_tracker_request bt_tracker_response +## bt_tracker_response_not_ok +type bt_tracker_headers: table[string] of string; + +## A vector of boolean values that indicate the setting +## for a range of modbus coils. +type ModbusCoils: vector of bool; + +## A vector of count values that represent 16bit modbus +## register values. +type ModbusRegisters: vector of count; + +type ModbusHeaders: record { + ## Transaction identifier + tid: count; + ## Protocol identifier + pid: count; + ## Unit identifier (previously 'slave address') + uid: count; + ## MODBUS function code + function_code: count; +}; + +module Unified2; +export { + type Unified2::IDSEvent: record { + sensor_id: count; + event_id: count; + ts: time; + signature_id: count; + generator_id: count; + signature_revision: count; + classification_id: count; + priority_id: count; + src_ip: addr; + dst_ip: addr; + src_p: port; + dst_p: port; + impact_flag: count; + impact: count; + blocked: count; + ## Not available in "legacy" IDS events. + mpls_label: count &optional; + ## Not available in "legacy" IDS events. + vlan_id: count &optional; + ## Only available in "legacy" IDS events. + packet_action: count &optional; + }; + + type Unified2::Packet: record { + sensor_id: count; + event_id: count; + event_second: count; + packet_ts: time; + link_type: count; + data: string; + }; +} + +module SSL; +export { + type SignatureAndHashAlgorithm: record { + HashAlgorithm: count; ##< Hash algorithm number + SignatureAlgorithm: count; ##< Signature algorithm number + }; + + type PSKIdentity: record { + identity: string; ##< PSK identity + obfuscated_ticket_age: count; + }; + +## Number of non-DTLS frames that can occur in a DTLS connection before +## parsing of the connection is suspended. +## DTLS does not immediately stop parsing a connection because other protocols +## might be interleaved in the same UDP "connection". +const SSL::dtls_max_version_errors = 10 &redef; + +## Maximum number of invalid version errors to report in one DTLS connection. +const SSL::dtls_max_reported_version_errors = 1 &redef; + +} + +module GLOBAL; + +## A vector of Signature and Hash Algorithms. +## +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. +type signature_and_hashalgorithm_vec: vector of SSL::SignatureAndHashAlgorithm; + +type psk_identity_vec: vector of SSL::PSKIdentity; + +module X509; +export { + type Certificate: record { + version: count &log; ##< Version number. + serial: string &log; ##< Serial number. + subject: string &log; ##< Subject. + issuer: string &log; ##< Issuer. + cn: string &optional; ##< Last (most specific) common name. + not_valid_before: time &log; ##< Timestamp before when certificate is not valid. 
+ not_valid_after: time &log; ##< Timestamp after when certificate is not valid. + key_alg: string &log; ##< Name of the key algorithm + sig_alg: string &log; ##< Name of the signature algorithm + key_type: string &optional &log; ##< Key type, if key parseable by openssl (either rsa, dsa or ec) + key_length: count &optional &log; ##< Key length in bits + exponent: string &optional &log; ##< Exponent, if RSA-certificate + curve: string &optional &log; ##< Curve, if EC-certificate + }; + + type Extension: record { + name: string; ##< Long name of extension. oid if name not known + short_name: string &optional; ##< Short name of extension if known + oid: string; ##< Oid of extension + critical: bool; ##< True if extension is critical + value: string; ##< Extension content parsed to string for known extensions. Raw data otherwise. + }; + + type BasicConstraints: record { + ca: bool; ##< CA flag set? + path_len: count &optional; ##< Maximum path length + } &log; + + type SubjectAlternativeName: record { + dns: string_vec &optional &log; ##< List of DNS entries in SAN + uri: string_vec &optional &log; ##< List of URI entries in SAN + email: string_vec &optional &log; ##< List of email entries in SAN + ip: addr_vec &optional &log; ##< List of IP entries in SAN + other_fields: bool; ##< True if the certificate contained other, not recognized or parsed name fields + }; + + ## Result of an X509 certificate chain verification + type Result: record { + ## OpenSSL result code + result: int; + ## Result as string + result_string: string; + ## References to the final certificate chain, if verification successful. End-host certificate is first. + chain_certs: vector of opaque of x509 &optional; + }; +} + +module SOCKS; +export { + ## This record is for a SOCKS client or server to provide either a + ## name or an address to represent a desired or established connection. + type Address: record { + host: addr &optional; + name: string &optional; + } &log; +} + +module RADIUS; + +export { + type RADIUS::AttributeList: vector of string; + type RADIUS::Attributes: table[count] of RADIUS::AttributeList; + + type RADIUS::Message: record { + ## The type of message (Access-Request, Access-Accept, etc.). + code : count; + ## The transaction ID. + trans_id : count; + ## The "authenticator" string. + authenticator : string; + ## Any attributes. + attributes : RADIUS::Attributes &optional; + }; +} + +module RDP; +export { + type RDP::EarlyCapabilityFlags: record { + support_err_info_pdu: bool; + want_32bpp_session: bool; + support_statusinfo_pdu: bool; + strong_asymmetric_keys: bool; + support_monitor_layout_pdu: bool; + support_netchar_autodetect: bool; + support_dynvc_gfx_protocol: bool; + support_dynamic_time_zone: bool; + support_heartbeat_pdu: bool; + }; + + type RDP::ClientCoreData: record { + version_major: count; + version_minor: count; + desktop_width: count; + desktop_height: count; + color_depth: count; + sas_sequence: count; + keyboard_layout: count; + client_build: count; + client_name: string; + keyboard_type: count; + keyboard_sub: count; + keyboard_function_key: count; + ime_file_name: string; + post_beta2_color_depth: count &optional; + client_product_id: string &optional; + serial_number: count &optional; + high_color_depth: count &optional; + supported_color_depths: count &optional; + ec_flags: RDP::EarlyCapabilityFlags &optional; + dig_product_id: string &optional; + }; + + ## The TS_UD_CS_SEC data block contains security-related information used + ## to advertise client cryptographic support. 
+ type RDP::ClientSecurityData: record { + ## Cryptographic encryption methods supported by the client and used in + ## conjunction with Standard RDP Security. Known flags: + ## + ## - 0x00000001: support for 40-bit session encryption keys + ## - 0x00000002: support for 128-bit session encryption keys + ## - 0x00000008: support for 56-bit session encryption keys + ## - 0x00000010: support for FIPS compliant encryption and MAC methods + encryption_methods: count; + ## Only used in French locale and designates the encryption method. If + ## non-zero, then encryption_methods should be set to 0. + ext_encryption_methods: count; + }; + + ## Name and flags for a single channel requested by the client. + type RDP::ClientChannelDef: record { + ## A unique name for the channel + name: string; + ## Channel Def raw options as count + options: count; + ## Absence of this flag indicates that this channel is + ## a placeholder and that the server MUST NOT set it up. + initialized: bool; + ## Unused, must be ignored by the server. + encrypt_rdp: bool; + ## Unused, must be ignored by the server. + encrypt_sc: bool; + ## Unused, must be ignored by the server. + encrypt_cs: bool; + ## Channel data must be sent with high MCS priority. + pri_high: bool; + ## Channel data must be sent with medium MCS priority. + pri_med: bool; + ## Channel data must be sent with low MCS priority. + pri_low: bool; + ## Virtual channel data must be compressed if RDP data is being compressed. + compress_rdp: bool; + ## Virtual channel data must be compressed. + compress: bool; + ## Ignored by the server. + show_protocol: bool; + ## Channel must be persistent across remote control transactions. + persistent: bool; + }; + + ## The TS_UD_CS_CLUSTER data block is sent by the client to the server + ## either to advertise that it can support the Server Redirection PDUs + ## or to request a connection to a given session identifier. + type RDP::ClientClusterData: record { + ## Cluster information flags. + flags: count; + ## If the *redir_sessionid_field_valid* flag is set, this field + ## contains a valid session identifier to which the client requests + ## to connect. + redir_session_id: count; + ## The client can receive server session redirection packets. + ## If this flag is set, the *svr_session_redir_version_mask* + ## field MUST contain the server session redirection version that + ## the client supports. + redir_supported: bool; + ## The server session redirection version that the client supports. + svr_session_redir_version_mask: count; + ## Whether the *redir_session_id* field identifies a session on + ## the server to associate with the connection. + redir_sessionid_field_valid: bool; + ## The client logged on with a smart card. + redir_smartcard: bool; + }; + + ## The list of channels requested by the client. + type RDP::ClientChannelList: vector of ClientChannelDef; +} + +@load base/bif/plugins/Zeek_SNMP.types.bif + +module SNMP; +export { + ## The top-level message data structure of an SNMPv1 datagram, not + ## including the PDU data. See :rfc:`1157`. + type SNMP::HeaderV1: record { + community: string; + }; + + ## The top-level message data structure of an SNMPv2 datagram, not + ## including the PDU data. See :rfc:`1901`. + type SNMP::HeaderV2: record { + community: string; + }; + + ## The ``ScopedPduData`` data structure of an SNMPv3 datagram, not + ## including the PDU data (i.e. just the "context" fields). + ## See :rfc:`3412`. 
+ type SNMP::ScopedPDU_Context: record {
+ engine_id: string;
+ name: string;
+ };
+
+ ## The top-level message data structure of an SNMPv3 datagram, not
+ ## including the PDU data. See :rfc:`3412`.
+ type SNMP::HeaderV3: record {
+ id: count;
+ max_size: count;
+ flags: count;
+ auth_flag: bool;
+ priv_flag: bool;
+ reportable_flag: bool;
+ security_model: count;
+ security_params: string;
+ pdu_context: SNMP::ScopedPDU_Context &optional;
+ };
+
+ ## A generic SNMP header data structure that may include data from
+ ## any version of SNMP. The value of the ``version`` field
+ ## determines what header field is initialized.
+ type SNMP::Header: record {
+ version: count;
+ v1: SNMP::HeaderV1 &optional; ##< Set when ``version`` is 0.
+ v2: SNMP::HeaderV2 &optional; ##< Set when ``version`` is 1.
+ v3: SNMP::HeaderV3 &optional; ##< Set when ``version`` is 3.
+ };
+
+ ## A generic SNMP object value that may include any of the
+ ## valid ``ObjectSyntax`` values from :rfc:`1155` or :rfc:`3416`.
+ ## The value is decoded whenever possible and assigned to
+ ## the appropriate field, which can be determined from the value
+ ## of the ``tag`` field. For tags that can't be mapped to an
+ ## appropriate type, the ``octets`` field holds the BER encoded
+ ## ASN.1 content if there is any (though ``octets`` may also
+ ## be used for other tags such as OCTET STRINGS or Opaque). Null
+ ## values will only have their corresponding tag value set.
+ type SNMP::ObjectValue: record {
+ tag: count;
+ oid: string &optional;
+ signed: int &optional;
+ unsigned: count &optional;
+ address: addr &optional;
+ octets: string &optional;
+ };
+
+ # These aren't an enum because it's easier to type fields as count.
+ # That way we don't have to deal with type conversion, plus it doesn't
+ # mislead that these are the only valid tag values (it's just the set
+ # of known tags).
+ const SNMP::OBJ_INTEGER_TAG : count = 0x02; ##< Signed 64-bit integer.
+ const SNMP::OBJ_OCTETSTRING_TAG : count = 0x04; ##< An octet string.
+ const SNMP::OBJ_UNSPECIFIED_TAG : count = 0x05; ##< A NULL value.
+ const SNMP::OBJ_OID_TAG : count = 0x06; ##< An Object Identifier.
+ const SNMP::OBJ_IPADDRESS_TAG : count = 0x40; ##< An IP address.
+ const SNMP::OBJ_COUNTER32_TAG : count = 0x41; ##< Unsigned 32-bit integer.
+ const SNMP::OBJ_UNSIGNED32_TAG : count = 0x42; ##< Unsigned 32-bit integer.
+ const SNMP::OBJ_TIMETICKS_TAG : count = 0x43; ##< Unsigned 32-bit integer.
+ const SNMP::OBJ_OPAQUE_TAG : count = 0x44; ##< An octet string.
+ const SNMP::OBJ_COUNTER64_TAG : count = 0x46; ##< Unsigned 64-bit integer.
+ const SNMP::OBJ_NOSUCHOBJECT_TAG : count = 0x80; ##< A NULL value.
+ const SNMP::OBJ_NOSUCHINSTANCE_TAG: count = 0x81; ##< A NULL value.
+ const SNMP::OBJ_ENDOFMIBVIEW_TAG : count = 0x82; ##< A NULL value.
+
+ ## The ``VarBind`` data structure from either :rfc:`1157` or
+ ## :rfc:`3416`, which maps an Object Identifier to a value.
+ type SNMP::Binding: record {
+ oid: string;
+ value: SNMP::ObjectValue;
+ };
+
+ ## A ``VarBindList`` data structure from either :rfc:`1157` or :rfc:`3416`.
+ ## A sequence of :zeek:see:`SNMP::Binding`, which maps OIDs to values.
+ type SNMP::Bindings: vector of SNMP::Binding;
+
+ ## A ``PDU`` data structure from either :rfc:`1157` or :rfc:`3416`.
+ type SNMP::PDU: record {
+ request_id: int;
+ error_status: int;
+ error_index: int;
+ bindings: SNMP::Bindings;
+ };
+
+ ## A ``Trap-PDU`` data structure from :rfc:`1157`.
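# A minimal sketch (not from the Zeek sources above) of how a script might
# inspect an SNMP::ObjectValue using the tag constants just defined. The helper
# name describe_snmp_value is hypothetical and only illustrates the tag-based
# dispatch the record is designed for.
function describe_snmp_value(v: SNMP::ObjectValue): string
	{
	if ( v$tag == SNMP::OBJ_COUNTER32_TAG && v?$unsigned )
		return fmt("counter32=%d", v$unsigned);

	if ( v$tag == SNMP::OBJ_IPADDRESS_TAG && v?$address )
		return fmt("ip=%s", v$address);

	if ( v$tag == SNMP::OBJ_OID_TAG && v?$oid )
		return fmt("oid=%s", v$oid);

	# Fall back to the raw tag for types not decoded here.
	return fmt("tag=0x%x", v$tag);
	}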
+ type SNMP::TrapPDU: record { + enterprise: string; + agent: addr; + generic_trap: int; + specific_trap: int; + time_stamp: count; + bindings: SNMP::Bindings; + }; + + ## A ``BulkPDU`` data structure from :rfc:`3416`. + type SNMP::BulkPDU: record { + request_id: int; + non_repeaters: count; + max_repititions: count; + bindings: SNMP::Bindings; + }; +} + +@load base/bif/plugins/Zeek_KRB.types.bif + +module KRB; +export { + ## Kerberos keytab file name. Used to decrypt tickets encountered on the wire. + const keytab = "" &redef; + ## KDC Options. See :rfc:`4120` + type KRB::KDC_Options: record { + ## The ticket to be issued should have its forwardable flag set. + forwardable : bool; + ## A (TGT) request for forwarding. + forwarded : bool; + ## The ticket to be issued should have its proxiable flag set. + proxiable : bool; + ## A request for a proxy. + proxy : bool; + ## The ticket to be issued should have its may-postdate flag set. + allow_postdate : bool; + ## A request for a postdated ticket. + postdated : bool; + ## The ticket to be issued should have its renewable flag set. + renewable : bool; + ## Reserved for opt_hardware_auth + opt_hardware_auth : bool; + ## Request that the KDC not check the transited field of a TGT against + ## the policy of the local realm before it will issue derivative tickets + ## based on the TGT. + disable_transited_check : bool; + ## If a ticket with the requested lifetime cannot be issued, a renewable + ## ticket is acceptable + renewable_ok : bool; + ## The ticket for the end server is to be encrypted in the session key + ## from the additional TGT provided + enc_tkt_in_skey : bool; + ## The request is for a renewal + renew : bool; + ## The request is to validate a postdated ticket. + validate : bool; + }; + + ## AP Options. See :rfc:`4120` + type KRB::AP_Options: record { + ## Indicates that user-to-user-authentication is in use + use_session_key : bool; + ## Mutual authentication is required + mutual_required : bool; + }; + + ## Used in a few places in the Kerberos analyzer for elements + ## that have a type and a string value. + type KRB::Type_Value: record { + ## The data type + data_type : count; + ## The data value + val : string; + }; + + type KRB::Type_Value_Vector: vector of KRB::Type_Value; + + ## A Kerberos host address See :rfc:`4120`. + type KRB::Host_Address: record { + ## IPv4 or IPv6 address + ip : addr &log &optional; + ## NetBIOS address + netbios : string &log &optional; + ## Some other type that we don't support yet + unknown : KRB::Type_Value &optional; + }; + + type KRB::Host_Address_Vector: vector of KRB::Host_Address; + + ## The data from the SAFE message. See :rfc:`4120`. + type KRB::SAFE_Msg: record { + ## Protocol version number (5 for KRB5) + pvno : count; + ## The message type (20 for SAFE_MSG) + msg_type : count; + ## The application-specific data that is being passed + ## from the sender to the reciever + data : string; + ## Current time from the sender of the message + timestamp : time &optional; + ## Sequence number used to detect replays + seq : count &optional; + ## Sender address + sender : Host_Address &optional; + ## Recipient address + recipient : Host_Address &optional; + }; + + ## The data from the ERROR_MSG message. See :rfc:`4120`. 
+ type KRB::Error_Msg: record { + ## Protocol version number (5 for KRB5) + pvno : count &optional; + ## The message type (30 for ERROR_MSG) + msg_type : count &optional; + ## Current time on the client + client_time : time &optional; + ## Current time on the server + server_time : time &optional; + ## The specific error code + error_code : count; + ## Realm of the ticket + client_realm : string &optional; + ## Name on the ticket + client_name : string &optional; + ## Realm of the service + service_realm : string &optional; + ## Name of the service + service_name : string &optional; + ## Additional text to explain the error + error_text : string &optional; + ## Optional pre-authentication data + pa_data : vector of KRB::Type_Value &optional; + }; + + ## A Kerberos ticket. See :rfc:`4120`. + type KRB::Ticket: record { + ## Protocol version number (5 for KRB5) + pvno : count; + ## Realm + realm : string; + ## Name of the service + service_name : string; + ## Cipher the ticket was encrypted with + cipher : count; + ## Cipher text of the ticket + ciphertext : string &optional; + ## Authentication info + authenticationinfo: string &optional; + }; + + type KRB::Ticket_Vector: vector of KRB::Ticket; + + ## The data from the AS_REQ and TGS_REQ messages. See :rfc:`4120`. + type KRB::KDC_Request: record { + ## Protocol version number (5 for KRB5) + pvno : count; + ## The message type (10 for AS_REQ, 12 for TGS_REQ) + msg_type : count; + ## Optional pre-authentication data + pa_data : vector of KRB::Type_Value &optional; + ## Options specified in the request + kdc_options : KRB::KDC_Options &optional; + ## Name on the ticket + client_name : string &optional; + + ## Realm of the service + service_realm : string &optional; + ## Name of the service + service_name : string &optional; + ## Time the ticket is good from + from : time &optional; + ## Time the ticket is good till + till : time &optional; + ## The requested renew-till time + rtime : time &optional; + + ## A random nonce generated by the client + nonce : count &optional; + ## The desired encryption algorithms, in order of preference + encryption_types : vector of count &optional; + ## Any additional addresses the ticket should be valid for + host_addrs : vector of KRB::Host_Address &optional; + ## Additional tickets may be included for certain transactions + additional_tickets : vector of KRB::Ticket &optional; + }; + + ## The data from the AS_REQ and TGS_REQ messages. See :rfc:`4120`. + type KRB::KDC_Response: record { + ## Protocol version number (5 for KRB5) + pvno : count; + ## The message type (11 for AS_REP, 13 for TGS_REP) + msg_type : count; + ## Optional pre-authentication data + pa_data : vector of KRB::Type_Value &optional; + ## Realm on the ticket + client_realm : string &optional; + ## Name on the service + client_name : string; + + ## The ticket that was issued + ticket : KRB::Ticket; + }; +} + +module GLOBAL; + +@load base/bif/event.bif + +## BPF filter the user has set via the -f command line options. Empty if none. +const cmd_line_bpf_filter = "" &redef; + +## The maximum number of open files to keep cached at a given time. +## If set to zero, this is automatically determined by inspecting +## the current/maximum limit on open files for the process. +const max_files_in_cache = 0 &redef; + +## Deprecated. +const log_rotate_base_time = "0:00" &redef; + +## Write profiling info into this file in regular intervals. The easiest way to +## activate profiling is loading :doc:`/scripts/policy/misc/profiling.zeek`. +## +## .. 
zeek:see:: profiling_interval expensive_profiling_multiple segment_profiling +global profiling_file: file &redef; + +## Update interval for profiling (0 disables). The easiest way to activate +## profiling is loading :doc:`/scripts/policy/misc/profiling.zeek`. +## +## .. zeek:see:: profiling_file expensive_profiling_multiple segment_profiling +const profiling_interval = 0 secs &redef; + +## Multiples of :zeek:see:`profiling_interval` at which (more expensive) memory +## profiling is done (0 disables). +## +## .. zeek:see:: profiling_interval profiling_file segment_profiling +const expensive_profiling_multiple = 0 &redef; + +## If true, then write segment profiling information (very high volume!) +## in addition to profiling statistics. +## +## .. zeek:see:: profiling_interval expensive_profiling_multiple profiling_file +const segment_profiling = F &redef; + +## Output modes for packet profiling information. +## +## .. zeek:see:: pkt_profile_mode pkt_profile_freq pkt_profile_file +type pkt_profile_modes: enum { + PKT_PROFILE_MODE_NONE, ##< No output. + PKT_PROFILE_MODE_SECS, ##< Output every :zeek:see:`pkt_profile_freq` seconds. + PKT_PROFILE_MODE_PKTS, ##< Output every :zeek:see:`pkt_profile_freq` packets. + PKT_PROFILE_MODE_BYTES, ##< Output every :zeek:see:`pkt_profile_freq` bytes. +}; + +## Output mode for packet profiling information. +## +## .. zeek:see:: pkt_profile_modes pkt_profile_freq pkt_profile_file +const pkt_profile_mode = PKT_PROFILE_MODE_NONE &redef; + +## Frequency associated with packet profiling. +## +## .. zeek:see:: pkt_profile_modes pkt_profile_mode pkt_profile_file +const pkt_profile_freq = 0.0 &redef; + +## File where packet profiles are logged. +## +## .. zeek:see:: pkt_profile_modes pkt_profile_freq pkt_profile_mode +global pkt_profile_file: file &redef; + +## Rate at which to generate :zeek:see:`load_sample` events. As all +## events, the event is only generated if you've also defined a +## :zeek:see:`load_sample` handler. Units are inverse number of packets; e.g., +## a value of 20 means "roughly one in every 20 packets". +## +## .. zeek:see:: load_sample +global load_sample_freq = 20 &redef; + +## Whether to attempt to automatically detect SYN/FIN/RST-filtered trace +## and not report missing segments for such connections. +## If this is enabled, then missing data at the end of connections may not +## be reported via :zeek:see:`content_gap`. +const detect_filtered_trace = F &redef; + +## Whether we want :zeek:see:`content_gap` for partial +## connections. A connection is partial if it is missing a full handshake. Note +## that gap reports for partial connections might not be reliable. +## +## .. zeek:see:: content_gap partial_connection +const report_gaps_for_partial = F &redef; + +## Flag to prevent Zeek from exiting automatically when input is exhausted. +## Normally Zeek terminates when all packet sources have gone dry +## and communication isn't enabled. If this flag is set, Zeek's main loop will +## instead keep idling until :zeek:see:`terminate` is explicitly called. +## +## This is mainly for testing purposes when termination behaviour needs to be +## controlled for reproducing results. +const exit_only_after_terminate = F &redef; + +## Default mode for Zeek's user-space dynamic packet filter. If true, packets +## that aren't explicitly allowed through, are dropped from any further +## processing. +## +## .. note:: This is not the BPF packet filter but an additional dynamic filter +## that Zeek optionally applies just before normal processing starts. 
+## +## .. zeek:see:: install_dst_addr_filter install_dst_net_filter +## install_src_addr_filter install_src_net_filter uninstall_dst_addr_filter +## uninstall_dst_net_filter uninstall_src_addr_filter uninstall_src_net_filter +const packet_filter_default = F &redef; + +## Maximum size of regular expression groups for signature matching. +const sig_max_group_size = 50 &redef; + +## Description transmitted to remote communication peers for identification. +const peer_description = "zeek" &redef; + +## The number of IO chunks allowed to be buffered between the child +## and parent process of remote communication before Zeek starts dropping +## connections to remote peers in an attempt to catch up. +const chunked_io_buffer_soft_cap = 800000 &redef; + +## Reassemble the beginning of all TCP connections before doing +## signature matching. Enabling this provides more accurate matching at the +## expense of CPU cycles. +## +## .. zeek:see:: dpd_buffer_size +## dpd_match_only_beginning dpd_ignore_ports +## +## .. note:: Despite the name, this option affects *all* signature matching, not +## only signatures used for dynamic protocol detection. +const dpd_reassemble_first_packets = T &redef; + +## Size of per-connection buffer used for dynamic protocol detection. For each +## connection, Zeek buffers this initial amount of payload in memory so that +## complete protocol analysis can start even after the initial packets have +## already passed through (i.e., when a DPD signature matches only later). +## However, once the buffer is full, data is deleted and lost to analyzers that +## are activated afterwards. Then only analyzers that can deal with partial +## connections will be able to analyze the session. +## +## .. zeek:see:: dpd_reassemble_first_packets dpd_match_only_beginning +## dpd_ignore_ports +const dpd_buffer_size = 1024 &redef; + +## If true, stops signature matching if :zeek:see:`dpd_buffer_size` has been +## reached. +## +## .. zeek:see:: dpd_reassemble_first_packets dpd_buffer_size +## dpd_ignore_ports +## +## .. note:: Despite the name, this option affects *all* signature matching, not +## only signatures used for dynamic protocol detection. +const dpd_match_only_beginning = T &redef; + +## If true, don't consider any ports for deciding which protocol analyzer to +## use. +## +## .. zeek:see:: dpd_reassemble_first_packets dpd_buffer_size +## dpd_match_only_beginning +const dpd_ignore_ports = F &redef; + +## Ports which the core considers being likely used by servers. For ports in +## this set, it may heuristically decide to flip the direction of the +## connection if it misses the initial handshake. +const likely_server_ports: set[port] &redef; + +## Per-incident timer managers are drained after this amount of inactivity. +const timer_mgr_inactivity_timeout = 1 min &redef; + +## If true, output profiling for Time-Machine queries. +const time_machine_profiling = F &redef; + +## If true, warns about unused event handlers at startup. +const check_for_unused_event_handlers = F &redef; + +# If true, dumps all invoked event handlers at startup. +# todo::Still used? +# const dump_used_event_handlers = F &redef; + +## Deprecated. +const suppress_local_output = F &redef; + +## Holds the filename of the trace file given with ``-w`` (empty if none). +## +## .. zeek:see:: record_all_packets +const trace_output_file = ""; + +## If a trace file is given with ``-w``, dump *all* packets seen by Zeek into it. +## By default, Zeek applies (very few) heuristics to reduce the volume. 
A side
+## effect of setting this to true is that we can write the packets out before we
+## actually process them, which can be helpful for debugging in case the
+## analysis triggers a crash.
+##
+## .. zeek:see:: trace_output_file
+const record_all_packets = F &redef;
+
+## Ignore certain TCP retransmissions for :zeek:see:`conn_stats`. Some
+## connections (e.g., SSH) retransmit the acknowledged last byte to keep the
+## connection alive. If *ignore_keep_alive_rexmit* is set to true, such
+## retransmissions will be excluded in the rexmit counter in
+## :zeek:see:`conn_stats`.
+##
+## .. zeek:see:: conn_stats
+const ignore_keep_alive_rexmit = F &redef;
+
+module JSON;
+export {
+ type TimestampFormat: enum {
+ ## Timestamps will be formatted as UNIX epoch doubles. This is
+ ## the format in which Zeek typically writes out timestamps.
+ TS_EPOCH,
+ ## Timestamps will be formatted as unsigned integers that
+ ## represent the number of milliseconds since the UNIX
+ ## epoch.
+ TS_MILLIS,
+ ## Timestamps will be formatted in the ISO8601 DateTime format.
+ ## Subseconds are also included, which isn't actually part of the
+ ## standard but most consumers that parse ISO8601 seem to be able
+ ## to cope with that.
+ TS_ISO8601,
+ };
+}
+
+module Tunnel;
+export {
+ ## The maximum depth of a tunnel to decapsulate until giving up.
+ ## Setting this to zero will disable all types of tunnel decapsulation.
+ const max_depth: count = 2 &redef;
+
+ ## Toggle whether to do IPv{4,6}-in-IPv{4,6} decapsulation.
+ const enable_ip = T &redef;
+
+ ## Toggle whether to do IPv{4,6}-in-AYIYA decapsulation.
+ const enable_ayiya = T &redef;
+
+ ## Toggle whether to do IPv6-in-Teredo decapsulation.
+ const enable_teredo = T &redef;
+
+ ## Toggle whether to do GTPv1 decapsulation.
+ const enable_gtpv1 = T &redef;
+
+ ## Toggle whether to do GRE decapsulation.
+ const enable_gre = T &redef;
+
+ ## With this set, the Teredo analyzer waits until it sees both sides
+ ## of a connection using a valid Teredo encapsulation before issuing
+ ## a :zeek:see:`protocol_confirmation`. If it's false, the first
+ ## occurrence of a packet with valid Teredo encapsulation causes a
+ ## confirmation.
+ const delay_teredo_confirmation = T &redef;
+
+ ## With this set, the GTP analyzer waits until the most-recent upflow
+ ## and downflow packets are a valid GTPv1 encapsulation before
+ ## issuing :zeek:see:`protocol_confirmation`. If it's false, the
+ ## first occurrence of a packet with valid GTPv1 encapsulation causes
+ ## confirmation. Since the same inner connection can be carried by
+ ## differing outer upflow/downflow connections, setting to false
+ ## may work better.
+ const delay_gtp_confirmation = F &redef;
+
+ ## How often to clean up internal state for inactive IP tunnels
+ ## (includes GRE tunnels).
+ const ip_tunnel_timeout = 24hrs &redef;
+
+ ## Whether to validate the checksum supplied in the outer UDP header
+ ## of a VXLAN encapsulation. The spec says the checksum should be
+ ## transmitted as zero, but if not, then the decapsulating destination
+ ## may choose whether to perform the validation.
+ const validate_vxlan_checksums = T &redef;
+
+ ## The set of UDP ports used for VXLAN traffic. Traffic using this
+ ## UDP destination port will attempt to be decapsulated. Note that if
+ ## you customize this, you may still want to manually ensure that
+ ## :zeek:see:`likely_server_ports` also gets populated accordingly.
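# A minimal usage sketch (not part of the Zeek sources above) of the advice in
# the preceding comment: extending the VXLAN port set and keeping
# likely_server_ports in step with it. The port 8472/udp is only an
# illustrative value.
redef Tunnel::vxlan_ports += { 8472/udp };
redef likely_server_ports += { 8472/udp };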
+ const vxlan_ports: set[port] = { 4789/udp } &redef;
+} # end export
+
+module Reporter;
+export {
+ ## Tunable for sending reporter info messages to STDERR. The option to
+ ## turn it off is presented here in case Zeek is being run by some
+ ## external harness and shouldn't output anything to the console.
+ const info_to_stderr = T &redef;
+
+ ## Tunable for sending reporter warning messages to STDERR. The option
+ ## to turn it off is presented here in case Zeek is being run by some
+ ## external harness and shouldn't output anything to the console.
+ const warnings_to_stderr = T &redef;
+
+ ## Tunable for sending reporter error messages to STDERR. The option to
+ ## turn it off is presented here in case Zeek is being run by some
+ ## external harness and shouldn't output anything to the console.
+ const errors_to_stderr = T &redef;
+}
+
+module Pcap;
+export {
+ ## Number of bytes per packet to capture from live interfaces.
+ const snaplen = 9216 &redef;
+
+ ## Number of Mbytes to provide as buffer space when capturing from live
+ ## interfaces.
+ const bufsize = 128 &redef;
+} # end export
+
+module DCE_RPC;
+export {
+ ## The maximum number of simultaneous fragmented commands that
+ ## the DCE_RPC analyzer will tolerate before it will generate
+ ## a weird and skip further input.
+ const max_cmd_reassembly = 20 &redef;
+
+ ## The maximum number of fragmented bytes that the DCE_RPC analyzer
+ ## will tolerate on a command before the analyzer will generate a weird
+ ## and skip further input.
+ const max_frag_data = 30000 &redef;
+}
+
+module NCP;
+export {
+ ## The maximum number of bytes to allocate when parsing NCP frames.
+ const max_frame_size = 65536 &redef;
+}
+
+module NTP;
+export {
+ ## NTP standard message as defined in :rfc:`5905` for modes 1-5.
+ ## This record contains the standard fields used by the NTP protocol
+ ## for standard synchronization operations.
+ type NTP::StandardMessage: record {
+ ## This value mainly identifies the type of server (primary server,
+ ## secondary server, etc.). Possible values, as in :rfc:`5905`, are:
+ ##
+ ## * 0 -> unspecified or invalid
+ ## * 1 -> primary server (e.g., equipped with a GPS receiver)
+ ## * 2-15 -> secondary server (via NTP)
+ ## * 16 -> unsynchronized
+ ## * 17-255 -> reserved
+ ##
+ ## For stratum 0, a *kiss_code* can be given for debugging and
+ ## monitoring.
+ stratum: count;
+ ## The maximum interval between successive messages.
+ poll: interval;
+ ## The precision of the system clock.
+ precision: interval;
+ ## Root delay. The total round-trip delay to the reference clock.
+ root_delay: interval;
+ ## Root Dispersion. The total dispersion to the reference clock.
+ root_disp: interval;
+ ## For stratum 0, four-character ASCII string used for debugging and
+ ## monitoring. Values are defined in :rfc:`1345`.
+ kiss_code: string &optional;
+ ## Reference ID. For stratum 1, this is the ID assigned to the
+ ## reference clock by IANA.
+ ## For example: GOES, GPS, GAL, etc. (see :rfc:`5905`)
+ ref_id: string &optional;
+ ## Above stratum 1, when using IPv4, the IP address of the reference
+ ## clock. Note that the NTP protocol did not originally specify a
+ ## large enough field to represent IPv6 addresses, so they use
+ ## the first four bytes of the MD5 hash of the reference clock's
+ ## IPv6 address (i.e. an IPv4 address here is not necessarily IPv4).
+ ref_addr: addr &optional;
+ ## Reference timestamp. Time when the system clock was last set or
+ ## corrected.
+ ref_time: time;
+ ## Origin timestamp.
Time at the client when the request departed for + ## the NTP server. + org_time: time; + ## Receive timestamp. Time at the server when the request arrived from + ## the NTP client. + rec_time: time; + ## Transmit timestamp. Time at the server when the response departed + # for the NTP client. + xmt_time: time; + ## Key used to designate a secret MD5 key. + key_id: count &optional; + ## MD5 hash computed over the key followed by the NTP packet header and + ## extension fields. + digest: string &optional; + ## Number of extension fields (which are not currently parsed). + num_exts: count &default=0; + }; + + ## NTP control message as defined in :rfc:`1119` for mode=6 + ## This record contains the fields used by the NTP protocol + ## for control operations. + type NTP::ControlMessage: record { + ## An integer specifying the command function. Values currently defined: + ## + ## * 1 read status command/response + ## * 2 read variables command/response + ## * 3 write variables command/response + ## * 4 read clock variables command/response + ## * 5 write clock variables command/response + ## * 6 set trap address/port command/response + ## * 7 trap response + ## + ## Other values are reserved. + op_code: count; + ## The response bit. Set to zero for commands, one for responses. + resp_bit: bool; + ## The error bit. Set to zero for normal response, one for error + ## response. + err_bit: bool; + ## The more bit. Set to zero for last fragment, one for all others. + more_bit: bool; + ## The sequence number of the command or response. + sequence: count; + ## The current status of the system, peer or clock. + #TODO: this can be further parsed internally + status: count; + ## A 16-bit integer identifying a valid association. + association_id: count; + ## Message data for the command or response + Authenticator (optional). + data: string &optional; + ## This is an integer identifying the cryptographic + ## key used to generate the message-authentication code. + key_id: count &optional; + ## This is a crypto-checksum computed by the encryption procedure. + crypto_checksum: string &optional; + }; + + ## NTP mode 7 message. Note that this is not defined in any RFC and is + ## implementation dependent. We used the official implementation from the + ## `NTP official project `_. A mode 7 packet is used + ## exchanging data between an NTP server and a client for purposes other + ## than time synchronization, e.g. monitoring, statistics gathering and + ## configuration. For details see the documentation from the `NTP official + ## project `_, code v. ntp-4.2.8p13, in include/ntp_request.h. + type NTP::Mode7Message: record { + ## An implementation-specific code which specifies the + ## operation to be (which has been) performed and/or the + ## format and semantics of the data included in the packet. + req_code: count; + ## The authenticated bit. If set, this packet is authenticated. + auth_bit: bool; + ## For a multipacket response, contains the sequence + ## number of this packet. 0 is the first in the sequence, + ## 127 (or less) is the last. The More Bit must be set in + ## all packets but the last. + sequence: count; + ## The number of the implementation this request code + ## is defined by. An implementation number of zero is used + ## for requst codes/data formats which all implementations + ## agree on. Implementation number 255 is reserved (for + ## extensions, in case we run out). + implementation: count; + ## Must be 0 for a request. For a response, holds an error + ## code relating to the request. 
If nonzero, the operation + ## requested wasn't performed. + ## + ## * 0 - no error + ## * 1 - incompatible implementation number + ## * 2 - unimplemented request code + ## * 3 - format error (wrong data items, data size, packet size etc.) + ## * 4 - no data available (e.g. request for details on unknown peer) + ## * 5 - unknown + ## * 6 - unknown + ## * 7 - authentication failure (i.e. permission denied) + err: count; + ## Rest of data + data: string &optional; + }; + + ## NTP message as defined in :rfc:`5905`. Does include fields for mode 7, + ## reserved for private use in :rfc:`5905`, but used in some implementation + ## for commands such as "monlist". + type NTP::Message: record { + ## The NTP version number (1, 2, 3, 4). + version: count; + ## The NTP mode being used. Possible values are: + ## + ## * 1 - symmetric active + ## * 2 - symmetric passive + ## * 3 - client + ## * 4 - server + ## * 5 - broadcast + ## * 6 - NTP control message + ## * 7 - reserved for private use + mode: count; + ## If mode 1-5, the standard fields for syncronization operations are + ## here. See :rfc:`5905` + std_msg: NTP::StandardMessage &optional; + ## If mode 6, the fields for control operations are here. + ## See :rfc:`1119` + control_msg: NTP::ControlMessage &optional; + ## If mode 7, the fields for extra operations are here. + ## Note that this is not defined in any RFC + ## and is implementation dependent. We used the official implementation + ## from the `NTP official project `_. + ## A mode 7 packet is used exchanging data between an NTP server + ## and a client for purposes other than time synchronization, e.g. + ## monitoring, statistics gathering and configuration. + mode7_msg: NTP::Mode7Message &optional; + }; +} + +module Cluster; +export { + type Cluster::Pool: record {}; +} + +module Weird; +export { + ## Prevents rate-limiting sampling of any weirds named in the table. + option sampling_whitelist: set[string] = {}; + + ## How many weirds of a given type to tolerate before sampling begins. + ## I.e. this many consecutive weirds of a given type will be allowed to + ## raise events for script-layer handling before being rate-limited. + option sampling_threshold : count = 25; + + ## The rate-limiting sampling rate. One out of every of this number of + ## rate-limited weirds of a given type will be allowed to raise events + ## for further script-layer handling. Setting the sampling rate to 0 + ## will disable all output of rate-limited weirds. + option sampling_rate : count = 1000; + + ## How long a weird of a given type is allowed to keep state/counters in + ## memory. For "net" weirds an expiration timer starts per weird name when + ## first initializing its counter. For "flow" weirds an expiration timer + ## starts once per src/dst IP pair for the first weird of any name. For + ## "conn" weirds, counters and expiration timers are kept for the duration + ## of the connection for each named weird and reset when necessary. E.g. + ## if a "conn" weird by the name of "foo" is seen more than + ## :zeek:see:`Weird::sampling_threshold` times, then an expiration timer + ## begins for "foo" and upon triggering will reset the counter for "foo" + ## and unthrottle its rate-limiting until it once again exceeds the + ## threshold. + option sampling_duration = 10min; +} + +module BinPAC; +export { + ## Maximum capacity, in bytes, that the BinPAC flowbuffer is allowed to + ## grow to for use with incremental parsing of a given connection/analyzer. 
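# A brief sketch (not part of the Zeek sources above): sites that parse very
# large protocol messages sometimes raise the BinPAC flowbuffer limits declared
# just below. The specific sizes here are illustrative values only.
redef BinPAC::flowbuffer_capacity_max = 20 * 1024 * 1024;
redef BinPAC::flowbuffer_contract_threshold = 4 * 1024 * 1024;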
+ const flowbuffer_capacity_max = 10 * 1024 * 1024 &redef;
+
+ ## The initial capacity, in bytes, that will be allocated to the BinPAC
+ ## flowbuffer of a given connection/analyzer. If the buffer is
+ ## later contracted, its capacity is also reduced to this size.
+ const flowbuffer_capacity_min = 512 &redef;
+
+ ## The threshold, in bytes, at which the BinPAC flowbuffer of a given
+ ## connection/analyzer will have its capacity contracted to
+ ## :zeek:see:`BinPAC::flowbuffer_capacity_min` after parsing a full unit.
+ ## I.e. this is the maximum capacity to reserve in between the parsing of
+ ## units. If, after parsing a unit, the flowbuffer capacity is greater
+ ## than this value, it will be contracted.
+ const flowbuffer_contract_threshold = 2 * 1024 * 1024 &redef;
+}
+
+module GLOBAL;
+
+## Seed for hashes computed internally for probabilistic data structures. Using
+## the same value here will make the hashes compatible between independent Zeek
+## instances. If left unset, Zeek will use a temporary local seed.
+const global_hash_seed: string = "" &redef;
+
+## Number of bits in UIDs that are generated to identify connections and
+## files. The larger the value, the more confidence in UID uniqueness.
+## The maximum is currently 128 bits.
+const bits_per_uid: count = 96 &redef;
+
+## Whether usage of the old communication system is considered an error or
+## not. The default Zeek configuration no longer works with the non-Broker
+## communication system unless you have manually taken action to initialize
+## and set up the old comm. system. Deprecation warnings are still emitted
+## when setting this flag, but they will not result in a fatal error.
+const old_comm_usage_is_ok: bool = F &redef;
diff --git a/scripts/base/init-default.bro b/scripts/base/init-default.bro
deleted file mode 100644
index 463f5c2942..0000000000
--- a/scripts/base/init-default.bro
+++ /dev/null
@@ -1,82 +0,0 @@
-##! This script loads everything in the base/ script directory. If you want
-##! to run Bro without all of these scripts loaded by default, you can use
-##! the ``-b`` (``--bare-mode``) command line argument. You can also copy the
-##! "@load" lines from this script to your own script to load only the scripts
-##! that you actually want.
- -@load base/utils/site -@load base/utils/active-http -@load base/utils/addrs -@load base/utils/conn-ids -@load base/utils/dir -@load base/utils/directions-and-hosts -@load base/utils/email -@load base/utils/exec -@load base/utils/files -@load base/utils/geoip-distance -@load base/utils/hash_hrw -@load base/utils/numbers -@load base/utils/paths -@load base/utils/patterns -@load base/utils/queue -@load base/utils/strings -@load base/utils/thresholds -@load base/utils/time -@load base/utils/urls - -# This has some deep interplay between types and BiFs so it's -# loaded in base/init-bare.bro -#@load base/frameworks/logging -@load base/frameworks/notice -@load base/frameworks/analyzer -@load base/frameworks/dpd -@load base/frameworks/signatures -@load base/frameworks/packet-filter -@load base/frameworks/software -@load base/frameworks/control -@load base/frameworks/cluster -@load base/frameworks/intel -@load base/frameworks/config -@load base/frameworks/reporter -@load base/frameworks/sumstats -@load base/frameworks/tunnels -@load base/frameworks/openflow -@load base/frameworks/netcontrol - -@load base/protocols/conn -@load base/protocols/dce-rpc -@load base/protocols/dhcp -@load base/protocols/dnp3 -@load base/protocols/dns -@load base/protocols/ftp -@load base/protocols/http -@load base/protocols/imap -@load base/protocols/irc -@load base/protocols/krb -@load base/protocols/modbus -@load base/protocols/mysql -@load base/protocols/ntlm -@load base/protocols/pop3 -@load base/protocols/radius -@load base/protocols/rdp -@load base/protocols/rfb -@load base/protocols/sip -@load base/protocols/snmp -@load base/protocols/smb -@load base/protocols/smtp -@load base/protocols/socks -@load base/protocols/ssh -@load base/protocols/ssl -@load base/protocols/syslog -@load base/protocols/tunnels -@load base/protocols/xmpp - -@load base/files/pe -@load base/files/hash -@load base/files/extract -@load base/files/unified2 -@load base/files/x509 - -@load base/misc/find-checksum-offloading -@load base/misc/find-filtered-trace -@load base/misc/version diff --git a/scripts/base/init-default.zeek b/scripts/base/init-default.zeek new file mode 100644 index 0000000000..8c8aca01f8 --- /dev/null +++ b/scripts/base/init-default.zeek @@ -0,0 +1,82 @@ +##! This script loads everything in the base/ script directory. If you want +##! to run Zeek without all of these scripts loaded by default, you can use +##! the ``-b`` (``--bare-mode``) command line argument. You can also copy the +##! "@load" lines from this script to your own script to load only the scripts +##! that you actually want. 
+ +@load base/utils/site +@load base/utils/active-http +@load base/utils/addrs +@load base/utils/conn-ids +@load base/utils/dir +@load base/utils/directions-and-hosts +@load base/utils/email +@load base/utils/exec +@load base/utils/files +@load base/utils/geoip-distance +@load base/utils/hash_hrw +@load base/utils/numbers +@load base/utils/paths +@load base/utils/patterns +@load base/utils/queue +@load base/utils/strings +@load base/utils/thresholds +@load base/utils/time +@load base/utils/urls + +# This has some deep interplay between types and BiFs so it's +# loaded in base/init-bare.zeek +#@load base/frameworks/logging +@load base/frameworks/notice +@load base/frameworks/analyzer +@load base/frameworks/dpd +@load base/frameworks/signatures +@load base/frameworks/packet-filter +@load base/frameworks/software +@load base/frameworks/control +@load base/frameworks/cluster +@load base/frameworks/intel +@load base/frameworks/config +@load base/frameworks/reporter +@load base/frameworks/sumstats +@load base/frameworks/tunnels +@load base/frameworks/openflow +@load base/frameworks/netcontrol + +@load base/protocols/conn +@load base/protocols/dce-rpc +@load base/protocols/dhcp +@load base/protocols/dnp3 +@load base/protocols/dns +@load base/protocols/ftp +@load base/protocols/http +@load base/protocols/imap +@load base/protocols/irc +@load base/protocols/krb +@load base/protocols/modbus +@load base/protocols/mysql +@load base/protocols/ntlm +@load base/protocols/ntp +@load base/protocols/pop3 +@load base/protocols/radius +@load base/protocols/rdp +@load base/protocols/rfb +@load base/protocols/sip +@load base/protocols/snmp +@load base/protocols/smb +@load base/protocols/smtp +@load base/protocols/socks +@load base/protocols/ssh +@load base/protocols/ssl +@load base/protocols/syslog +@load base/protocols/tunnels +@load base/protocols/xmpp + +@load base/files/pe +@load base/files/hash +@load base/files/extract +@load base/files/x509 + +@load base/misc/find-checksum-offloading +@load base/misc/find-filtered-trace +@load base/misc/version diff --git a/scripts/base/init-frameworks-and-bifs.bro b/scripts/base/init-frameworks-and-bifs.bro deleted file mode 100644 index f772e2d223..0000000000 --- a/scripts/base/init-frameworks-and-bifs.bro +++ /dev/null @@ -1,15 +0,0 @@ -# Load these frameworks here because they use fairly deep integration with -# BiFs and script-land defined types. They are also more likely to -# make use of calling BIFs for variable initializations, and that -# can't be done until init-bare.bro has been loaded completely (hence -# the separate file). -@load base/frameworks/logging -@load base/frameworks/broker -@load base/frameworks/input -@load base/frameworks/analyzer -@load base/frameworks/files - -@load base/bif - -# Load BiFs defined by plugins. -@load base/bif/plugins diff --git a/scripts/base/init-frameworks-and-bifs.zeek b/scripts/base/init-frameworks-and-bifs.zeek new file mode 100644 index 0000000000..19897e7ffb --- /dev/null +++ b/scripts/base/init-frameworks-and-bifs.zeek @@ -0,0 +1,15 @@ +# Load these frameworks here because they use fairly deep integration with +# BiFs and script-land defined types. They are also more likely to +# make use of calling BIFs for variable initializations, and that +# can't be done until init-bare.zeek has been loaded completely (hence +# the separate file). 
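To illustrate the ordering constraint described in that comment: a script whose global initializers call BiFs cannot be evaluated before the BiF declarations it depends on have been loaded, which is why these frameworks load here rather than earlier. A minimal, purely illustrative initializer of that kind:

	# Hypothetical example: evaluating this initializer calls the
	# to_subnet() BiF, so a script like this must come after the BiF
	# declarations it relies on are available.
	global example_net: subnet = to_subnet("10.0.0.0/8");
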
+@load base/frameworks/logging +@load base/frameworks/broker +@load base/frameworks/input +@load base/frameworks/analyzer +@load base/frameworks/files + +@load base/bif + +# Load BiFs defined by plugins. +@load base/bif/plugins diff --git a/scripts/base/misc/find-checksum-offloading.bro b/scripts/base/misc/find-checksum-offloading.bro deleted file mode 100644 index 334cf4a2db..0000000000 --- a/scripts/base/misc/find-checksum-offloading.bro +++ /dev/null @@ -1,87 +0,0 @@ -##! Discover cases where the local interface is sniffed and outbound packets -##! have checksum offloading. Load this script to receive a notice if it's -##! likely that checksum offload effects are being seen on a live interface or -##! in a packet trace file. - -@load base/frameworks/notice - -module ChecksumOffloading; - -export { - ## The interval which is used for checking packet statistics - ## to see if checksum offloading is affecting analysis. - const check_interval = 10secs &redef; -} - -# Keep track of how many bad checksums have been seen. -global bad_ip_checksums = 0; -global bad_tcp_checksums = 0; -global bad_udp_checksums = 0; - -# Track to see if this script is done so that messages aren't created multiple times. -global done = F; - -event ChecksumOffloading::check() - { - if ( done ) - return; - - local pkts_recvd = get_net_stats()$pkts_recvd; - local bad_ip_checksum_pct = (pkts_recvd != 0) ? (bad_ip_checksums*1.0 / pkts_recvd*1.0) : 0; - local bad_tcp_checksum_pct = (pkts_recvd != 0) ? (bad_tcp_checksums*1.0 / pkts_recvd*1.0) : 0; - local bad_udp_checksum_pct = (pkts_recvd != 0) ? (bad_udp_checksums*1.0 / pkts_recvd*1.0) : 0; - - if ( bad_ip_checksum_pct > 0.05 || - bad_tcp_checksum_pct > 0.05 || - bad_udp_checksum_pct > 0.05 ) - { - local packet_src = reading_traces() ? "trace file likely has" : "interface is likely receiving"; - local bad_checksum_msg = (bad_ip_checksum_pct > 0.0) ? "IP" : ""; - if ( bad_tcp_checksum_pct > 0.0 ) - { - if ( |bad_checksum_msg| > 0 ) - bad_checksum_msg += " and "; - bad_checksum_msg += "TCP"; - } - if ( bad_udp_checksum_pct > 0.0 ) - { - if ( |bad_checksum_msg| > 0 ) - bad_checksum_msg += " and "; - bad_checksum_msg += "UDP"; - } - - local message = fmt("Your %s invalid %s checksums, most likely from NIC checksum offloading. By default, packets with invalid checksums are discarded by Bro unless using the -C command-line option or toggling the 'ignore_checksums' variable. Alternatively, disable checksum offloading by the network adapter to ensure Bro analyzes the actual checksums that are transmitted.", packet_src, bad_checksum_msg); - Reporter::warning(message); - done = T; - } - else if ( pkts_recvd < 20 ) - { - # Keep scheduling this event until we've seen some lower threshold of - # total packets. - schedule check_interval { ChecksumOffloading::check() }; - } - } - -event bro_init() - { - schedule check_interval { ChecksumOffloading::check() }; - } - -event net_weird(name: string) - { - if ( name == "bad_IP_checksum" ) - ++bad_ip_checksums; - } - -event conn_weird(name: string, c: connection, addl: string) - { - if ( name == "bad_TCP_checksum" ) - ++bad_tcp_checksums; - else if ( name == "bad_UDP_checksum" ) - ++bad_udp_checksums; - } - -event bro_done() - { - event ChecksumOffloading::check(); - } diff --git a/scripts/base/misc/find-checksum-offloading.zeek b/scripts/base/misc/find-checksum-offloading.zeek new file mode 100644 index 0000000000..751afeac43 --- /dev/null +++ b/scripts/base/misc/find-checksum-offloading.zeek @@ -0,0 +1,87 @@ +##! 
Discover cases where the local interface is sniffed and outbound packets +##! have checksum offloading. Load this script to receive a notice if it's +##! likely that checksum offload effects are being seen on a live interface or +##! in a packet trace file. + +@load base/frameworks/notice + +module ChecksumOffloading; + +export { + ## The interval which is used for checking packet statistics + ## to see if checksum offloading is affecting analysis. + const check_interval = 10secs &redef; +} + +# Keep track of how many bad checksums have been seen. +global bad_ip_checksums = 0; +global bad_tcp_checksums = 0; +global bad_udp_checksums = 0; + +# Track to see if this script is done so that messages aren't created multiple times. +global done = F; + +event ChecksumOffloading::check() + { + if ( done ) + return; + + local pkts_recvd = get_net_stats()$pkts_recvd; + local bad_ip_checksum_pct = (pkts_recvd != 0) ? (bad_ip_checksums*1.0 / pkts_recvd*1.0) : 0; + local bad_tcp_checksum_pct = (pkts_recvd != 0) ? (bad_tcp_checksums*1.0 / pkts_recvd*1.0) : 0; + local bad_udp_checksum_pct = (pkts_recvd != 0) ? (bad_udp_checksums*1.0 / pkts_recvd*1.0) : 0; + + if ( bad_ip_checksum_pct > 0.05 || + bad_tcp_checksum_pct > 0.05 || + bad_udp_checksum_pct > 0.05 ) + { + local packet_src = reading_traces() ? "trace file likely has" : "interface is likely receiving"; + local bad_checksum_msg = (bad_ip_checksum_pct > 0.0) ? "IP" : ""; + if ( bad_tcp_checksum_pct > 0.0 ) + { + if ( |bad_checksum_msg| > 0 ) + bad_checksum_msg += " and "; + bad_checksum_msg += "TCP"; + } + if ( bad_udp_checksum_pct > 0.0 ) + { + if ( |bad_checksum_msg| > 0 ) + bad_checksum_msg += " and "; + bad_checksum_msg += "UDP"; + } + + local message = fmt("Your %s invalid %s checksums, most likely from NIC checksum offloading. By default, packets with invalid checksums are discarded by Zeek unless using the -C command-line option or toggling the 'ignore_checksums' variable. Alternatively, disable checksum offloading by the network adapter to ensure Zeek analyzes the actual checksums that are transmitted.", packet_src, bad_checksum_msg); + Reporter::warning(message); + done = T; + } + else if ( pkts_recvd < 20 ) + { + # Keep scheduling this event until we've seen some lower threshold of + # total packets. + schedule check_interval { ChecksumOffloading::check() }; + } + } + +event zeek_init() + { + schedule check_interval { ChecksumOffloading::check() }; + } + +event net_weird(name: string) + { + if ( name == "bad_IP_checksum" ) + ++bad_ip_checksums; + } + +event conn_weird(name: string, c: connection, addl: string) + { + if ( name == "bad_TCP_checksum" ) + ++bad_tcp_checksums; + else if ( name == "bad_UDP_checksum" ) + ++bad_udp_checksums; + } + +event zeek_done() + { + event ChecksumOffloading::check(); + } diff --git a/scripts/base/misc/find-filtered-trace.bro b/scripts/base/misc/find-filtered-trace.bro deleted file mode 100644 index a723b656a7..0000000000 --- a/scripts/base/misc/find-filtered-trace.bro +++ /dev/null @@ -1,49 +0,0 @@ -##! Discovers trace files that contain TCP traffic consisting only of -##! control packets (e.g. it's been filtered to contain only SYN/FIN/RST -##! packets and no content). On finding such a trace, a warning is -##! emitted that suggests toggling the :bro:see:`detect_filtered_trace` -##! option may be desired if the user does not want Bro to report -##! missing TCP segments. - -module FilteredTraceDetection; - -export { - - ## Flag to enable filtered trace file detection and warning message. 
- global enable: bool = T &redef; -} - -global saw_tcp_conn_with_data: bool = F; -global saw_a_tcp_conn: bool = F; - -event connection_state_remove(c: connection) - { - if ( ! reading_traces() ) - return; - - if ( ! enable ) - return; - - if ( saw_tcp_conn_with_data ) - return; - - if ( ! is_tcp_port(c$id$orig_p) ) - return; - - saw_a_tcp_conn = T; - - if ( /[Dd]/ in c$history ) - saw_tcp_conn_with_data = T; - } - -event bro_done() - { - if ( ! enable ) - return; - - if ( ! saw_a_tcp_conn ) - return; - - if ( ! saw_tcp_conn_with_data ) - Reporter::warning("The analyzed trace file was determined to contain only TCP control packets, which may indicate it's been pre-filtered. By default, Bro reports the missing segments for this type of trace, but the 'detect_filtered_trace' option may be toggled if that's not desired."); - } diff --git a/scripts/base/misc/find-filtered-trace.zeek b/scripts/base/misc/find-filtered-trace.zeek new file mode 100644 index 0000000000..e4dcf9848c --- /dev/null +++ b/scripts/base/misc/find-filtered-trace.zeek @@ -0,0 +1,49 @@ +##! Discovers trace files that contain TCP traffic consisting only of +##! control packets (e.g. it's been filtered to contain only SYN/FIN/RST +##! packets and no content). On finding such a trace, a warning is +##! emitted that suggests toggling the :zeek:see:`detect_filtered_trace` +##! option may be desired if the user does not want Zeek to report +##! missing TCP segments. + +module FilteredTraceDetection; + +export { + + ## Flag to enable filtered trace file detection and warning message. + global enable: bool = T &redef; +} + +global saw_tcp_conn_with_data: bool = F; +global saw_a_tcp_conn: bool = F; + +event connection_state_remove(c: connection) + { + if ( ! reading_traces() ) + return; + + if ( ! enable ) + return; + + if ( saw_tcp_conn_with_data ) + return; + + if ( ! is_tcp_port(c$id$orig_p) ) + return; + + saw_a_tcp_conn = T; + + if ( /[Dd]/ in c$history ) + saw_tcp_conn_with_data = T; + } + +event zeek_done() + { + if ( ! enable ) + return; + + if ( ! saw_a_tcp_conn ) + return; + + if ( ! saw_tcp_conn_with_data ) + Reporter::warning("The analyzed trace file was determined to contain only TCP control packets, which may indicate it's been pre-filtered. By default, Zeek reports the missing segments for this type of trace, but the 'detect_filtered_trace' option may be toggled if that's not desired."); + } diff --git a/scripts/base/misc/p0f.fp b/scripts/base/misc/p0f.fp deleted file mode 100644 index 0ee37b4e37..0000000000 --- a/scripts/base/misc/p0f.fp +++ /dev/null @@ -1,834 +0,0 @@ -# -# p0f - SYN fingerprints -# ---------------------- -# -# .-------------------------------------------------------------------------. -# | The purpose of this file is to cover signatures for incoming TCP/IP | -# | connections (SYN packets). This is the default mode of operation for | -# | p0f. This is also the biggest and most up-to-date set of signatures | -# | shipped with this project. The file also contains a detailed discussion | -# | of all metrics examined by p0f, and some practical notes on how to | -# | add new signatures. | -# `-------------------------------------------------------------------------' -# -# (C) Copyright 2000-2006 by Michal Zalewski -# -# Each line in this file specifies a single fingerprint. Please read the -# information below carefully before attempting to append any signatures -# reported by p0f as UNKNOWN to this file to avoid mistakes. 
Note that -# this file is compatible only with the default operation mode, and not -# with -R or -A options (SYN+ACK and RST+ modes). -# -# We use the following set metrics for fingerprinting: -# -# - Window size (WSS) - a highly OS dependent setting used for TCP/IP -# performance control (max. amount of data to be sent without ACK). -# Some systems use a fixed value for initial packets. On other -# systems, it is a multiple of MSS or MTU (MSS+40). In some rare -# cases, the value is just arbitrary. -# -# NEW SIGNATURE: if p0f reported a special value of 'Snn', the number -# appears to be a multiple of MSS (MSS*nn); a special value of 'Tnn' -# means it is a multiple of MTU ((MSS+40)*nn). Unless you notice the -# value of nn is not fixed (unlikely), just copy the Snn or Tnn token -# literally. If you know this device has a simple stack and a fixed -# MTU, you can however multiply S value by MSS, or T value by MSS+40, -# and put it instead of Snn or Tnn. One system may exhibit several T -# or S values. In some situations, this might be a source of some -# additional information about the setup if you have some time to dig -# thru the kernel sources; in some other cases, like Windows, there seem -# to be a multitude of variants and WSS selection algorithms, but it's -# rather difficult to find a pattern without having the source. -# -# If WSS looks like a regular fixed value (for example is a power of two), -# or if you can confirm the value is fixed by looking at several -# fingerprints, please quote it literaly. If there's no apparent pattern -# in WSS chosen, you should consider wildcarding this value - but this -# should be the last option. -# -# NOTE: Some NAT devices, such as Linux iptables with --set-mss, will -# modify MSS, but not WSS. As a result, MSS is changed to reflect -# the MTU of the NAT device, but WSS remains a multiple of the original -# MSS. Fortunately for us, the source device would almost always be -# hooked up to Ethernet. P0f handles it automatically for the original -# MSS of 1460, by adding "NAT!" tag to the result. -# -# In certain configurations, Linux erratically (?) uses MTU from another -# interface on the default gw interface. This only happens on systems with -# two network interfaces. Thus, some Linux systems that do not go thru NAT, -# but have multiple interfaces instead, will be also tagged this way. -# -# P0f recognizes and automatically wildcards WSS of 12345, as generated -# by sendack and sendsyn utilities shipped with the program, when -# reporting a new signature. See test/sendack.c and test/sendsyn.c for more -# information about this. -# -# - Overall packet size - a function of all IP and TCP options and bugs. -# While this is partly redundant in the real world, we record this value -# to capture rare cases when there are IP options (which we do not currently -# examine) or packet data past the headers. Both situations are rare. -# -# Packet size MAY be wildcarded, but the meaning of the wildcard is -# very special, and means the packet must be larger than PACKET_BIG -# (defined in config.h as 100). This is usually not necessary, except -# for some really broken implementations in RST+ mode. For more information, -# see p0fr.fp. P0f automatically wildcards big packets when reporting -# new signatures. -# -# NEW SIGNATURE: Copy this value literally. -# -# - Initial TTL - We check the actual TTL of a received packet. 
It can't -# be higher than the initial TTL, and also shouldn't be dramatically -# lower (maximum distance is defined in config.h as 40 hops). -# -# NEW SIGNATURE: *Never* copy TTL from a p0f-reported signature literally. -# You need to determine the initial TTL. The best way to do it is to -# check the documentation for a remote system, or check its settings. -# A fairly good method is to simply round the observed TTL up to -# 32, 64, 128, or 255, but it should be noted that some obscure devices -# might not use round TTLs (in particular, some shoddy appliances and -# IRIX and Tru64 are known to use "original" initial TTL settings). If not -# sure, use traceroute or mtr to see how far you are from the host. -# -# Note that -F option overrides this check if no signature can be found. -# -# - Don't fragment flag (DF) - some modern OSes set this to implement PMTU -# discovery. Others do not bother. -# -# NEW SIGNATURE: Copy this value literally. Note: this setting is -# sometimes cleared by firewalls and/or certain connectivity clients. -# Try to find out what's the actual state for a given OS if you see both, -# and add the right one. P0f will automatically detect a case when a -# firewall removed the DF flag and will append "(firewall!)" suffix to -# the signature, so if the DF version is the right one, don't add no-DF -# variant, unless it has a different meaning. -# -# - Maximum segment size (MSS) - this setting is usually link-dependent. P0f -# uses it to determine link type of the remote host. -# -# NEW SIGNATURE: Always wildcard this value, except for rare cases when -# you have an appliance with a fixed value, know the system supports only -# a very limited number of network interface types, or know the system -# is using a value it pulled out of nowhere. I use specific unique MSS -# to tell Google crawlbots from the rest of Linux population, for example. -# -# If a specific MSS/MTU is unique to a certain link type, be sure to -# add it to mtu.h instead of creating several variants of each signature. -# -# - Window scaling (WSCALE) - this feature is used to scale WSS. -# It extends the size of a TCP/IP window to 32 bits, of sorts. Some modern -# systems implement this feature. -# -# NEW SIGNATURE: Observe several signatures. Initial WSCALE is often set -# to zero or other low value. There's usually no need to wildcard this -# parameter. -# -# - Timestamp - some systems that implement timestamps set them to -# zero in the initial SYN. This case is detected and handled appropriately. -# -# NEW SIGNATURE: Copy T or T0 option literally. -# -# - Selective ACK permitted - a flag set by systems that implement -# selective ACK functionality, -# -# NEW SIGNATURE: copy S option literally. -# -# - NOP option - its presence, count and sequence is a useful OS-dependent -# characteristic, -# -# NEW SIGNATURE: copy N options literally. -# -# - Other and unrecognized options (TTCP-related and such) - implemented by -# some eccentric or very buggy TCP/IP stacks ;-), -# -# NEW SIGNATURE: copy ? options literally. -# -# - EOL option. Contrary to the popular belief, the presence of EOL -# option is actually quite rare, most systems just NOP-pad to the -# packet boundary. -# -# NEW SIGNATURE: copy E option literally. -# -# - The sequence of TCP all options mentioned above - this is very -# specific to the implementation, -# -# NEW SIGNATURE: Copy the sequence literally. -# -# - Quirks. Some buggy stacks set certain values that should be zeroed in a -# TCP packet to non-zero values. 
This has no effect as of today, but is -# a valuable source of information. Some systems actually seem to leak -# memory there. Other systems just exhibit harmful but very specific -# behavior. This section captures all unusual yes-no properties not -# related to the main and expected header layout. We detect the following: -# -# - Data past the headers. Neither SYN nor SYN+ACK packets are supposed -# to carry any payload. If they do, we should take notice. The actual -# payload is not examined, but will be displayed if use the -X option. -# Note that payload is not unusual in RST+ mode (see p0fr.fp), very -# rare otherwise. -# -# - Options past EOL. Some systems have some trailing data past EOL -# in the options section of TCP/IP headers. P0f does not examine this -# data as of today, simply detects its presence. If there is a -# confirmed sizable population of systems that have data past EOL, it -# might be a good idea to look at it. Until then, you have to recompile -# p0f with DEBUG_EXTRAS set or use -x to display this data, -# -# - Zero IP ID. This again is a (mostly) harmless setting to use a fixed -# IP ID for packets with DF set. Some systems reportedly use zero ID, -# most OSes do not. There is a very slight probability of a false -# positive when IP ID is "naturally" chosen to be zero on a system -# that otherwise does set proper values, but the probability is -# neglible (if it becomes a problem, recompile p0f with IGNORE_ZEROID -# set in the sources). -# -# - IP options specified. Usually, packets do not have any IP options -# set, but there can be some. Until there is a confirmed sizable -# population of systems that do have IP options in a packet, p0f -# does not examine those in detail, but it might change (use -# DEBUG_EXTRAS or -x to display IP options if any found), -# -# - URG pointer value. SYN packets do not have URG flag set, so the -# value in URG pointer in TCP header is ignored. Most systems set it -# to zero, but some OSes (some versions of Windows, for example) do -# not zero this field or even simply leak memory; the actual value is -# not examined, because most cases seem to be just random garbage -# (you can use DEBUG_EXTRAS or -x to report this information though); -# see doc/win-memleak.txt for more information, -# -# - "Unused" field value. This should be always zero, but some systems -# forget to clear it. This might result in some funny issues in the -# future. P0f checks for non-zero value (and will display it if -# DEBUG_EXTRAS is set, or you can use -x), -# -# - ACK number non-zero. ACK value in SYN packets with no ACK flag -# is disregarded and is usually set to zero (just like with URG -# pointer), but some systems forget to do it. The exact value is -# not examined (but will be displayed with DEBUG_EXTRAS, or you can -# use -x). Note that this is not an anomaly in SYN+ACK and RST+ modes, -# -# - Non-zero second timestamp. The initial SYN packet should have the -# second timestamp always zeroed. SYN+ACK and RST+ may "legally" have -# this quirk though, -# -# - Unusual flags. If, in addition to SYN (or SYN+ACK), there are some -# auxilinary flags that do not modify the very meaning of a packet, -# p0f records this (this can be URG, PUSH, or something else). -# -# Note: ECN flags (ECE and CWR) are ignored and denoted in a separate -# way. ECN is never by default, because some systems can't handle it, -# and it probably does not make much sense to include it in signatures -# right now. -# -# - TCP option segment parsing problems. 
If p0f fails to decode options -# because of a badly broken packet, it records this fact. -# -# There are several other quirks valid only in RST+ mode, see p0fr.fp for -# more information. Those quirks are unheard of in SYN and SYN+ACK -# modes. -# -# NEW SIGNATURE: Copy "quirks" section literally. -# -# We DO NOT use ToS for fingerprinting. While the original TCP/IP -# fingerprinting research believed this value would be useful for this -# purpose, it is not. The setting is way too often tweaked by network -# devices. -# -# To wildcard MSS, WSS or WSCALE, replace it with '*'. You can also use a -# modulo operator to match any values that divide by nnn - '%nnn' (and, -# as stated above, WSS also supports special values Snn and Tnn). -# -# Fingerprint entry format: -# -# wwww:ttt:D:ss:OOO...:QQ:OS:Details -# -# wwww - window size (can be * or %nnn or Sxx or Txx) -# "Snn" (multiple of MSS) and "Tnn" (multiple of MTU) are allowed. -# ttt - initial TTL -# D - don't fragment bit (0 - not set, 1 - set) -# ss - overall SYN packet size (* has a special meaning) -# OOO - option value and order specification (see below) -# QQ - quirks list (see below) -# OS - OS genre (Linux, Solaris, Windows) -# details - OS description (2.0.27 on x86, etc) -# -# If OS genre starts with '*', p0f will not show distance, link type -# and timestamp data. It is useful for userland TCP/IP stacks of -# network scanners and so on, where many settings are randomized or -# bogus. -# -# If OS genre starts with @, it denotes an approximate hit for a group -# of operating systems (signature reporting still enabled in this case). -# Use this feature at the end of this file to catch cases for which -# you don't have a precise match, but can tell it's Windows or FreeBSD -# or whatnot by looking at, say, flag layout alone. -# -# If OS genre starts with - (which can prefix @ or *), the entry is -# not considered to be a real operating system (but userland stack -# instead). It is important to mark all scanners and so on with -, -# so that they are not used for masquerade detection (also add this -# prefix for signatures of application-induced behavior, such as -# increased window size with Opera browser). -# -# Option block description is a list of comma or space separated -# options in the order they appear in the packet: -# -# N - NOP option -# E - EOL option -# Wnnn - window scaling option, value nnn (or * or %nnn) -# Mnnn - maximum segment size option, value nnn (or * or %nnn) -# S - selective ACK OK -# T - timestamp -# T0 - timestamp with zero value -# ?n - unrecognized option number n. -# -# P0f can sometimes report ?nn among the options. This means it couldn't -# recognize this option (option number nn). It's either a bug in p0f, or -# a faulty TCP/IP stack, or, if the number is listed here: -# -# http://www.iana.org/assignments/tcp-parameters -# -# ...the stack might be simply quite exotic. -# -# To denote no TCP options, use a single '.'. -# -# Quirks section is usually an empty list ('.') of oddities or bugs of this -# particular stack. List items are not separated in any way. Possible values: -# -# P - options past EOL, -# Z - zero IP ID, -# I - IP options specified, -# U - urg pointer non-zero, -# X - unused (x2) field non-zero, -# A - ACK number non-zero, -# T - non-zero second timestamp, -# F - unusual flags (PUSH, URG, etc), -# D - data payload, -# ! - broken options segment. -# -# WARNING WARNING WARNING -# ----------------------- -# -# Do not add a system X as OS Y just because NMAP says so. 
It is often -# the case that X is a NAT firewall. While nmap is talking to the -# device itself, p0f is fingerprinting the guy behind the firewall -# instead. -# -# When in doubt, use common sense, don't add something that looks like -# a completely different system as Linux or FreeBSD or LinkSys router. -# Check DNS name, establish a connection to the remote host and look -# at SYN+ACK (p0f -A -S should do) - does it look similar? -# -# Some users tweak their TCP/IP settings - enable or disable RFC1323, -# RFC1644 or RFC2018 support, disable PMTU discovery, change MTU, initial -# TTL and so on. Always compare a new rule to other fingerprints for -# this system, and verify the system isn't "customized". It is OK to -# add signature variants caused by commonly used software (PFs, security -# packages, etc), but it makes no sense to try to add every single -# possible /proc/sys/net/ipv4/* tweak on Linux or so. -# -# KEEP IN MIND: Some packet firewalls configured to normalize outgoing -# traffic (OpenBSD pf with "scrub" enabled, for example) will, well, -# normalize packets. Signatures will not correspond to the originating -# system (and probably not quite to the firewall either). -# -# NOTE: Try to keep this file in some reasonable order, from most to -# least likely systems. This will speed up operation. Also keep most -# generic and broad rules near ehe end. -# -# Still decided to add signature? Let us know - mail a copy of your discovery -# to lcamtuf@coredump.cx. You can help make p0f better, and I can help you -# make your signature more accurate. -# - -########################## -# Standard OS signatures # -########################## - -# ----------------- AIX --------------------- - -# AIX is first because its signatures are close to NetBSD, MacOS X and -# Linux 2.0, but it uses a fairly rare MSSes, at least sometimes... -# This is a shoddy hack, though. - -45046:64:0:44:M*:.:AIX:4.3 - -16384:64:0:44:M512:.:AIX:4.3.2 and earlier - -16384:64:0:60:M512,N,W%2,N,N,T:.:AIX:4.3.3-5.2 (1) -32768:64:0:60:M512,N,W%2,N,N,T:.:AIX:4.3.3-5.2 (2) -65535:64:0:60:M512,N,W%2,N,N,T:.:AIX:4.3.3-5.2 (3) - -65535:64:0:64:M*,N,W1,N,N,T,N,N,S:.:AIX:5.3 ML1 - -# ----------------- Linux ------------------- - -S1:64:0:44:M*:A:Linux:1.2.x -512:64:0:44:M*:.:Linux:2.0.3x (1) -16384:64:0:44:M*:.:Linux:2.0.3x (2) - -# Endian snafu! Nelson says "ha-ha": -2:64:0:44:M*:.:Linux:2.0.3x (MkLinux) on Mac (1) -64:64:0:44:M*:.:Linux:2.0.3x (MkLinux) on Mac (2) - -S4:64:1:60:M1360,S,T,N,W0:.:Linux:2.4 (Google crawlbot) -S4:64:1:60:M1430,S,T,N,W0:.:Linux:2.4-2.6 (Google crawlbot) - -S2:64:1:60:M*,S,T,N,W0:.:Linux:2.4 (large MTU?) 
-S3:64:1:60:M*,S,T,N,W0:.:Linux:2.4 (newer) -S4:64:1:60:M*,S,T,N,W0:.:Linux:2.4-2.6 - -S3:64:1:60:M*,S,T,N,W1:.:Linux:2.6, seldom 2.4 (older, 1) -S4:64:1:60:M*,S,T,N,W1:.:Linux:2.6, seldom 2.4 (older, 2) -S3:64:1:60:M*,S,T,N,W2:.:Linux:2.6, seldom 2.4 (older, 3) -S4:64:1:60:M*,S,T,N,W2:.:Linux:2.6, seldom 2.4 (older, 4) -T4:64:1:60:M*,S,T,N,W2:.:Linux:2.6 (older, 5) - -S4:64:1:60:M*,S,T,N,W5:.:Linux:2.6 (newer, 1) -S4:64:1:60:M*,S,T,N,W6:.:Linux:2.6 (newer, 2) -S4:64:1:60:M*,S,T,N,W7:.:Linux:2.6 (newer, 3) -T4:64:1:60:M*,S,T,N,W7:.:Linux:2.6 (newer, 4) - - -S20:64:1:60:M*,S,T,N,W0:.:Linux:2.2 (1) -S22:64:1:60:M*,S,T,N,W0:.:Linux:2.2 (2) -S11:64:1:60:M*,S,T,N,W0:.:Linux:2.2 (3) - -# Popular cluster config scripts disable timestamps and -# selective ACK: - -S4:64:1:48:M1460,N,W0:.:Linux:2.4 in cluster - -# This happens only over loopback, but let's make folks happy: -32767:64:1:60:M16396,S,T,N,W0:.:Linux:2.4 (loopback) -32767:64:1:60:M16396,S,T,N,W2:.:Linux:2.6 (newer, loopback) -S8:64:1:60:M3884,S,T,N,W0:.:Linux:2.2 (loopback) - -# Opera visitors: -16384:64:1:60:M*,S,T,N,W0:.:-Linux:2.2 (Opera?) -32767:64:1:60:M*,S,T,N,W0:.:-Linux:2.4 (Opera?) - -# Some fairly common mods & oddities: -S22:64:1:52:M*,N,N,S,N,W0:.:Linux:2.2 (tstamp-) -S4:64:1:52:M*,N,N,S,N,W0:.:Linux:2.4 (tstamp-) -S4:64:1:52:M*,N,N,S,N,W2:.:Linux:2.6 (tstamp-) -S4:64:1:44:M*:.:Linux:2.6? (barebone, rare!) -T4:64:1:60:M1412,S,T,N,W0:.:Linux:2.4 (rare!) - -# ----------------- FreeBSD ----------------- - -16384:64:1:44:M*:.:FreeBSD:2.0-4.2 -16384:64:1:60:M*,N,W0,N,N,T:.:FreeBSD:4.4 (1) - -1024:64:1:60:M*,N,W0,N,N,T:.:FreeBSD:4.4 (2) - -57344:64:1:44:M*:.:FreeBSD:4.6-4.8 (RFC1323-) -57344:64:1:60:M*,N,W0,N,N,T:.:FreeBSD:4.6-4.9 - -32768:64:1:60:M*,N,W0,N,N,T:.:FreeBSD:4.8-5.1 (or MacOS X 10.2-10.3) -65535:64:1:60:M*,N,W0,N,N,T:.:FreeBSD:4.7-5.2 (or MacOS X 10.2-10.4) (1) -65535:64:1:60:M*,N,W1,N,N,T:.:FreeBSD:4.7-5.2 (or MacOS X 10.2-10.4) (2) - -65535:64:1:60:M*,N,W0,N,N,T:Z:FreeBSD:5.1 (1) -65535:64:1:60:M*,N,W1,N,N,T:Z:FreeBSD:5.1 (2) -65535:64:1:60:M*,N,W2,N,N,T:Z:FreeBSD:5.1 (3) -65535:64:1:64:M*,N,N,S,N,W1,N,N,T:.:FreeBSD:5.3-5.4 -65535:64:1:64:M*,N,W1,N,N,T,S,E:P:FreeBSD:6.x (1) -65535:64:1:64:M*,N,W0,N,N,T,S,E:P:FreeBSD:6.x (2) - -65535:64:1:44:M*:Z:FreeBSD:5.2 (RFC1323-) - -# 16384:64:1:60:M*,N,N,N,N,N,N,T:.:FreeBSD:4.4 (tstamp-) - -# ----------------- NetBSD ------------------ - -16384:64:0:60:M*,N,W0,N,N,T:.:NetBSD:1.3 -65535:64:0:60:M*,N,W0,N,N,T0:.:-NetBSD:1.6 (Opera) -16384:64:1:60:M*,N,W0,N,N,T0:.:NetBSD:1.6 -65535:64:1:60:M*,N,W1,N,N,T0:.:NetBSD:1.6W-current (DF) -65535:64:1:60:M*,N,W0,N,N,T0:.:NetBSD:1.6X (DF) -32768:64:1:60:M*,N,W0,N,N,T0:.:NetBSD:1.6Z or 2.0 (DF) -32768:64:1:64:M1416,N,W0,S,N,N,N,N,T0:.:NetBSD:2.0G (DF) -32768:64:1:64:M*,N,W0,S,N,N,N,N,T0:.:NetBSD:3.0 (DF) - -# ----------------- OpenBSD ----------------- - -16384:64:1:64:M*,N,N,S,N,W0,N,N,T:.:OpenBSD:3.0-3.9 -57344:64:1:64:M*,N,N,S,N,W0,N,N,T:.:OpenBSD:3.3-3.4 -16384:64:0:64:M*,N,N,S,N,W0,N,N,T:.:OpenBSD:3.0-3.4 (scrub) -65535:64:1:64:M*,N,N,S,N,W0,N,N,T:.:-OpenBSD:3.0-3.4 (Opera?) 
-32768:64:1:64:M*,N,N,S,N,W0,N,N,T:.:OpenBSD:3.7 - -# ----------------- Solaris ----------------- - -S17:64:1:64:N,W3,N,N,T0,N,N,S,M*:.:Solaris:8 (RFC1323 on) -S17:64:1:48:N,N,S,M*:.:Solaris:8 (1) -S17:255:1:44:M*:.:Solaris:2.5-7 (1) - -# Sometimes, just sometimes, Solaris feels like coming up with -# rather arbitrary MSS values ;-) - -S6:255:1:44:M*:.:Solaris:2.5-7 (2) -S23:64:1:48:N,N,S,M*:.:Solaris:8 (2) -S34:64:1:48:M*,N,N,S:.:Solaris:9 -S34:64:1:48:M*,N,N,N,N:.:Solaris:9 (no sack) -S44:255:1:44:M*:.:Solaris:7 - -4096:64:0:44:M1460:.:SunOS:4.1.x - -S34:64:1:52:M*,N,W0,N,N,S:.:Solaris:10 (beta) -32850:64:1:64:M*,N,N,T,N,W1,N,N,S:.:Solaris:10 (1203?) -32850:64:1:64:M*,N,W1,N,N,T,N,N,S:.:Solaris:9.1 - -# ----------------- IRIX -------------------- - -49152:60:0:44:M*:.:IRIX:6.2-6.4 -61440:60:0:44:M*:.:IRIX:6.2-6.5 -49152:60:0:52:M*,N,W2,N,N,S:.:IRIX:6.5 (RFC1323+) (1) -49152:60:0:52:M*,N,W3,N,N,S:.:IRIX:6.5 (RFC1323+) (2) - -61440:60:0:48:M*,N,N,S:.:IRIX:6.5.12-6.5.21 (1) -49152:60:0:48:M*,N,N,S:.:IRIX:6.5.12-6.5.21 (2) - -49152:60:0:64:M*,N,W2,N,N,T,N,N,S:.:IRIX:6.5 IP27 - -# ----------------- Tru64 ------------------- -# Tru64 and OpenVMS share the same stack on occassions. -# Relax. - -32768:60:1:48:M*,N,W0:.:Tru64:4.0 (or OS/2 Warp 4) -32768:60:0:48:M*,N,W0:.:Tru64:5.0 (or OpenVMS 7.x on Compaq 5.0 stack) -8192:60:0:44:M1460:.:Tru64:5.1 (no RFC1323) (or QNX 6) -61440:60:0:48:M*,N,W0:.:Tru64:v5.1a JP4 (or OpenVMS 7.x on Compaq 5.x stack) - -# ----------------- OpenVMS ----------------- - -6144:64:1:60:M*,N,W0,N,N,T:.:OpenVMS:7.2 (Multinet 4.3-4.4 stack) - -# ----------------- MacOS ------------------- - -S2:255:1:48:M*,W0,E:.:MacOS:8.6 classic - -16616:255:1:48:M*,W0,E:.:MacOS:7.3-8.6 (OTTCP) -16616:255:1:48:M*,N,N,N,E:.:MacOS:8.1-8.6 (OTTCP) -32768:255:1:48:M*,W0,N:.:MacOS:9.0-9.2 - -32768:255:1:48:M1380,N,N,N,N:.:MacOS:9.1 (OT 2.7.4) (1) -65535:255:1:48:M*,N,N,N,N:.:MacOS:9.1 (OT 2.7.4) (2) - -# ----------------- Windows ----------------- - -# Windows TCP/IP stack is a mess. For most recent XP, 2000 and -# even 98, the pathlevel, not the actual OS version, is more -# relevant to the signature. They share the same code, so it would -# seem. Luckily for us, almost all Windows 9x boxes have an -# awkward MSS of 536, which I use to tell one from another -# in most difficult cases. - -8192:32:1:44:M*:.:Windows:3.11 (Tucows) -S44:64:1:64:M*,N,W0,N,N,T0,N,N,S:.:Windows:95 -8192:128:1:64:M*,N,W0,N,N,T0,N,N,S:.:Windows:95b - -# There were so many tweaking tools and so many stack versions for -# Windows 98 it is no longer possible to tell them from each other -# without some very serious research. 
Until then, there's an insane -# number of signatures, for your amusement: - -S44:32:1:48:M*,N,N,S:.:Windows:98 (low TTL) (1) -8192:32:1:48:M*,N,N,S:.:Windows:98 (low TTL) (2) -%8192:64:1:48:M536,N,N,S:.:Windows:98 (13) -%8192:128:1:48:M536,N,N,S:.:Windows:98 (15) -S4:64:1:48:M*,N,N,S:.:Windows:98 (1) -S6:64:1:48:M*,N,N,S:.:Windows:98 (2) -S12:64:1:48:M*,N,N,S:.:Windows:98 (3 -T30:64:1:64:M1460,N,W0,N,N,T0,N,N,S:.:Windows:98 (16) -32767:64:1:48:M*,N,N,S:.:Windows:98 (4) -37300:64:1:48:M*,N,N,S:.:Windows:98 (5) -46080:64:1:52:M*,N,W3,N,N,S:.:Windows:98 (RFC1323+) -65535:64:1:44:M*:.:Windows:98 (no sack) -S16:128:1:48:M*,N,N,S:.:Windows:98 (6) -S16:128:1:64:M*,N,W0,N,N,T0,N,N,S:.:Windows:98 (7) -S26:128:1:48:M*,N,N,S:.:Windows:98 (8) -T30:128:1:48:M*,N,N,S:.:Windows:98 (9) -32767:128:1:52:M*,N,W0,N,N,S:.:Windows:98 (10) -60352:128:1:48:M*,N,N,S:.:Windows:98 (11) -60352:128:1:64:M*,N,W2,N,N,T0,N,N,S:.:Windows:98 (12) - -# What's with 1414 on NT? -T31:128:1:44:M1414:.:Windows:NT 4.0 SP6a (1) -64512:128:1:44:M1414:.:Windows:NT 4.0 SP6a (2) -8192:128:1:44:M*:.:Windows:NT 4.0 (older) - -# Windows XP and 2000. Most of the signatures that were -# either dubious or non-specific (no service pack data) -# were deleted and replaced with generics at the end. - -65535:128:1:48:M*,N,N,S:.:Windows:2000 SP4, XP SP1+ -%8192:128:1:48:M*,N,N,S:.:Windows:2000 SP2+, XP SP1+ (seldom 98) -S20:128:1:48:M*,N,N,S:.:Windows:SP3 -S45:128:1:48:M*,N,N,S:.:Windows:2000 SP4, XP SP1+ (2) -40320:128:1:48:M*,N,N,S:.:Windows:2000 SP4 - -S6:128:1:48:M*,N,N,S:.:Windows:XP, 2000 SP2+ -S12:128:1:48:M*,N,N,S:.:Windows:XP SP1+ (1) -S44:128:1:48:M*,N,N,S:.:Windows:XP SP1+, 2000 SP3 -64512:128:1:48:M*,N,N,S:.:Windows:XP SP1+, 2000 SP3 (2) -32767:128:1:48:M*,N,N,S:.:Windows:XP SP1+, 2000 SP4 (3) - -# Windows 2003 & Vista - -8192:128:1:52:M*,W8,N,N,N,S:.:Windows:Vista (beta) -32768:32:1:52:M1460,N,W0,N,N,S:.:Windows:2003 AS -65535:64:1:52:M1460,N,W2,N,N,S:.:Windows:2003 (1) -65535:64:1:48:M1460,N,N,S:.:Windows:2003 (2) - -# Odds, ends, mods: - -S52:128:1:48:M1260,N,N,S:.:Windows:XP/2000 via Cisco -65520:128:1:48:M*,N,N,S:.:Windows:XP bare-bone -16384:128:1:52:M536,N,W0,N,N,S:.:Windows:2000 w/ZoneAlarm? -2048:255:0:40:.:.:Windows:.NET Enterprise Server -44620:64:0:48:M*,N,N,S:.:Windows:ME no SP (?) -S6:255:1:48:M536,N,N,S:.:Windows:95 winsock 2 -32000:128:0:48:M*,N,N,S:.:Windows:XP w/Winroute? -16384:64:1:48:M1452,N,N,S:.:Windows:XP w/Sygate? (1) -17256:64:1:48:M1460,N,N,S:.:Windows:XP w/Sygate? (2) - -# No need to be more specific, it passes: -*:128:1:48:M*,N,N,S:U:-Windows:XP/2000 while downloading (leak!) - -# ----------------- HP/UX ------------------- - -32768:64:1:44:M*:.:HP-UX:B.10.20 -32768:64:1:48:M*,W0,N:.:HP-UX:11.00-11.11 - -# Whoa. Hardcore WSS. 
-0:64:0:48:M*,W0,N:.:HP-UX:B.11.00 A (RFC1323+) - -# ----------------- RiscOS ------------------ - -16384:64:1:68:M1460,N,W0,N,N,T,N,N,?12:.:RISC OS:3.70-4.36 (inet 5.04) -12288:32:0:44:M536:.:RISC OS:3.70 inet 4.10 -4096:64:1:56:M1460,N,N,T:T:RISC OS:3.70 freenet 2.00 - -# ----------------- BSD/OS ------------------ - -8192:64:1:60:M1460,N,W0,N,N,T:.:BSD/OS:3.1-4.3 (or MacOS X 10.2) - -# ---------------- NetwonOS ----------------- - -4096:64:0:44:M1420:.:NewtonOS:2.1 - -# ---------------- NeXTSTEP ----------------- - -S8:64:0:44:M512:.:NeXTSTEP:3.3 (1) -S4:64:0:44:M1024:.:NeXTSTEP:3.3 (2) - -# ------------------ BeOS ------------------- - -1024:255:0:48:M*,N,W0:.:BeOS:5.0-5.1 -12288:255:0:44:M*:.:BeOS:5.0.x - -# ------------------ OS/400 ----------------- - -8192:64:1:60:M1440,N,W0,N,N,T:.:OS/400:V4R4/R5 -8192:64:0:44:M536:.:OS/400:V4R3/M0 -4096:64:1:60:M1440,N,W0,N,N,T:.:OS/400:V4R5 + CF67032 - -28672:64:0:44:M1460:A:OS/390:? - -# ------------------ ULTRIX ----------------- - -16384:64:0:40:.:.:ULTRIX:4.5 - -# ------------------- QNX ------------------- - -S16:64:0:44:M512:.:QNX:demodisk -16384:64:0:60:M1460,N,W0,N,N,T0:.:QNX:6.x - -# ------------------ Novell ----------------- - -16384:128:1:44:M1460:.:Novell:NetWare 5.0 -6144:128:1:44:M1460:.:Novell:IntranetWare 4.11 -6144:128:1:44:M1368:.:Novell:BorderManager ? - -# According to rfp: -6144:128:1:52:M*,W0,N,S,N,N:.:Novell:Netware 6 SP3 - -# -------------- SCO UnixWare --------------- - -S3:64:1:60:M1460,N,W0,N,N,T:.:SCO:UnixWare 7.1 -S17:64:1:60:M*,N,W0,N,N,T:.:SCO:UnixWare 7.1.x -S23:64:1:44:M1380:.:SCO:OpenServer 5.0 - -# ------------------- DOS ------------------- - -2048:255:0:44:M536:.:DOS:Arachne via WATTCP/1.05 -T2:255:0:44:M984:.:DOS:Arachne via WATTCP/1.05 (eepro) -16383:64:0:44:M536:.:DOS:Unknown via WATTCP (epppd) - -# ------------------ OS/2 ------------------- - -S56:64:0:44:M512:.:OS/2:4 -28672:64:0:44:M1460:.:OS/2:Warp 4.0 - -# ----------------- TOPS-20 ----------------- - -# Another hardcore MSS, one of the ACK leakers hunted down. -0:64:0:44:M1460:A:TOPS-20:version 7 - -# ------------------ AMIGA ------------------ - -S32:64:1:56:M*,N,N,S,N,N,?12:.:AMIGA:3.9 BB2 with Miami stack - -# ------------------ Minix ------------------ - -# Not quite sure. -# 8192:210:0:44:M1460:X:@Minix:? 
- -# ------------------ Plan9 ------------------ - -65535:255:0:48:M1460,W0,N:.:Plan9:edition 4 - -# ----------------- AMIGAOS ----------------- - -16384:64:1:48:M1560,N,N,S:.:AMIGAOS:3.9 BB2 MiamiDX - -# ----------------- FreeMiNT ---------------- - -S44:255:0:44:M536:.:FreeMiNT:1 patch 16A (Atari) - -########################################### -# Appliance / embedded / other signatures # -########################################### - -# ---------- Firewalls / routers ------------ - -S12:64:1:44:M1460:.:@Checkpoint:(unknown 1) -S12:64:1:48:N,N,S,M1460:.:@Checkpoint:(unknown 2) -4096:32:0:44:M1460:.:ExtremeWare:4.x - -S32:64:0:68:M512,N,W0,N,N,T,N,N,?12:.:Nokia:IPSO w/Checkpoint NG FP3 -S16:64:0:68:M1024,N,W0,N,N,T,N,N,?12:.:Nokia:IPSO 3.7 build 026 - -S4:64:1:60:W0,N,S,T,M1460:.:FortiNet:FortiGate 50 - -8192:64:1:44:M1460:.:@Eagle:Secure Gateway - -# ------- Switches and other stuff ---------- - -4128:255:0:44:M*:Z:Cisco:7200, Catalyst 3500, etc -S8:255:0:44:M*:.:Cisco:12008 -S4:255:0:44:M536:Z:Cisco:IOS 11.0 -60352:128:1:64:M1460,N,W2,N,N,T,N,N,S:.:Alteon:ACEswitch -64512:128:1:44:M1370:.:Nortel:Contivity Client - -# ---------- Caches and whatnots ------------ - -8190:255:0:44:M1428:.:Google:Wireless Transcoder (1) -8190:255:0:44:M1460:.:Google:Wireless Transcoder (2) -8192:64:1:64:M1460,N,N,S,N,W0,N,N,T:.:NetCache:5.2 -16384:64:1:64:M1460,N,N,S,N,W0,N:.:NetCache:5.3 -65535:64:1:64:M1460,N,N,S,N,W*,N,N,T:.:NetCache:5.3-5.5 (or FreeBSD 5.4) -20480:64:1:64:M1460,N,N,S,N,W0,N,N,T:.:NetCache:4.1 -S44:64:1:64:M1460,N,N,S,N,W0,N,N,T:.:NetCache:5.5 - -32850:64:1:64:N,W1,N,N,T,N,N,S,M*:.:NetCache:Data OnTap 5.x - -65535:64:0:60:M1460,N,W0,N,N,T:.:CacheFlow:CacheOS 4.1 -8192:64:0:60:M1380,N,N,N,N,N,N,T:.:CacheFlow:CacheOS 1.1 - -S4:64:0:48:M1460,N,N,S:.:Cisco:Content Engine - -27085:128:0:40:.:.:Dell:PowerApp cache (Linux-based) - -65535:255:1:48:N,W1,M1460:.:Inktomi:crawler -S1:255:1:60:M1460,S,T,N,W0:.:LookSmart:ZyBorg - -16384:255:0:40:.:.:Proxyblocker:(what's this?) - -65535:255:0:48:M*,N,N,S:.:Redline: T|X 2200 - -# ----------- Embedded systems -------------- - -S9:255:0:44:M536:.:PalmOS:Tungsten T3/C -S5:255:0:44:M536:.:PalmOS:3/4 -S4:255:0:44:M536:.:PalmOS:3.5 -2948:255:0:44:M536:.:PalmOS:3.5.3 (Handera) -S29:255:0:44:M536:.:PalmOS:5.0 -16384:255:0:44:M1398:.:PalmOS:5.2 (Clie) -S14:255:0:44:M1350:.:PalmOS:5.2.1 (Treo) -16384:255:0:44:M1400:.:PalmOS:5.2 (Sony) - -S23:64:1:64:N,W1,N,N,T,N,N,S,M1460:.:SymbianOS:7 -8192:255:0:44:M1460:.:SymbianOS:6048 (Nokia 7650?) -8192:255:0:44:M536:.:SymbianOS:(Nokia 9210?) -S22:64:1:56:M1460,T,S:.:SymbianOS:? (SE P800?) -S36:64:1:56:M1360,T,S:.:SymbianOS:60xx (Nokia 6600?) -S36:64:1:60:M1360,T,S,W0,E:.:SymbianOS:60xx - -32768:32:1:44:M1460:.:Windows:CE 3 - -# Perhaps S4? -5840:64:1:60:M1452,S,T,N,W1:.:Zaurus:3.10 - -32768:128:1:64:M1460,N,W0,N,N,T0,N,N,S:.:PocketPC:2002 - -S1:255:0:44:M346:.:Contiki:1.1-rc0 - -4096:128:0:44:M1460:.:Sega:Dreamcast Dreamkey 3.0 -T5:64:0:44:M536:.:Sega:Dreamcast HKT-3020 (browser disc 51027) -S22:64:1:44:M1460:.:Sony:Playstation 2 (SOCOM?) 
- -S12:64:0:44:M1452:.:AXIS:Printer Server 5600 v5.64 - -3100:32:1:44:M1460:.:Windows:CE 2.0 - -#################### -# Fancy signatures # -#################### - -1024:64:0:40:.:.:-*NMAP:syn scan (1) -2048:64:0:40:.:.:-*NMAP:syn scan (2) -3072:64:0:40:.:.:-*NMAP:syn scan (3) -4096:64:0:40:.:.:-*NMAP:syn scan (4) - -1024:64:0:40:.:A:-*NMAP:TCP sweep probe (1) -2048:64:0:40:.:A:-*NMAP:TCP sweep probe (2) -3072:64:0:40:.:A:-*NMAP:TCP sweep probe (3) -4096:64:0:40:.:A:-*NMAP:TCP sweep probe (4) - -1024:64:0:60:W10,N,M265,T,E:P:-*NMAP:OS detection probe (1) -2048:64:0:60:W10,N,M265,T,E:P:-*NMAP:OS detection probe (2) -3072:64:0:60:W10,N,M265,T,E:P:-*NMAP:OS detection probe (3) -4096:64:0:60:W10,N,M265,T,E:P:-*NMAP:OS detection probe (4) - -1024:64:0:60:W10,N,M265,T,E:PF:-*NMAP:OS detection probe w/flags (1) -2048:64:0:60:W10,N,M265,T,E:PF:-*NMAP:OS detection probe w/flags (2) -3072:64:0:60:W10,N,M265,T,E:PF:-*NMAP:OS detection probe w/flags (3) -4096:64:0:60:W10,N,M265,T,E:PF:-*NMAP:OS detection probe w/flags (4) - -32767:64:0:40:.:.:-*NAST:syn scan - -12345:255:0:40:.:A:-p0f:sendsyn utility - -# UFO - see tmp/*: -56922:128:0:40:.:A:-@Mysterious:port scanner (?) -5792:64:1:60:M1460,S,T,N,W0:T:-@Mysterious:NAT device (2nd tstamp) -S12:128:1:48:M1460,E:P:@Mysterious:Chello proxy (?) -S23:64:1:64:N,W1,N,N,T,N,N,S,M1380:.:@Mysterious:GPRS gateway (?) - -##################################### -# Generic signatures - just in case # -##################################### - -*:128:1:52:M*,N,W0,N,N,S:.:@Windows:XP/2000 (RFC1323+, w, tstamp-) -*:128:1:52:M*,N,W*,N,N,S:.:@Windows:XP/2000 (RFC1323+, w+, tstamp-) -*:128:1:52:M*,N,N,T0,N,N,S:.:@Windows:XP/2000 (RFC1323+, w-, tstamp+) -*:128:1:64:M*,N,W0,N,N,T0,N,N,S:.:@Windows:XP/2000 (RFC1323+, w, tstamp+) -*:128:1:64:M*,N,W*,N,N,T0,N,N,S:.:@Windows:XP/2000 (RFC1323+, w+, tstamp+) - -*:128:1:48:M536,N,N,S:.:@Windows:98 -*:128:1:48:M*,N,N,S:.:@Windows:XP/2000 - - diff --git a/scripts/base/misc/version.bro b/scripts/base/misc/version.bro deleted file mode 100644 index 4066dadedd..0000000000 --- a/scripts/base/misc/version.bro +++ /dev/null @@ -1,90 +0,0 @@ -##! Provide information about the currently running Bro version. -##! The most convenient way to access this are the Version::number -##! and Version::info constants. - -@load base/frameworks/reporter -@load base/utils/strings - -module Version; - -export { - ## A type exactly describing a Bro version - type VersionDescription: record { - ## Number representing the version which can be used for easy comparison. - ## The format of the number is ABBCC with A being the major version, - ## bb being the minor version (2 digits) and CC being the patchlevel (2 digits). - ## As an example, Bro 2.4.1 results in the number 20401. - version_number: count; - ## Major version number (e.g. 2 for 2.5) - major: count; - ## Minor version number (e.g. 5 for 2.5) - minor: count; - ## Patch version number (e.g. 0 for 2.5 or 1 for 2.4.1) - patch: count; - ## Commit number for development versions, e.g. 12 for 2.4-12. 0 for non-development versions - commit: count; - ## If set to true, the version is a beta build of Bro - beta: bool; - ## If set to true, the version is a debug build - debug: bool; - ## String representation of this version - version_string: string; - }; - - ## Parse a given version string. - ## - ## version_string: Bro version string. - ## - ## Returns: `VersionDescription` record. 
- global parse: function(version_string: string): VersionDescription; - - ## Test if the current running version of Bro is greater or equal to the given version - ## string. - ## - ## version_string: Version to check against the current running version. - ## - ## Returns: True if running version greater or equal to the given version. - global at_least: function(version_string: string): bool; -} - -function parse(version_string: string): VersionDescription - { - if ( /[[:digit:]]\.[[:digit:]][[:digit:]]?(\.[[:digit:]][[:digit:]]?)?(\-beta[[:digit:]]?)?(-[[:digit:]]+)?(\-debug)?/ != version_string ) - { - Reporter::error(fmt("Version string %s cannot be parsed", version_string)); - return VersionDescription($version_number=0, $major=0, $minor=0, $patch=0, $commit=0, $beta=F, $debug=F, $version_string=version_string); - } - - local components = split_string1(version_string, /\-/); - local version_split = split_string(components[0], /\./); - local major = to_count(version_split[0]); - local minor = to_count(version_split[1]); - local patchlevel = ( |version_split| > 2) ? to_count(version_split[2]) : 0; - local version_number = major*10000+minor*100+patchlevel; - local beta = /\-beta/ in version_string; - local debug = /\-debug/ in version_string; - local commit = 0; - if ( |components| > 1 ) - { - local commitpart = find_last(cat("-", components[1]), /\-[[:digit:]]+/); - commit = ( |commitpart| > 0 ) ? to_count(sub_bytes(commitpart, 2, 999)) : 0; - } - - return VersionDescription($version_number=version_number, $major=major, $minor=minor, $patch=patchlevel, $commit=commit, $beta=beta, $debug=debug, $version_string=version_string); - } - -export { - ## version number of the currently running version of Bro as a numeric representation. - ## The format of the number is ABBCC with A being the major version, - ## bb being the minor version (2 digits) and CC being the patchlevel (2 digits). - ## As an example, Bro 2.4.1 results in the number 20401 - const number = Version::parse(bro_version())$version_number; - - ## `VersionDescription` record pertaining to the currently running version of Bro. - const info = Version::parse(bro_version()); -} - -function at_least(version_string: string): bool - { - return Version::number >= Version::parse(version_string)$version_number; - } diff --git a/scripts/base/misc/version.zeek b/scripts/base/misc/version.zeek new file mode 100644 index 0000000000..4d0894c49e --- /dev/null +++ b/scripts/base/misc/version.zeek @@ -0,0 +1,90 @@ +##! Provide information about the currently running Zeek version. +##! The most convenient way to access this are the Version::number +##! and Version::info constants. + +@load base/frameworks/reporter +@load base/utils/strings + +module Version; + +export { + ## A type exactly describing a Zeek version + type VersionDescription: record { + ## Number representing the version which can be used for easy comparison. + ## The format of the number is ABBCC with A being the major version, + ## bb being the minor version (2 digits) and CC being the patchlevel (2 digits). + ## As an example, Zeek 2.4.1 results in the number 20401. + version_number: count; + ## Major version number (e.g. 2 for 2.5) + major: count; + ## Minor version number (e.g. 5 for 2.5) + minor: count; + ## Patch version number (e.g. 0 for 2.5 or 1 for 2.4.1) + patch: count; + ## Commit number for development versions, e.g. 12 for 2.4-12. 
0 for non-development versions + commit: count; + ## If set to true, the version is a beta build of Zeek + beta: bool; + ## If set to true, the version is a debug build + debug: bool; + ## String representation of this version + version_string: string; + }; + + ## Parse a given version string. + ## + ## version_string: Zeek version string. + ## + ## Returns: `VersionDescription` record. + global parse: function(version_string: string): VersionDescription; + + ## Test if the current running version of Zeek is greater or equal to the given version + ## string. + ## + ## version_string: Version to check against the current running version. + ## + ## Returns: True if running version greater or equal to the given version. + global at_least: function(version_string: string): bool; +} + +function parse(version_string: string): VersionDescription + { + if ( /[[:digit:]]\.[[:digit:]][[:digit:]]?(\.[[:digit:]][[:digit:]]?)?(\-beta[[:digit:]]?)?(-[[:digit:]]+)?(\-debug)?/ != version_string ) + { + Reporter::error(fmt("Version string %s cannot be parsed", version_string)); + return VersionDescription($version_number=0, $major=0, $minor=0, $patch=0, $commit=0, $beta=F, $debug=F, $version_string=version_string); + } + + local components = split_string1(version_string, /\-/); + local version_split = split_string(components[0], /\./); + local major = to_count(version_split[0]); + local minor = to_count(version_split[1]); + local patchlevel = ( |version_split| > 2) ? to_count(version_split[2]) : 0; + local version_number = major*10000+minor*100+patchlevel; + local beta = /\-beta/ in version_string; + local debug = /\-debug/ in version_string; + local commit = 0; + if ( |components| > 1 ) + { + local commitpart = find_last(cat("-", components[1]), /\-[[:digit:]]+/); + commit = ( |commitpart| > 0 ) ? to_count(sub_bytes(commitpart, 2, 999)) : 0; + } + + return VersionDescription($version_number=version_number, $major=major, $minor=minor, $patch=patchlevel, $commit=commit, $beta=beta, $debug=debug, $version_string=version_string); + } + +export { + ## version number of the currently running version of Zeek as a numeric representation. + ## The format of the number is ABBCC with A being the major version, + ## bb being the minor version (2 digits) and CC being the patchlevel (2 digits). + ## As an example, Zeek 2.4.1 results in the number 20401 + const number = Version::parse(zeek_version())$version_number; + + ## `VersionDescription` record pertaining to the currently running version of Zeek. + const info = Version::parse(zeek_version()); +} + +function at_least(version_string: string): bool + { + return Version::number >= Version::parse(version_string)$version_number; + } diff --git a/scripts/base/protocols/conn/__load__.bro b/scripts/base/protocols/conn/__load__.zeek similarity index 100% rename from scripts/base/protocols/conn/__load__.bro rename to scripts/base/protocols/conn/__load__.zeek diff --git a/scripts/base/protocols/conn/contents.bro b/scripts/base/protocols/conn/contents.bro deleted file mode 100644 index dbfbbd0dc1..0000000000 --- a/scripts/base/protocols/conn/contents.bro +++ /dev/null @@ -1,48 +0,0 @@ -##! This script can be used to extract either the originator's data or the -##! responders data or both. By default nothing is extracted, and in order -##! to actually extract data the ``c$extract_orig`` and/or the -##! ``c$extract_resp`` variable must be set to ``T``. One way to achieve this -##! would be to handle the :bro:id:`connection_established` event elsewhere -##! 
and set the ``extract_orig`` and ``extract_resp`` options there. -##! However, there may be trouble with the timing due to event queue delay. -##! -##! .. note:: -##! -##! This script does not work well in a cluster context unless it has a -##! remotely mounted disk to write the content files to. - -@load base/utils/files - -module Conn; - -export { - ## The prefix given to files containing extracted connections as they - ## are opened on disk. - option extraction_prefix = "contents"; - - ## If this variable is set to ``T``, then all contents of all - ## connections will be extracted. - option default_extract = F; -} - -redef record connection += { - extract_orig: bool &default=default_extract; - extract_resp: bool &default=default_extract; -}; - -event connection_established(c: connection) &priority=-5 - { - if ( c$extract_orig ) - { - local orig_file = generate_extraction_filename(extraction_prefix, c, "orig.dat"); - local orig_f = open(orig_file); - set_contents_file(c$id, CONTENTS_ORIG, orig_f); - } - - if ( c$extract_resp ) - { - local resp_file = generate_extraction_filename(extraction_prefix, c, "resp.dat"); - local resp_f = open(resp_file); - set_contents_file(c$id, CONTENTS_RESP, resp_f); - } - } diff --git a/scripts/base/protocols/conn/contents.zeek b/scripts/base/protocols/conn/contents.zeek new file mode 100644 index 0000000000..ea689c6350 --- /dev/null +++ b/scripts/base/protocols/conn/contents.zeek @@ -0,0 +1,48 @@ +##! This script can be used to extract either the originator's data or the +##! responders data or both. By default nothing is extracted, and in order +##! to actually extract data the ``c$extract_orig`` and/or the +##! ``c$extract_resp`` variable must be set to ``T``. One way to achieve this +##! would be to handle the :zeek:id:`connection_established` event elsewhere +##! and set the ``extract_orig`` and ``extract_resp`` options there. +##! However, there may be trouble with the timing due to event queue delay. +##! +##! .. note:: +##! +##! This script does not work well in a cluster context unless it has a +##! remotely mounted disk to write the content files to. + +@load base/utils/files + +module Conn; + +export { + ## The prefix given to files containing extracted connections as they + ## are opened on disk. + option extraction_prefix = "contents"; + + ## If this variable is set to ``T``, then all contents of all + ## connections will be extracted. + option default_extract = F; +} + +redef record connection += { + extract_orig: bool &default=default_extract; + extract_resp: bool &default=default_extract; +}; + +event connection_established(c: connection) &priority=-5 + { + if ( c$extract_orig ) + { + local orig_file = generate_extraction_filename(extraction_prefix, c, "orig.dat"); + local orig_f = open(orig_file); + set_contents_file(c$id, CONTENTS_ORIG, orig_f); + } + + if ( c$extract_resp ) + { + local resp_file = generate_extraction_filename(extraction_prefix, c, "resp.dat"); + local resp_f = open(resp_file); + set_contents_file(c$id, CONTENTS_RESP, resp_f); + } + } diff --git a/scripts/base/protocols/conn/inactivity.bro b/scripts/base/protocols/conn/inactivity.zeek similarity index 100% rename from scripts/base/protocols/conn/inactivity.bro rename to scripts/base/protocols/conn/inactivity.zeek diff --git a/scripts/base/protocols/conn/main.bro b/scripts/base/protocols/conn/main.bro deleted file mode 100644 index e2209b6e22..0000000000 --- a/scripts/base/protocols/conn/main.bro +++ /dev/null @@ -1,304 +0,0 @@ -##! 
This script manages the tracking/logging of general information regarding -##! TCP, UDP, and ICMP traffic. For UDP and ICMP, "connections" are to -##! be interpreted using flow semantics (sequence of packets from a source -##! host/port to a destination host/port). Further, ICMP "ports" are to -##! be interpreted as the source port meaning the ICMP message type and -##! the destination port being the ICMP message code. - -@load base/utils/site - -module Conn; - -export { - ## The connection logging stream identifier. - redef enum Log::ID += { LOG }; - - ## The record type which contains column fields of the connection log. - type Info: record { - ## This is the time of the first packet. - ts: time &log; - ## A unique identifier of the connection. - uid: string &log; - ## The connection's 4-tuple of endpoint addresses/ports. - id: conn_id &log; - ## The transport layer protocol of the connection. - proto: transport_proto &log; - ## An identification of an application protocol being sent over - ## the connection. - service: string &log &optional; - ## How long the connection lasted. For 3-way or 4-way connection - ## tear-downs, this will not include the final ACK. - duration: interval &log &optional; - ## The number of payload bytes the originator sent. For TCP - ## this is taken from sequence numbers and might be inaccurate - ## (e.g., due to large connections). - orig_bytes: count &log &optional; - ## The number of payload bytes the responder sent. See - ## *orig_bytes*. - resp_bytes: count &log &optional; - - ## Possible *conn_state* values: - ## - ## * S0: Connection attempt seen, no reply. - ## - ## * S1: Connection established, not terminated. - ## - ## * SF: Normal establishment and termination. - ## Note that this is the same symbol as for state S1. - ## You can tell the two apart because for S1 there will not be any - ## byte counts in the summary, while for SF there will be. - ## - ## * REJ: Connection attempt rejected. - ## - ## * S2: Connection established and close attempt by originator seen - ## (but no reply from responder). - ## - ## * S3: Connection established and close attempt by responder seen - ## (but no reply from originator). - ## - ## * RSTO: Connection established, originator aborted (sent a RST). - ## - ## * RSTR: Responder sent a RST. - ## - ## * RSTOS0: Originator sent a SYN followed by a RST, we never saw a - ## SYN-ACK from the responder. - ## - ## * RSTRH: Responder sent a SYN ACK followed by a RST, we never saw a - ## SYN from the (purported) originator. - ## - ## * SH: Originator sent a SYN followed by a FIN, we never saw a - ## SYN ACK from the responder (hence the connection was "half" open). - ## - ## * SHR: Responder sent a SYN ACK followed by a FIN, we never saw a - ## SYN from the originator. - ## - ## * OTH: No SYN seen, just midstream traffic (a "partial connection" - ## that was not later closed). - conn_state: string &log &optional; - - ## If the connection is originated locally, this value will be T. - ## If it was originated remotely it will be F. In the case that - ## the :bro:id:`Site::local_nets` variable is undefined, this - ## field will be left empty at all times. - local_orig: bool &log &optional; - - ## If the connection is responded to locally, this value will be T. - ## If it was responded to remotely it will be F. In the case that - ## the :bro:id:`Site::local_nets` variable is undefined, this - ## field will be left empty at all times. 
- local_resp: bool &log &optional; - - ## Indicates the number of bytes missed in content gaps, which - ## is representative of packet loss. A value other than zero - ## will normally cause protocol analysis to fail but some - ## analysis may have been completed prior to the packet loss. - missed_bytes: count &log &default=0; - - ## Records the state history of connections as a string of - ## letters. The meaning of those letters is: - ## - ## ====== ==================================================== - ## Letter Meaning - ## ====== ==================================================== - ## s a SYN w/o the ACK bit set - ## h a SYN+ACK ("handshake") - ## a a pure ACK - ## d packet with payload ("data") - ## f packet with FIN bit set - ## r packet with RST bit set - ## c packet with a bad checksum (applies to UDP too) - ## t packet with retransmitted payload - ## w packet with a zero window advertisement - ## i inconsistent packet (e.g. FIN+RST bits set) - ## q multi-flag packet (SYN+FIN or SYN+RST bits set) - ## ^ connection direction was flipped by Bro's heuristic - ## ====== ==================================================== - ## - ## If the event comes from the originator, the letter is in - ## upper-case; if it comes from the responder, it's in - ## lower-case. The 'a', 'd', 'i' and 'q' flags are - ## recorded a maximum of one time in either direction regardless - ## of how many are actually seen. 'f', 'h', 'r' and - ## 's' can be recorded multiple times for either direction - ## if the associated sequence number differs from the - ## last-seen packet of the same flag type. - ## 'c', 't' and 'w' are recorded in a logarithmic fashion: - ## the second instance represents that the event was seen - ## (at least) 10 times; the third instance, 100 times; etc. - history: string &log &optional; - ## Number of packets that the originator sent. - ## Only set if :bro:id:`use_conn_size_analyzer` = T. - orig_pkts: count &log &optional; - ## Number of IP level bytes that the originator sent (as seen on - ## the wire, taken from the IP total_length header field). - ## Only set if :bro:id:`use_conn_size_analyzer` = T. - orig_ip_bytes: count &log &optional; - ## Number of packets that the responder sent. - ## Only set if :bro:id:`use_conn_size_analyzer` = T. - resp_pkts: count &log &optional; - ## Number of IP level bytes that the responder sent (as seen on - ## the wire, taken from the IP total_length header field). - ## Only set if :bro:id:`use_conn_size_analyzer` = T. - resp_ip_bytes: count &log &optional; - ## If this connection was over a tunnel, indicate the - ## *uid* values for any encapsulating parent connections - ## used over the lifetime of this inner connection. - tunnel_parents: set[string] &log &optional; - }; - - ## Event that can be handled to access the :bro:type:`Conn::Info` - ## record as it is sent on to the logging framework. 
- global log_conn: event(rec: Info); -} - -redef record connection += { - conn: Info &optional; -}; - -event bro_init() &priority=5 - { - Log::create_stream(Conn::LOG, [$columns=Info, $ev=log_conn, $path="conn"]); - } - -function conn_state(c: connection, trans: transport_proto): string - { - local os = c$orig$state; - local rs = c$resp$state; - - local o_inactive = os == TCP_INACTIVE || os == TCP_PARTIAL; - local r_inactive = rs == TCP_INACTIVE || rs == TCP_PARTIAL; - - if ( trans == tcp ) - { - if ( rs == TCP_RESET ) - { - if ( os == TCP_SYN_SENT || os == TCP_SYN_ACK_SENT || - (os == TCP_RESET && - c$orig$size == 0 && c$resp$size == 0) ) - return "REJ"; - else if ( o_inactive ) - return "RSTRH"; - else - return "RSTR"; - } - else if ( os == TCP_RESET ) - return r_inactive ? "RSTOS0" : "RSTO"; - else if ( rs == TCP_CLOSED && os == TCP_CLOSED ) - return "SF"; - else if ( os == TCP_CLOSED ) - return r_inactive ? "SH" : "S2"; - else if ( rs == TCP_CLOSED ) - return o_inactive ? "SHR" : "S3"; - else if ( os == TCP_SYN_SENT && rs == TCP_INACTIVE ) - return "S0"; - else if ( os == TCP_ESTABLISHED && rs == TCP_ESTABLISHED ) - return "S1"; - else - return "OTH"; - } - - else if ( trans == udp ) - { - if ( os == UDP_ACTIVE ) - return rs == UDP_ACTIVE ? "SF" : "S0"; - else - return rs == UDP_ACTIVE ? "SHR" : "OTH"; - } - - else - return "OTH"; - } - -function determine_service(c: connection): string - { - local service = ""; - for ( s in c$service ) - { - if ( sub_bytes(s, 0, 1) != "-" ) - service = service == "" ? s : cat(service, ",", s); - } - - return to_lower(service); - } - -## Fill out the c$conn record for logging -function set_conn(c: connection, eoc: bool) - { - if ( ! c?$conn ) - { - local tmp: Info; - c$conn = tmp; - } - - c$conn$ts=c$start_time; - c$conn$uid=c$uid; - c$conn$id=c$id; - if ( c?$tunnel && |c$tunnel| > 0 ) - { - if ( ! c$conn?$tunnel_parents ) - c$conn$tunnel_parents = set(); - add c$conn$tunnel_parents[c$tunnel[|c$tunnel|-1]$uid]; - } - c$conn$proto=get_port_transport_proto(c$id$resp_p); - if( |Site::local_nets| > 0 ) - { - c$conn$local_orig=Site::is_local_addr(c$id$orig_h); - c$conn$local_resp=Site::is_local_addr(c$id$resp_h); - } - - if ( eoc ) - { - if ( c$duration > 0secs ) - { - c$conn$duration=c$duration; - c$conn$orig_bytes=c$orig$size; - c$conn$resp_bytes=c$resp$size; - } - if ( c$orig?$num_pkts ) - { - # these are set if use_conn_size_analyzer=T - # we can have counts in here even without duration>0 - c$conn$orig_pkts = c$orig$num_pkts; - c$conn$orig_ip_bytes = c$orig$num_bytes_ip; - c$conn$resp_pkts = c$resp$num_pkts; - c$conn$resp_ip_bytes = c$resp$num_bytes_ip; - } - local service = determine_service(c); - if ( service != "" ) - c$conn$service=service; - c$conn$conn_state=conn_state(c, get_port_transport_proto(c$id$resp_p)); - - if ( c$history != "" ) - c$conn$history=c$history; - } - } - -event content_gap(c: connection, is_orig: bool, seq: count, length: count) &priority=5 - { - set_conn(c, F); - - c$conn$missed_bytes = c$conn$missed_bytes + length; - } - -event tunnel_changed(c: connection, e: EncapsulatingConnVector) &priority=5 - { - set_conn(c, F); - if ( |e| > 0 ) - { - if ( ! 
c$conn?$tunnel_parents ) - c$conn$tunnel_parents = set(); - add c$conn$tunnel_parents[e[|e|-1]$uid]; - } - c$tunnel = e; - } - -event connection_state_remove(c: connection) &priority=5 - { - set_conn(c, T); - } - -event connection_state_remove(c: connection) &priority=-5 - { - Log::write(Conn::LOG, c$conn); - } - diff --git a/scripts/base/protocols/conn/main.zeek b/scripts/base/protocols/conn/main.zeek new file mode 100644 index 0000000000..653aa6816d --- /dev/null +++ b/scripts/base/protocols/conn/main.zeek @@ -0,0 +1,305 @@ +##! This script manages the tracking/logging of general information regarding +##! TCP, UDP, and ICMP traffic. For UDP and ICMP, "connections" are to +##! be interpreted using flow semantics (sequence of packets from a source +##! host/port to a destination host/port). Further, ICMP "ports" are to +##! be interpreted as the source port meaning the ICMP message type and +##! the destination port being the ICMP message code. + +@load base/utils/site + +module Conn; + +export { + ## The connection logging stream identifier. + redef enum Log::ID += { LOG }; + + ## The record type which contains column fields of the connection log. + type Info: record { + ## This is the time of the first packet. + ts: time &log; + ## A unique identifier of the connection. + uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. + id: conn_id &log; + ## The transport layer protocol of the connection. + proto: transport_proto &log; + ## An identification of an application protocol being sent over + ## the connection. + service: string &log &optional; + ## How long the connection lasted. For 3-way or 4-way connection + ## tear-downs, this will not include the final ACK. + duration: interval &log &optional; + ## The number of payload bytes the originator sent. For TCP + ## this is taken from sequence numbers and might be inaccurate + ## (e.g., due to large connections). + orig_bytes: count &log &optional; + ## The number of payload bytes the responder sent. See + ## *orig_bytes*. + resp_bytes: count &log &optional; + + ## Possible *conn_state* values: + ## + ## * S0: Connection attempt seen, no reply. + ## + ## * S1: Connection established, not terminated. + ## + ## * SF: Normal establishment and termination. + ## Note that this is the same symbol as for state S1. + ## You can tell the two apart because for S1 there will not be any + ## byte counts in the summary, while for SF there will be. + ## + ## * REJ: Connection attempt rejected. + ## + ## * S2: Connection established and close attempt by originator seen + ## (but no reply from responder). + ## + ## * S3: Connection established and close attempt by responder seen + ## (but no reply from originator). + ## + ## * RSTO: Connection established, originator aborted (sent a RST). + ## + ## * RSTR: Responder sent a RST. + ## + ## * RSTOS0: Originator sent a SYN followed by a RST, we never saw a + ## SYN-ACK from the responder. + ## + ## * RSTRH: Responder sent a SYN ACK followed by a RST, we never saw a + ## SYN from the (purported) originator. + ## + ## * SH: Originator sent a SYN followed by a FIN, we never saw a + ## SYN ACK from the responder (hence the connection was "half" open). + ## + ## * SHR: Responder sent a SYN ACK followed by a FIN, we never saw a + ## SYN from the originator. + ## + ## * OTH: No SYN seen, just midstream traffic (a "partial connection" + ## that was not later closed). + conn_state: string &log &optional; + + ## If the connection is originated locally, this value will be T. 
+ ## If it was originated remotely it will be F. In the case that + ## the :zeek:id:`Site::local_nets` variable is undefined, this + ## field will be left empty at all times. + local_orig: bool &log &optional; + + ## If the connection is responded to locally, this value will be T. + ## If it was responded to remotely it will be F. In the case that + ## the :zeek:id:`Site::local_nets` variable is undefined, this + ## field will be left empty at all times. + local_resp: bool &log &optional; + + ## Indicates the number of bytes missed in content gaps, which + ## is representative of packet loss. A value other than zero + ## will normally cause protocol analysis to fail but some + ## analysis may have been completed prior to the packet loss. + missed_bytes: count &log &default=0; + + ## Records the state history of connections as a string of + ## letters. The meaning of those letters is: + ## + ## ====== ==================================================== + ## Letter Meaning + ## ====== ==================================================== + ## s a SYN w/o the ACK bit set + ## h a SYN+ACK ("handshake") + ## a a pure ACK + ## d packet with payload ("data") + ## f packet with FIN bit set + ## r packet with RST bit set + ## c packet with a bad checksum (applies to UDP too) + ## g a content gap + ## t packet with retransmitted payload + ## w packet with a zero window advertisement + ## i inconsistent packet (e.g. FIN+RST bits set) + ## q multi-flag packet (SYN+FIN or SYN+RST bits set) + ## ^ connection direction was flipped by Zeek's heuristic + ## ====== ==================================================== + ## + ## If the event comes from the originator, the letter is in + ## upper-case; if it comes from the responder, it's in + ## lower-case. The 'a', 'd', 'i' and 'q' flags are + ## recorded a maximum of one time in either direction regardless + ## of how many are actually seen. 'f', 'h', 'r' and + ## 's' can be recorded multiple times for either direction + ## if the associated sequence number differs from the + ## last-seen packet of the same flag type. + ## 'c', 'g', 't' and 'w' are recorded in a logarithmic fashion: + ## the second instance represents that the event was seen + ## (at least) 10 times; the third instance, 100 times; etc. + history: string &log &optional; + ## Number of packets that the originator sent. + ## Only set if :zeek:id:`use_conn_size_analyzer` = T. + orig_pkts: count &log &optional; + ## Number of IP level bytes that the originator sent (as seen on + ## the wire, taken from the IP total_length header field). + ## Only set if :zeek:id:`use_conn_size_analyzer` = T. + orig_ip_bytes: count &log &optional; + ## Number of packets that the responder sent. + ## Only set if :zeek:id:`use_conn_size_analyzer` = T. + resp_pkts: count &log &optional; + ## Number of IP level bytes that the responder sent (as seen on + ## the wire, taken from the IP total_length header field). + ## Only set if :zeek:id:`use_conn_size_analyzer` = T. + resp_ip_bytes: count &log &optional; + ## If this connection was over a tunnel, indicate the + ## *uid* values for any encapsulating parent connections + ## used over the lifetime of this inner connection. + tunnel_parents: set[string] &log &optional; + }; + + ## Event that can be handled to access the :zeek:type:`Conn::Info` + ## record as it is sent on to the logging framework. 
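# A minimal sketch (an assumed example, not drawn from the patch itself) of how
# the history and missed_bytes fields documented above can be consumed: handling
# the log_conn event declared just below to report connections whose content
# gaps ('g'/'G' history flags) suggest capture loss. The 1000-byte threshold is
# an arbitrary assumption.
event Conn::log_conn(rec: Conn::Info)
	{
	if ( rec$missed_bytes > 1000 && rec?$history && /[gG]/ in rec$history )
		Reporter::info(fmt("lossy connection %s: %d bytes missed",
		                   rec$uid, rec$missed_bytes));
	}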
+ global log_conn: event(rec: Info); +} + +redef record connection += { + conn: Info &optional; +}; + +event zeek_init() &priority=5 + { + Log::create_stream(Conn::LOG, [$columns=Info, $ev=log_conn, $path="conn"]); + } + +function conn_state(c: connection, trans: transport_proto): string + { + local os = c$orig$state; + local rs = c$resp$state; + + local o_inactive = os == TCP_INACTIVE || os == TCP_PARTIAL; + local r_inactive = rs == TCP_INACTIVE || rs == TCP_PARTIAL; + + if ( trans == tcp ) + { + if ( rs == TCP_RESET ) + { + if ( os == TCP_SYN_SENT || os == TCP_SYN_ACK_SENT || + (os == TCP_RESET && + c$orig$size == 0 && c$resp$size == 0) ) + return "REJ"; + else if ( o_inactive ) + return "RSTRH"; + else + return "RSTR"; + } + else if ( os == TCP_RESET ) + return r_inactive ? "RSTOS0" : "RSTO"; + else if ( rs == TCP_CLOSED && os == TCP_CLOSED ) + return "SF"; + else if ( os == TCP_CLOSED ) + return r_inactive ? "SH" : "S2"; + else if ( rs == TCP_CLOSED ) + return o_inactive ? "SHR" : "S3"; + else if ( os == TCP_SYN_SENT && rs == TCP_INACTIVE ) + return "S0"; + else if ( os == TCP_ESTABLISHED && rs == TCP_ESTABLISHED ) + return "S1"; + else + return "OTH"; + } + + else if ( trans == udp ) + { + if ( os == UDP_ACTIVE ) + return rs == UDP_ACTIVE ? "SF" : "S0"; + else + return rs == UDP_ACTIVE ? "SHR" : "OTH"; + } + + else + return "OTH"; + } + +function determine_service(c: connection): string + { + local service = ""; + for ( s in c$service ) + { + if ( sub_bytes(s, 0, 1) != "-" ) + service = service == "" ? s : cat(service, ",", s); + } + + return to_lower(service); + } + +## Fill out the c$conn record for logging +function set_conn(c: connection, eoc: bool) + { + if ( ! c?$conn ) + { + local tmp: Info; + c$conn = tmp; + } + + c$conn$ts=c$start_time; + c$conn$uid=c$uid; + c$conn$id=c$id; + if ( c?$tunnel && |c$tunnel| > 0 ) + { + if ( ! c$conn?$tunnel_parents ) + c$conn$tunnel_parents = set(); + add c$conn$tunnel_parents[c$tunnel[|c$tunnel|-1]$uid]; + } + c$conn$proto=get_port_transport_proto(c$id$resp_p); + if( |Site::local_nets| > 0 ) + { + c$conn$local_orig=Site::is_local_addr(c$id$orig_h); + c$conn$local_resp=Site::is_local_addr(c$id$resp_h); + } + + if ( eoc ) + { + if ( c$duration > 0secs ) + { + c$conn$duration=c$duration; + c$conn$orig_bytes=c$orig$size; + c$conn$resp_bytes=c$resp$size; + } + if ( c$orig?$num_pkts ) + { + # these are set if use_conn_size_analyzer=T + # we can have counts in here even without duration>0 + c$conn$orig_pkts = c$orig$num_pkts; + c$conn$orig_ip_bytes = c$orig$num_bytes_ip; + c$conn$resp_pkts = c$resp$num_pkts; + c$conn$resp_ip_bytes = c$resp$num_bytes_ip; + } + local service = determine_service(c); + if ( service != "" ) + c$conn$service=service; + c$conn$conn_state=conn_state(c, get_port_transport_proto(c$id$resp_p)); + + if ( c$history != "" ) + c$conn$history=c$history; + } + } + +event content_gap(c: connection, is_orig: bool, seq: count, length: count) &priority=5 + { + set_conn(c, F); + + c$conn$missed_bytes = c$conn$missed_bytes + length; + } + +event tunnel_changed(c: connection, e: EncapsulatingConnVector) &priority=5 + { + set_conn(c, F); + if ( |e| > 0 ) + { + if ( ! 
c$conn?$tunnel_parents ) + c$conn$tunnel_parents = set(); + add c$conn$tunnel_parents[e[|e|-1]$uid]; + } + c$tunnel = e; + } + +event connection_state_remove(c: connection) &priority=5 + { + set_conn(c, T); + } + +event connection_state_remove(c: connection) &priority=-5 + { + Log::write(Conn::LOG, c$conn); + } + diff --git a/scripts/base/protocols/conn/polling.bro b/scripts/base/protocols/conn/polling.zeek similarity index 100% rename from scripts/base/protocols/conn/polling.bro rename to scripts/base/protocols/conn/polling.zeek diff --git a/scripts/base/protocols/conn/thresholds.bro b/scripts/base/protocols/conn/thresholds.zeek similarity index 100% rename from scripts/base/protocols/conn/thresholds.bro rename to scripts/base/protocols/conn/thresholds.zeek diff --git a/scripts/base/protocols/dce-rpc/__load__.bro b/scripts/base/protocols/dce-rpc/__load__.zeek similarity index 100% rename from scripts/base/protocols/dce-rpc/__load__.bro rename to scripts/base/protocols/dce-rpc/__load__.zeek diff --git a/scripts/base/protocols/dce-rpc/consts.bro b/scripts/base/protocols/dce-rpc/consts.zeek similarity index 100% rename from scripts/base/protocols/dce-rpc/consts.bro rename to scripts/base/protocols/dce-rpc/consts.zeek diff --git a/scripts/base/protocols/dce-rpc/main.bro b/scripts/base/protocols/dce-rpc/main.bro deleted file mode 100644 index 7013ae15e9..0000000000 --- a/scripts/base/protocols/dce-rpc/main.bro +++ /dev/null @@ -1,244 +0,0 @@ -@load ./consts -@load base/frameworks/dpd - -module DCE_RPC; - -export { - redef enum Log::ID += { LOG }; - - type Info: record { - ## Timestamp for when the event happened. - ts : time &log; - ## Unique ID for the connection. - uid : string &log; - ## The connection's 4-tuple of endpoint addresses/ports. - id : conn_id &log; - ## Round trip time from the request to the response. - ## If either the request or response wasn't seen, - ## this will be null. - rtt : interval &log &optional; - - ## Remote pipe name. - named_pipe : string &log &optional; - ## Endpoint name looked up from the uuid. - endpoint : string &log &optional; - ## Operation seen in the call. - operation : string &log &optional; - }; - - ## These are DCE-RPC operations that are ignored, typically due to - ## the operations being noisy and low value on most networks. - option ignored_operations: table[string] of set[string] = { - ["winreg"] = set("BaseRegCloseKey", "BaseRegGetVersion", "BaseRegOpenKey", "BaseRegQueryValue", "BaseRegDeleteKeyEx", "OpenLocalMachine", "BaseRegEnumKey", "OpenClassesRoot"), - ["spoolss"] = set("RpcSplOpenPrinter", "RpcClosePrinter"), - ["wkssvc"] = set("NetrWkstaGetInfo"), - }; - - type State: record { - uuid : string &optional; - named_pipe : string &optional; - ctx_to_uuid: table[count] of string &optional; - }; - - # This is to store the log and state information - # for multiple DCE/RPC bindings over a single TCP connection (named pipes). 
- type BackingState: record { - info: Info; - state: State; - }; -} - -redef DPD::ignore_violations += { Analyzer::ANALYZER_DCE_RPC }; - -redef record connection += { - dce_rpc: Info &optional; - dce_rpc_state: State &optional; - dce_rpc_backing: table[count] of BackingState &optional; -}; - -const ports = { 135/tcp }; -redef likely_server_ports += { ports }; - -event bro_init() &priority=5 - { - Log::create_stream(DCE_RPC::LOG, [$columns=Info, $path="dce_rpc"]); - Analyzer::register_for_ports(Analyzer::ANALYZER_DCE_RPC, ports); - } - -function normalize_named_pipe_name(pn: string): string - { - local parts = split_string(pn, /\\[pP][iI][pP][eE]\\/); - if ( 1 in parts ) - return to_lower(parts[1]); - else - return to_lower(pn); - } - -function set_state(c: connection, state_x: BackingState) - { - c$dce_rpc = state_x$info; - c$dce_rpc_state = state_x$state; - - if ( c$dce_rpc_state?$uuid ) - c$dce_rpc$endpoint = uuid_endpoint_map[c$dce_rpc_state$uuid]; - if ( c$dce_rpc_state?$named_pipe ) - c$dce_rpc$named_pipe = c$dce_rpc_state$named_pipe; - } - -function set_session(c: connection, fid: count) - { - if ( ! c?$dce_rpc_backing ) - { - c$dce_rpc_backing = table(); - } - if ( fid !in c$dce_rpc_backing ) - { - local info = Info($ts=network_time(),$id=c$id,$uid=c$uid); - c$dce_rpc_backing[fid] = BackingState($info=info, $state=State()); - } - - local state_x = c$dce_rpc_backing[fid]; - set_state(c, state_x); - } - -event dce_rpc_bind(c: connection, fid: count, ctx_id: count, uuid: string, ver_major: count, ver_minor: count) &priority=5 - { - set_session(c, fid); - - local uuid_str = uuid_to_string(uuid); - - if ( ! c$dce_rpc_state?$ctx_to_uuid ) - c$dce_rpc_state$ctx_to_uuid = table(); - - c$dce_rpc_state$ctx_to_uuid[ctx_id] = uuid_str; - c$dce_rpc_state$uuid = uuid_str; - c$dce_rpc$endpoint = uuid_endpoint_map[uuid_str]; - } - -event dce_rpc_alter_context(c: connection, fid: count, ctx_id: count, uuid: string, ver_major: count, ver_minor: count) &priority=5 - { - set_session(c, fid); - - local uuid_str = uuid_to_string(uuid); - - if ( ! c$dce_rpc_state?$ctx_to_uuid ) - c$dce_rpc_state$ctx_to_uuid = table(); - - c$dce_rpc_state$ctx_to_uuid[ctx_id] = uuid_str; - c$dce_rpc_state$uuid = uuid_str; - c$dce_rpc$endpoint = uuid_endpoint_map[uuid_str]; - } - -event dce_rpc_bind_ack(c: connection, fid: count, sec_addr: string) &priority=5 - { - set_session(c, fid); - - if ( sec_addr != "" ) - { - c$dce_rpc_state$named_pipe = sec_addr; - c$dce_rpc$named_pipe = sec_addr; - } - } - -event dce_rpc_alter_context_resp(c: connection, fid: count) &priority=5 - { - set_session(c, fid); - } - -event dce_rpc_request(c: connection, fid: count, ctx_id: count, opnum: count, stub_len: count) &priority=5 - { - set_session(c, fid); - - if ( c?$dce_rpc ) - { - c$dce_rpc$ts = network_time(); - } - } - -event dce_rpc_response(c: connection, fid: count, ctx_id: count, opnum: count, stub_len: count) &priority=5 - { - set_session(c, fid); - - # In the event that the binding wasn't seen, but the pipe - # name is known, go ahead and see if we have a pipe name to - # uuid mapping... - if ( ! 
c$dce_rpc?$endpoint && c$dce_rpc?$named_pipe ) - { - local npn = normalize_named_pipe_name(c$dce_rpc$named_pipe); - if ( npn in pipe_name_to_common_uuid ) - { - c$dce_rpc_state$uuid = pipe_name_to_common_uuid[npn]; - } - } - - if ( c?$dce_rpc ) - { - if ( c$dce_rpc?$endpoint ) - { - c$dce_rpc$operation = operations[c$dce_rpc_state$uuid, opnum]; - if ( c$dce_rpc$ts != network_time() ) - c$dce_rpc$rtt = network_time() - c$dce_rpc$ts; - } - - if ( c$dce_rpc_state?$ctx_to_uuid && - ctx_id in c$dce_rpc_state$ctx_to_uuid ) - { - local u = c$dce_rpc_state$ctx_to_uuid[ctx_id]; - c$dce_rpc$endpoint = uuid_endpoint_map[u]; - c$dce_rpc$operation = operations[u, opnum]; - } - } - } - -event dce_rpc_response(c: connection, fid: count, ctx_id: count, opnum: count, stub_len: count) &priority=-5 - { - if ( c?$dce_rpc ) - { - # If there is no endpoint, there isn't much reason to log. - # This can happen if the request isn't seen. - if ( ( c$dce_rpc?$endpoint && c$dce_rpc?$operation ) && - ( c$dce_rpc$endpoint !in ignored_operations - || - ( c$dce_rpc?$endpoint && c$dce_rpc?$operation && - c$dce_rpc$operation !in ignored_operations[c$dce_rpc$endpoint] && - "*" !in ignored_operations[c$dce_rpc$endpoint] ) ) ) - { - Log::write(LOG, c$dce_rpc); - } - delete c$dce_rpc; - } - } - -event connection_state_remove(c: connection) - { - if ( ! c?$dce_rpc ) - return; - - # TODO: Go through any remaining dce_rpc requests that haven't been processed with replies. - for ( i, x in c$dce_rpc_backing ) - { - set_state(c, x); - - # In the event that the binding wasn't seen, but the pipe - # name is known, go ahead and see if we have a pipe name to - # uuid mapping... - if ( ! c$dce_rpc?$endpoint && c$dce_rpc?$named_pipe ) - { - local npn = normalize_named_pipe_name(c$dce_rpc$named_pipe); - if ( npn in pipe_name_to_common_uuid ) - { - c$dce_rpc_state$uuid = pipe_name_to_common_uuid[npn]; - } - } - - if ( ( c$dce_rpc?$endpoint && c$dce_rpc?$operation ) && - ( c$dce_rpc$endpoint !in ignored_operations - || - ( c$dce_rpc?$endpoint && c$dce_rpc?$operation && - c$dce_rpc$operation !in ignored_operations[c$dce_rpc$endpoint] && - "*" !in ignored_operations[c$dce_rpc$endpoint] ) ) ) - { - Log::write(LOG, c$dce_rpc); - } - } - } diff --git a/scripts/base/protocols/dce-rpc/main.zeek b/scripts/base/protocols/dce-rpc/main.zeek new file mode 100644 index 0000000000..1b318265e8 --- /dev/null +++ b/scripts/base/protocols/dce-rpc/main.zeek @@ -0,0 +1,244 @@ +@load ./consts +@load base/frameworks/dpd + +module DCE_RPC; + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + ## Timestamp for when the event happened. + ts : time &log; + ## Unique ID for the connection. + uid : string &log; + ## The connection's 4-tuple of endpoint addresses/ports. + id : conn_id &log; + ## Round trip time from the request to the response. + ## If either the request or response wasn't seen, + ## this will be null. + rtt : interval &log &optional; + + ## Remote pipe name. + named_pipe : string &log &optional; + ## Endpoint name looked up from the uuid. + endpoint : string &log &optional; + ## Operation seen in the call. + operation : string &log &optional; + }; + + ## These are DCE-RPC operations that are ignored, typically due to + ## the operations being noisy and low value on most networks. 
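# A minimal sketch (an assumed example, not drawn from the patch itself) of how
# the ignored_operations option declared just below can be tuned from site
# policy. The "samr" endpoint is used purely for illustration; the "*" wildcard
# is the one recognized by the logging checks further down in this file.
redef DCE_RPC::ignored_operations += {
	["samr"] = set("*"),
};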
+ option ignored_operations: table[string] of set[string] = { + ["winreg"] = set("BaseRegCloseKey", "BaseRegGetVersion", "BaseRegOpenKey", "BaseRegQueryValue", "BaseRegDeleteKeyEx", "OpenLocalMachine", "BaseRegEnumKey", "OpenClassesRoot"), + ["spoolss"] = set("RpcSplOpenPrinter", "RpcClosePrinter"), + ["wkssvc"] = set("NetrWkstaGetInfo"), + }; + + type State: record { + uuid : string &optional; + named_pipe : string &optional; + ctx_to_uuid: table[count] of string &optional; + }; + + # This is to store the log and state information + # for multiple DCE/RPC bindings over a single TCP connection (named pipes). + type BackingState: record { + info: Info; + state: State; + }; +} + +redef DPD::ignore_violations += { Analyzer::ANALYZER_DCE_RPC }; + +redef record connection += { + dce_rpc: Info &optional; + dce_rpc_state: State &optional; + dce_rpc_backing: table[count] of BackingState &optional; +}; + +const ports = { 135/tcp }; +redef likely_server_ports += { ports }; + +event zeek_init() &priority=5 + { + Log::create_stream(DCE_RPC::LOG, [$columns=Info, $path="dce_rpc"]); + Analyzer::register_for_ports(Analyzer::ANALYZER_DCE_RPC, ports); + } + +function normalize_named_pipe_name(pn: string): string + { + local parts = split_string(pn, /\\[pP][iI][pP][eE]\\/); + if ( 1 in parts ) + return to_lower(parts[1]); + else + return to_lower(pn); + } + +function set_state(c: connection, state_x: BackingState) + { + c$dce_rpc = state_x$info; + c$dce_rpc_state = state_x$state; + + if ( c$dce_rpc_state?$uuid ) + c$dce_rpc$endpoint = uuid_endpoint_map[c$dce_rpc_state$uuid]; + if ( c$dce_rpc_state?$named_pipe ) + c$dce_rpc$named_pipe = c$dce_rpc_state$named_pipe; + } + +function set_session(c: connection, fid: count) + { + if ( ! c?$dce_rpc_backing ) + { + c$dce_rpc_backing = table(); + } + if ( fid !in c$dce_rpc_backing ) + { + local info = Info($ts=network_time(),$id=c$id,$uid=c$uid); + c$dce_rpc_backing[fid] = BackingState($info=info, $state=State()); + } + + local state_x = c$dce_rpc_backing[fid]; + set_state(c, state_x); + } + +event dce_rpc_bind(c: connection, fid: count, ctx_id: count, uuid: string, ver_major: count, ver_minor: count) &priority=5 + { + set_session(c, fid); + + local uuid_str = uuid_to_string(uuid); + + if ( ! c$dce_rpc_state?$ctx_to_uuid ) + c$dce_rpc_state$ctx_to_uuid = table(); + + c$dce_rpc_state$ctx_to_uuid[ctx_id] = uuid_str; + c$dce_rpc_state$uuid = uuid_str; + c$dce_rpc$endpoint = uuid_endpoint_map[uuid_str]; + } + +event dce_rpc_alter_context(c: connection, fid: count, ctx_id: count, uuid: string, ver_major: count, ver_minor: count) &priority=5 + { + set_session(c, fid); + + local uuid_str = uuid_to_string(uuid); + + if ( ! 
c$dce_rpc_state?$ctx_to_uuid ) + c$dce_rpc_state$ctx_to_uuid = table(); + + c$dce_rpc_state$ctx_to_uuid[ctx_id] = uuid_str; + c$dce_rpc_state$uuid = uuid_str; + c$dce_rpc$endpoint = uuid_endpoint_map[uuid_str]; + } + +event dce_rpc_bind_ack(c: connection, fid: count, sec_addr: string) &priority=5 + { + set_session(c, fid); + + if ( sec_addr != "" ) + { + c$dce_rpc_state$named_pipe = sec_addr; + c$dce_rpc$named_pipe = sec_addr; + } + } + +event dce_rpc_alter_context_resp(c: connection, fid: count) &priority=5 + { + set_session(c, fid); + } + +event dce_rpc_request(c: connection, fid: count, ctx_id: count, opnum: count, stub_len: count) &priority=5 + { + set_session(c, fid); + + if ( c?$dce_rpc ) + { + c$dce_rpc$ts = network_time(); + } + } + +event dce_rpc_response(c: connection, fid: count, ctx_id: count, opnum: count, stub_len: count) &priority=5 + { + set_session(c, fid); + + # In the event that the binding wasn't seen, but the pipe + # name is known, go ahead and see if we have a pipe name to + # uuid mapping... + if ( ! c$dce_rpc?$endpoint && c$dce_rpc?$named_pipe ) + { + local npn = normalize_named_pipe_name(c$dce_rpc$named_pipe); + if ( npn in pipe_name_to_common_uuid ) + { + c$dce_rpc_state$uuid = pipe_name_to_common_uuid[npn]; + } + } + + if ( c?$dce_rpc ) + { + if ( c$dce_rpc?$endpoint ) + { + c$dce_rpc$operation = operations[c$dce_rpc_state$uuid, opnum]; + if ( c$dce_rpc$ts != network_time() ) + c$dce_rpc$rtt = network_time() - c$dce_rpc$ts; + } + + if ( c$dce_rpc_state?$ctx_to_uuid && + ctx_id in c$dce_rpc_state$ctx_to_uuid ) + { + local u = c$dce_rpc_state$ctx_to_uuid[ctx_id]; + c$dce_rpc$endpoint = uuid_endpoint_map[u]; + c$dce_rpc$operation = operations[u, opnum]; + } + } + } + +event dce_rpc_response(c: connection, fid: count, ctx_id: count, opnum: count, stub_len: count) &priority=-5 + { + if ( c?$dce_rpc ) + { + # If there is no endpoint, there isn't much reason to log. + # This can happen if the request isn't seen. + if ( ( c$dce_rpc?$endpoint && c$dce_rpc?$operation ) && + ( c$dce_rpc$endpoint !in ignored_operations + || + ( c$dce_rpc?$endpoint && c$dce_rpc?$operation && + c$dce_rpc$operation !in ignored_operations[c$dce_rpc$endpoint] && + "*" !in ignored_operations[c$dce_rpc$endpoint] ) ) ) + { + Log::write(LOG, c$dce_rpc); + } + delete c$dce_rpc; + } + } + +event connection_state_remove(c: connection) + { + if ( ! c?$dce_rpc ) + return; + + # TODO: Go through any remaining dce_rpc requests that haven't been processed with replies. + for ( i, x in c$dce_rpc_backing ) + { + set_state(c, x); + + # In the event that the binding wasn't seen, but the pipe + # name is known, go ahead and see if we have a pipe name to + # uuid mapping... + if ( ! 
c$dce_rpc?$endpoint && c$dce_rpc?$named_pipe ) + { + local npn = normalize_named_pipe_name(c$dce_rpc$named_pipe); + if ( npn in pipe_name_to_common_uuid ) + { + c$dce_rpc_state$uuid = pipe_name_to_common_uuid[npn]; + } + } + + if ( ( c$dce_rpc?$endpoint && c$dce_rpc?$operation ) && + ( c$dce_rpc$endpoint !in ignored_operations + || + ( c$dce_rpc?$endpoint && c$dce_rpc?$operation && + c$dce_rpc$operation !in ignored_operations[c$dce_rpc$endpoint] && + "*" !in ignored_operations[c$dce_rpc$endpoint] ) ) ) + { + Log::write(LOG, c$dce_rpc); + } + } + } diff --git a/scripts/base/protocols/dhcp/__load__.bro b/scripts/base/protocols/dhcp/__load__.zeek similarity index 100% rename from scripts/base/protocols/dhcp/__load__.bro rename to scripts/base/protocols/dhcp/__load__.zeek diff --git a/scripts/base/protocols/dhcp/consts.bro b/scripts/base/protocols/dhcp/consts.zeek similarity index 100% rename from scripts/base/protocols/dhcp/consts.bro rename to scripts/base/protocols/dhcp/consts.zeek diff --git a/scripts/base/protocols/dhcp/main.bro b/scripts/base/protocols/dhcp/main.bro deleted file mode 100644 index b31c623afa..0000000000 --- a/scripts/base/protocols/dhcp/main.bro +++ /dev/null @@ -1,272 +0,0 @@ -##! Analyze DHCP traffic and provide a log that is organized around -##! the idea of a DHCP "conversation" defined by messages exchanged within -##! a relatively short period of time using the same transaction ID. -##! The log will have information from clients and servers to give a more -##! complete picture of what happened. - -@load base/frameworks/cluster -@load ./consts - -module DHCP; - -export { - redef enum Log::ID += { LOG }; - - ## The record type which contains the column fields of the DHCP log. - type Info: record { - ## The earliest time at which a DHCP message over the - ## associated connection is observed. - ts: time &log; - - ## A series of unique identifiers of the connections over which - ## DHCP is occurring. This behavior with multiple connections is - ## unique to DHCP because of the way it uses broadcast packets - ## on local networks. - uids: set[string] &log; - - ## IP address of the client. If a transaction - ## is only a client sending INFORM messages then - ## there is no lease information exchanged so this - ## is helpful to know who sent the messages. - ## Getting an address in this field does require - ## that the client sources at least one DHCP message - ## using a non-broadcast address. - client_addr: addr &log &optional; - ## IP address of the server involved in actually - ## handing out the lease. There could be other - ## servers replying with OFFER messages which won't - ## be represented here. Getting an address in this - ## field also requires that the server handing out - ## the lease also sources packets from a non-broadcast - ## IP address. - server_addr: addr &log &optional; - - ## Client port number seen at time of server handing out IP (expected - ## as 68/udp). - client_port: port &optional; - ## Server port number seen at time of server handing out IP (expected - ## as 67/udp). - server_port: port &optional; - - ## Client's hardware address. - mac: string &log &optional; - - ## Name given by client in Hostname option 12. - host_name: string &log &optional; - ## FQDN given by client in Client FQDN option 81. - client_fqdn: string &log &optional; - ## Domain given by the server in option 15. - domain: string &log &optional; - - ## IP address requested by the client. - requested_addr: addr &log &optional; - ## IP address assigned by the server. 
- assigned_addr: addr &log &optional; - ## IP address lease interval. - lease_time: interval &log &optional; - - ## Message typically accompanied with a DHCP_DECLINE - ## so the client can tell the server why it rejected - ## an address. - client_message: string &log &optional; - ## Message typically accompanied with a DHCP_NAK to let - ## the client know why it rejected the request. - server_message: string &log &optional; - - ## The DHCP message types seen by this DHCP transaction - msg_types: vector of string &log &default=string_vec(); - - ## Duration of the DHCP "session" representing the - ## time from the first message to the last. - duration: interval &log &default=0secs; - }; - - ## The maximum amount of time that a transation ID will be watched - ## for to try and tie messages together into a single DHCP - ## transaction narrative. - option DHCP::max_txid_watch_time = 30secs; - - ## This event is used internally to distribute data around clusters - ## since DHCP doesn't follow the normal "connection" model used by - ## most protocols. It can also be handled to extend the DHCP log. - ## bro:see::`DHCP::log_info`. - global DHCP::aggregate_msgs: event(ts: time, id: conn_id, uid: string, is_orig: bool, msg: DHCP::Msg, options: DHCP::Options); - - ## This is a global variable that is only to be used in the - ## :bro::see::`DHCP::aggregate_msgs` event. It can be used to avoid - ## looking up the info record for a transaction ID in every event handler - ## for :bro:see::`DHCP::aggregate_msgs`. - global DHCP::log_info: Info; - - ## Event that can be handled to access the DHCP - ## record as it is sent on to the logging framework. - global log_dhcp: event(rec: Info); -} - -# Add the dhcp info to the connection record. -redef record connection += { - dhcp: Info &optional; -}; - -redef record Info += { - last_message_ts: time &optional; -}; - -# 67/udp is the server's port, 68/udp the client. -# 4011/udp seems to be some proxyDHCP thing. -const ports = { 67/udp, 68/udp, 4011/udp }; -redef likely_server_ports += { 67/udp }; - -event bro_init() &priority=5 - { - Log::create_stream(DHCP::LOG, [$columns=Info, $ev=log_dhcp, $path="dhcp"]); - Analyzer::register_for_ports(Analyzer::ANALYZER_DHCP, ports); - } - -@if ( Cluster::is_enabled() ) -event bro_init() - { - Broker::auto_publish(Cluster::manager_topic, DHCP::aggregate_msgs); - } -@endif - -function join_data_expiration(t: table[count] of Info, idx: count): interval - { - local info = t[idx]; - - local now = network_time(); - # If a message hasn't been seen in the past 5 seconds or the - # total time watching has been more than the maximum time - # allowed by the configuration then log this data and expire it. - # Also, if Bro is shutting down. - if ( (now - info$last_message_ts) > 5sec || - (now - info$ts) > max_txid_watch_time || - bro_is_terminating() ) - { - Log::write(LOG, info); - - # Go ahead and expire the data now that the log - # entry has been written. - return 0secs; - } - else - { - return 5secs; - } - } - -# This is where the data is stored as it's centralized. All data for a log must -# arrive within the expiration interval if it's to be logged fully. On a cluster, -# this data is only maintained on the manager. -global join_data: table[count] of Info = table() - &create_expire=10secs &expire_func=join_data_expiration; - - - -@if ( ! 
Cluster::is_enabled() || Cluster::local_node_type() == Cluster::MANAGER ) -# We are handling this event at priority 1000 because we really want -# the DHCP::log_info global to be set correctly before a user might try -# to access it. -event DHCP::aggregate_msgs(ts: time, id: conn_id, uid: string, is_orig: bool, msg: DHCP::Msg, options: DHCP::Options) &priority=1000 - { - if ( msg$xid !in join_data ) - { - join_data[msg$xid] = Info($ts=ts, - $uids=set(uid)); - } - - log_info = join_data[msg$xid]; - } - -event DHCP::aggregate_msgs(ts: time, id: conn_id, uid: string, is_orig: bool, msg: DHCP::Msg, options: DHCP::Options) &priority=5 - { - log_info$duration = ts - log_info$ts; - - if ( uid !in log_info$uids ) - add log_info$uids[uid]; - - log_info$msg_types += DHCP::message_types[msg$m_type]; - - # Let's watch for messages in any DHCP message type - # and split them out based on client and server. - if ( options?$message ) - { - if ( is_orig ) - log_info$client_message = options$message; - else - log_info$server_message = options$message; - } - - # Update the last message time so that we can do some data - # expiration handling. - log_info$last_message_ts = ts; - - if ( is_orig ) # client requests - { - # Assign the client addr in case this is a session - # of only INFORM messages (no lease handed out). - # This also works if a normal lease handout uses - # unicast. - if ( id$orig_h != 0.0.0.0 && id$orig_h != 255.255.255.255 ) - log_info$client_addr = id$orig_h; - - if ( options?$host_name ) - log_info$host_name = options$host_name; - - if ( options?$client_fqdn ) - log_info$client_fqdn = options$client_fqdn$domain_name; - - if ( options?$client_id && - options$client_id$hwtype == 1 ) # ETHERNET - log_info$mac = options$client_id$hwaddr; - - if ( options?$addr_request ) - log_info$requested_addr = options$addr_request; - } - else # server reply messages - { - # Only log the address of the server if it handed out - # an IP address. - if ( msg$yiaddr != 0.0.0.0 && - id$resp_h != 255.255.255.255 ) - { - log_info$server_addr = id$resp_h; - log_info$server_port = id$resp_p; - log_info$client_port = id$orig_p; - } - - # Only use the client hardware address from the server - # if we didn't already pick one up from the client. - if ( msg$chaddr != "" && !log_info?$mac ) - log_info$mac = msg$chaddr; - - if ( msg$yiaddr != 0.0.0.0 ) - log_info$assigned_addr = msg$yiaddr; - - # If no client address has been seen yet, let's use the assigned addr. - if ( ! log_info?$client_addr && log_info?$assigned_addr ) - log_info$client_addr = log_info$assigned_addr; - - if ( options?$domain_name ) - log_info$domain = options$domain_name; - - if ( options?$lease ) - log_info$lease_time = options$lease; - } - } -@endif - - - -# Aggregate DHCP messages to the manager. -event dhcp_message(c: connection, is_orig: bool, msg: DHCP::Msg, options: DHCP::Options) &priority=-5 - { - event DHCP::aggregate_msgs(network_time(), c$id, c$uid, is_orig, msg, options); - } - -event bro_done() &priority=-5 - { - # Log any remaining data that hasn't already been logged! - for ( i in DHCP::join_data ) - join_data_expiration(DHCP::join_data, i); - } diff --git a/scripts/base/protocols/dhcp/main.zeek b/scripts/base/protocols/dhcp/main.zeek new file mode 100644 index 0000000000..3ba83ffae7 --- /dev/null +++ b/scripts/base/protocols/dhcp/main.zeek @@ -0,0 +1,272 @@ +##! Analyze DHCP traffic and provide a log that is organized around +##! the idea of a DHCP "conversation" defined by messages exchanged within +##! 
a relatively short period of time using the same transaction ID. +##! The log will have information from clients and servers to give a more +##! complete picture of what happened. + +@load base/frameworks/cluster +@load ./consts + +module DHCP; + +export { + redef enum Log::ID += { LOG }; + + ## The record type which contains the column fields of the DHCP log. + type Info: record { + ## The earliest time at which a DHCP message over the + ## associated connection is observed. + ts: time &log; + + ## A series of unique identifiers of the connections over which + ## DHCP is occurring. This behavior with multiple connections is + ## unique to DHCP because of the way it uses broadcast packets + ## on local networks. + uids: set[string] &log; + + ## IP address of the client. If a transaction + ## is only a client sending INFORM messages then + ## there is no lease information exchanged so this + ## is helpful to know who sent the messages. + ## Getting an address in this field does require + ## that the client sources at least one DHCP message + ## using a non-broadcast address. + client_addr: addr &log &optional; + ## IP address of the server involved in actually + ## handing out the lease. There could be other + ## servers replying with OFFER messages which won't + ## be represented here. Getting an address in this + ## field also requires that the server handing out + ## the lease also sources packets from a non-broadcast + ## IP address. + server_addr: addr &log &optional; + + ## Client port number seen at time of server handing out IP (expected + ## as 68/udp). + client_port: port &optional; + ## Server port number seen at time of server handing out IP (expected + ## as 67/udp). + server_port: port &optional; + + ## Client's hardware address. + mac: string &log &optional; + + ## Name given by client in Hostname option 12. + host_name: string &log &optional; + ## FQDN given by client in Client FQDN option 81. + client_fqdn: string &log &optional; + ## Domain given by the server in option 15. + domain: string &log &optional; + + ## IP address requested by the client. + requested_addr: addr &log &optional; + ## IP address assigned by the server. + assigned_addr: addr &log &optional; + ## IP address lease interval. + lease_time: interval &log &optional; + + ## Message typically accompanied with a DHCP_DECLINE + ## so the client can tell the server why it rejected + ## an address. + client_message: string &log &optional; + ## Message typically accompanied with a DHCP_NAK to let + ## the client know why it rejected the request. + server_message: string &log &optional; + + ## The DHCP message types seen by this DHCP transaction + msg_types: vector of string &log &default=string_vec(); + + ## Duration of the DHCP "session" representing the + ## time from the first message to the last. + duration: interval &log &default=0secs; + }; + + ## The maximum amount of time that a transation ID will be watched + ## for to try and tie messages together into a single DHCP + ## transaction narrative. + option DHCP::max_txid_watch_time = 30secs; + + ## This event is used internally to distribute data around clusters + ## since DHCP doesn't follow the normal "connection" model used by + ## most protocols. It can also be handled to extend the DHCP log. + ## :zeek:see:`DHCP::log_info`. 
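# A minimal sketch (an assumed example, not drawn from the patch itself) of
# extending dhcp.log by handling the aggregate_msgs event declared just below,
# using DHCP::log_info as its documentation describes. The msg_orig_last field
# and the cluster guard mirror patterns in this file but are assumptions here.
redef record DHCP::Info += {
	## Originator address of the last aggregated DHCP message (example field).
	msg_orig_last: addr &log &optional;
};

@if ( ! Cluster::is_enabled() || Cluster::local_node_type() == Cluster::MANAGER )
event DHCP::aggregate_msgs(ts: time, id: conn_id, uid: string, is_orig: bool,
                           msg: DHCP::Msg, options: DHCP::Options) &priority=3
	{
	DHCP::log_info$msg_orig_last = id$orig_h;
	}
@endif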
+ global DHCP::aggregate_msgs: event(ts: time, id: conn_id, uid: string, is_orig: bool, msg: DHCP::Msg, options: DHCP::Options); + + ## This is a global variable that is only to be used in the + ## :zeek:see:`DHCP::aggregate_msgs` event. It can be used to avoid + ## looking up the info record for a transaction ID in every event handler + ## for :zeek:see:`DHCP::aggregate_msgs`. + global DHCP::log_info: Info; + + ## Event that can be handled to access the DHCP + ## record as it is sent on to the logging framework. + global log_dhcp: event(rec: Info); +} + +# Add the dhcp info to the connection record. +redef record connection += { + dhcp: Info &optional; +}; + +redef record Info += { + last_message_ts: time &optional; +}; + +# 67/udp is the server's port, 68/udp the client. +# 4011/udp seems to be some proxyDHCP thing. +const ports = { 67/udp, 68/udp, 4011/udp }; +redef likely_server_ports += { 67/udp }; + +event zeek_init() &priority=5 + { + Log::create_stream(DHCP::LOG, [$columns=Info, $ev=log_dhcp, $path="dhcp"]); + Analyzer::register_for_ports(Analyzer::ANALYZER_DHCP, ports); + } + +@if ( Cluster::is_enabled() ) +event zeek_init() + { + Broker::auto_publish(Cluster::manager_topic, DHCP::aggregate_msgs); + } +@endif + +function join_data_expiration(t: table[count] of Info, idx: count): interval + { + local info = t[idx]; + + local now = network_time(); + # If a message hasn't been seen in the past 5 seconds or the + # total time watching has been more than the maximum time + # allowed by the configuration then log this data and expire it. + # Also, if Zeek is shutting down. + if ( (now - info$last_message_ts) > 5sec || + (now - info$ts) > max_txid_watch_time || + zeek_is_terminating() ) + { + Log::write(LOG, info); + + # Go ahead and expire the data now that the log + # entry has been written. + return 0secs; + } + else + { + return 5secs; + } + } + +# This is where the data is stored as it's centralized. All data for a log must +# arrive within the expiration interval if it's to be logged fully. On a cluster, +# this data is only maintained on the manager. +global join_data: table[count] of Info = table() + &create_expire=10secs &expire_func=join_data_expiration; + + + +@if ( ! Cluster::is_enabled() || Cluster::local_node_type() == Cluster::MANAGER ) +# We are handling this event at priority 1000 because we really want +# the DHCP::log_info global to be set correctly before a user might try +# to access it. +event DHCP::aggregate_msgs(ts: time, id: conn_id, uid: string, is_orig: bool, msg: DHCP::Msg, options: DHCP::Options) &priority=1000 + { + if ( msg$xid !in join_data ) + { + join_data[msg$xid] = Info($ts=ts, + $uids=set(uid)); + } + + log_info = join_data[msg$xid]; + } + +event DHCP::aggregate_msgs(ts: time, id: conn_id, uid: string, is_orig: bool, msg: DHCP::Msg, options: DHCP::Options) &priority=5 + { + log_info$duration = ts - log_info$ts; + + if ( uid !in log_info$uids ) + add log_info$uids[uid]; + + log_info$msg_types += DHCP::message_types[msg$m_type]; + + # Let's watch for messages in any DHCP message type + # and split them out based on client and server. + if ( options?$message ) + { + if ( is_orig ) + log_info$client_message = options$message; + else + log_info$server_message = options$message; + } + + # Update the last message time so that we can do some data + # expiration handling. + log_info$last_message_ts = ts; + + if ( is_orig ) # client requests + { + # Assign the client addr in case this is a session + # of only INFORM messages (no lease handed out). 
+ # This also works if a normal lease handout uses + # unicast. + if ( id$orig_h != 0.0.0.0 && id$orig_h != 255.255.255.255 ) + log_info$client_addr = id$orig_h; + + if ( options?$host_name ) + log_info$host_name = options$host_name; + + if ( options?$client_fqdn ) + log_info$client_fqdn = options$client_fqdn$domain_name; + + if ( options?$client_id && + options$client_id$hwtype == 1 ) # ETHERNET + log_info$mac = options$client_id$hwaddr; + + if ( options?$addr_request ) + log_info$requested_addr = options$addr_request; + } + else # server reply messages + { + # Only log the address of the server if it handed out + # an IP address. + if ( msg$yiaddr != 0.0.0.0 && + id$resp_h != 255.255.255.255 ) + { + log_info$server_addr = id$resp_h; + log_info$server_port = id$resp_p; + log_info$client_port = id$orig_p; + } + + # Only use the client hardware address from the server + # if we didn't already pick one up from the client. + if ( msg$chaddr != "" && !log_info?$mac ) + log_info$mac = msg$chaddr; + + if ( msg$yiaddr != 0.0.0.0 ) + log_info$assigned_addr = msg$yiaddr; + + # If no client address has been seen yet, let's use the assigned addr. + if ( ! log_info?$client_addr && log_info?$assigned_addr ) + log_info$client_addr = log_info$assigned_addr; + + if ( options?$domain_name ) + log_info$domain = options$domain_name; + + if ( options?$lease ) + log_info$lease_time = options$lease; + } + } +@endif + + + +# Aggregate DHCP messages to the manager. +event dhcp_message(c: connection, is_orig: bool, msg: DHCP::Msg, options: DHCP::Options) &priority=-5 + { + event DHCP::aggregate_msgs(network_time(), c$id, c$uid, is_orig, msg, options); + } + +event zeek_done() &priority=-5 + { + # Log any remaining data that hasn't already been logged! + for ( i in DHCP::join_data ) + join_data_expiration(DHCP::join_data, i); + } diff --git a/scripts/base/protocols/dnp3/__load__.bro b/scripts/base/protocols/dnp3/__load__.zeek similarity index 100% rename from scripts/base/protocols/dnp3/__load__.bro rename to scripts/base/protocols/dnp3/__load__.zeek diff --git a/scripts/base/protocols/dnp3/consts.bro b/scripts/base/protocols/dnp3/consts.zeek similarity index 100% rename from scripts/base/protocols/dnp3/consts.bro rename to scripts/base/protocols/dnp3/consts.zeek diff --git a/scripts/base/protocols/dnp3/main.bro b/scripts/base/protocols/dnp3/main.bro deleted file mode 100644 index 35dd012d75..0000000000 --- a/scripts/base/protocols/dnp3/main.bro +++ /dev/null @@ -1,73 +0,0 @@ -##! A very basic DNP3 analysis script that just logs requests and replies. - -module DNP3; - -@load ./consts - -export { - redef enum Log::ID += { LOG }; - - type Info: record { - ## Time of the request. - ts: time &log; - ## Unique identifier for the connection. - uid: string &log; - ## Identifier for the connection. - id: conn_id &log; - ## The name of the function message in the request. - fc_request: string &log &optional; - ## The name of the function message in the reply. - fc_reply: string &log &optional; - ## The response's "internal indication number". - iin: count &log &optional; - }; - - ## Event that can be handled to access the DNP3 record as it is sent on - ## to the logging framework. 
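# A minimal sketch (an assumed example, not drawn from the patch itself) of
# consuming the log_dnp3 event declared just below: because fc_request is
# optional, a reply logged without it means the request was never observed.
event DNP3::log_dnp3(rec: DNP3::Info)
	{
	if ( rec?$fc_reply && ! rec?$fc_request )
		Reporter::info(fmt("DNP3 reply %s without an observed request (%s)",
		                   rec$fc_reply, rec$uid));
	}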
- global log_dnp3: event(rec: Info); -} - -redef record connection += { - dnp3: Info &optional; -}; - -const ports = { 20000/tcp , 20000/udp }; -redef likely_server_ports += { ports }; - -event bro_init() &priority=5 - { - Log::create_stream(DNP3::LOG, [$columns=Info, $ev=log_dnp3, $path="dnp3"]); - Analyzer::register_for_ports(Analyzer::ANALYZER_DNP3_TCP, ports); - } - -event dnp3_application_request_header(c: connection, is_orig: bool, application_control: count, fc: count) - { - if ( ! c?$dnp3 ) - c$dnp3 = [$ts=network_time(), $uid=c$uid, $id=c$id]; - - c$dnp3$ts = network_time(); - c$dnp3$fc_request = function_codes[fc]; - } - -event dnp3_application_response_header(c: connection, is_orig: bool, application_control: count, fc: count, iin: count) - { - if ( ! c?$dnp3 ) - c$dnp3 = [$ts=network_time(), $uid=c$uid, $id=c$id]; - - c$dnp3$ts = network_time(); - c$dnp3$fc_reply = function_codes[fc]; - c$dnp3$iin = iin; - - Log::write(LOG, c$dnp3); - - delete c$dnp3; - } - -event connection_state_remove(c: connection) &priority=-5 - { - if ( ! c?$dnp3 ) - return; - - Log::write(LOG, c$dnp3); - delete c$dnp3; - } diff --git a/scripts/base/protocols/dnp3/main.zeek b/scripts/base/protocols/dnp3/main.zeek new file mode 100644 index 0000000000..184816c59f --- /dev/null +++ b/scripts/base/protocols/dnp3/main.zeek @@ -0,0 +1,73 @@ +##! A very basic DNP3 analysis script that just logs requests and replies. + +module DNP3; + +@load ./consts + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + ## Time of the request. + ts: time &log; + ## Unique identifier for the connection. + uid: string &log; + ## Identifier for the connection. + id: conn_id &log; + ## The name of the function message in the request. + fc_request: string &log &optional; + ## The name of the function message in the reply. + fc_reply: string &log &optional; + ## The response's "internal indication number". + iin: count &log &optional; + }; + + ## Event that can be handled to access the DNP3 record as it is sent on + ## to the logging framework. + global log_dnp3: event(rec: Info); +} + +redef record connection += { + dnp3: Info &optional; +}; + +const ports = { 20000/tcp , 20000/udp }; +redef likely_server_ports += { ports }; + +event zeek_init() &priority=5 + { + Log::create_stream(DNP3::LOG, [$columns=Info, $ev=log_dnp3, $path="dnp3"]); + Analyzer::register_for_ports(Analyzer::ANALYZER_DNP3_TCP, ports); + } + +event dnp3_application_request_header(c: connection, is_orig: bool, application_control: count, fc: count) + { + if ( ! c?$dnp3 ) + c$dnp3 = [$ts=network_time(), $uid=c$uid, $id=c$id]; + + c$dnp3$ts = network_time(); + c$dnp3$fc_request = function_codes[fc]; + } + +event dnp3_application_response_header(c: connection, is_orig: bool, application_control: count, fc: count, iin: count) + { + if ( ! c?$dnp3 ) + c$dnp3 = [$ts=network_time(), $uid=c$uid, $id=c$id]; + + c$dnp3$ts = network_time(); + c$dnp3$fc_reply = function_codes[fc]; + c$dnp3$iin = iin; + + Log::write(LOG, c$dnp3); + + delete c$dnp3; + } + +event connection_state_remove(c: connection) &priority=-5 + { + if ( ! 
c?$dnp3 ) + return; + + Log::write(LOG, c$dnp3); + delete c$dnp3; + } diff --git a/scripts/base/protocols/dns/__load__.bro b/scripts/base/protocols/dns/__load__.zeek similarity index 100% rename from scripts/base/protocols/dns/__load__.bro rename to scripts/base/protocols/dns/__load__.zeek diff --git a/scripts/base/protocols/dns/consts.bro b/scripts/base/protocols/dns/consts.zeek similarity index 100% rename from scripts/base/protocols/dns/consts.bro rename to scripts/base/protocols/dns/consts.zeek diff --git a/scripts/base/protocols/dns/main.bro b/scripts/base/protocols/dns/main.bro deleted file mode 100644 index f8e655d826..0000000000 --- a/scripts/base/protocols/dns/main.bro +++ /dev/null @@ -1,574 +0,0 @@ -##! Base DNS analysis script which tracks and logs DNS queries along with -##! their responses. - -@load base/utils/queue -@load ./consts - -module DNS; - -export { - ## The DNS logging stream identifier. - redef enum Log::ID += { LOG }; - - ## The record type which contains the column fields of the DNS log. - type Info: record { - ## The earliest time at which a DNS protocol message over the - ## associated connection is observed. - ts: time &log; - ## A unique identifier of the connection over which DNS messages - ## are being transferred. - uid: string &log; - ## The connection's 4-tuple of endpoint addresses/ports. - id: conn_id &log; - ## The transport layer protocol of the connection. - proto: transport_proto &log; - ## A 16-bit identifier assigned by the program that generated - ## the DNS query. Also used in responses to match up replies to - ## outstanding queries. - trans_id: count &log &optional; - ## Round trip time for the query and response. This indicates - ## the delay between when the request was seen until the - ## answer started. - rtt: interval &log &optional; - ## The domain name that is the subject of the DNS query. - query: string &log &optional; - ## The QCLASS value specifying the class of the query. - qclass: count &log &optional; - ## A descriptive name for the class of the query. - qclass_name: string &log &optional; - ## A QTYPE value specifying the type of the query. - qtype: count &log &optional; - ## A descriptive name for the type of the query. - qtype_name: string &log &optional; - ## The response code value in DNS response messages. - rcode: count &log &optional; - ## A descriptive name for the response code value. - rcode_name: string &log &optional; - ## The Authoritative Answer bit for response messages specifies - ## that the responding name server is an authority for the - ## domain name in the question section. - AA: bool &log &default=F; - ## The Truncation bit specifies that the message was truncated. - TC: bool &log &default=F; - ## The Recursion Desired bit in a request message indicates that - ## the client wants recursive service for this query. - RD: bool &log &default=F; - ## The Recursion Available bit in a response message indicates - ## that the name server supports recursive queries. - RA: bool &log &default=F; - ## A reserved field that is usually zero in - ## queries and responses. - Z: count &log &default=0; - ## The set of resource descriptions in the query answer. - answers: vector of string &log &optional; - ## The caching intervals of the associated RRs described by the - ## *answers* field. - TTLs: vector of interval &log &optional; - ## The DNS query was rejected by the server. - rejected: bool &log &default=F; - - ## The total number of resource records in a reply message's - ## answer section. 
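# Illustrative sketch (hypothetical standalone script): once an Info record
# like the one defined here is complete, it is written to dns.log and the
# DNS::log_dns event fires; a consumer might watch for rejected lookups.
@load base/protocols/dns

event DNS::log_dns(rec: DNS::Info)
	{
	if ( rec?$query && rec?$rcode_name && rec$rcode_name == "NXDOMAIN" )
		print fmt("NXDOMAIN for %s from %s", rec$query, rec$id$orig_h);
	}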
- total_answers: count &optional; - ## The total number of resource records in a reply message's - ## answer, authority, and additional sections. - total_replies: count &optional; - - ## Whether the full DNS query has been seen. - saw_query: bool &default=F; - ## Whether the full DNS reply has been seen. - saw_reply: bool &default=F; - }; - - ## An event that can be handled to access the :bro:type:`DNS::Info` - ## record as it is sent to the logging framework. - global log_dns: event(rec: Info); - - ## This is called by the specific dns_*_reply events with a "reply" - ## which may not represent the full data available from the resource - ## record, but it's generally considered a summarization of the - ## responses. - ## - ## c: The connection record for which to fill in DNS reply data. - ## - ## msg: The DNS message header information for the response. - ## - ## ans: The general information of a RR response. - ## - ## reply: The specific response information according to RR type/class. - global do_reply: hook(c: connection, msg: dns_msg, ans: dns_answer, reply: string); - - ## A hook that is called whenever a session is being set. - ## This can be used if additional initialization logic needs to happen - ## when creating a new session value. - ## - ## c: The connection involved in the new session. - ## - ## msg: The DNS message header information. - ## - ## is_query: Indicator for if this is being called for a query or a response. - global set_session: hook(c: connection, msg: dns_msg, is_query: bool); - - ## Yields a queue of :bro:see:`DNS::Info` objects for a given - ## DNS message query/transaction ID. - type PendingMessages: table[count] of Queue::Queue; - - ## Give up trying to match pending DNS queries or replies for a given - ## query/transaction ID once this number of unmatched queries or replies - ## is reached (this shouldn't happen unless either the DNS server/resolver - ## is broken, Bro is not seeing all the DNS traffic, or an AXFR query - ## response is ongoing). - option max_pending_msgs = 50; - - ## Give up trying to match pending DNS queries or replies across all - ## query/transaction IDs once there is at least one unmatched query or - ## reply across this number of different query IDs. - option max_pending_query_ids = 50; - - ## A record type which tracks the status of DNS queries for a given - ## :bro:type:`connection`. - type State: record { - ## A single query that hasn't been matched with a response yet. - ## Note this is maintained separate from the *pending_queries* - ## field solely for performance reasons -- it's possible that - ## *pending_queries* contains further queries for which a response - ## has not yet been seen, even for the same transaction ID. - pending_query: Info &optional; - - ## Indexed by query id, returns Info record corresponding to - ## queries that haven't been matched with a response yet. - pending_queries: PendingMessages &optional; - - ## Indexed by query id, returns Info record corresponding to - ## replies that haven't been matched with a query yet. 
- pending_replies: PendingMessages &optional; - }; -} - - -redef record connection += { - dns: Info &optional; - dns_state: State &optional; -}; - -const ports = { 53/udp, 53/tcp, 137/udp, 5353/udp, 5355/udp }; -redef likely_server_ports += { ports }; - -event bro_init() &priority=5 - { - Log::create_stream(DNS::LOG, [$columns=Info, $ev=log_dns, $path="dns"]); - Analyzer::register_for_ports(Analyzer::ANALYZER_DNS, ports); - } - -function new_session(c: connection, trans_id: count): Info - { - local info: Info; - info$ts = network_time(); - info$id = c$id; - info$uid = c$uid; - info$proto = get_port_transport_proto(c$id$resp_p); - info$trans_id = trans_id; - return info; - } - -function log_unmatched_msgs_queue(q: Queue::Queue) - { - local infos: vector of Info; - Queue::get_vector(q, infos); - - for ( i in infos ) - { - Log::write(DNS::LOG, infos[i]); - } - } - -function log_unmatched_msgs(msgs: PendingMessages) - { - for ( trans_id, q in msgs ) - { - log_unmatched_msgs_queue(q); - } - - clear_table(msgs); - } - -function enqueue_new_msg(msgs: PendingMessages, id: count, msg: Info) - { - if ( id !in msgs ) - { - if ( |msgs| > max_pending_query_ids ) - { - # Throw away all unmatched on assumption they'll never be matched. - log_unmatched_msgs(msgs); - } - - msgs[id] = Queue::init(); - } - else - { - if ( Queue::len(msgs[id]) > max_pending_msgs ) - { - log_unmatched_msgs_queue(msgs[id]); - # Throw away all unmatched on assumption they'll never be matched. - msgs[id] = Queue::init(); - } - } - - Queue::put(msgs[id], msg); - } - -function pop_msg(msgs: PendingMessages, id: count): Info - { - local rval: Info = Queue::get(msgs[id]); - - if ( Queue::len(msgs[id]) == 0 ) - delete msgs[id]; - - return rval; - } - -hook set_session(c: connection, msg: dns_msg, is_query: bool) &priority=5 - { - if ( ! c?$dns_state ) - { - local state: State; - c$dns_state = state; - } - - if ( is_query ) - { - if ( c$dns_state?$pending_replies && msg$id in c$dns_state$pending_replies && - Queue::len(c$dns_state$pending_replies[msg$id]) > 0 ) - { - # Match this DNS query w/ what's at head of pending reply queue. - c$dns = pop_msg(c$dns_state$pending_replies, msg$id); - } - else - { - # Create a new DNS session and put it in the query queue so - # we can wait for a matching reply. - c$dns = new_session(c, msg$id); - - if( ! c$dns_state?$pending_query ) - c$dns_state$pending_query = c$dns; - else - { - if( !c$dns_state?$pending_queries ) - c$dns_state$pending_queries = table(); - - enqueue_new_msg(c$dns_state$pending_queries, msg$id, c$dns); - } - } - } - else - { - if ( c$dns_state?$pending_query && c$dns_state$pending_query$trans_id == msg$id ) - { - c$dns = c$dns_state$pending_query; - delete c$dns_state$pending_query; - - if ( c$dns_state?$pending_queries ) - { - # Popping off an arbitrary, unpaired query to set as the - # new fastpath is necessary in order to preserve the overall - # queuing order of any pending queries that may share a - # transaction ID. If we didn't fill c$dns_state$pending_query - # back in, then it's possible a new query would jump ahead in - # the queue of some other pending query since - # c$dns_state$pending_query is filled first if available. - - if ( msg$id in c$dns_state$pending_queries && - Queue::len(c$dns_state$pending_queries[msg$id]) > 0 ) - # Prioritize any pending query with matching ID to the one - # that just got paired with a response. - c$dns_state$pending_query = pop_msg(c$dns_state$pending_queries, msg$id); - else - { - # Just pick an arbitrary, unpaired query. 
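# Minimal sketch of the queueing pattern used here (hypothetical, standalone;
# assumes only base/utils/queue): unmatched messages are parked in
# per-transaction-ID queues and popped again in arrival order.
@load base/utils/queue

global pending: table[count] of Queue::Queue;

function park(trans_id: count, summary: string)
	{
	if ( trans_id !in pending )
		pending[trans_id] = Queue::init();

	Queue::put(pending[trans_id], summary);
	}

function unpark(trans_id: count): string
	{
	local rval: string = Queue::get(pending[trans_id]);

	if ( Queue::len(pending[trans_id]) == 0 )
		delete pending[trans_id];

	return rval;
	}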
- for ( trans_id, q in c$dns_state$pending_queries ) - if ( Queue::len(q) > 0 ) - { - c$dns_state$pending_query = pop_msg(c$dns_state$pending_queries, trans_id); - break; - } - } - } - } - else if ( c$dns_state?$pending_queries && msg$id in c$dns_state$pending_queries && - Queue::len(c$dns_state$pending_queries[msg$id]) > 0 ) - { - # Match this DNS reply w/ what's at head of pending query queue. - c$dns = pop_msg(c$dns_state$pending_queries, msg$id); - } - else - { - # Create a new DNS session and put it in the reply queue so - # we can wait for a matching query. - c$dns = new_session(c, msg$id); - - if( ! c$dns_state?$pending_replies ) - c$dns_state$pending_replies = table(); - - enqueue_new_msg(c$dns_state$pending_replies, msg$id, c$dns); - } - } - - if ( ! is_query ) - { - c$dns$rcode = msg$rcode; - c$dns$rcode_name = base_errors[msg$rcode]; - - if ( ! c$dns?$total_answers ) - c$dns$total_answers = msg$num_answers; - - if ( ! c$dns?$total_replies ) - c$dns$total_replies = msg$num_answers + msg$num_addl + msg$num_auth; - - if ( msg$rcode != 0 && msg$num_queries == 0 ) - c$dns$rejected = T; - } - } - -event dns_message(c: connection, is_orig: bool, msg: dns_msg, len: count) &priority=5 - { - if ( msg$opcode != 0 ) - # Currently only standard queries are tracked. - return; - - hook set_session(c, msg, ! msg$QR); - } - -hook DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) &priority=5 - { - if ( msg$opcode != 0 ) - # Currently only standard queries are tracked. - return; - - if ( ! msg$QR ) - # This is weird: the inquirer must also be providing answers in - # the request, which is not what we want to track. - return; - - if ( ans$answer_type == DNS_ANS ) - { - if ( ! c$dns?$query ) - c$dns$query = ans$query; - - c$dns$AA = msg$AA; - c$dns$RA = msg$RA; - - if ( ! c$dns?$rtt ) - { - c$dns$rtt = network_time() - c$dns$ts; - # This could mean that only a reply was seen since - # we assume there must be some passage of time between - # request and response. - if ( c$dns$rtt == 0secs ) - delete c$dns$rtt; - } - - if ( reply != "" ) - { - if ( ! c$dns?$answers ) - c$dns$answers = vector(); - c$dns$answers += reply; - - if ( ! c$dns?$TTLs ) - c$dns$TTLs = vector(); - c$dns$TTLs += ans$TTL; - } - } - } - -event dns_end(c: connection, msg: dns_msg) &priority=5 - { - if ( ! c?$dns ) - return; - - if ( msg$QR ) - c$dns$saw_reply = T; - else - c$dns$saw_query = T; - } - -event dns_end(c: connection, msg: dns_msg) &priority=-5 - { - if ( c?$dns && c$dns$saw_reply && c$dns$saw_query ) - { - Log::write(DNS::LOG, c$dns); - delete c$dns; - } - } - -event dns_request(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count) &priority=5 - { - if ( msg$opcode != 0 ) - # Currently only standard queries are tracked. - return; - - c$dns$RD = msg$RD; - c$dns$TC = msg$TC; - c$dns$qclass = qclass; - c$dns$qclass_name = classes[qclass]; - c$dns$qtype = qtype; - c$dns$qtype_name = query_types[qtype]; - c$dns$Z = msg$Z; - - # Decode netbios name queries - # Note: I'm ignoring the name type for now. Not sure if this should be - # worked into the query/response in some fashion. - if ( c$id$resp_p == 137/udp ) - { - query = decode_netbios_name(query); - if ( c$dns$qtype_name == "SRV" ) - { - # The SRV RFC used the ID used for NetBios Status RRs. - # So if this is NetBios Name Service we name it correctly. 
- c$dns$qtype_name = "NBSTAT"; - } - } - c$dns$query = query; - } - - -event dns_unknown_reply(c: connection, msg: dns_msg, ans: dns_answer) &priority=5 - { - hook DNS::do_reply(c, msg, ans, fmt("", ans$qtype)); - } - -event dns_A_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priority=5 - { - hook DNS::do_reply(c, msg, ans, fmt("%s", a)); - } - -event dns_TXT_reply(c: connection, msg: dns_msg, ans: dns_answer, strs: string_vec) &priority=5 - { - local txt_strings: string = ""; - - for ( i in strs ) - { - if ( i > 0 ) - txt_strings += " "; - - txt_strings += fmt("TXT %d %s", |strs[i]|, strs[i]); - } - - hook DNS::do_reply(c, msg, ans, txt_strings); - } - -event dns_AAAA_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priority=5 - { - hook DNS::do_reply(c, msg, ans, fmt("%s", a)); - } - -event dns_A6_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priority=5 - { - hook DNS::do_reply(c, msg, ans, fmt("%s", a)); - } - -event dns_NS_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string) &priority=5 - { - hook DNS::do_reply(c, msg, ans, name); - } - -event dns_CNAME_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string) &priority=5 - { - hook DNS::do_reply(c, msg, ans, name); - } - -event dns_MX_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string, - preference: count) &priority=5 - { - hook DNS::do_reply(c, msg, ans, name); - } - -event dns_PTR_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string) &priority=5 - { - hook DNS::do_reply(c, msg, ans, name); - } - -event dns_SOA_reply(c: connection, msg: dns_msg, ans: dns_answer, soa: dns_soa) &priority=5 - { - hook DNS::do_reply(c, msg, ans, soa$mname); - } - -event dns_WKS_reply(c: connection, msg: dns_msg, ans: dns_answer) &priority=5 - { - hook DNS::do_reply(c, msg, ans, ""); - } - -event dns_SRV_reply(c: connection, msg: dns_msg, ans: dns_answer, target: string, priority: count, weight: count, p: count) &priority=5 - { - hook DNS::do_reply(c, msg, ans, target); - } - -# TODO: figure out how to handle these -#event dns_EDNS(c: connection, msg: dns_msg, ans: dns_answer) -# { -# -# } -# -#event dns_EDNS_addl(c: connection, msg: dns_msg, ans: dns_edns_additional) -# { -# -# } -# -#event dns_TSIG_addl(c: connection, msg: dns_msg, ans: dns_tsig_additional) -# { -# -# } - -event dns_RRSIG(c: connection, msg: dns_msg, ans: dns_answer, rrsig: dns_rrsig_rr) &priority=5 - { - local s: string; - s = fmt("RRSIG %s %s", rrsig$type_covered, - rrsig$signer_name == "" ? 
"" : rrsig$signer_name); - hook DNS::do_reply(c, msg, ans, s); - } - -event dns_DNSKEY(c: connection, msg: dns_msg, ans: dns_answer, dnskey: dns_dnskey_rr) &priority=5 - { - local s: string; - s = fmt("DNSKEY %s", dnskey$algorithm); - hook DNS::do_reply(c, msg, ans, s); - } - -event dns_NSEC(c: connection, msg: dns_msg, ans: dns_answer, next_name: string, bitmaps: string_vec) &priority=5 - { - hook DNS::do_reply(c, msg, ans, fmt("NSEC %s %s", ans$query, next_name)); - } - -event dns_NSEC3(c: connection, msg: dns_msg, ans: dns_answer, nsec3: dns_nsec3_rr) &priority=5 - { - hook DNS::do_reply(c, msg, ans, "NSEC3"); - } - -event dns_DS(c: connection, msg: dns_msg, ans: dns_answer, ds: dns_ds_rr) &priority=5 - { - local s: string; - s = fmt("DS %s %s", ds$algorithm, ds$digest_type); - hook DNS::do_reply(c, msg, ans, s); - } - -event dns_rejected(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count) &priority=5 - { - if ( c?$dns ) - c$dns$rejected = T; - } - -event connection_state_remove(c: connection) &priority=-5 - { - if ( ! c?$dns_state ) - return; - - # If Bro is expiring state, we should go ahead and log all unmatched - # queries and replies now. - if( c$dns_state?$pending_query ) - Log::write(DNS::LOG, c$dns_state$pending_query); - - if( c$dns_state?$pending_queries ) - log_unmatched_msgs(c$dns_state$pending_queries); - - if( c$dns_state?$pending_replies ) - log_unmatched_msgs(c$dns_state$pending_replies); - } diff --git a/scripts/base/protocols/dns/main.zeek b/scripts/base/protocols/dns/main.zeek new file mode 100644 index 0000000000..3906ab5cf0 --- /dev/null +++ b/scripts/base/protocols/dns/main.zeek @@ -0,0 +1,589 @@ +##! Base DNS analysis script which tracks and logs DNS queries along with +##! their responses. + +@load base/utils/queue +@load ./consts + +module DNS; + +export { + ## The DNS logging stream identifier. + redef enum Log::ID += { LOG }; + + ## The record type which contains the column fields of the DNS log. + type Info: record { + ## The earliest time at which a DNS protocol message over the + ## associated connection is observed. + ts: time &log; + ## A unique identifier of the connection over which DNS messages + ## are being transferred. + uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. + id: conn_id &log; + ## The transport layer protocol of the connection. + proto: transport_proto &log; + ## A 16-bit identifier assigned by the program that generated + ## the DNS query. Also used in responses to match up replies to + ## outstanding queries. + trans_id: count &log &optional; + ## Round trip time for the query and response. This indicates + ## the delay between when the request was seen until the + ## answer started. + rtt: interval &log &optional; + ## The domain name that is the subject of the DNS query. + query: string &log &optional; + ## The QCLASS value specifying the class of the query. + qclass: count &log &optional; + ## A descriptive name for the class of the query. + qclass_name: string &log &optional; + ## A QTYPE value specifying the type of the query. + qtype: count &log &optional; + ## A descriptive name for the type of the query. + qtype_name: string &log &optional; + ## The response code value in DNS response messages. + rcode: count &log &optional; + ## A descriptive name for the response code value. + rcode_name: string &log &optional; + ## The Authoritative Answer bit for response messages specifies + ## that the responding name server is an authority for the + ## domain name in the question section. 
+ AA: bool &log &default=F; + ## The Truncation bit specifies that the message was truncated. + TC: bool &log &default=F; + ## The Recursion Desired bit in a request message indicates that + ## the client wants recursive service for this query. + RD: bool &log &default=F; + ## The Recursion Available bit in a response message indicates + ## that the name server supports recursive queries. + RA: bool &log &default=F; + ## A reserved field that is usually zero in + ## queries and responses. + Z: count &log &default=0; + ## The set of resource descriptions in the query answer. + answers: vector of string &log &optional; + ## The caching intervals of the associated RRs described by the + ## *answers* field. + TTLs: vector of interval &log &optional; + ## The DNS query was rejected by the server. + rejected: bool &log &default=F; + + ## The total number of resource records in a reply message's + ## answer section. + total_answers: count &optional; + ## The total number of resource records in a reply message's + ## answer, authority, and additional sections. + total_replies: count &optional; + + ## Whether the full DNS query has been seen. + saw_query: bool &default=F; + ## Whether the full DNS reply has been seen. + saw_reply: bool &default=F; + }; + + ## An event that can be handled to access the :zeek:type:`DNS::Info` + ## record as it is sent to the logging framework. + global log_dns: event(rec: Info); + + ## This is called by the specific dns_*_reply events with a "reply" + ## which may not represent the full data available from the resource + ## record, but it's generally considered a summarization of the + ## responses. + ## + ## c: The connection record for which to fill in DNS reply data. + ## + ## msg: The DNS message header information for the response. + ## + ## ans: The general information of a RR response. + ## + ## reply: The specific response information according to RR type/class. + global do_reply: hook(c: connection, msg: dns_msg, ans: dns_answer, reply: string); + + ## A hook that is called whenever a session is being set. + ## This can be used if additional initialization logic needs to happen + ## when creating a new session value. + ## + ## c: The connection involved in the new session. + ## + ## msg: The DNS message header information. + ## + ## is_query: Indicator for if this is being called for a query or a response. + global set_session: hook(c: connection, msg: dns_msg, is_query: bool); + + ## Yields a queue of :zeek:see:`DNS::Info` objects for a given + ## DNS message query/transaction ID. + type PendingMessages: table[count] of Queue::Queue; + + ## Give up trying to match pending DNS queries or replies for a given + ## query/transaction ID once this number of unmatched queries or replies + ## is reached (this shouldn't happen unless either the DNS server/resolver + ## is broken, Zeek is not seeing all the DNS traffic, or an AXFR query + ## response is ongoing). + option max_pending_msgs = 50; + + ## Give up trying to match pending DNS queries or replies across all + ## query/transaction IDs once there is at least one unmatched query or + ## reply across this number of different query IDs. + option max_pending_query_ids = 50; + + ## A record type which tracks the status of DNS queries for a given + ## :zeek:type:`connection`. + type State: record { + ## A single query that hasn't been matched with a response yet. 
+ ## Note this is maintained separate from the *pending_queries* + ## field solely for performance reasons -- it's possible that + ## *pending_queries* contains further queries for which a response + ## has not yet been seen, even for the same transaction ID. + pending_query: Info &optional; + + ## Indexed by query id, returns Info record corresponding to + ## queries that haven't been matched with a response yet. + pending_queries: PendingMessages &optional; + + ## Indexed by query id, returns Info record corresponding to + ## replies that haven't been matched with a query yet. + pending_replies: PendingMessages &optional; + }; +} + + +redef record connection += { + dns: Info &optional; + dns_state: State &optional; +}; + +const ports = { 53/udp, 53/tcp, 137/udp, 5353/udp, 5355/udp }; +redef likely_server_ports += { ports }; + +event zeek_init() &priority=5 + { + Log::create_stream(DNS::LOG, [$columns=Info, $ev=log_dns, $path="dns"]); + Analyzer::register_for_ports(Analyzer::ANALYZER_DNS, ports); + } + +function new_session(c: connection, trans_id: count): Info + { + local info: Info; + info$ts = network_time(); + info$id = c$id; + info$uid = c$uid; + info$proto = get_port_transport_proto(c$id$resp_p); + info$trans_id = trans_id; + return info; + } + +function log_unmatched_msgs_queue(q: Queue::Queue) + { + local infos: vector of Info; + Queue::get_vector(q, infos); + + for ( i in infos ) + { + Log::write(DNS::LOG, infos[i]); + } + } + +function log_unmatched_msgs(msgs: PendingMessages) + { + for ( trans_id, q in msgs ) + { + log_unmatched_msgs_queue(q); + } + + clear_table(msgs); + } + +function enqueue_new_msg(msgs: PendingMessages, id: count, msg: Info) + { + if ( id !in msgs ) + { + if ( |msgs| > max_pending_query_ids ) + { + # Throw away all unmatched on assumption they'll never be matched. + log_unmatched_msgs(msgs); + } + + msgs[id] = Queue::init(); + } + else + { + if ( Queue::len(msgs[id]) > max_pending_msgs ) + { + log_unmatched_msgs_queue(msgs[id]); + # Throw away all unmatched on assumption they'll never be matched. + msgs[id] = Queue::init(); + } + } + + Queue::put(msgs[id], msg); + } + +function pop_msg(msgs: PendingMessages, id: count): Info + { + local rval: Info = Queue::get(msgs[id]); + + if ( Queue::len(msgs[id]) == 0 ) + delete msgs[id]; + + return rval; + } + +hook set_session(c: connection, msg: dns_msg, is_query: bool) &priority=5 + { + if ( ! c?$dns_state ) + { + local state: State; + c$dns_state = state; + } + + if ( is_query ) + { + if ( c$dns_state?$pending_replies && msg$id in c$dns_state$pending_replies && + Queue::len(c$dns_state$pending_replies[msg$id]) > 0 ) + { + # Match this DNS query w/ what's at head of pending reply queue. + c$dns = pop_msg(c$dns_state$pending_replies, msg$id); + } + else + { + # Create a new DNS session and put it in the query queue so + # we can wait for a matching reply. + c$dns = new_session(c, msg$id); + + if( ! 
c$dns_state?$pending_query ) + c$dns_state$pending_query = c$dns; + else + { + if( !c$dns_state?$pending_queries ) + c$dns_state$pending_queries = table(); + + enqueue_new_msg(c$dns_state$pending_queries, msg$id, c$dns); + } + } + } + else + { + if ( c$dns_state?$pending_query && c$dns_state$pending_query$trans_id == msg$id ) + { + c$dns = c$dns_state$pending_query; + delete c$dns_state$pending_query; + + if ( c$dns_state?$pending_queries ) + { + # Popping off an arbitrary, unpaired query to set as the + # new fastpath is necessary in order to preserve the overall + # queuing order of any pending queries that may share a + # transaction ID. If we didn't fill c$dns_state$pending_query + # back in, then it's possible a new query would jump ahead in + # the queue of some other pending query since + # c$dns_state$pending_query is filled first if available. + + if ( msg$id in c$dns_state$pending_queries && + Queue::len(c$dns_state$pending_queries[msg$id]) > 0 ) + # Prioritize any pending query with matching ID to the one + # that just got paired with a response. + c$dns_state$pending_query = pop_msg(c$dns_state$pending_queries, msg$id); + else + { + # Just pick an arbitrary, unpaired query. + for ( trans_id, q in c$dns_state$pending_queries ) + if ( Queue::len(q) > 0 ) + { + c$dns_state$pending_query = pop_msg(c$dns_state$pending_queries, trans_id); + break; + } + } + } + } + else if ( c$dns_state?$pending_queries && msg$id in c$dns_state$pending_queries && + Queue::len(c$dns_state$pending_queries[msg$id]) > 0 ) + { + # Match this DNS reply w/ what's at head of pending query queue. + c$dns = pop_msg(c$dns_state$pending_queries, msg$id); + } + else + { + # Create a new DNS session and put it in the reply queue so + # we can wait for a matching query. + c$dns = new_session(c, msg$id); + + if( ! c$dns_state?$pending_replies ) + c$dns_state$pending_replies = table(); + + enqueue_new_msg(c$dns_state$pending_replies, msg$id, c$dns); + } + } + + if ( ! is_query ) + { + c$dns$rcode = msg$rcode; + c$dns$rcode_name = base_errors[msg$rcode]; + + if ( ! c$dns?$total_answers ) + c$dns$total_answers = msg$num_answers; + + if ( ! c$dns?$total_replies ) + c$dns$total_replies = msg$num_answers + msg$num_addl + msg$num_auth; + + if ( msg$rcode != 0 && msg$num_queries == 0 ) + c$dns$rejected = T; + } + } + +event dns_message(c: connection, is_orig: bool, msg: dns_msg, len: count) &priority=5 + { + if ( msg$opcode != 0 ) + # Currently only standard queries are tracked. + return; + + hook set_session(c, msg, ! msg$QR); + } + +hook DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) &priority=5 + { + if ( msg$opcode != 0 ) + # Currently only standard queries are tracked. + return; + + if ( ! msg$QR ) + # This is weird: the inquirer must also be providing answers in + # the request, which is not what we want to track. + return; + + if ( ans$answer_type == DNS_ANS ) + { + if ( ! c$dns?$query ) + c$dns$query = ans$query; + + c$dns$AA = msg$AA; + c$dns$RA = msg$RA; + + if ( ! c$dns?$rtt ) + { + c$dns$rtt = network_time() - c$dns$ts; + # This could mean that only a reply was seen since + # we assume there must be some passage of time between + # request and response. + if ( c$dns$rtt == 0secs ) + delete c$dns$rtt; + } + + if ( reply != "" ) + { + if ( ! c$dns?$answers ) + c$dns$answers = vector(); + c$dns$answers += reply; + + if ( ! c$dns?$TTLs ) + c$dns$TTLs = vector(); + c$dns$TTLs += ans$TTL; + } + } + } + +event dns_end(c: connection, msg: dns_msg) &priority=5 + { + if ( ! 
c?$dns ) + return; + + if ( msg$QR ) + c$dns$saw_reply = T; + else + c$dns$saw_query = T; + } + +event dns_end(c: connection, msg: dns_msg) &priority=-5 + { + if ( c?$dns && c$dns$saw_reply && c$dns$saw_query ) + { + Log::write(DNS::LOG, c$dns); + delete c$dns; + } + } + +event dns_request(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count) &priority=5 + { + if ( msg$opcode != 0 ) + # Currently only standard queries are tracked. + return; + + c$dns$RD = msg$RD; + c$dns$TC = msg$TC; + c$dns$qclass = qclass; + c$dns$qclass_name = classes[qclass]; + c$dns$qtype = qtype; + c$dns$qtype_name = query_types[qtype]; + c$dns$Z = msg$Z; + + # Decode netbios name queries + # Note: I'm ignoring the name type for now. Not sure if this should be + # worked into the query/response in some fashion. + if ( c$id$resp_p == 137/udp ) + { + query = decode_netbios_name(query); + if ( c$dns$qtype_name == "SRV" ) + { + # The SRV RFC used the ID used for NetBios Status RRs. + # So if this is NetBios Name Service we name it correctly. + c$dns$qtype_name = "NBSTAT"; + } + } + c$dns$query = query; + } + + +event dns_unknown_reply(c: connection, msg: dns_msg, ans: dns_answer) &priority=5 + { + hook DNS::do_reply(c, msg, ans, fmt("", ans$qtype)); + } + +event dns_A_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priority=5 + { + hook DNS::do_reply(c, msg, ans, fmt("%s", a)); + } + +event dns_TXT_reply(c: connection, msg: dns_msg, ans: dns_answer, strs: string_vec) &priority=5 + { + local txt_strings: string = ""; + + for ( i in strs ) + { + if ( i > 0 ) + txt_strings += " "; + + txt_strings += fmt("TXT %d %s", |strs[i]|, strs[i]); + } + + hook DNS::do_reply(c, msg, ans, txt_strings); + } + +event dns_SPF_reply(c: connection, msg: dns_msg, ans: dns_answer, strs: string_vec) &priority=5 + { + local spf_strings: string = ""; + + for ( i in strs ) + { + if ( i > 0 ) + spf_strings += " "; + + spf_strings += fmt("SPF %d %s", |strs[i]|, strs[i]); + } + + hook DNS::do_reply(c, msg, ans, spf_strings); + } + +event dns_AAAA_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priority=5 + { + hook DNS::do_reply(c, msg, ans, fmt("%s", a)); + } + +event dns_A6_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priority=5 + { + hook DNS::do_reply(c, msg, ans, fmt("%s", a)); + } + +event dns_NS_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string) &priority=5 + { + hook DNS::do_reply(c, msg, ans, name); + } + +event dns_CNAME_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string) &priority=5 + { + hook DNS::do_reply(c, msg, ans, name); + } + +event dns_MX_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string, + preference: count) &priority=5 + { + hook DNS::do_reply(c, msg, ans, name); + } + +event dns_PTR_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string) &priority=5 + { + hook DNS::do_reply(c, msg, ans, name); + } + +event dns_SOA_reply(c: connection, msg: dns_msg, ans: dns_answer, soa: dns_soa) &priority=5 + { + hook DNS::do_reply(c, msg, ans, soa$mname); + } + +event dns_WKS_reply(c: connection, msg: dns_msg, ans: dns_answer) &priority=5 + { + hook DNS::do_reply(c, msg, ans, ""); + } + +event dns_SRV_reply(c: connection, msg: dns_msg, ans: dns_answer, target: string, priority: count, weight: count, p: count) &priority=5 + { + hook DNS::do_reply(c, msg, ans, target); + } + +# TODO: figure out how to handle these +#event dns_EDNS(c: connection, msg: dns_msg, ans: dns_answer) +# { +# +# } +# +#event dns_EDNS_addl(c: connection, 
msg: dns_msg, ans: dns_edns_additional) +# { +# +# } +# +#event dns_TSIG_addl(c: connection, msg: dns_msg, ans: dns_tsig_additional) +# { +# +# } + +event dns_RRSIG(c: connection, msg: dns_msg, ans: dns_answer, rrsig: dns_rrsig_rr) &priority=5 + { + local s: string; + s = fmt("RRSIG %s %s", rrsig$type_covered, + rrsig$signer_name == "" ? "" : rrsig$signer_name); + hook DNS::do_reply(c, msg, ans, s); + } + +event dns_DNSKEY(c: connection, msg: dns_msg, ans: dns_answer, dnskey: dns_dnskey_rr) &priority=5 + { + local s: string; + s = fmt("DNSKEY %s", dnskey$algorithm); + hook DNS::do_reply(c, msg, ans, s); + } + +event dns_NSEC(c: connection, msg: dns_msg, ans: dns_answer, next_name: string, bitmaps: string_vec) &priority=5 + { + hook DNS::do_reply(c, msg, ans, fmt("NSEC %s %s", ans$query, next_name)); + } + +event dns_NSEC3(c: connection, msg: dns_msg, ans: dns_answer, nsec3: dns_nsec3_rr) &priority=5 + { + hook DNS::do_reply(c, msg, ans, "NSEC3"); + } + +event dns_DS(c: connection, msg: dns_msg, ans: dns_answer, ds: dns_ds_rr) &priority=5 + { + local s: string; + s = fmt("DS %s %s", ds$algorithm, ds$digest_type); + hook DNS::do_reply(c, msg, ans, s); + } + +event dns_rejected(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count) &priority=5 + { + if ( c?$dns ) + c$dns$rejected = T; + } + +event connection_state_remove(c: connection) &priority=-5 + { + if ( ! c?$dns_state ) + return; + + # If Zeek is expiring state, we should go ahead and log all unmatched + # queries and replies now. + if( c$dns_state?$pending_query ) + Log::write(DNS::LOG, c$dns_state$pending_query); + + if( c$dns_state?$pending_queries ) + log_unmatched_msgs(c$dns_state$pending_queries); + + if( c$dns_state?$pending_replies ) + log_unmatched_msgs(c$dns_state$pending_replies); + } diff --git a/scripts/base/protocols/ftp/__load__.bro b/scripts/base/protocols/ftp/__load__.zeek similarity index 100% rename from scripts/base/protocols/ftp/__load__.bro rename to scripts/base/protocols/ftp/__load__.zeek diff --git a/scripts/base/protocols/ftp/files.bro b/scripts/base/protocols/ftp/files.bro deleted file mode 100644 index e84eda7a5a..0000000000 --- a/scripts/base/protocols/ftp/files.bro +++ /dev/null @@ -1,75 +0,0 @@ -@load ./info -@load ./main -@load ./utils -@load base/utils/conn-ids -@load base/frameworks/files - -module FTP; - -export { - redef record Info += { - ## File unique ID. - fuid: string &optional &log; - }; - - ## Default file handle provider for FTP. - global get_file_handle: function(c: connection, is_orig: bool): string; - - ## Describe the file being transferred. - global describe_file: function(f: fa_file): string; - - redef record fa_file += { - ftp: FTP::Info &optional; - }; -} - -function get_file_handle(c: connection, is_orig: bool): string - { - if ( [c$id$resp_h, c$id$resp_p] !in ftp_data_expected ) - return ""; - - return cat(Analyzer::ANALYZER_FTP_DATA, c$start_time, c$id, is_orig); - } - -function describe_file(f: fa_file): string - { - # This shouldn't be needed, but just in case... 
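# Illustrative sketch (hypothetical standalone script): with describe_file
# registered below via Files::register_protocol, Files::describe() returns
# the FTP-specific summary for files carried over ftp-data connections.
@load base/protocols/ftp
@load base/frameworks/files

event file_state_remove(f: fa_file)
	{
	if ( f$source == "FTP" )
		print fmt("finished FTP file transfer: %s", Files::describe(f));
	}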
- if ( f$source != "FTP" ) - return ""; - - for ( cid, c in f$conns ) - { - if ( c?$ftp ) - return FTP::describe(c$ftp); - } - return ""; - } - -event bro_init() &priority=5 - { - Files::register_protocol(Analyzer::ANALYZER_FTP_DATA, - [$get_file_handle = FTP::get_file_handle, - $describe = FTP::describe_file]); - } - -event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5 - { - if ( [c$id$resp_h, c$id$resp_p] !in ftp_data_expected ) - return; - - local ftp = ftp_data_expected[c$id$resp_h, c$id$resp_p]; - ftp$fuid = f$id; - - f$ftp = ftp; - } - -event file_sniff(f: fa_file, meta: fa_metadata) &priority=5 - { - if ( ! f?$ftp ) - return; - - if ( ! meta?$mime_type ) - return; - - f$ftp$mime_type = meta$mime_type; - } diff --git a/scripts/base/protocols/ftp/files.zeek b/scripts/base/protocols/ftp/files.zeek new file mode 100644 index 0000000000..f2c2625bdb --- /dev/null +++ b/scripts/base/protocols/ftp/files.zeek @@ -0,0 +1,75 @@ +@load ./info +@load ./main +@load ./utils +@load base/utils/conn-ids +@load base/frameworks/files + +module FTP; + +export { + redef record Info += { + ## File unique ID. + fuid: string &optional &log; + }; + + ## Default file handle provider for FTP. + global get_file_handle: function(c: connection, is_orig: bool): string; + + ## Describe the file being transferred. + global describe_file: function(f: fa_file): string; + + redef record fa_file += { + ftp: FTP::Info &optional; + }; +} + +function get_file_handle(c: connection, is_orig: bool): string + { + if ( [c$id$resp_h, c$id$resp_p] !in ftp_data_expected ) + return ""; + + return cat(Analyzer::ANALYZER_FTP_DATA, c$start_time, c$id, is_orig); + } + +function describe_file(f: fa_file): string + { + # This shouldn't be needed, but just in case... + if ( f$source != "FTP" ) + return ""; + + for ( cid, c in f$conns ) + { + if ( c?$ftp ) + return FTP::describe(c$ftp); + } + return ""; + } + +event zeek_init() &priority=5 + { + Files::register_protocol(Analyzer::ANALYZER_FTP_DATA, + [$get_file_handle = FTP::get_file_handle, + $describe = FTP::describe_file]); + } + +event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5 + { + if ( [c$id$resp_h, c$id$resp_p] !in ftp_data_expected ) + return; + + local ftp = ftp_data_expected[c$id$resp_h, c$id$resp_p]; + ftp$fuid = f$id; + + f$ftp = ftp; + } + +event file_sniff(f: fa_file, meta: fa_metadata) &priority=5 + { + if ( ! f?$ftp ) + return; + + if ( ! meta?$mime_type ) + return; + + f$ftp$mime_type = meta$mime_type; + } diff --git a/scripts/base/protocols/ftp/gridftp.bro b/scripts/base/protocols/ftp/gridftp.bro deleted file mode 100644 index cdbe354a08..0000000000 --- a/scripts/base/protocols/ftp/gridftp.bro +++ /dev/null @@ -1,124 +0,0 @@ -##! A detection script for GridFTP data and control channels. -##! -##! GridFTP control channels are identified by FTP control channels -##! that successfully negotiate the GSSAPI method of an AUTH request -##! and for which the exchange involved an encoded TLS/SSL handshake, -##! indicating the GSI mechanism for GSSAPI was used. This analysis -##! is all supported internally, this script simply adds the "gridftp" -##! label to the *service* field of the control channel's -##! :bro:type:`connection` record. -##! -##! GridFTP data channels are identified by a heuristic that relies on -##! the fact that default settings for GridFTP clients typically -##! mutually authenticate the data channel with TLS/SSL and negotiate a -##! NULL bulk cipher (no encryption). 
Connections with those attributes -##! are marked as GridFTP if the data transfer within the first two minutes -##! is big enough to indicate a GripFTP data channel that would be -##! undesirable to analyze further (e.g. stop TCP reassembly). A side -##! effect is that true connection sizes are not logged, but at the benefit -##! of saving CPU cycles that would otherwise go to analyzing the large -##! (and likely benign) connections. - -@load ./info -@load ./main -@load base/protocols/conn -@load base/protocols/ssl -@load base/frameworks/notice - -module GridFTP; - -export { - ## Number of bytes transferred before guessing a connection is a - ## GridFTP data channel. - option size_threshold = 1073741824; - - ## Time during which we check whether a connection's size exceeds the - ## :bro:see:`GridFTP::size_threshold`. - option max_time = 2 min; - - ## Whether to skip further processing of the GridFTP data channel once - ## detected, which may help performance. - option skip_data = T; - - ## Raised when a GridFTP data channel is detected. - ## - ## c: The connection pertaining to the GridFTP data channel. - global data_channel_detected: event(c: connection); - - ## The initial criteria used to determine whether to start polling - ## the connection for the :bro:see:`GridFTP::size_threshold` to have - ## been exceeded. This is called in a :bro:see:`ssl_established` event - ## handler and by default looks for both a client and server certificate - ## and for a NULL bulk cipher. One way in which this function could be - ## redefined is to make it also consider client/server certificate - ## issuer subjects. - ## - ## c: The connection which may possibly be a GridFTP data channel. - ## - ## Returns: true if the connection should be further polled for an - ## exceeded :bro:see:`GridFTP::size_threshold`, else false. - const data_channel_initial_criteria: function(c: connection): bool &redef; -} - -redef record FTP::Info += { - last_auth_requested: string &optional; -}; - -event ftp_request(c: connection, command: string, arg: string) &priority=4 - { - if ( command == "AUTH" && c?$ftp ) - c$ftp$last_auth_requested = arg; - } - -event ConnThreshold::bytes_threshold_crossed(c: connection, threshold: count, is_orig: bool) - { - if ( threshold < size_threshold || "gridftp-data" in c$service || c$duration > max_time ) - return; - - if ( ! data_channel_initial_criteria(c) ) - return; - - add c$service["gridftp-data"]; - event GridFTP::data_channel_detected(c); - - if ( skip_data ) - skip_further_processing(c$id); - } - -event gridftp_possibility_timeout(c: connection) - { - # only remove if we did not already detect it and the connection - # is not yet at its end. - if ( "gridftp-data" !in c$service && ! (c?$conn && c$conn?$service) ) - { - ConnThreshold::delete_bytes_threshold(c, size_threshold, T); - ConnThreshold::delete_bytes_threshold(c, size_threshold, F); - } - } - -event ssl_established(c: connection) &priority=5 - { - # If an FTP client requests AUTH GSSAPI and later an SSL handshake - # finishes, it's likely a GridFTP control channel, so add service label. 
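# Illustrative sketch (hypothetical standalone script): reacting to the
# detection event exported above once a connection crosses
# GridFTP::size_threshold and matches the data-channel criteria.
@load base/protocols/ftp

event GridFTP::data_channel_detected(c: connection)
	{
	print fmt("likely GridFTP data channel: %s -> %s", c$id$orig_h, c$id$resp_h);
	}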
- if ( c?$ftp && c$ftp?$last_auth_requested && - /GSSAPI/ in c$ftp$last_auth_requested ) - add c$service["gridftp"]; - } - -function data_channel_initial_criteria(c: connection): bool - { - return ( c?$ssl && c$ssl?$client_subject && c$ssl?$subject && - c$ssl?$cipher && /WITH_NULL/ in c$ssl$cipher ); - } - -event ssl_established(c: connection) &priority=-3 - { - # By default GridFTP data channels do mutual authentication and - # negotiate a cipher suite with a NULL bulk cipher. - if ( data_channel_initial_criteria(c) ) - { - ConnThreshold::set_bytes_threshold(c, size_threshold, T); - ConnThreshold::set_bytes_threshold(c, size_threshold, F); - schedule max_time { gridftp_possibility_timeout(c) }; - } - } diff --git a/scripts/base/protocols/ftp/gridftp.zeek b/scripts/base/protocols/ftp/gridftp.zeek new file mode 100644 index 0000000000..ef6965d3ca --- /dev/null +++ b/scripts/base/protocols/ftp/gridftp.zeek @@ -0,0 +1,124 @@ +##! A detection script for GridFTP data and control channels. +##! +##! GridFTP control channels are identified by FTP control channels +##! that successfully negotiate the GSSAPI method of an AUTH request +##! and for which the exchange involved an encoded TLS/SSL handshake, +##! indicating the GSI mechanism for GSSAPI was used. This analysis +##! is all supported internally, this script simply adds the "gridftp" +##! label to the *service* field of the control channel's +##! :zeek:type:`connection` record. +##! +##! GridFTP data channels are identified by a heuristic that relies on +##! the fact that default settings for GridFTP clients typically +##! mutually authenticate the data channel with TLS/SSL and negotiate a +##! NULL bulk cipher (no encryption). Connections with those attributes +##! are marked as GridFTP if the data transfer within the first two minutes +##! is big enough to indicate a GripFTP data channel that would be +##! undesirable to analyze further (e.g. stop TCP reassembly). A side +##! effect is that true connection sizes are not logged, but at the benefit +##! of saving CPU cycles that would otherwise go to analyzing the large +##! (and likely benign) connections. + +@load ./info +@load ./main +@load base/protocols/conn +@load base/protocols/ssl +@load base/frameworks/notice + +module GridFTP; + +export { + ## Number of bytes transferred before guessing a connection is a + ## GridFTP data channel. + option size_threshold = 1073741824; + + ## Time during which we check whether a connection's size exceeds the + ## :zeek:see:`GridFTP::size_threshold`. + option max_time = 2 min; + + ## Whether to skip further processing of the GridFTP data channel once + ## detected, which may help performance. + option skip_data = T; + + ## Raised when a GridFTP data channel is detected. + ## + ## c: The connection pertaining to the GridFTP data channel. + global data_channel_detected: event(c: connection); + + ## The initial criteria used to determine whether to start polling + ## the connection for the :zeek:see:`GridFTP::size_threshold` to have + ## been exceeded. This is called in a :zeek:see:`ssl_established` event + ## handler and by default looks for both a client and server certificate + ## and for a NULL bulk cipher. One way in which this function could be + ## redefined is to make it also consider client/server certificate + ## issuer subjects. + ## + ## c: The connection which may possibly be a GridFTP data channel. + ## + ## Returns: true if the connection should be further polled for an + ## exceeded :zeek:see:`GridFTP::size_threshold`, else false. 
+ const data_channel_initial_criteria: function(c: connection): bool &redef; +} + +redef record FTP::Info += { + last_auth_requested: string &optional; +}; + +event ftp_request(c: connection, command: string, arg: string) &priority=4 + { + if ( command == "AUTH" && c?$ftp ) + c$ftp$last_auth_requested = arg; + } + +event ConnThreshold::bytes_threshold_crossed(c: connection, threshold: count, is_orig: bool) + { + if ( threshold < size_threshold || "gridftp-data" in c$service || c$duration > max_time ) + return; + + if ( ! data_channel_initial_criteria(c) ) + return; + + add c$service["gridftp-data"]; + event GridFTP::data_channel_detected(c); + + if ( skip_data ) + skip_further_processing(c$id); + } + +event gridftp_possibility_timeout(c: connection) + { + # only remove if we did not already detect it and the connection + # is not yet at its end. + if ( "gridftp-data" !in c$service && ! (c?$conn && c$conn?$service) ) + { + ConnThreshold::delete_bytes_threshold(c, size_threshold, T); + ConnThreshold::delete_bytes_threshold(c, size_threshold, F); + } + } + +event ssl_established(c: connection) &priority=5 + { + # If an FTP client requests AUTH GSSAPI and later an SSL handshake + # finishes, it's likely a GridFTP control channel, so add service label. + if ( c?$ftp && c$ftp?$last_auth_requested && + /GSSAPI/ in c$ftp$last_auth_requested ) + add c$service["gridftp"]; + } + +function data_channel_initial_criteria(c: connection): bool + { + return ( c?$ssl && c$ssl?$client_subject && c$ssl?$subject && + c$ssl?$cipher && /WITH_NULL/ in c$ssl$cipher ); + } + +event ssl_established(c: connection) &priority=-3 + { + # By default GridFTP data channels do mutual authentication and + # negotiate a cipher suite with a NULL bulk cipher. + if ( data_channel_initial_criteria(c) ) + { + ConnThreshold::set_bytes_threshold(c, size_threshold, T); + ConnThreshold::set_bytes_threshold(c, size_threshold, F); + schedule max_time { gridftp_possibility_timeout(c) }; + } + } diff --git a/scripts/base/protocols/ftp/info.bro b/scripts/base/protocols/ftp/info.zeek similarity index 100% rename from scripts/base/protocols/ftp/info.bro rename to scripts/base/protocols/ftp/info.zeek diff --git a/scripts/base/protocols/ftp/main.bro b/scripts/base/protocols/ftp/main.bro deleted file mode 100644 index 9b64345a12..0000000000 --- a/scripts/base/protocols/ftp/main.bro +++ /dev/null @@ -1,303 +0,0 @@ -##! The logging this script does is primarily focused on logging FTP commands -##! along with metadata. For example, if files are transferred, the argument -##! will take on the full path that the client is at along with the requested -##! file name. - -@load ./info -@load ./utils -@load ./utils-commands -@load base/utils/paths -@load base/utils/numbers -@load base/utils/addrs - -module FTP; - -export { - ## The FTP protocol logging stream identifier. - redef enum Log::ID += { LOG }; - - ## List of commands that should have their command/response pairs logged. - option logged_commands = { - "APPE", "DELE", "RETR", "STOR", "STOU", "ACCT", "PORT", "PASV", "EPRT", - "EPSV" - }; - - ## User IDs that can be considered "anonymous". - option guest_ids = { "anonymous", "ftp", "ftpuser", "guest" }; - - ## This record is to hold a parsed FTP reply code. For example, for the - ## 201 status code, the digits would be parsed as: x->2, y->0, z->1. - type ReplyCode: record { - x: count; - y: count; - z: count; - }; - - ## Parse FTP reply codes into the three constituent single digit values. 
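# Illustrative sketch (hypothetical standalone script): the parser declared
# just below peels the digits off with modulo and integer division, e.g.
# reply code 213 yields x=2, y=1, z=3.
@load base/protocols/ftp

event zeek_init()
	{
	local rc = FTP::parse_ftp_reply_code(213);
	print fmt("x=%d y=%d z=%d", rc$x, rc$y, rc$z);  # x=2 y=1 z=3
	}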
- global parse_ftp_reply_code: function(code: count): ReplyCode; - - ## Event that can be handled to access the :bro:type:`FTP::Info` - ## record as it is sent on to the logging framework. - global log_ftp: event(rec: Info); -} - -# Add the state tracking information variable to the connection record -redef record connection += { - ftp: Info &optional; - ftp_data_reuse: bool &default=F; -}; - -const ports = { 21/tcp, 2811/tcp }; -redef likely_server_ports += { ports }; - -event bro_init() &priority=5 - { - Log::create_stream(FTP::LOG, [$columns=Info, $ev=log_ftp, $path="ftp"]); - Analyzer::register_for_ports(Analyzer::ANALYZER_FTP, ports); - } - -# Establish the variable for tracking expected connections. -global ftp_data_expected: table[addr, port] of Info &read_expire=5mins; - -## A set of commands where the argument can be expected to refer -## to a file or directory. -const file_cmds = { - "APPE", "CWD", "DELE", "MKD", "RETR", "RMD", "RNFR", "RNTO", - "STOR", "STOU", "REST", "SIZE", "MDTM", -}; - -## Commands that either display or change the current working directory along -## with the response codes to indicate a successful command. -const directory_cmds = { - ["CWD", 250], - ["CDUP", 200], # typo in RFC? - ["CDUP", 250], # as found in traces - ["PWD", 257], - ["XPWD", 257], -}; - -function parse_ftp_reply_code(code: count): ReplyCode - { - local a: ReplyCode; - - a$z = code % 10; - - code = code / 10; - a$y = code % 10; - - code = code / 10; - a$x = code % 10; - - return a; - } - -function set_ftp_session(c: connection) - { - if ( ! c?$ftp ) - { - local s: Info; - s$ts=network_time(); - s$uid=c$uid; - s$id=c$id; - c$ftp=s; - - # Add a shim command so the server can respond with some init response. - add_pending_cmd(c$ftp$pending_commands, "", ""); - } - } - -function ftp_message(s: Info) - { - s$ts=s$cmdarg$ts; - s$command=s$cmdarg$cmd; - - s$arg = s$cmdarg$arg; - if ( s$cmdarg$cmd in file_cmds ) - s$arg = build_url_ftp(s); - - if ( s$arg == "" ) - delete s$arg; - - if ( s?$password && - ! s$capture_password && - to_lower(s$user) !in guest_ids ) - { - s$password = ""; - } - - if ( s?$cmdarg && s$command in logged_commands) - Log::write(FTP::LOG, s); - - # The MIME and file_size fields are specific to file transfer commands - # and may not be used in all commands so they need reset to "blank" - # values after logging. - delete s$mime_type; - delete s$file_size; - # Same with data channel. - delete s$data_channel; - } - -function add_expected_data_channel(s: Info, chan: ExpectedDataChannel) - { - s$passive = chan$passive; - s$data_channel = chan; - ftp_data_expected[chan$resp_h, chan$resp_p] = s; - Analyzer::schedule_analyzer(chan$orig_h, chan$resp_h, chan$resp_p, - Analyzer::ANALYZER_FTP_DATA, - 5mins); - } - -event ftp_request(c: connection, command: string, arg: string) &priority=5 - { - # Write out the previous command when a new command is seen. - # The downside here is that commands definitely aren't logged until the - # next command is issued or the control session ends. In practicality - # this isn't an issue, but I suppose it could be a delay tactic for - # attackers. 
- if ( c?$ftp && c$ftp?$cmdarg && c$ftp?$reply_code ) - { - remove_pending_cmd(c$ftp$pending_commands, c$ftp$cmdarg); - ftp_message(c$ftp); - } - - local id = c$id; - set_ftp_session(c); - - # Queue up the new command and argument - add_pending_cmd(c$ftp$pending_commands, command, arg); - - if ( command == "USER" ) - c$ftp$user = arg; - - else if ( command == "PASS" ) - c$ftp$password = arg; - - else if ( command == "PORT" || command == "EPRT" ) - { - local data = (command == "PORT") ? - parse_ftp_port(arg) : parse_eftp_port(arg); - - if ( data$valid ) - { - add_expected_data_channel(c$ftp, [$passive=F, $orig_h=id$resp_h, - $resp_h=data$h, $resp_p=data$p]); - } - else - { - # TODO: raise a notice? does anyone care? - } - } - } - - -event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool) &priority=5 - { - set_ftp_session(c); - c$ftp$cmdarg = get_pending_cmd(c$ftp$pending_commands, code, msg); - c$ftp$reply_code = code; - c$ftp$reply_msg = msg; - - # TODO: figure out what to do with continued FTP response (not used much) - if ( cont_resp ) return; - - # TODO: do some sort of generic clear text login processing here. - local response_xyz = parse_ftp_reply_code(code); - #if ( response_xyz$x == 2 && # successful - # session$cmdarg$cmd == "PASS" ) - # do_ftp_login(c, session); - - if ( (code == 150 && c$ftp$cmdarg$cmd == "RETR") || - (code == 213 && c$ftp$cmdarg$cmd == "SIZE") ) - { - # NOTE: This isn't exactly the right thing to do for SIZE since the size - # on a different file could be checked, but the file size will - # be overwritten by the server response to the RETR command - # if that's given as well which would be more correct. - c$ftp$file_size = extract_count(msg, F); - } - - # PASV and EPSV processing - else if ( (code == 227 || code == 229) && - (c$ftp$cmdarg$cmd == "PASV" || c$ftp$cmdarg$cmd == "EPSV") ) - { - local data = (code == 227) ? parse_ftp_pasv(msg) : parse_ftp_epsv(msg); - - if ( data$valid ) - { - c$ftp$passive=T; - - if ( code == 229 && data$h == [::] ) - data$h = c$id$resp_h; - - add_expected_data_channel(c$ftp, [$passive=T, $orig_h=c$id$orig_h, - $resp_h=data$h, $resp_p=data$p]); - } - else - { - # TODO: do something if there was a problem parsing the PASV message? - } - } - - if ( [c$ftp$cmdarg$cmd, code] in directory_cmds ) - { - if ( c$ftp$cmdarg$cmd == "CWD" ) - c$ftp$cwd = build_path_compressed(c$ftp$cwd, c$ftp$cmdarg$arg); - - else if ( c$ftp$cmdarg$cmd == "CDUP" ) - c$ftp$cwd = build_path_compressed(c$ftp$cwd, "/.."); - - else if ( c$ftp$cmdarg$cmd == "PWD" || c$ftp$cmdarg$cmd == "XPWD" ) - c$ftp$cwd = extract_path(msg); - } - - # In case there are multiple commands queued, go ahead and remove the - # command here and log because we can't do the normal processing pipeline - # to wait for a new command before logging the command/response pair. 
- if ( |c$ftp$pending_commands| > 1 ) - { - remove_pending_cmd(c$ftp$pending_commands, c$ftp$cmdarg); - ftp_message(c$ftp); - } - } - -event scheduled_analyzer_applied(c: connection, a: Analyzer::Tag) &priority=10 - { - local id = c$id; - if ( [id$resp_h, id$resp_p] in ftp_data_expected ) - add c$service["ftp-data"]; - } - -event file_transferred(c: connection, prefix: string, descr: string, - mime_type: string) &priority=5 - { - local id = c$id; - if ( [id$resp_h, id$resp_p] in ftp_data_expected ) - { - local s = ftp_data_expected[id$resp_h, id$resp_p]; - s$mime_type = split_string1(mime_type, /;/)[0]; - } - } - -event connection_reused(c: connection) &priority=5 - { - if ( "ftp-data" in c$service ) - c$ftp_data_reuse = T; - } - -event connection_state_remove(c: connection) &priority=-5 - { - if ( c$ftp_data_reuse ) return; - delete ftp_data_expected[c$id$resp_h, c$id$resp_p]; - } - -# Use state remove event to cover connections terminated by RST. -event connection_state_remove(c: connection) &priority=-5 - { - if ( ! c?$ftp ) return; - - for ( ca, cmdarg in c$ftp$pending_commands ) - { - c$ftp$cmdarg = cmdarg; - ftp_message(c$ftp); - } - } diff --git a/scripts/base/protocols/ftp/main.zeek b/scripts/base/protocols/ftp/main.zeek new file mode 100644 index 0000000000..1c2dce17f8 --- /dev/null +++ b/scripts/base/protocols/ftp/main.zeek @@ -0,0 +1,303 @@ +##! The logging this script does is primarily focused on logging FTP commands +##! along with metadata. For example, if files are transferred, the argument +##! will take on the full path that the client is at along with the requested +##! file name. + +@load ./info +@load ./utils +@load ./utils-commands +@load base/utils/paths +@load base/utils/numbers +@load base/utils/addrs + +module FTP; + +export { + ## The FTP protocol logging stream identifier. + redef enum Log::ID += { LOG }; + + ## List of commands that should have their command/response pairs logged. + option logged_commands = { + "APPE", "DELE", "RETR", "STOR", "STOU", "ACCT", "PORT", "PASV", "EPRT", + "EPSV" + }; + + ## User IDs that can be considered "anonymous". + option guest_ids = { "anonymous", "ftp", "ftpuser", "guest" }; + + ## This record is to hold a parsed FTP reply code. For example, for the + ## 201 status code, the digits would be parsed as: x->2, y->0, z->1. + type ReplyCode: record { + x: count; + y: count; + z: count; + }; + + ## Parse FTP reply codes into the three constituent single digit values. + global parse_ftp_reply_code: function(code: count): ReplyCode; + + ## Event that can be handled to access the :zeek:type:`FTP::Info` + ## record as it is sent on to the logging framework. + global log_ftp: event(rec: Info); +} + +# Add the state tracking information variable to the connection record +redef record connection += { + ftp: Info &optional; + ftp_data_reuse: bool &default=F; +}; + +const ports = { 21/tcp, 2811/tcp }; +redef likely_server_ports += { ports }; + +event zeek_init() &priority=5 + { + Log::create_stream(FTP::LOG, [$columns=Info, $ev=log_ftp, $path="ftp"]); + Analyzer::register_for_ports(Analyzer::ANALYZER_FTP, ports); + } + +# Establish the variable for tracking expected connections. +global ftp_data_expected: table[addr, port] of Info &read_expire=5mins; + +## A set of commands where the argument can be expected to refer +## to a file or directory. 
+const file_cmds = { + "APPE", "CWD", "DELE", "MKD", "RETR", "RMD", "RNFR", "RNTO", + "STOR", "STOU", "REST", "SIZE", "MDTM", +}; + +## Commands that either display or change the current working directory along +## with the response codes to indicate a successful command. +const directory_cmds = { + ["CWD", 250], + ["CDUP", 200], # typo in RFC? + ["CDUP", 250], # as found in traces + ["PWD", 257], + ["XPWD", 257], +}; + +function parse_ftp_reply_code(code: count): ReplyCode + { + local a: ReplyCode; + + a$z = code % 10; + + code = code / 10; + a$y = code % 10; + + code = code / 10; + a$x = code % 10; + + return a; + } + +function set_ftp_session(c: connection) + { + if ( ! c?$ftp ) + { + local s: Info; + s$ts=network_time(); + s$uid=c$uid; + s$id=c$id; + c$ftp=s; + + # Add a shim command so the server can respond with some init response. + add_pending_cmd(c$ftp$pending_commands, "", ""); + } + } + +function ftp_message(s: Info) + { + s$ts=s$cmdarg$ts; + s$command=s$cmdarg$cmd; + + s$arg = s$cmdarg$arg; + if ( s$cmdarg$cmd in file_cmds ) + s$arg = build_url_ftp(s); + + if ( s$arg == "" ) + delete s$arg; + + if ( s?$password && + ! s$capture_password && + to_lower(s$user) !in guest_ids ) + { + s$password = ""; + } + + if ( s?$cmdarg && s$command in logged_commands) + Log::write(FTP::LOG, s); + + # The MIME and file_size fields are specific to file transfer commands + # and may not be used in all commands so they need reset to "blank" + # values after logging. + delete s$mime_type; + delete s$file_size; + # Same with data channel. + delete s$data_channel; + } + +function add_expected_data_channel(s: Info, chan: ExpectedDataChannel) + { + s$passive = chan$passive; + s$data_channel = chan; + ftp_data_expected[chan$resp_h, chan$resp_p] = s; + Analyzer::schedule_analyzer(chan$orig_h, chan$resp_h, chan$resp_p, + Analyzer::ANALYZER_FTP_DATA, + 5mins); + } + +event ftp_request(c: connection, command: string, arg: string) &priority=5 + { + # Write out the previous command when a new command is seen. + # The downside here is that commands definitely aren't logged until the + # next command is issued or the control session ends. In practicality + # this isn't an issue, but I suppose it could be a delay tactic for + # attackers. + if ( c?$ftp && c$ftp?$cmdarg && c$ftp?$reply_code ) + { + remove_pending_cmd(c$ftp$pending_commands, c$ftp$cmdarg); + ftp_message(c$ftp); + } + + local id = c$id; + set_ftp_session(c); + + # Queue up the new command and argument + add_pending_cmd(c$ftp$pending_commands, command, arg); + + if ( command == "USER" ) + c$ftp$user = arg; + + else if ( command == "PASS" ) + c$ftp$password = arg; + + else if ( command == "PORT" || command == "EPRT" ) + { + local data = (command == "PORT") ? + parse_ftp_port(arg) : parse_eftp_port(arg); + + if ( data$valid ) + { + add_expected_data_channel(c$ftp, [$passive=F, $orig_h=id$resp_h, + $resp_h=data$h, $resp_p=data$p]); + } + else + { + # TODO: raise a notice? does anyone care? + } + } + } + + +event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool) &priority=5 + { + set_ftp_session(c); + c$ftp$cmdarg = get_pending_cmd(c$ftp$pending_commands, code, msg); + c$ftp$reply_code = code; + c$ftp$reply_msg = msg; + + # TODO: figure out what to do with continued FTP response (not used much) + if ( cont_resp ) return; + + # TODO: do some sort of generic clear text login processing here. 
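As a quick illustration of the reply-code handling above (not part of the change itself): FTP::parse_ftp_reply_code() splits a three-digit code into its constituent digits, so 250 comes back as x=2, y=5, z=0. A minimal standalone sketch, assuming base/protocols/ftp is loaded:

    @load base/protocols/ftp

    event zeek_init()
        {
        local rc = FTP::parse_ftp_reply_code(250);
        print rc$x, rc$y, rc$z;   # prints: 2, 5, 0
        }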
+ local response_xyz = parse_ftp_reply_code(code); + #if ( response_xyz$x == 2 && # successful + # session$cmdarg$cmd == "PASS" ) + # do_ftp_login(c, session); + + if ( (code == 150 && c$ftp$cmdarg$cmd == "RETR") || + (code == 213 && c$ftp$cmdarg$cmd == "SIZE") ) + { + # NOTE: This isn't exactly the right thing to do for SIZE since the size + # on a different file could be checked, but the file size will + # be overwritten by the server response to the RETR command + # if that's given as well which would be more correct. + c$ftp$file_size = extract_count(msg, F); + } + + # PASV and EPSV processing + else if ( (code == 227 || code == 229) && + (c$ftp$cmdarg$cmd == "PASV" || c$ftp$cmdarg$cmd == "EPSV") ) + { + local data = (code == 227) ? parse_ftp_pasv(msg) : parse_ftp_epsv(msg); + + if ( data$valid ) + { + c$ftp$passive=T; + + if ( code == 229 && data$h == [::] ) + data$h = c$id$resp_h; + + add_expected_data_channel(c$ftp, [$passive=T, $orig_h=c$id$orig_h, + $resp_h=data$h, $resp_p=data$p]); + } + else + { + # TODO: do something if there was a problem parsing the PASV message? + } + } + + if ( [c$ftp$cmdarg$cmd, code] in directory_cmds ) + { + if ( c$ftp$cmdarg$cmd == "CWD" ) + c$ftp$cwd = build_path_compressed(c$ftp$cwd, c$ftp$cmdarg$arg); + + else if ( c$ftp$cmdarg$cmd == "CDUP" ) + c$ftp$cwd = build_path_compressed(c$ftp$cwd, "/.."); + + else if ( c$ftp$cmdarg$cmd == "PWD" || c$ftp$cmdarg$cmd == "XPWD" ) + c$ftp$cwd = extract_path(msg); + } + + # In case there are multiple commands queued, go ahead and remove the + # command here and log because we can't do the normal processing pipeline + # to wait for a new command before logging the command/response pair. + if ( |c$ftp$pending_commands| > 1 ) + { + remove_pending_cmd(c$ftp$pending_commands, c$ftp$cmdarg); + ftp_message(c$ftp); + } + } + +event scheduled_analyzer_applied(c: connection, a: Analyzer::Tag) &priority=10 + { + local id = c$id; + if ( [id$resp_h, id$resp_p] in ftp_data_expected ) + add c$service["ftp-data"]; + } + +event file_transferred(c: connection, prefix: string, descr: string, + mime_type: string) &priority=5 + { + local id = c$id; + if ( [id$resp_h, id$resp_p] in ftp_data_expected ) + { + local s = ftp_data_expected[id$resp_h, id$resp_p]; + s$mime_type = split_string1(mime_type, /;/)[0]; + } + } + +event connection_reused(c: connection) &priority=5 + { + if ( "ftp-data" in c$service ) + c$ftp_data_reuse = T; + } + +event connection_state_remove(c: connection) &priority=-5 + { + if ( c$ftp_data_reuse ) return; + delete ftp_data_expected[c$id$resp_h, c$id$resp_p]; + } + +# Use state remove event to cover connections terminated by RST. +event connection_state_remove(c: connection) &priority=-5 + { + if ( ! c?$ftp ) return; + + for ( ca, cmdarg in c$ftp$pending_commands ) + { + c$ftp$cmdarg = cmdarg; + ftp_message(c$ftp); + } + } diff --git a/scripts/base/protocols/ftp/utils-commands.bro b/scripts/base/protocols/ftp/utils-commands.zeek similarity index 100% rename from scripts/base/protocols/ftp/utils-commands.bro rename to scripts/base/protocols/ftp/utils-commands.zeek diff --git a/scripts/base/protocols/ftp/utils.bro b/scripts/base/protocols/ftp/utils.bro deleted file mode 100644 index 74aeaa1e03..0000000000 --- a/scripts/base/protocols/ftp/utils.bro +++ /dev/null @@ -1,48 +0,0 @@ -##! Utilities specific for FTP processing. - -@load ./info -@load base/utils/addrs -@load base/utils/paths - -module FTP; - -export { - ## Creates a URL from an :bro:type:`FTP::Info` record. - ## - ## rec: An :bro:type:`FTP::Info` record. 
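The CWD/CDUP handling above relies on build_path_compressed() from base/utils/paths to keep c$ftp$cwd in a normalized form. A small sketch of that helper in isolation; the commented results are the expected outputs, shown for illustration:

    @load base/utils/paths

    event zeek_init()
        {
        # CWD into a subdirectory, then CDUP back out (same call shapes as above).
        local cwd = "/pub/files";
        cwd = build_path_compressed(cwd, "incoming");   # expected: /pub/files/incoming
        print cwd;
        cwd = build_path_compressed(cwd, "/..");        # expected: /pub/files
        print cwd;
        }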
- ## - ## Returns: A URL, not prefixed by ``"ftp://"``. - global build_url: function(rec: Info): string; - - ## Creates a URL from an :bro:type:`FTP::Info` record. - ## - ## rec: An :bro:type:`FTP::Info` record. - ## - ## Returns: A URL prefixed with ``"ftp://"``. - global build_url_ftp: function(rec: Info): string; - - ## Create an extremely shortened representation of a log line. - global describe: function(rec: Info): string; -} - -function build_url(rec: Info): string - { - if ( !rec?$arg ) - return ""; - - local comp_path = build_path_compressed(rec$cwd, rec$arg); - if ( comp_path[0] != "/" ) - comp_path = cat("/", comp_path); - - return fmt("%s%s", addr_to_uri(rec$id$resp_h), comp_path); - } - -function build_url_ftp(rec: Info): string - { - return fmt("ftp://%s", build_url(rec)); - } - -function describe(rec: Info): string - { - return build_url_ftp(rec); - } diff --git a/scripts/base/protocols/ftp/utils.zeek b/scripts/base/protocols/ftp/utils.zeek new file mode 100644 index 0000000000..44c621b361 --- /dev/null +++ b/scripts/base/protocols/ftp/utils.zeek @@ -0,0 +1,48 @@ +##! Utilities specific for FTP processing. + +@load ./info +@load base/utils/addrs +@load base/utils/paths + +module FTP; + +export { + ## Creates a URL from an :zeek:type:`FTP::Info` record. + ## + ## rec: An :zeek:type:`FTP::Info` record. + ## + ## Returns: A URL, not prefixed by ``"ftp://"``. + global build_url: function(rec: Info): string; + + ## Creates a URL from an :zeek:type:`FTP::Info` record. + ## + ## rec: An :zeek:type:`FTP::Info` record. + ## + ## Returns: A URL prefixed with ``"ftp://"``. + global build_url_ftp: function(rec: Info): string; + + ## Create an extremely shortened representation of a log line. + global describe: function(rec: Info): string; +} + +function build_url(rec: Info): string + { + if ( !rec?$arg ) + return ""; + + local comp_path = build_path_compressed(rec$cwd, rec$arg); + if ( comp_path[0] != "/" ) + comp_path = cat("/", comp_path); + + return fmt("%s%s", addr_to_uri(rec$id$resp_h), comp_path); + } + +function build_url_ftp(rec: Info): string + { + return fmt("ftp://%s", build_url(rec)); + } + +function describe(rec: Info): string + { + return build_url_ftp(rec); + } diff --git a/scripts/base/protocols/http/__load__.bro b/scripts/base/protocols/http/__load__.zeek similarity index 100% rename from scripts/base/protocols/http/__load__.bro rename to scripts/base/protocols/http/__load__.zeek diff --git a/scripts/base/protocols/http/entities.bro b/scripts/base/protocols/http/entities.bro deleted file mode 100644 index c16bb3f630..0000000000 --- a/scripts/base/protocols/http/entities.bro +++ /dev/null @@ -1,204 +0,0 @@ -##! Analysis and logging for MIME entities found in HTTP sessions. - -@load base/frameworks/files -@load base/utils/strings -@load base/utils/files -@load ./main - -module HTTP; - -export { - type Entity: record { - ## Filename for the entity if discovered from a header. - filename: string &optional; - }; - - ## Maximum number of originator files to log. - ## :bro:see:`HTTP::max_files_policy` even is called once this - ## limit is reached to determine if it's enforced. - option max_files_orig = 15; - - ## Maximum number of responder files to log. - ## :bro:see:`HTTP::max_files_policy` even is called once this - ## limit is reached to determine if it's enforced. - option max_files_resp = 15; - - ## Called when reaching the max number of files across a given HTTP - ## connection according to :bro:see:`HTTP::max_files_orig` - ## or :bro:see:`HTTP::max_files_resp`. 
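build_url_ftp() above yields URLs of the form ftp://<host><path>, with addr_to_uri() from base/utils/addrs adding the brackets that IPv6 literals require. A standalone sketch of just the formatting step; the addresses and path are made-up examples:

    @load base/utils/addrs

    event zeek_init()
        {
        print fmt("ftp://%s%s", addr_to_uri(192.0.2.10), "/pub/file.txt");
        # ftp://192.0.2.10/pub/file.txt
        print fmt("ftp://%s%s", addr_to_uri([2001:db8::1]), "/pub/file.txt");
        # ftp://[2001:db8::1]/pub/file.txt
        }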
Break from the hook - ## early to signal that the file limit should not be applied. - global max_files_policy: hook(f: fa_file, is_orig: bool); - - redef record Info += { - ## An ordered vector of file unique IDs. - ## Limited to :bro:see:`HTTP::max_files_orig` entries. - orig_fuids: vector of string &log &optional; - - ## An ordered vector of filenames from the client. - ## Limited to :bro:see:`HTTP::max_files_orig` entries. - orig_filenames: vector of string &log &optional; - - ## An ordered vector of mime types. - ## Limited to :bro:see:`HTTP::max_files_orig` entries. - orig_mime_types: vector of string &log &optional; - - ## An ordered vector of file unique IDs. - ## Limited to :bro:see:`HTTP::max_files_resp` entries. - resp_fuids: vector of string &log &optional; - - ## An ordered vector of filenames from the server. - ## Limited to :bro:see:`HTTP::max_files_resp` entries. - resp_filenames: vector of string &log &optional; - - ## An ordered vector of mime types. - ## Limited to :bro:see:`HTTP::max_files_resp` entries. - resp_mime_types: vector of string &log &optional; - - ## The current entity. - current_entity: Entity &optional; - ## Current number of MIME entities in the HTTP request message - ## body. - orig_mime_depth: count &default=0; - ## Current number of MIME entities in the HTTP response message - ## body. - resp_mime_depth: count &default=0; - }; - - redef record fa_file += { - http: HTTP::Info &optional; - }; -} - -event http_begin_entity(c: connection, is_orig: bool) &priority=10 - { - set_state(c, is_orig); - - if ( is_orig ) - ++c$http$orig_mime_depth; - else - ++c$http$resp_mime_depth; - - c$http$current_entity = Entity(); - } - -event http_header(c: connection, is_orig: bool, name: string, value: string) &priority=3 - { - if ( name == "CONTENT-DISPOSITION" && - /[fF][iI][lL][eE][nN][aA][mM][eE]/ in value ) - { - c$http$current_entity$filename = extract_filename_from_content_disposition(value); - } - else if ( name == "CONTENT-TYPE" && - /[nN][aA][mM][eE][:blank:]*=/ in value ) - { - c$http$current_entity$filename = extract_filename_from_content_disposition(value); - } - } - -event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5 - { - if ( f$source == "HTTP" && c?$http ) - { - f$http = c$http; - - if ( c$http?$current_entity && c$http$current_entity?$filename ) - f$info$filename = c$http$current_entity$filename; - - local size: count; - local max: count; - - if ( f$is_orig ) - { - size = f$http?$orig_fuids ? |f$http$orig_fuids| : 0; - max = max_files_orig; - } - else - { - size = f$http?$resp_fuids ? |f$http$resp_fuids| : 0; - max = max_files_resp; - } - - if ( size >= max && hook HTTP::max_files_policy(f, f$is_orig) ) - return; - - if ( f$is_orig ) - { - if ( ! c$http?$orig_fuids ) - c$http$orig_fuids = string_vec(f$id); - else - c$http$orig_fuids += f$id; - - if ( f$info?$filename ) - { - if ( ! c$http?$orig_filenames ) - c$http$orig_filenames = string_vec(f$info$filename); - else - c$http$orig_filenames += f$info$filename; - } - } - - else - { - if ( ! c$http?$resp_fuids ) - c$http$resp_fuids = string_vec(f$id); - else - c$http$resp_fuids += f$id; - - if ( f$info?$filename ) - { - if ( ! c$http?$resp_filenames ) - c$http$resp_filenames = string_vec(f$info$filename); - else - c$http$resp_filenames += f$info$filename; - } - - } - } - } - -event file_sniff(f: fa_file, meta: fa_metadata) &priority=5 - { - if ( ! f?$http || ! f?$is_orig ) - return; - - if ( ! 
meta?$mime_type ) - return; - - local size: count; - local max: count; - - if ( f$is_orig ) - { - size = f$http?$orig_mime_types ? |f$http$orig_mime_types| : 0; - max = max_files_orig; - } - else - { - size = f$http?$resp_mime_types ? |f$http$resp_mime_types| : 0; - max = max_files_resp; - } - - if ( size >= max && hook HTTP::max_files_policy(f, f$is_orig) ) - return; - - if ( f$is_orig ) - { - if ( ! f$http?$orig_mime_types ) - f$http$orig_mime_types = string_vec(meta$mime_type); - else - f$http$orig_mime_types += meta$mime_type; - } - else - { - if ( ! f$http?$resp_mime_types ) - f$http$resp_mime_types = string_vec(meta$mime_type); - else - f$http$resp_mime_types += meta$mime_type; - } - } - -event http_end_entity(c: connection, is_orig: bool) &priority=5 - { - if ( c?$http && c$http?$current_entity ) - delete c$http$current_entity; - } diff --git a/scripts/base/protocols/http/entities.zeek b/scripts/base/protocols/http/entities.zeek new file mode 100644 index 0000000000..0a72c6b76e --- /dev/null +++ b/scripts/base/protocols/http/entities.zeek @@ -0,0 +1,204 @@ +##! Analysis and logging for MIME entities found in HTTP sessions. + +@load base/frameworks/files +@load base/utils/strings +@load base/utils/files +@load ./main + +module HTTP; + +export { + type Entity: record { + ## Filename for the entity if discovered from a header. + filename: string &optional; + }; + + ## Maximum number of originator files to log. + ## :zeek:see:`HTTP::max_files_policy` even is called once this + ## limit is reached to determine if it's enforced. + option max_files_orig = 15; + + ## Maximum number of responder files to log. + ## :zeek:see:`HTTP::max_files_policy` even is called once this + ## limit is reached to determine if it's enforced. + option max_files_resp = 15; + + ## Called when reaching the max number of files across a given HTTP + ## connection according to :zeek:see:`HTTP::max_files_orig` + ## or :zeek:see:`HTTP::max_files_resp`. Break from the hook + ## early to signal that the file limit should not be applied. + global max_files_policy: hook(f: fa_file, is_orig: bool); + + redef record Info += { + ## An ordered vector of file unique IDs. + ## Limited to :zeek:see:`HTTP::max_files_orig` entries. + orig_fuids: vector of string &log &optional; + + ## An ordered vector of filenames from the client. + ## Limited to :zeek:see:`HTTP::max_files_orig` entries. + orig_filenames: vector of string &log &optional; + + ## An ordered vector of mime types. + ## Limited to :zeek:see:`HTTP::max_files_orig` entries. + orig_mime_types: vector of string &log &optional; + + ## An ordered vector of file unique IDs. + ## Limited to :zeek:see:`HTTP::max_files_resp` entries. + resp_fuids: vector of string &log &optional; + + ## An ordered vector of filenames from the server. + ## Limited to :zeek:see:`HTTP::max_files_resp` entries. + resp_filenames: vector of string &log &optional; + + ## An ordered vector of mime types. + ## Limited to :zeek:see:`HTTP::max_files_resp` entries. + resp_mime_types: vector of string &log &optional; + + ## The current entity. + current_entity: Entity &optional; + ## Current number of MIME entities in the HTTP request message + ## body. + orig_mime_depth: count &default=0; + ## Current number of MIME entities in the HTTP response message + ## body. 
+ resp_mime_depth: count &default=0; + }; + + redef record fa_file += { + http: HTTP::Info &optional; + }; +} + +event http_begin_entity(c: connection, is_orig: bool) &priority=10 + { + set_state(c, is_orig); + + if ( is_orig ) + ++c$http$orig_mime_depth; + else + ++c$http$resp_mime_depth; + + c$http$current_entity = Entity(); + } + +event http_header(c: connection, is_orig: bool, name: string, value: string) &priority=3 + { + if ( name == "CONTENT-DISPOSITION" && + /[fF][iI][lL][eE][nN][aA][mM][eE]/ in value ) + { + c$http$current_entity$filename = extract_filename_from_content_disposition(value); + } + else if ( name == "CONTENT-TYPE" && + /[nN][aA][mM][eE][:blank:]*=/ in value ) + { + c$http$current_entity$filename = extract_filename_from_content_disposition(value); + } + } + +event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5 + { + if ( f$source == "HTTP" && c?$http ) + { + f$http = c$http; + + if ( c$http?$current_entity && c$http$current_entity?$filename ) + f$info$filename = c$http$current_entity$filename; + + local size: count; + local max: count; + + if ( f$is_orig ) + { + size = f$http?$orig_fuids ? |f$http$orig_fuids| : 0; + max = max_files_orig; + } + else + { + size = f$http?$resp_fuids ? |f$http$resp_fuids| : 0; + max = max_files_resp; + } + + if ( size >= max && hook HTTP::max_files_policy(f, f$is_orig) ) + return; + + if ( f$is_orig ) + { + if ( ! c$http?$orig_fuids ) + c$http$orig_fuids = string_vec(f$id); + else + c$http$orig_fuids += f$id; + + if ( f$info?$filename ) + { + if ( ! c$http?$orig_filenames ) + c$http$orig_filenames = string_vec(f$info$filename); + else + c$http$orig_filenames += f$info$filename; + } + } + + else + { + if ( ! c$http?$resp_fuids ) + c$http$resp_fuids = string_vec(f$id); + else + c$http$resp_fuids += f$id; + + if ( f$info?$filename ) + { + if ( ! c$http?$resp_filenames ) + c$http$resp_filenames = string_vec(f$info$filename); + else + c$http$resp_filenames += f$info$filename; + } + + } + } + } + +event file_sniff(f: fa_file, meta: fa_metadata) &priority=5 + { + if ( ! f?$http || ! f?$is_orig ) + return; + + if ( ! meta?$mime_type ) + return; + + local size: count; + local max: count; + + if ( f$is_orig ) + { + size = f$http?$orig_mime_types ? |f$http$orig_mime_types| : 0; + max = max_files_orig; + } + else + { + size = f$http?$resp_mime_types ? |f$http$resp_mime_types| : 0; + max = max_files_resp; + } + + if ( size >= max && hook HTTP::max_files_policy(f, f$is_orig) ) + return; + + if ( f$is_orig ) + { + if ( ! f$http?$orig_mime_types ) + f$http$orig_mime_types = string_vec(meta$mime_type); + else + f$http$orig_mime_types += meta$mime_type; + } + else + { + if ( ! f$http?$resp_mime_types ) + f$http$resp_mime_types = string_vec(meta$mime_type); + else + f$http$resp_mime_types += meta$mime_type; + } + } + +event http_end_entity(c: connection, is_orig: bool) &priority=5 + { + if ( c?$http && c$http?$current_entity ) + delete c$http$current_entity; + } diff --git a/scripts/base/protocols/http/files.bro b/scripts/base/protocols/http/files.bro deleted file mode 100644 index 078c6d2e66..0000000000 --- a/scripts/base/protocols/http/files.bro +++ /dev/null @@ -1,56 +0,0 @@ -@load ./main -@load ./entities -@load ./utils -@load base/utils/conn-ids -@load base/frameworks/files - -module HTTP; - -export { - ## Default file handle provider for HTTP. - global get_file_handle: function(c: connection, is_orig: bool): string; - - ## Default file describer for HTTP. 
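To hook into the file cap introduced above from a site script: breaking out of HTTP::max_files_policy vetoes the limit for that file, since the caller only stops tracking when the hook runs to completion. The sketch below keeps logging server-side files past HTTP::max_files_resp; the policy choice is illustrative only:

    @load base/protocols/http

    hook HTTP::max_files_policy(f: fa_file, is_orig: bool)
        {
        # 'break' signals that the file limit should not be applied.
        if ( ! is_orig )
            break;
        }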
- global describe_file: function(f: fa_file): string; -} - -function get_file_handle(c: connection, is_orig: bool): string - { - if ( ! c?$http ) - return ""; - - if ( c$http$range_request && ! is_orig ) - { - # Any multipart responses from the server are pieces of same file - # that correspond to range requests, so don't use mime depth to - # identify the file. - return cat(Analyzer::ANALYZER_HTTP, is_orig, c$id$orig_h, build_url(c$http)); - } - else - { - local mime_depth = is_orig ? c$http$orig_mime_depth : c$http$resp_mime_depth; - return cat(Analyzer::ANALYZER_HTTP, c$start_time, is_orig, - c$http$trans_depth, mime_depth, id_string(c$id)); - } - } - -function describe_file(f: fa_file): string - { - # This shouldn't be needed, but just in case... - if ( f$source != "HTTP" ) - return ""; - - for ( cid, c in f$conns ) - { - if ( c?$http ) - return build_url_http(c$http); - } - return ""; - } - -event bro_init() &priority=5 - { - Files::register_protocol(Analyzer::ANALYZER_HTTP, - [$get_file_handle = HTTP::get_file_handle, - $describe = HTTP::describe_file]); - } diff --git a/scripts/base/protocols/http/files.zeek b/scripts/base/protocols/http/files.zeek new file mode 100644 index 0000000000..a8a67762d4 --- /dev/null +++ b/scripts/base/protocols/http/files.zeek @@ -0,0 +1,56 @@ +@load ./main +@load ./entities +@load ./utils +@load base/utils/conn-ids +@load base/frameworks/files + +module HTTP; + +export { + ## Default file handle provider for HTTP. + global get_file_handle: function(c: connection, is_orig: bool): string; + + ## Default file describer for HTTP. + global describe_file: function(f: fa_file): string; +} + +function get_file_handle(c: connection, is_orig: bool): string + { + if ( ! c?$http ) + return ""; + + if ( c$http$range_request && ! is_orig ) + { + # Any multipart responses from the server are pieces of same file + # that correspond to range requests, so don't use mime depth to + # identify the file. + return cat(Analyzer::ANALYZER_HTTP, is_orig, c$id$orig_h, build_url(c$http)); + } + else + { + local mime_depth = is_orig ? c$http$orig_mime_depth : c$http$resp_mime_depth; + return cat(Analyzer::ANALYZER_HTTP, c$start_time, is_orig, + c$http$trans_depth, mime_depth, id_string(c$id)); + } + } + +function describe_file(f: fa_file): string + { + # This shouldn't be needed, but just in case... + if ( f$source != "HTTP" ) + return ""; + + for ( cid, c in f$conns ) + { + if ( c?$http ) + return build_url_http(c$http); + } + return ""; + } + +event zeek_init() &priority=5 + { + Files::register_protocol(Analyzer::ANALYZER_HTTP, + [$get_file_handle = HTTP::get_file_handle, + $describe = HTTP::describe_file]); + } diff --git a/scripts/base/protocols/http/main.bro b/scripts/base/protocols/http/main.bro deleted file mode 100644 index ea86367bb1..0000000000 --- a/scripts/base/protocols/http/main.bro +++ /dev/null @@ -1,337 +0,0 @@ -##! Implements base functionality for HTTP analysis. The logging model is -##! to log request/response pairs and all relevant metadata together in -##! a single record. - -@load base/utils/numbers -@load base/utils/files -@load base/frameworks/tunnels - -module HTTP; - -export { - redef enum Log::ID += { LOG }; - - ## Indicate a type of attack or compromise in the record to be logged. - type Tags: enum { - ## Placeholder. - EMPTY - }; - - ## This setting changes if passwords used in Basic-Auth are captured or - ## not. - option default_capture_password = F; - - ## The record type which contains the fields of the HTTP log. 
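With the registration above in place, the Files framework hands HTTP-delivered files to scripts together with their originating connection, so the two can be correlated directly. A minimal example handler; the printed format is arbitrary and field access is guarded:

    @load base/frameworks/files
    @load base/protocols/http

    event file_over_new_connection(f: fa_file, c: connection, is_orig: bool)
        {
        if ( f$source == "HTTP" && c?$http && c$http?$host )
            print fmt("file %s transferred via host %s", f$id, c$http$host);
        }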
- type Info: record { - ## Timestamp for when the request happened. - ts: time &log; - ## Unique ID for the connection. - uid: string &log; - ## The connection's 4-tuple of endpoint addresses/ports. - id: conn_id &log; - ## Represents the pipelined depth into the connection of this - ## request/response transaction. - trans_depth: count &log; - ## Verb used in the HTTP request (GET, POST, HEAD, etc.). - method: string &log &optional; - ## Value of the HOST header. - host: string &log &optional; - ## URI used in the request. - uri: string &log &optional; - ## Value of the "referer" header. The comment is deliberately - ## misspelled like the standard declares, but the name used here - ## is "referrer" spelled correctly. - referrer: string &log &optional; - ## Value of the version portion of the request. - version: string &log &optional; - ## Value of the User-Agent header from the client. - user_agent: string &log &optional; - ## Value of the Origin header from the client. - origin: string &log &optional; - ## Actual uncompressed content size of the data transferred from - ## the client. - request_body_len: count &log &default=0; - ## Actual uncompressed content size of the data transferred from - ## the server. - response_body_len: count &log &default=0; - ## Status code returned by the server. - status_code: count &log &optional; - ## Status message returned by the server. - status_msg: string &log &optional; - ## Last seen 1xx informational reply code returned by the server. - info_code: count &log &optional; - ## Last seen 1xx informational reply message returned by the server. - info_msg: string &log &optional; - ## A set of indicators of various attributes discovered and - ## related to a particular request/response pair. - tags: set[Tags] &log; - - ## Username if basic-auth is performed for the request. - username: string &log &optional; - ## Password if basic-auth is performed for the request. - password: string &log &optional; - - ## Determines if the password will be captured for this request. - capture_password: bool &default=default_capture_password; - - ## All of the headers that may indicate if the request was proxied. - proxied: set[string] &log &optional; - - ## Indicates if this request can assume 206 partial content in - ## response. - range_request: bool &default=F; - }; - - ## Structure to maintain state for an HTTP connection with multiple - ## requests and responses. - type State: record { - ## Pending requests. - pending: table[count] of Info; - ## Current request in the pending queue. - current_request: count &default=0; - ## Current response in the pending queue. - current_response: count &default=0; - ## Track the current deepest transaction. - ## This is meant to cope with missing requests - ## and responses. - trans_depth: count &default=0; - }; - - ## A list of HTTP headers typically used to indicate proxied requests. - option proxy_headers: set[string] = { - "FORWARDED", - "X-FORWARDED-FOR", - "X-FORWARDED-FROM", - "CLIENT-IP", - "VIA", - "XROXY-CONNECTION", - "PROXY-CONNECTION", - }; - - ## A list of HTTP methods. Other methods will generate a weird. Note - ## that the HTTP analyzer will only accept methods consisting solely - ## of letters ``[A-Za-z]``. 
- option http_methods: set[string] = { - "GET", "POST", "HEAD", "OPTIONS", - "PUT", "DELETE", "TRACE", "CONNECT", - # HTTP methods for distributed authoring: - "PROPFIND", "PROPPATCH", "MKCOL", - "COPY", "MOVE", "LOCK", "UNLOCK", - "POLL", "REPORT", "SUBSCRIBE", "BMOVE", - "SEARCH" - }; - - ## Event that can be handled to access the HTTP record as it is sent on - ## to the logging framework. - global log_http: event(rec: Info); -} - -# Add the http state tracking fields to the connection record. -redef record connection += { - http: Info &optional; - http_state: State &optional; -}; - -const ports = { - 80/tcp, 81/tcp, 631/tcp, 1080/tcp, 3128/tcp, - 8000/tcp, 8080/tcp, 8888/tcp, -}; -redef likely_server_ports += { ports }; - -# Initialize the HTTP logging stream and ports. -event bro_init() &priority=5 - { - Log::create_stream(HTTP::LOG, [$columns=Info, $ev=log_http, $path="http"]); - Analyzer::register_for_ports(Analyzer::ANALYZER_HTTP, ports); - } - -function code_in_range(c: count, min: count, max: count) : bool - { - return c >= min && c <= max; - } - -function new_http_session(c: connection): Info - { - local tmp: Info; - tmp$ts=network_time(); - tmp$uid=c$uid; - tmp$id=c$id; - tmp$trans_depth = ++c$http_state$trans_depth; - return tmp; - } - -function set_state(c: connection, is_orig: bool) - { - if ( ! c?$http_state ) - { - local s: State; - c$http_state = s; - } - - # These deal with new requests and responses. - if ( is_orig ) - { - if ( c$http_state$current_request !in c$http_state$pending ) - c$http_state$pending[c$http_state$current_request] = new_http_session(c); - - c$http = c$http_state$pending[c$http_state$current_request]; - } - else - { - if ( c$http_state$current_response !in c$http_state$pending ) - c$http_state$pending[c$http_state$current_response] = new_http_session(c); - - c$http = c$http_state$pending[c$http_state$current_response]; - } - } - -event http_request(c: connection, method: string, original_URI: string, - unescaped_URI: string, version: string) &priority=5 - { - if ( ! c?$http_state ) - { - local s: State; - c$http_state = s; - } - - ++c$http_state$current_request; - set_state(c, T); - - c$http$method = method; - c$http$uri = unescaped_URI; - - if ( method !in http_methods ) - Reporter::conn_weird("unknown_HTTP_method", c, method); - } - -event http_reply(c: connection, version: string, code: count, reason: string) &priority=5 - { - if ( ! c?$http_state ) - { - local s: State; - c$http_state = s; - } - - # If the last response was an informational 1xx, we're still expecting - # the real response to the request, so don't create a new Info record yet. - if ( c$http_state$current_response !in c$http_state$pending || - (c$http_state$pending[c$http_state$current_response]?$status_code && - ! code_in_range(c$http_state$pending[c$http_state$current_response]$status_code, 100, 199)) ) - { - ++c$http_state$current_response; - } - set_state(c, F); - - c$http$status_code = code; - c$http$status_msg = reason; - c$http$version = version; - - if ( code_in_range(code, 100, 199) ) - { - c$http$info_code = code; - c$http$info_msg = reason; - } - - if ( c$http?$method && c$http$method == "CONNECT" && code == 200 ) - { - # Copy this conn_id and set the orig_p to zero because in the case of CONNECT - # proxies there will be potentially many source ports since a new proxy connection - # is established for each proxied connection. We treat this as a singular - # "tunnel". 
- local tid = copy(c$id); - tid$orig_p = 0/tcp; - Tunnel::register([$cid=tid, $tunnel_type=Tunnel::HTTP]); - } - } - -event http_header(c: connection, is_orig: bool, name: string, value: string) &priority=5 - { - set_state(c, is_orig); - - if ( is_orig ) # client headers - { - if ( name == "REFERER" ) - c$http$referrer = value; - - else if ( name == "HOST" ) - # The split is done to remove the occasional port value that shows up here. - c$http$host = split_string1(value, /:/)[0]; - - else if ( name == "RANGE" ) - c$http$range_request = T; - - else if ( name == "ORIGIN" ) - c$http$origin = value; - - else if ( name == "USER-AGENT" ) - c$http$user_agent = value; - - else if ( name in proxy_headers ) - { - if ( ! c$http?$proxied ) - c$http$proxied = set(); - add c$http$proxied[fmt("%s -> %s", name, value)]; - } - - else if ( name == "AUTHORIZATION" || name == "PROXY-AUTHORIZATION" ) - { - if ( /^[bB][aA][sS][iI][cC] / in value ) - { - local userpass = decode_base64_conn(c$id, sub(value, /[bB][aA][sS][iI][cC][[:blank:]]/, "")); - local up = split_string(userpass, /:/); - if ( |up| >= 2 ) - { - c$http$username = up[0]; - if ( c$http$capture_password ) - c$http$password = up[1]; - } - else - { - c$http$username = fmt(" (%s)", value); - if ( c$http$capture_password ) - c$http$password = userpass; - } - } - } - } - } - -event http_message_done(c: connection, is_orig: bool, stat: http_message_stat) &priority = 5 - { - set_state(c, is_orig); - - if ( is_orig ) - c$http$request_body_len = stat$body_length; - else - c$http$response_body_len = stat$body_length; - } - -event http_message_done(c: connection, is_orig: bool, stat: http_message_stat) &priority = -5 - { - # The reply body is done so we're ready to log. - if ( ! is_orig ) - { - # If the response was an informational 1xx, we're still expecting - # the real response later, so we'll continue using the same record. - if ( ! (c$http?$status_code && code_in_range(c$http$status_code, 100, 199)) ) - { - Log::write(HTTP::LOG, c$http); - delete c$http_state$pending[c$http_state$current_response]; - } - } - } - -event connection_state_remove(c: connection) &priority=-5 - { - # Flush all pending but incomplete request/response pairs. - if ( c?$http_state ) - { - for ( r, info in c$http_state$pending ) - { - # We don't use pending elements at index 0. - if ( r == 0 ) next; - Log::write(HTTP::LOG, info); - } - } - } - diff --git a/scripts/base/protocols/http/main.zeek b/scripts/base/protocols/http/main.zeek new file mode 100644 index 0000000000..ef0561efb4 --- /dev/null +++ b/scripts/base/protocols/http/main.zeek @@ -0,0 +1,337 @@ +##! Implements base functionality for HTTP analysis. The logging model is +##! to log request/response pairs and all relevant metadata together in +##! a single record. + +@load base/utils/numbers +@load base/utils/files +@load base/frameworks/tunnels + +module HTTP; + +export { + redef enum Log::ID += { LOG }; + + ## Indicate a type of attack or compromise in the record to be logged. + type Tags: enum { + ## Placeholder. + EMPTY + }; + + ## This setting changes if passwords used in Basic-Auth are captured or + ## not. + option default_capture_password = F; + + ## The record type which contains the fields of the HTTP log. + type Info: record { + ## Timestamp for when the request happened. + ts: time &log; + ## Unique ID for the connection. + uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. 
+ id: conn_id &log; + ## Represents the pipelined depth into the connection of this + ## request/response transaction. + trans_depth: count &log; + ## Verb used in the HTTP request (GET, POST, HEAD, etc.). + method: string &log &optional; + ## Value of the HOST header. + host: string &log &optional; + ## URI used in the request. + uri: string &log &optional; + ## Value of the "referer" header. The comment is deliberately + ## misspelled like the standard declares, but the name used here + ## is "referrer" spelled correctly. + referrer: string &log &optional; + ## Value of the version portion of the request. + version: string &log &optional; + ## Value of the User-Agent header from the client. + user_agent: string &log &optional; + ## Value of the Origin header from the client. + origin: string &log &optional; + ## Actual uncompressed content size of the data transferred from + ## the client. + request_body_len: count &log &default=0; + ## Actual uncompressed content size of the data transferred from + ## the server. + response_body_len: count &log &default=0; + ## Status code returned by the server. + status_code: count &log &optional; + ## Status message returned by the server. + status_msg: string &log &optional; + ## Last seen 1xx informational reply code returned by the server. + info_code: count &log &optional; + ## Last seen 1xx informational reply message returned by the server. + info_msg: string &log &optional; + ## A set of indicators of various attributes discovered and + ## related to a particular request/response pair. + tags: set[Tags] &log; + + ## Username if basic-auth is performed for the request. + username: string &log &optional; + ## Password if basic-auth is performed for the request. + password: string &log &optional; + + ## Determines if the password will be captured for this request. + capture_password: bool &default=default_capture_password; + + ## All of the headers that may indicate if the request was proxied. + proxied: set[string] &log &optional; + + ## Indicates if this request can assume 206 partial content in + ## response. + range_request: bool &default=F; + }; + + ## Structure to maintain state for an HTTP connection with multiple + ## requests and responses. + type State: record { + ## Pending requests. + pending: table[count] of Info; + ## Current request in the pending queue. + current_request: count &default=0; + ## Current response in the pending queue. + current_response: count &default=0; + ## Track the current deepest transaction. + ## This is meant to cope with missing requests + ## and responses. + trans_depth: count &default=0; + }; + + ## A list of HTTP headers typically used to indicate proxied requests. + option proxy_headers: set[string] = { + "FORWARDED", + "X-FORWARDED-FOR", + "X-FORWARDED-FROM", + "CLIENT-IP", + "VIA", + "XROXY-CONNECTION", + "PROXY-CONNECTION", + }; + + ## A list of HTTP methods. Other methods will generate a weird. Note + ## that the HTTP analyzer will only accept methods consisting solely + ## of letters ``[A-Za-z]``. + option http_methods: set[string] = { + "GET", "POST", "HEAD", "OPTIONS", + "PUT", "DELETE", "TRACE", "CONNECT", + # HTTP methods for distributed authoring: + "PROPFIND", "PROPPATCH", "MKCOL", + "COPY", "MOVE", "LOCK", "UNLOCK", + "POLL", "REPORT", "SUBSCRIBE", "BMOVE", + "SEARCH" + }; + + ## Event that can be handled to access the HTTP record as it is sent on + ## to the logging framework. + global log_http: event(rec: Info); +} + +# Add the http state tracking fields to the connection record. 
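The log_http event declared above is the usual extension point for consuming HTTP records just before they are written to the log stream. A small handler sketch; the output format is arbitrary:

    @load base/protocols/http

    event HTTP::log_http(rec: HTTP::Info)
        {
        if ( rec?$host && rec?$uri )
            print fmt("%s %s%s", rec?$method ? rec$method : "-", rec$host, rec$uri);
        }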
+redef record connection += { + http: Info &optional; + http_state: State &optional; +}; + +const ports = { + 80/tcp, 81/tcp, 631/tcp, 1080/tcp, 3128/tcp, + 8000/tcp, 8080/tcp, 8888/tcp, +}; +redef likely_server_ports += { ports }; + +# Initialize the HTTP logging stream and ports. +event zeek_init() &priority=5 + { + Log::create_stream(HTTP::LOG, [$columns=Info, $ev=log_http, $path="http"]); + Analyzer::register_for_ports(Analyzer::ANALYZER_HTTP, ports); + } + +function code_in_range(c: count, min: count, max: count) : bool + { + return c >= min && c <= max; + } + +function new_http_session(c: connection): Info + { + local tmp: Info; + tmp$ts=network_time(); + tmp$uid=c$uid; + tmp$id=c$id; + tmp$trans_depth = ++c$http_state$trans_depth; + return tmp; + } + +function set_state(c: connection, is_orig: bool) + { + if ( ! c?$http_state ) + { + local s: State; + c$http_state = s; + } + + # These deal with new requests and responses. + if ( is_orig ) + { + if ( c$http_state$current_request !in c$http_state$pending ) + c$http_state$pending[c$http_state$current_request] = new_http_session(c); + + c$http = c$http_state$pending[c$http_state$current_request]; + } + else + { + if ( c$http_state$current_response !in c$http_state$pending ) + c$http_state$pending[c$http_state$current_response] = new_http_session(c); + + c$http = c$http_state$pending[c$http_state$current_response]; + } + } + +event http_request(c: connection, method: string, original_URI: string, + unescaped_URI: string, version: string) &priority=5 + { + if ( ! c?$http_state ) + { + local s: State; + c$http_state = s; + } + + ++c$http_state$current_request; + set_state(c, T); + + c$http$method = method; + c$http$uri = unescaped_URI; + + if ( method !in http_methods ) + Reporter::conn_weird("unknown_HTTP_method", c, method); + } + +event http_reply(c: connection, version: string, code: count, reason: string) &priority=5 + { + if ( ! c?$http_state ) + { + local s: State; + c$http_state = s; + } + + # If the last response was an informational 1xx, we're still expecting + # the real response to the request, so don't create a new Info record yet. + if ( c$http_state$current_response !in c$http_state$pending || + (c$http_state$pending[c$http_state$current_response]?$status_code && + ! code_in_range(c$http_state$pending[c$http_state$current_response]$status_code, 100, 199)) ) + { + ++c$http_state$current_response; + } + set_state(c, F); + + c$http$status_code = code; + c$http$status_msg = reason; + c$http$version = version; + + if ( code_in_range(code, 100, 199) ) + { + c$http$info_code = code; + c$http$info_msg = reason; + } + + if ( c$http?$method && c$http$method == "CONNECT" && code == 200 ) + { + # Copy this conn_id and set the orig_p to zero because in the case of CONNECT + # proxies there will be potentially many source ports since a new proxy connection + # is established for each proxied connection. We treat this as a singular + # "tunnel". + local tid = copy(c$id); + tid$orig_p = 0/tcp; + Tunnel::register([$cid=tid, $tunnel_type=Tunnel::HTTP]); + } + } + +event http_header(c: connection, is_orig: bool, name: string, value: string) &priority=5 + { + set_state(c, is_orig); + + if ( is_orig ) # client headers + { + if ( name == "REFERER" ) + c$http$referrer = value; + + else if ( name == "HOST" ) + # The split is done to remove the occasional port value that shows up here. 
+ c$http$host = split_string1(value, /:/)[0]; + + else if ( name == "RANGE" ) + c$http$range_request = T; + + else if ( name == "ORIGIN" ) + c$http$origin = value; + + else if ( name == "USER-AGENT" ) + c$http$user_agent = value; + + else if ( name in proxy_headers ) + { + if ( ! c$http?$proxied ) + c$http$proxied = set(); + add c$http$proxied[fmt("%s -> %s", name, value)]; + } + + else if ( name == "AUTHORIZATION" || name == "PROXY-AUTHORIZATION" ) + { + if ( /^[bB][aA][sS][iI][cC] / in value ) + { + local userpass = decode_base64_conn(c$id, sub(value, /[bB][aA][sS][iI][cC][[:blank:]]/, "")); + local up = split_string(userpass, /:/); + if ( |up| >= 2 ) + { + c$http$username = up[0]; + if ( c$http$capture_password ) + c$http$password = up[1]; + } + else + { + c$http$username = fmt(" (%s)", value); + if ( c$http$capture_password ) + c$http$password = userpass; + } + } + } + } + } + +event http_message_done(c: connection, is_orig: bool, stat: http_message_stat) &priority = 5 + { + set_state(c, is_orig); + + if ( is_orig ) + c$http$request_body_len = stat$body_length; + else + c$http$response_body_len = stat$body_length; + } + +event http_message_done(c: connection, is_orig: bool, stat: http_message_stat) &priority = -5 + { + # The reply body is done so we're ready to log. + if ( ! is_orig ) + { + # If the response was an informational 1xx, we're still expecting + # the real response later, so we'll continue using the same record. + if ( ! (c$http?$status_code && code_in_range(c$http$status_code, 100, 199)) ) + { + Log::write(HTTP::LOG, c$http); + delete c$http_state$pending[c$http_state$current_response]; + } + } + } + +event connection_state_remove(c: connection) &priority=-5 + { + # Flush all pending but incomplete request/response pairs. + if ( c?$http_state ) + { + for ( r, info in c$http_state$pending ) + { + # We don't use pending elements at index 0. + if ( r == 0 ) next; + Log::write(HTTP::LOG, info); + } + } + } + diff --git a/scripts/base/protocols/http/utils.bro b/scripts/base/protocols/http/utils.bro deleted file mode 100644 index 67f13f2640..0000000000 --- a/scripts/base/protocols/http/utils.bro +++ /dev/null @@ -1,72 +0,0 @@ -##! Utilities specific for HTTP processing. - -@load ./main -@load base/utils/addrs - -module HTTP; - -export { - ## Given a string containing a series of key-value pairs separated - ## by "=", this function can be used to parse out all of the key names. - ## - ## data: The raw data, such as a URL or cookie value. - ## - ## kv_splitter: A regular expression representing the separator between - ## key-value pairs. - ## - ## Returns: A vector of strings containing the keys. - global extract_keys: function(data: string, kv_splitter: pattern): string_vec; - - ## Creates a URL from an :bro:type:`HTTP::Info` record. This should - ## handle edge cases such as proxied requests appropriately. - ## - ## rec: An :bro:type:`HTTP::Info` record. - ## - ## Returns: A URL, not prefixed by ``"http://"``. - global build_url: function(rec: Info): string; - - ## Creates a URL from an :bro:type:`HTTP::Info` record. This should - ## handle edge cases such as proxied requests appropriately. - ## - ## rec: An :bro:type:`HTTP::Info` record. - ## - ## Returns: A URL prefixed with ``"http://"``. - global build_url_http: function(rec: Info): string; - - ## Create an extremely shortened representation of a log line. 
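The Authorization handling above amounts to base64-decoding the credentials and splitting on ':'. The script itself uses decode_base64_conn() so decoding errors are tied to the connection; outside that context the plain decode_base64() BIF shows the same steps. The credentials below are made up:

    event zeek_init()
        {
        local userpass = decode_base64("YWxpY2U6aHVudGVyMg==");  # "alice:hunter2"
        local up = split_string(userpass, /:/);
        if ( |up| >= 2 )
            print fmt("user=%s password=%s", up[0], up[1]);
        }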
- global describe: function(rec: Info): string; -} - - -function extract_keys(data: string, kv_splitter: pattern): string_vec - { - local key_vec: vector of string = vector(); - - local parts = split_string(data, kv_splitter); - for ( part_index in parts ) - { - local key_val = split_string1(parts[part_index], /=/); - if ( 0 in key_val ) - key_vec += key_val[0]; - } - return key_vec; - } - -function build_url(rec: Info): string - { - local uri = rec?$uri ? rec$uri : "/"; - local host = rec?$host ? rec$host : addr_to_uri(rec$id$resp_h); - if ( rec$id$resp_p != 80/tcp ) - host = fmt("%s:%s", host, rec$id$resp_p); - return fmt("%s%s", host, uri); - } - -function build_url_http(rec: Info): string - { - return fmt("http://%s", build_url(rec)); - } - -function describe(rec: Info): string - { - return build_url_http(rec); - } diff --git a/scripts/base/protocols/http/utils.zeek b/scripts/base/protocols/http/utils.zeek new file mode 100644 index 0000000000..a48841cef5 --- /dev/null +++ b/scripts/base/protocols/http/utils.zeek @@ -0,0 +1,72 @@ +##! Utilities specific for HTTP processing. + +@load ./main +@load base/utils/addrs + +module HTTP; + +export { + ## Given a string containing a series of key-value pairs separated + ## by "=", this function can be used to parse out all of the key names. + ## + ## data: The raw data, such as a URL or cookie value. + ## + ## kv_splitter: A regular expression representing the separator between + ## key-value pairs. + ## + ## Returns: A vector of strings containing the keys. + global extract_keys: function(data: string, kv_splitter: pattern): string_vec; + + ## Creates a URL from an :zeek:type:`HTTP::Info` record. This should + ## handle edge cases such as proxied requests appropriately. + ## + ## rec: An :zeek:type:`HTTP::Info` record. + ## + ## Returns: A URL, not prefixed by ``"http://"``. + global build_url: function(rec: Info): string; + + ## Creates a URL from an :zeek:type:`HTTP::Info` record. This should + ## handle edge cases such as proxied requests appropriately. + ## + ## rec: An :zeek:type:`HTTP::Info` record. + ## + ## Returns: A URL prefixed with ``"http://"``. + global build_url_http: function(rec: Info): string; + + ## Create an extremely shortened representation of a log line. + global describe: function(rec: Info): string; +} + + +function extract_keys(data: string, kv_splitter: pattern): string_vec + { + local key_vec: vector of string = vector(); + + local parts = split_string(data, kv_splitter); + for ( part_index in parts ) + { + local key_val = split_string1(parts[part_index], /=/); + if ( 0 in key_val ) + key_vec += key_val[0]; + } + return key_vec; + } + +function build_url(rec: Info): string + { + local uri = rec?$uri ? rec$uri : "/"; + local host = rec?$host ? 
rec$host : addr_to_uri(rec$id$resp_h); + if ( rec$id$resp_p != 80/tcp ) + host = fmt("%s:%s", host, rec$id$resp_p); + return fmt("%s%s", host, uri); + } + +function build_url_http(rec: Info): string + { + return fmt("http://%s", build_url(rec)); + } + +function describe(rec: Info): string + { + return build_url_http(rec); + } diff --git a/scripts/base/protocols/imap/__load__.bro b/scripts/base/protocols/imap/__load__.zeek similarity index 100% rename from scripts/base/protocols/imap/__load__.bro rename to scripts/base/protocols/imap/__load__.zeek diff --git a/scripts/base/protocols/imap/main.bro b/scripts/base/protocols/imap/main.bro deleted file mode 100644 index 9f0305c80c..0000000000 --- a/scripts/base/protocols/imap/main.bro +++ /dev/null @@ -1,11 +0,0 @@ - -module IMAP; - -const ports = { 143/tcp }; -redef likely_server_ports += { ports }; - -event bro_init() &priority=5 - { - Analyzer::register_for_ports(Analyzer::ANALYZER_IMAP, ports); - } - diff --git a/scripts/base/protocols/imap/main.zeek b/scripts/base/protocols/imap/main.zeek new file mode 100644 index 0000000000..30bfeab229 --- /dev/null +++ b/scripts/base/protocols/imap/main.zeek @@ -0,0 +1,11 @@ + +module IMAP; + +const ports = { 143/tcp }; +redef likely_server_ports += { ports }; + +event zeek_init() &priority=5 + { + Analyzer::register_for_ports(Analyzer::ANALYZER_IMAP, ports); + } + diff --git a/scripts/base/protocols/irc/__load__.bro b/scripts/base/protocols/irc/__load__.zeek similarity index 100% rename from scripts/base/protocols/irc/__load__.bro rename to scripts/base/protocols/irc/__load__.zeek diff --git a/scripts/base/protocols/irc/dcc-send.bro b/scripts/base/protocols/irc/dcc-send.zeek similarity index 100% rename from scripts/base/protocols/irc/dcc-send.bro rename to scripts/base/protocols/irc/dcc-send.zeek diff --git a/scripts/base/protocols/irc/files.bro b/scripts/base/protocols/irc/files.bro deleted file mode 100644 index 759acdca81..0000000000 --- a/scripts/base/protocols/irc/files.bro +++ /dev/null @@ -1,49 +0,0 @@ -@load ./dcc-send -@load base/utils/conn-ids -@load base/frameworks/files - -module IRC; - -export { - redef record Info += { - ## File unique ID. - fuid: string &log &optional; - }; - - ## Default file handle provider for IRC. - global get_file_handle: function(c: connection, is_orig: bool): string; - - redef record fa_file += { - irc: IRC::Info &optional; - }; -} - -function get_file_handle(c: connection, is_orig: bool): string - { - return cat(Analyzer::ANALYZER_IRC_DATA, c$start_time, c$id, is_orig); - } - -event bro_init() &priority=5 - { - Files::register_protocol(Analyzer::ANALYZER_IRC_DATA, - [$get_file_handle = IRC::get_file_handle]); - } - -event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5 - { - if ( [c$id$resp_h, c$id$resp_p] !in dcc_expected_transfers ) - return; - - local irc = dcc_expected_transfers[c$id$resp_h, c$id$resp_p]; - irc$fuid = f$id; - if ( irc?$dcc_file_name ) - f$info$filename = irc$dcc_file_name; - - f$irc = irc; - } - -event file_sniff(f: fa_file, meta: fa_metadata) &priority=5 - { - if ( f?$irc && meta?$mime_type ) - f$irc$dcc_mime_type = meta$mime_type; - } diff --git a/scripts/base/protocols/irc/files.zeek b/scripts/base/protocols/irc/files.zeek new file mode 100644 index 0000000000..59b178f4df --- /dev/null +++ b/scripts/base/protocols/irc/files.zeek @@ -0,0 +1,49 @@ +@load ./dcc-send +@load base/utils/conn-ids +@load base/frameworks/files + +module IRC; + +export { + redef record Info += { + ## File unique ID. 
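The IMAP script above is the minimal analyzer-registration pattern: declare the ports, mark them as likely server ports, and register the analyzer at zeek_init time. A site script can extend the same analyzer in the same way; 10143/tcp below is an arbitrary example port:

    @load base/protocols/imap

    const my_imap_ports = { 10143/tcp };

    redef likely_server_ports += { my_imap_ports };

    event zeek_init()
        {
        Analyzer::register_for_ports(Analyzer::ANALYZER_IMAP, my_imap_ports);
        }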
+ fuid: string &log &optional; + }; + + ## Default file handle provider for IRC. + global get_file_handle: function(c: connection, is_orig: bool): string; + + redef record fa_file += { + irc: IRC::Info &optional; + }; +} + +function get_file_handle(c: connection, is_orig: bool): string + { + return cat(Analyzer::ANALYZER_IRC_DATA, c$start_time, c$id, is_orig); + } + +event zeek_init() &priority=5 + { + Files::register_protocol(Analyzer::ANALYZER_IRC_DATA, + [$get_file_handle = IRC::get_file_handle]); + } + +event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5 + { + if ( [c$id$resp_h, c$id$resp_p] !in dcc_expected_transfers ) + return; + + local irc = dcc_expected_transfers[c$id$resp_h, c$id$resp_p]; + irc$fuid = f$id; + if ( irc?$dcc_file_name ) + f$info$filename = irc$dcc_file_name; + + f$irc = irc; + } + +event file_sniff(f: fa_file, meta: fa_metadata) &priority=5 + { + if ( f?$irc && meta?$mime_type ) + f$irc$dcc_mime_type = meta$mime_type; + } diff --git a/scripts/base/protocols/irc/main.bro b/scripts/base/protocols/irc/main.bro deleted file mode 100644 index c2de29da6a..0000000000 --- a/scripts/base/protocols/irc/main.bro +++ /dev/null @@ -1,124 +0,0 @@ -##! Implements the core IRC analysis support. The logging model is to log -##! IRC commands along with the associated response and some additional -##! metadata about the connection if it's available. - -module IRC; - -export { - - redef enum Log::ID += { LOG }; - - type Info: record { - ## Timestamp when the command was seen. - ts: time &log; - ## Unique ID for the connection. - uid: string &log; - ## The connection's 4-tuple of endpoint addresses/ports. - id: conn_id &log; - ## Nickname given for the connection. - nick: string &log &optional; - ## Username given for the connection. - user: string &log &optional; - - ## Command given by the client. - command: string &log &optional; - ## Value for the command given by the client. - value: string &log &optional; - ## Any additional data for the command. - addl: string &log &optional; - }; - - ## Event that can be handled to access the IRC record as it is sent on - ## to the logging framework. - global irc_log: event(rec: Info); -} - -redef record connection += { - ## IRC session information. - irc: Info &optional; -}; - -const ports = { 6666/tcp, 6667/tcp, 6668/tcp, 6669/tcp }; -redef likely_server_ports += { ports }; - -event bro_init() &priority=5 - { - Log::create_stream(IRC::LOG, [$columns=Info, $ev=irc_log, $path="irc"]); - Analyzer::register_for_ports(Analyzer::ANALYZER_IRC, ports); - } - -function new_session(c: connection): Info - { - local info: Info; - info$ts = network_time(); - info$uid = c$uid; - info$id = c$id; - return info; - } - -function set_session(c: connection) - { - if ( ! 
c?$irc ) - c$irc = new_session(c); - - c$irc$ts=network_time(); - } - -event irc_nick_message(c: connection, is_orig: bool, who: string, newnick: string) &priority=5 - { - set_session(c); - if ( is_orig ) - { - c$irc$command = "NICK"; - c$irc$value = newnick; - } - } - -event irc_nick_message(c: connection, is_orig: bool, who: string, newnick: string) &priority=-5 - { - if ( is_orig ) - { - Log::write(IRC::LOG, c$irc); - c$irc$nick = newnick; - } - } - -event irc_user_message(c: connection, is_orig: bool, user: string, host: string, server: string, real_name: string) &priority=5 - { - set_session(c); - if ( is_orig ) - { - c$irc$command = "USER"; - c$irc$value = user; - c$irc$addl=fmt("%s %s %s", host, server, real_name); - } - } - -event irc_user_message(c: connection, is_orig: bool, user: string, host: string, server: string, real_name: string) &priority=-5 - { - if ( is_orig ) - { - Log::write(IRC::LOG, c$irc); - c$irc$user = user; - } - } - -event irc_join_message(c: connection, is_orig: bool, info_list: irc_join_list) &priority=5 - { - set_session(c); - if ( is_orig ) - c$irc$command = "JOIN"; - } - -event irc_join_message(c: connection, is_orig: bool, info_list: irc_join_list) &priority=-5 - { - if ( is_orig ) - { - for ( l in info_list ) - { - c$irc$value = l$channel; - c$irc$addl = (l$password != "" ? fmt(" with channel key: '%s'", l$password) : ""); - Log::write(IRC::LOG, c$irc); - } - } - } diff --git a/scripts/base/protocols/irc/main.zeek b/scripts/base/protocols/irc/main.zeek new file mode 100644 index 0000000000..85a8795e88 --- /dev/null +++ b/scripts/base/protocols/irc/main.zeek @@ -0,0 +1,124 @@ +##! Implements the core IRC analysis support. The logging model is to log +##! IRC commands along with the associated response and some additional +##! metadata about the connection if it's available. + +module IRC; + +export { + + redef enum Log::ID += { LOG }; + + type Info: record { + ## Timestamp when the command was seen. + ts: time &log; + ## Unique ID for the connection. + uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. + id: conn_id &log; + ## Nickname given for the connection. + nick: string &log &optional; + ## Username given for the connection. + user: string &log &optional; + + ## Command given by the client. + command: string &log &optional; + ## Value for the command given by the client. + value: string &log &optional; + ## Any additional data for the command. + addl: string &log &optional; + }; + + ## Event that can be handled to access the IRC record as it is sent on + ## to the logging framework. + global irc_log: event(rec: Info); +} + +redef record connection += { + ## IRC session information. + irc: Info &optional; +}; + +const ports = { 6666/tcp, 6667/tcp, 6668/tcp, 6669/tcp }; +redef likely_server_ports += { ports }; + +event zeek_init() &priority=5 + { + Log::create_stream(IRC::LOG, [$columns=Info, $ev=irc_log, $path="irc"]); + Analyzer::register_for_ports(Analyzer::ANALYZER_IRC, ports); + } + +function new_session(c: connection): Info + { + local info: Info; + info$ts = network_time(); + info$uid = c$uid; + info$id = c$id; + return info; + } + +function set_session(c: connection) + { + if ( ! 
c?$irc ) + c$irc = new_session(c); + + c$irc$ts=network_time(); + } + +event irc_nick_message(c: connection, is_orig: bool, who: string, newnick: string) &priority=5 + { + set_session(c); + if ( is_orig ) + { + c$irc$command = "NICK"; + c$irc$value = newnick; + } + } + +event irc_nick_message(c: connection, is_orig: bool, who: string, newnick: string) &priority=-5 + { + if ( is_orig ) + { + Log::write(IRC::LOG, c$irc); + c$irc$nick = newnick; + } + } + +event irc_user_message(c: connection, is_orig: bool, user: string, host: string, server: string, real_name: string) &priority=5 + { + set_session(c); + if ( is_orig ) + { + c$irc$command = "USER"; + c$irc$value = user; + c$irc$addl=fmt("%s %s %s", host, server, real_name); + } + } + +event irc_user_message(c: connection, is_orig: bool, user: string, host: string, server: string, real_name: string) &priority=-5 + { + if ( is_orig ) + { + Log::write(IRC::LOG, c$irc); + c$irc$user = user; + } + } + +event irc_join_message(c: connection, is_orig: bool, info_list: irc_join_list) &priority=5 + { + set_session(c); + if ( is_orig ) + c$irc$command = "JOIN"; + } + +event irc_join_message(c: connection, is_orig: bool, info_list: irc_join_list) &priority=-5 + { + if ( is_orig ) + { + for ( l in info_list ) + { + c$irc$value = l$channel; + c$irc$addl = (l$password != "" ? fmt(" with channel key: '%s'", l$password) : ""); + Log::write(IRC::LOG, c$irc); + } + } + } diff --git a/scripts/base/protocols/krb/__load__.bro b/scripts/base/protocols/krb/__load__.zeek similarity index 100% rename from scripts/base/protocols/krb/__load__.bro rename to scripts/base/protocols/krb/__load__.zeek diff --git a/scripts/base/protocols/krb/consts.bro b/scripts/base/protocols/krb/consts.zeek similarity index 100% rename from scripts/base/protocols/krb/consts.bro rename to scripts/base/protocols/krb/consts.zeek diff --git a/scripts/base/protocols/krb/files.bro b/scripts/base/protocols/krb/files.bro deleted file mode 100644 index 18ee4da83f..0000000000 --- a/scripts/base/protocols/krb/files.bro +++ /dev/null @@ -1,124 +0,0 @@ -@load ./main -@load base/utils/conn-ids -@load base/frameworks/files -@load base/files/x509 - -module KRB; - -export { - redef record Info += { - ## Client certificate - client_cert: Files::Info &optional; - ## Subject of client certificate, if any - client_cert_subject: string &log &optional; - ## File unique ID of client cert, if any - client_cert_fuid: string &log &optional; - - ## Server certificate - server_cert: Files::Info &optional; - ## Subject of server certificate, if any - server_cert_subject: string &log &optional; - ## File unique ID of server cert, if any - server_cert_fuid: string &log &optional; - }; - - ## Default file handle provider for KRB. - global get_file_handle: function(c: connection, is_orig: bool): string; - - ## Default file describer for KRB. - global describe_file: function(f: fa_file): string; -} - -function get_file_handle(c: connection, is_orig: bool): string - { - # Unused. File handles are generated in the analyzer. - return ""; - } - -function describe_file(f: fa_file): string - { - if ( f$source != "KRB_TCP" && f$source != "KRB" ) - return ""; - - if ( ! f?$info || ! f$info?$x509 || ! f$info$x509?$certificate ) - return ""; - - # It is difficult to reliably describe a certificate - especially since - # we do not know when this function is called (hence, if the data structures - # are already populated). - # - # Just return a bit of our connection information and hope that that is good enough. 
- for ( cid, c in f$conns ) - { - if ( c?$krb ) - { - return cat(c$id$resp_h, ":", c$id$resp_p); - } - } - - return cat("Serial: ", f$info$x509$certificate$serial, " Subject: ", - f$info$x509$certificate$subject, " Issuer: ", - f$info$x509$certificate$issuer); - } - -event bro_init() &priority=5 - { - Files::register_protocol(Analyzer::ANALYZER_KRB_TCP, - [$get_file_handle = KRB::get_file_handle, - $describe = KRB::describe_file]); - - Files::register_protocol(Analyzer::ANALYZER_KRB, - [$get_file_handle = KRB::get_file_handle, - $describe = KRB::describe_file]); - } - -event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5 - { - if ( f$source != "KRB_TCP" && f$source != "KRB" ) - return; - - set_session(c); - - if ( is_orig ) - { - c$krb$client_cert = f$info; - c$krb$client_cert_fuid = f$id; - } - else - { - c$krb$server_cert = f$info; - c$krb$server_cert_fuid = f$id; - } - } - -function fill_in_subjects(c: connection) - { - if ( ! c?$krb ) - return; - - if ( c$krb?$client_cert && c$krb$client_cert?$x509 && c$krb$client_cert$x509?$certificate ) - c$krb$client_cert_subject = c$krb$client_cert$x509$certificate$subject; - - if ( c$krb?$server_cert && c$krb$server_cert?$x509 && c$krb$server_cert$x509?$certificate ) - c$krb$server_cert_subject = c$krb$server_cert$x509$certificate$subject; - } - -event krb_error(c: connection, msg: Error_Msg) - { - fill_in_subjects(c); - } - -event krb_as_response(c: connection, msg: KDC_Response) - { - fill_in_subjects(c); - } - -event krb_tgs_response(c: connection, msg: KDC_Response) - { - fill_in_subjects(c); - } - -event connection_state_remove(c: connection) - { - fill_in_subjects(c); - } diff --git a/scripts/base/protocols/krb/files.zeek b/scripts/base/protocols/krb/files.zeek new file mode 100644 index 0000000000..c7dde949f2 --- /dev/null +++ b/scripts/base/protocols/krb/files.zeek @@ -0,0 +1,124 @@ +@load ./main +@load base/utils/conn-ids +@load base/frameworks/files +@load base/files/x509 + +module KRB; + +export { + redef record Info += { + ## Client certificate + client_cert: Files::Info &optional; + ## Subject of client certificate, if any + client_cert_subject: string &log &optional; + ## File unique ID of client cert, if any + client_cert_fuid: string &log &optional; + + ## Server certificate + server_cert: Files::Info &optional; + ## Subject of server certificate, if any + server_cert_subject: string &log &optional; + ## File unique ID of server cert, if any + server_cert_fuid: string &log &optional; + }; + + ## Default file handle provider for KRB. + global get_file_handle: function(c: connection, is_orig: bool): string; + + ## Default file describer for KRB. + global describe_file: function(f: fa_file): string; +} + +function get_file_handle(c: connection, is_orig: bool): string + { + # Unused. File handles are generated in the analyzer. + return ""; + } + +function describe_file(f: fa_file): string + { + if ( f$source != "KRB_TCP" && f$source != "KRB" ) + return ""; + + if ( ! f?$info || ! f$info?$x509 || ! f$info$x509?$certificate ) + return ""; + + # It is difficult to reliably describe a certificate - especially since + # we do not know when this function is called (hence, if the data structures + # are already populated). + # + # Just return a bit of our connection information and hope that that is good enough. 
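# Illustrative aside, not part of the patched script: once the describer is
# registered further below via Files::register_protocol, other scripts can ask
# the files framework for this summary. The handler here is a hypothetical
# usage sketch, assuming the standard file_state_remove event and the
# Files::describe function.
event file_state_remove(f: fa_file)
	{
	if ( f$source == "KRB" || f$source == "KRB_TCP" )
		print fmt("KRB certificate file %s: %s", f$id, Files::describe(f));
	}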
+ for ( cid, c in f$conns ) + { + if ( c?$krb ) + { + return cat(c$id$resp_h, ":", c$id$resp_p); + } + } + + return cat("Serial: ", f$info$x509$certificate$serial, " Subject: ", + f$info$x509$certificate$subject, " Issuer: ", + f$info$x509$certificate$issuer); + } + +event zeek_init() &priority=5 + { + Files::register_protocol(Analyzer::ANALYZER_KRB_TCP, + [$get_file_handle = KRB::get_file_handle, + $describe = KRB::describe_file]); + + Files::register_protocol(Analyzer::ANALYZER_KRB, + [$get_file_handle = KRB::get_file_handle, + $describe = KRB::describe_file]); + } + +event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5 + { + if ( f$source != "KRB_TCP" && f$source != "KRB" ) + return; + + set_session(c); + + if ( is_orig ) + { + c$krb$client_cert = f$info; + c$krb$client_cert_fuid = f$id; + } + else + { + c$krb$server_cert = f$info; + c$krb$server_cert_fuid = f$id; + } + } + +function fill_in_subjects(c: connection) + { + if ( ! c?$krb ) + return; + + if ( c$krb?$client_cert && c$krb$client_cert?$x509 && c$krb$client_cert$x509?$certificate ) + c$krb$client_cert_subject = c$krb$client_cert$x509$certificate$subject; + + if ( c$krb?$server_cert && c$krb$server_cert?$x509 && c$krb$server_cert$x509?$certificate ) + c$krb$server_cert_subject = c$krb$server_cert$x509$certificate$subject; + } + +event krb_error(c: connection, msg: Error_Msg) + { + fill_in_subjects(c); + } + +event krb_as_response(c: connection, msg: KDC_Response) + { + fill_in_subjects(c); + } + +event krb_tgs_response(c: connection, msg: KDC_Response) + { + fill_in_subjects(c); + } + +event connection_state_remove(c: connection) + { + fill_in_subjects(c); + } diff --git a/scripts/base/protocols/krb/main.bro b/scripts/base/protocols/krb/main.bro deleted file mode 100644 index 076ea0e171..0000000000 --- a/scripts/base/protocols/krb/main.bro +++ /dev/null @@ -1,221 +0,0 @@ -##! Implements base functionality for KRB analysis. Generates the kerberos.log -##! file. - -module KRB; - -@load ./consts - -export { - redef enum Log::ID += { LOG }; - - type Info: record { - ## Timestamp for when the event happened. - ts: time &log; - ## Unique ID for the connection. - uid: string &log; - ## The connection's 4-tuple of endpoint addresses/ports. - id: conn_id &log; - - ## Request type - Authentication Service ("AS") or - ## Ticket Granting Service ("TGS") - request_type: string &log &optional; - ## Client - client: string &log &optional; - ## Service - service: string &log &optional; - - ## Request result - success: bool &log &optional; - ## Error code - error_code: count &optional; - ## Error message - error_msg: string &log &optional; - - ## Ticket valid from - from: time &log &optional; - ## Ticket valid till - till: time &log &optional; - ## Ticket encryption type - cipher: string &log &optional; - - ## Forwardable ticket requested - forwardable: bool &log &optional; - ## Renewable ticket requested - renewable: bool &log &optional; - - ## We've already logged this - logged: bool &default=F; - }; - - ## The server response error texts which are *not* logged. - option ignored_errors: set[string] = { - # This will significantly increase the noisiness of the log. - # However, one attack is to iterate over principals, looking - # for ones that don't require preauth, and then performn - # an offline attack on that ticket. To detect that attack, - # log NEEDED_PREAUTH. - "NEEDED_PREAUTH", - # This is a more specific version of NEEDED_PREAUTH that's used - # by Windows AD Kerberos. 
- "Need to use PA-ENC-TIMESTAMP/PA-PK-AS-REQ", - }; - - ## Event that can be handled to access the KRB record as it is sent on - ## to the logging framework. - global log_krb: event(rec: Info); -} - -redef record connection += { - krb: Info &optional; -}; - -const tcp_ports = { 88/tcp }; -const udp_ports = { 88/udp }; -redef likely_server_ports += { tcp_ports, udp_ports }; - -event bro_init() &priority=5 - { - Analyzer::register_for_ports(Analyzer::ANALYZER_KRB, udp_ports); - Analyzer::register_for_ports(Analyzer::ANALYZER_KRB_TCP, tcp_ports); - Log::create_stream(KRB::LOG, [$columns=Info, $ev=log_krb, $path="kerberos"]); - } - -function set_session(c: connection): bool - { - if ( ! c?$krb ) - { - c$krb = Info($ts = network_time(), - $uid = c$uid, - $id = c$id); - } - - return c$krb$logged; - } - -function do_log(c: connection) - { - if ( c?$krb && ! c$krb$logged ) - { - Log::write(KRB::LOG, c$krb); - c$krb$logged = T; - } - } - -event krb_error(c: connection, msg: Error_Msg) &priority=5 - { - if ( set_session(c) ) - return; - - if ( msg?$error_text && msg$error_text in ignored_errors ) - { - if ( c?$krb ) - delete c$krb; - - return; - } - - if ( ! c$krb?$client && ( msg?$client_name || msg?$client_realm ) ) - c$krb$client = fmt("%s%s", msg?$client_name ? msg$client_name + "/" : "", - msg?$client_realm ? msg$client_realm : ""); - - c$krb$service = msg$service_name; - c$krb$success = F; - c$krb$error_code = msg$error_code; - - if ( msg?$error_text ) - c$krb$error_msg = msg$error_text; - else if ( msg$error_code in error_msg ) - c$krb$error_msg = error_msg[msg$error_code]; - } - -event krb_error(c: connection, msg: Error_Msg) &priority=-5 - { - do_log(c); - } - -event krb_as_request(c: connection, msg: KDC_Request) &priority=5 - { - if ( set_session(c) ) - return; - - c$krb$request_type = "AS"; - c$krb$client = fmt("%s/%s", msg?$client_name ? msg$client_name : "", msg$service_realm); - if ( msg?$service_name ) - c$krb$service = msg$service_name; - - if ( msg?$from ) - c$krb$from = msg$from; - c$krb$till = msg$till; - - c$krb$forwardable = msg$kdc_options$forwardable; - c$krb$renewable = msg$kdc_options$renewable; - } - -event krb_as_response(c: connection, msg: KDC_Response) &priority=5 - { - if ( set_session(c) ) - return; - - if ( ! c$krb?$client && ( msg?$client_name || msg?$client_realm ) ) - { - c$krb$client = fmt("%s/%s", msg?$client_name ? msg$client_name : "", - msg?$client_realm ? msg$client_realm : ""); - } - - c$krb$service = msg$ticket$service_name; - c$krb$cipher = cipher_name[msg$ticket$cipher]; - c$krb$success = T; - } - -event krb_as_response(c: connection, msg: KDC_Response) &priority=-5 - { - do_log(c); - } - -event krb_ap_request(c: connection, ticket: KRB::Ticket, opts: KRB::AP_Options) &priority=5 - { - if ( set_session(c) ) - return; - } - -event krb_tgs_request(c: connection, msg: KDC_Request) &priority=5 - { - if ( set_session(c) ) - return; - - c$krb$request_type = "TGS"; - if ( msg?$service_name ) - c$krb$service = msg$service_name; - if ( msg?$from ) - c$krb$from = msg$from; - c$krb$till = msg$till; - - c$krb$forwardable = msg$kdc_options$forwardable; - c$krb$renewable = msg$kdc_options$renewable; - } - -event krb_tgs_response(c: connection, msg: KDC_Response) &priority=5 - { - if ( set_session(c) ) - return; - - if ( ! c$krb?$client && ( msg?$client_name || msg?$client_realm ) ) - { - c$krb$client = fmt("%s/%s", msg?$client_name ? msg$client_name : "", - msg?$client_realm ? 
msg$client_realm : ""); - } - - c$krb$service = msg$ticket$service_name; - c$krb$cipher = cipher_name[msg$ticket$cipher]; - c$krb$success = T; - } - -event krb_tgs_response(c: connection, msg: KDC_Response) &priority=-5 - { - do_log(c); - } - -event connection_state_remove(c: connection) &priority=-5 - { - do_log(c); - } diff --git a/scripts/base/protocols/krb/main.zeek b/scripts/base/protocols/krb/main.zeek new file mode 100644 index 0000000000..cdcdf48f58 --- /dev/null +++ b/scripts/base/protocols/krb/main.zeek @@ -0,0 +1,234 @@ +##! Implements base functionality for KRB analysis. Generates the kerberos.log +##! file. + +module KRB; + +@load ./consts + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + ## Timestamp for when the event happened. + ts: time &log; + ## Unique ID for the connection. + uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. + id: conn_id &log; + + ## Request type - Authentication Service ("AS") or + ## Ticket Granting Service ("TGS") + request_type: string &log &optional; + ## Client + client: string &log &optional; + ## Service + service: string &log &optional; + + ## Request result + success: bool &log &optional; + ## Error code + error_code: count &optional; + ## Error message + error_msg: string &log &optional; + + ## Ticket valid from + from: time &log &optional; + ## Ticket valid till + till: time &log &optional; + ## Ticket encryption type + cipher: string &log &optional; + + ## Forwardable ticket requested + forwardable: bool &log &optional; + ## Renewable ticket requested + renewable: bool &log &optional; + + ## We've already logged this + logged: bool &default=F; + }; + + ## The server response error texts which are *not* logged. + option ignored_errors: set[string] = { + # This will significantly increase the noisiness of the log. + # However, one attack is to iterate over principals, looking + # for ones that don't require preauth, and then performn + # an offline attack on that ticket. To detect that attack, + # log NEEDED_PREAUTH. + "NEEDED_PREAUTH", + # This is a more specific version of NEEDED_PREAUTH that's used + # by Windows AD Kerberos. + "Need to use PA-ENC-TIMESTAMP/PA-PK-AS-REQ", + }; + + ## Event that can be handled to access the KRB record as it is sent on + ## to the logging framework. + global log_krb: event(rec: Info); +} + +redef record connection += { + krb: Info &optional; +}; + +const tcp_ports = { 88/tcp }; +const udp_ports = { 88/udp }; +redef likely_server_ports += { tcp_ports, udp_ports }; + +event zeek_init() &priority=5 + { + Analyzer::register_for_ports(Analyzer::ANALYZER_KRB, udp_ports); + Analyzer::register_for_ports(Analyzer::ANALYZER_KRB_TCP, tcp_ports); + Log::create_stream(KRB::LOG, [$columns=Info, $ev=log_krb, $path="kerberos"]); + } + +function set_session(c: connection): bool + { + if ( ! c?$krb ) + { + c$krb = Info($ts = network_time(), + $uid = c$uid, + $id = c$id); + } + + return c$krb$logged; + } + +function do_log(c: connection) + { + if ( c?$krb && ! c$krb$logged ) + { + Log::write(KRB::LOG, c$krb); + c$krb$logged = T; + } + } + +event krb_error(c: connection, msg: Error_Msg) &priority=5 + { + if ( set_session(c) ) + return; + + if ( msg?$error_text && msg$error_text in ignored_errors ) + { + if ( c?$krb ) + delete c$krb; + + return; + } + + if ( ! c$krb?$client && ( msg?$client_name || msg?$client_realm ) ) + c$krb$client = fmt("%s%s", msg?$client_name ? msg$client_name + "/" : "", + msg?$client_realm ? 
msg$client_realm : ""); + + if ( msg?$service_name ) + c$krb$service = msg$service_name; + + c$krb$success = F; + c$krb$error_code = msg$error_code; + + if ( msg?$error_text ) + c$krb$error_msg = msg$error_text; + else if ( msg$error_code in error_msg ) + c$krb$error_msg = error_msg[msg$error_code]; + } + +event krb_error(c: connection, msg: Error_Msg) &priority=-5 + { + do_log(c); + } + +event krb_as_request(c: connection, msg: KDC_Request) &priority=5 + { + if ( set_session(c) ) + return; + + c$krb$request_type = "AS"; + + c$krb$client = fmt("%s/%s", msg?$client_name ? msg$client_name : "", + msg?$service_realm ? msg$service_realm : ""); + + if ( msg?$service_name ) + c$krb$service = msg$service_name; + + if ( msg?$from ) + c$krb$from = msg$from; + if ( msg?$till ) + c$krb$till = msg$till; + + if ( msg?$kdc_options ) + { + c$krb$forwardable = msg$kdc_options$forwardable; + c$krb$renewable = msg$kdc_options$renewable; + } + } + +event krb_as_response(c: connection, msg: KDC_Response) &priority=5 + { + if ( set_session(c) ) + return; + + if ( ! c$krb?$client && ( msg?$client_name || msg?$client_realm ) ) + { + c$krb$client = fmt("%s/%s", msg?$client_name ? msg$client_name : "", + msg?$client_realm ? msg$client_realm : ""); + } + + c$krb$service = msg$ticket$service_name; + c$krb$cipher = cipher_name[msg$ticket$cipher]; + c$krb$success = T; + } + +event krb_as_response(c: connection, msg: KDC_Response) &priority=-5 + { + do_log(c); + } + +event krb_ap_request(c: connection, ticket: KRB::Ticket, opts: KRB::AP_Options) &priority=5 + { + if ( set_session(c) ) + return; + } + +event krb_tgs_request(c: connection, msg: KDC_Request) &priority=5 + { + if ( set_session(c) ) + return; + + c$krb$request_type = "TGS"; + if ( msg?$service_name ) + c$krb$service = msg$service_name; + if ( msg?$from ) + c$krb$from = msg$from; + if ( msg?$till ) + c$krb$till = msg$till; + + if ( msg?$kdc_options ) + { + c$krb$forwardable = msg$kdc_options$forwardable; + c$krb$renewable = msg$kdc_options$renewable; + } + } + +event krb_tgs_response(c: connection, msg: KDC_Response) &priority=5 + { + if ( set_session(c) ) + return; + + if ( ! c$krb?$client && ( msg?$client_name || msg?$client_realm ) ) + { + c$krb$client = fmt("%s/%s", msg?$client_name ? msg$client_name : "", + msg?$client_realm ? msg$client_realm : ""); + } + + c$krb$service = msg$ticket$service_name; + c$krb$cipher = cipher_name[msg$ticket$cipher]; + c$krb$success = T; + } + +event krb_tgs_response(c: connection, msg: KDC_Response) &priority=-5 + { + do_log(c); + } + +event connection_state_remove(c: connection) &priority=-5 + { + do_log(c); + } diff --git a/scripts/base/protocols/modbus/__load__.bro b/scripts/base/protocols/modbus/__load__.zeek similarity index 100% rename from scripts/base/protocols/modbus/__load__.bro rename to scripts/base/protocols/modbus/__load__.zeek diff --git a/scripts/base/protocols/modbus/consts.bro b/scripts/base/protocols/modbus/consts.zeek similarity index 100% rename from scripts/base/protocols/modbus/consts.bro rename to scripts/base/protocols/modbus/consts.zeek diff --git a/scripts/base/protocols/modbus/main.bro b/scripts/base/protocols/modbus/main.bro deleted file mode 100644 index 5a30d170e5..0000000000 --- a/scripts/base/protocols/modbus/main.bro +++ /dev/null @@ -1,70 +0,0 @@ -##! Base Modbus analysis script. - -module Modbus; - -@load ./consts - -export { - redef enum Log::ID += { LOG }; - - type Info: record { - ## Time of the request. - ts: time &log; - ## Unique identifier for the connection. 
- uid: string &log; - ## Identifier for the connection. - id: conn_id &log; - ## The name of the function message that was sent. - func: string &log &optional; - ## The exception if the response was a failure. - exception: string &log &optional; - }; - - ## Event that can be handled to access the Modbus record as it is sent - ## on to the logging framework. - global log_modbus: event(rec: Info); -} - -redef record connection += { - modbus: Info &optional; -}; - -const ports = { 502/tcp }; -redef likely_server_ports += { ports }; - -event bro_init() &priority=5 - { - Log::create_stream(Modbus::LOG, [$columns=Info, $ev=log_modbus, $path="modbus"]); - Analyzer::register_for_ports(Analyzer::ANALYZER_MODBUS, ports); - } - -event modbus_message(c: connection, headers: ModbusHeaders, is_orig: bool) &priority=5 - { - if ( ! c?$modbus ) - { - c$modbus = [$ts=network_time(), $uid=c$uid, $id=c$id]; - } - - c$modbus$ts = network_time(); - c$modbus$func = function_codes[headers$function_code]; - } - -event modbus_message(c: connection, headers: ModbusHeaders, is_orig: bool) &priority=-5 - { - # Only log upon replies. - # Also, don't log now if this is an exception (log in the exception event handler) - if ( ! is_orig && ( headers$function_code <= 0x81 || headers$function_code >= 0x98 ) ) - Log::write(LOG, c$modbus); - } - -event modbus_exception(c: connection, headers: ModbusHeaders, code: count) &priority=5 - { - c$modbus$exception = exception_codes[code]; - } - -event modbus_exception(c: connection, headers: ModbusHeaders, code: count) &priority=-5 - { - Log::write(LOG, c$modbus); - delete c$modbus$exception; - } - diff --git a/scripts/base/protocols/modbus/main.zeek b/scripts/base/protocols/modbus/main.zeek new file mode 100644 index 0000000000..d8866cefa1 --- /dev/null +++ b/scripts/base/protocols/modbus/main.zeek @@ -0,0 +1,70 @@ +##! Base Modbus analysis script. + +module Modbus; + +@load ./consts + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + ## Time of the request. + ts: time &log; + ## Unique identifier for the connection. + uid: string &log; + ## Identifier for the connection. + id: conn_id &log; + ## The name of the function message that was sent. + func: string &log &optional; + ## The exception if the response was a failure. + exception: string &log &optional; + }; + + ## Event that can be handled to access the Modbus record as it is sent + ## on to the logging framework. + global log_modbus: event(rec: Info); +} + +redef record connection += { + modbus: Info &optional; +}; + +const ports = { 502/tcp }; +redef likely_server_ports += { ports }; + +event zeek_init() &priority=5 + { + Log::create_stream(Modbus::LOG, [$columns=Info, $ev=log_modbus, $path="modbus"]); + Analyzer::register_for_ports(Analyzer::ANALYZER_MODBUS, ports); + } + +event modbus_message(c: connection, headers: ModbusHeaders, is_orig: bool) &priority=5 + { + if ( ! c?$modbus ) + { + c$modbus = [$ts=network_time(), $uid=c$uid, $id=c$id]; + } + + c$modbus$ts = network_time(); + c$modbus$func = function_codes[headers$function_code]; + } + +event modbus_message(c: connection, headers: ModbusHeaders, is_orig: bool) &priority=-5 + { + # Only log upon replies. + # Also, don't log now if this is an exception (log in the exception event handler) + if ( ! 
is_orig && ( headers$function_code <= 0x81 || headers$function_code >= 0x98 ) ) + Log::write(LOG, c$modbus); + } + +event modbus_exception(c: connection, headers: ModbusHeaders, code: count) &priority=5 + { + c$modbus$exception = exception_codes[code]; + } + +event modbus_exception(c: connection, headers: ModbusHeaders, code: count) &priority=-5 + { + Log::write(LOG, c$modbus); + delete c$modbus$exception; + } + diff --git a/scripts/base/frameworks/tunnels/__load__.bro b/scripts/base/protocols/mysql/__load__.zeek similarity index 100% rename from scripts/base/frameworks/tunnels/__load__.bro rename to scripts/base/protocols/mysql/__load__.zeek diff --git a/scripts/base/protocols/mysql/consts.bro b/scripts/base/protocols/mysql/consts.zeek similarity index 100% rename from scripts/base/protocols/mysql/consts.bro rename to scripts/base/protocols/mysql/consts.zeek diff --git a/scripts/base/protocols/mysql/main.bro b/scripts/base/protocols/mysql/main.bro deleted file mode 100644 index e4ba07cbca..0000000000 --- a/scripts/base/protocols/mysql/main.bro +++ /dev/null @@ -1,132 +0,0 @@ -##! Implements base functionality for MySQL analysis. Generates the mysql.log file. - -module MySQL; - -@load ./consts - -export { - redef enum Log::ID += { mysql::LOG }; - - type Info: record { - ## Timestamp for when the event happened. - ts: time &log; - ## Unique ID for the connection. - uid: string &log; - ## The connection's 4-tuple of endpoint addresses/ports. - id: conn_id &log; - ## The command that was issued - cmd: string &log; - ## The argument issued to the command - arg: string &log; - ## Did the server tell us that the command succeeded? - success: bool &log &optional; - ## The number of affected rows, if any - rows: count &log &optional; - ## Server message, if any - response: string &log &optional; - }; - - ## Event that can be handled to access the MySQL record as it is sent on - ## to the logging framework. - global log_mysql: event(rec: Info); -} - -redef record connection += { - mysql: Info &optional; -}; - -const ports = { 1434/tcp, 3306/tcp }; - -event bro_init() &priority=5 - { - Log::create_stream(mysql::LOG, [$columns=Info, $ev=log_mysql, $path="mysql"]); - Analyzer::register_for_ports(Analyzer::ANALYZER_MYSQL, ports); - } - -event mysql_handshake(c: connection, username: string) - { - if ( ! c?$mysql ) - { - local info: Info; - info$ts = network_time(); - info$uid = c$uid; - info$id = c$id; - info$cmd = "login"; - info$arg = username; - c$mysql = info; - } - } - -event mysql_command_request(c: connection, command: count, arg: string) &priority=5 - { - if ( c?$mysql ) - { - # We got a request, but we haven't logged our - # previous request yet, so let's do that now. - Log::write(mysql::LOG, c$mysql); - delete c$mysql; - } - - local info: Info; - info$ts = network_time(); - info$uid = c$uid; - info$id = c$id; - info$cmd = commands[command]; - info$arg = sub(arg, /\0$/, ""); - c$mysql = info; - } - -event mysql_command_request(c: connection, command: count, arg: string) &priority=-5 - { - if ( c?$mysql && c$mysql?$cmd && c$mysql$cmd == "quit" ) - { - # We get no response for quits, so let's just log it now. 
- Log::write(mysql::LOG, c$mysql); - delete c$mysql; - } - } - -event mysql_error(c: connection, code: count, msg: string) &priority=5 - { - if ( c?$mysql ) - { - c$mysql$success = F; - c$mysql$response = msg; - } - } - -event mysql_error(c: connection, code: count, msg: string) &priority=-5 - { - if ( c?$mysql ) - { - Log::write(mysql::LOG, c$mysql); - delete c$mysql; - } - } - -event mysql_ok(c: connection, affected_rows: count) &priority=5 - { - if ( c?$mysql ) - { - c$mysql$success = T; - c$mysql$rows = affected_rows; - } - } - -event mysql_ok(c: connection, affected_rows: count) &priority=-5 - { - if ( c?$mysql ) - { - Log::write(mysql::LOG, c$mysql); - delete c$mysql; - } - } - -event connection_state_remove(c: connection) &priority=-5 - { - if ( c?$mysql ) - { - Log::write(mysql::LOG, c$mysql); - delete c$mysql; - } - } diff --git a/scripts/base/protocols/mysql/main.zeek b/scripts/base/protocols/mysql/main.zeek new file mode 100644 index 0000000000..e4c76dd5bc --- /dev/null +++ b/scripts/base/protocols/mysql/main.zeek @@ -0,0 +1,132 @@ +##! Implements base functionality for MySQL analysis. Generates the mysql.log file. + +module MySQL; + +@load ./consts + +export { + redef enum Log::ID += { mysql::LOG }; + + type Info: record { + ## Timestamp for when the event happened. + ts: time &log; + ## Unique ID for the connection. + uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. + id: conn_id &log; + ## The command that was issued + cmd: string &log; + ## The argument issued to the command + arg: string &log; + ## Did the server tell us that the command succeeded? + success: bool &log &optional; + ## The number of affected rows, if any + rows: count &log &optional; + ## Server message, if any + response: string &log &optional; + }; + + ## Event that can be handled to access the MySQL record as it is sent on + ## to the logging framework. + global log_mysql: event(rec: Info); +} + +redef record connection += { + mysql: Info &optional; +}; + +const ports = { 1434/tcp, 3306/tcp }; + +event zeek_init() &priority=5 + { + Log::create_stream(mysql::LOG, [$columns=Info, $ev=log_mysql, $path="mysql"]); + Analyzer::register_for_ports(Analyzer::ANALYZER_MYSQL, ports); + } + +event mysql_handshake(c: connection, username: string) + { + if ( ! c?$mysql ) + { + local info: Info; + info$ts = network_time(); + info$uid = c$uid; + info$id = c$id; + info$cmd = "login"; + info$arg = username; + c$mysql = info; + } + } + +event mysql_command_request(c: connection, command: count, arg: string) &priority=5 + { + if ( c?$mysql ) + { + # We got a request, but we haven't logged our + # previous request yet, so let's do that now. + Log::write(mysql::LOG, c$mysql); + delete c$mysql; + } + + local info: Info; + info$ts = network_time(); + info$uid = c$uid; + info$id = c$id; + info$cmd = commands[command]; + info$arg = sub(arg, /\0$/, ""); + c$mysql = info; + } + +event mysql_command_request(c: connection, command: count, arg: string) &priority=-5 + { + if ( c?$mysql && c$mysql?$cmd && c$mysql$cmd == "quit" ) + { + # We get no response for quits, so let's just log it now. 
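# Illustrative aside, not part of the patched script: as the export block
# above documents, the MySQL::log_mysql event can be handled to inspect each
# record just before it is written. This hypothetical handler simply reports
# commands the server rejected.
event MySQL::log_mysql(rec: MySQL::Info)
	{
	if ( rec?$success && ! rec$success )
		print fmt("MySQL command '%s' on %s failed: %s", rec$cmd, rec$uid,
		          rec?$response ? rec$response : "<no server message>");
	}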
+ Log::write(mysql::LOG, c$mysql); + delete c$mysql; + } + } + +event mysql_error(c: connection, code: count, msg: string) &priority=5 + { + if ( c?$mysql ) + { + c$mysql$success = F; + c$mysql$response = msg; + } + } + +event mysql_error(c: connection, code: count, msg: string) &priority=-5 + { + if ( c?$mysql ) + { + Log::write(mysql::LOG, c$mysql); + delete c$mysql; + } + } + +event mysql_ok(c: connection, affected_rows: count) &priority=5 + { + if ( c?$mysql ) + { + c$mysql$success = T; + c$mysql$rows = affected_rows; + } + } + +event mysql_ok(c: connection, affected_rows: count) &priority=-5 + { + if ( c?$mysql ) + { + Log::write(mysql::LOG, c$mysql); + delete c$mysql; + } + } + +event connection_state_remove(c: connection) &priority=-5 + { + if ( c?$mysql ) + { + Log::write(mysql::LOG, c$mysql); + delete c$mysql; + } + } diff --git a/scripts/base/protocols/ntlm/__load__.bro b/scripts/base/protocols/ntlm/__load__.zeek similarity index 100% rename from scripts/base/protocols/ntlm/__load__.bro rename to scripts/base/protocols/ntlm/__load__.zeek diff --git a/scripts/base/protocols/ntlm/main.bro b/scripts/base/protocols/ntlm/main.bro deleted file mode 100644 index 88a484e090..0000000000 --- a/scripts/base/protocols/ntlm/main.bro +++ /dev/null @@ -1,115 +0,0 @@ -@load base/frameworks/dpd - -module NTLM; - -export { - redef enum Log::ID += { LOG }; - - type Info: record { - ## Timestamp for when the event happened. - ts : time &log; - ## Unique ID for the connection. - uid : string &log; - ## The connection's 4-tuple of endpoint addresses/ports. - id : conn_id &log; - - ## Username given by the client. - username : string &log &optional; - ## Hostname given by the client. - hostname : string &log &optional; - ## Domainname given by the client. - domainname : string &log &optional; - - ## NetBIOS name given by the server in a CHALLENGE. - server_nb_computer_name: string &log &optional; - ## DNS name given by the server in a CHALLENGE. - server_dns_computer_name: string &log &optional; - ## Tree name given by the server in a CHALLENGE. - server_tree_name: string &log &optional; - - ## Indicate whether or not the authentication was successful. - success : bool &log &optional; - - ## Internally used field to indicate if the login attempt - ## has already been logged. - done: bool &default=F; - }; -} - -redef DPD::ignore_violations += { Analyzer::ANALYZER_NTLM }; - -redef record connection += { - ntlm: Info &optional; -}; - -event bro_init() &priority=5 - { - Log::create_stream(NTLM::LOG, [$columns=Info, $path="ntlm"]); - } - -function set_session(c: connection) - { - if ( ! 
c?$ntlm ) - c$ntlm = NTLM::Info($ts=network_time(), $uid=c$uid, $id=c$id); - } - -event ntlm_negotiate(c: connection, request: NTLM::Negotiate) &priority=5 - { - set_session(c); - } - -event ntlm_challenge(c: connection, challenge: NTLM::Challenge) &priority=5 - { - set_session(c); - - if ( challenge?$target_info ) - { - local ti = challenge$target_info; - if ( ti?$nb_domain_name ) - c$ntlm$server_nb_computer_name = ti$nb_computer_name; - if ( ti?$dns_domain_name ) - c$ntlm$server_dns_computer_name = ti$dns_computer_name; - if ( ti?$dns_tree_name ) - c$ntlm$server_tree_name = ti$dns_tree_name; - } - } - -event ntlm_authenticate(c: connection, request: NTLM::Authenticate) &priority=5 - { - set_session(c); - - if ( request?$domain_name ) - c$ntlm$domainname = request$domain_name; - if ( request?$workstation ) - c$ntlm$hostname = request$workstation; - if ( request?$user_name ) - c$ntlm$username = request$user_name; - } - -event gssapi_neg_result(c: connection, state: count) &priority=3 - { - # Ignore "incomplete" replies (state==1) - if ( c?$ntlm && state != 1 ) - c$ntlm$success = (state == 0); - } - -event gssapi_neg_result(c: connection, state: count) &priority=-3 - { - if ( c?$ntlm && ! c$ntlm$done ) - { - # Only write if success is actually set to something... - if ( c$ntlm?$success ) - { - Log::write(NTLM::LOG, c$ntlm); - c$ntlm$done = T; - } - } - } - -event connection_state_remove(c: connection) &priority=-5 - { - if ( c?$ntlm && ! c$ntlm$done ) - { - Log::write(NTLM::LOG, c$ntlm); - } - } diff --git a/scripts/base/protocols/ntlm/main.zeek b/scripts/base/protocols/ntlm/main.zeek new file mode 100644 index 0000000000..231f90473d --- /dev/null +++ b/scripts/base/protocols/ntlm/main.zeek @@ -0,0 +1,115 @@ +@load base/frameworks/dpd + +module NTLM; + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + ## Timestamp for when the event happened. + ts : time &log; + ## Unique ID for the connection. + uid : string &log; + ## The connection's 4-tuple of endpoint addresses/ports. + id : conn_id &log; + + ## Username given by the client. + username : string &log &optional; + ## Hostname given by the client. + hostname : string &log &optional; + ## Domainname given by the client. + domainname : string &log &optional; + + ## NetBIOS name given by the server in a CHALLENGE. + server_nb_computer_name: string &log &optional; + ## DNS name given by the server in a CHALLENGE. + server_dns_computer_name: string &log &optional; + ## Tree name given by the server in a CHALLENGE. + server_tree_name: string &log &optional; + + ## Indicate whether or not the authentication was successful. + success : bool &log &optional; + + ## Internally used field to indicate if the login attempt + ## has already been logged. + done: bool &default=F; + }; +} + +redef DPD::ignore_violations += { Analyzer::ANALYZER_NTLM }; + +redef record connection += { + ntlm: Info &optional; +}; + +event zeek_init() &priority=5 + { + Log::create_stream(NTLM::LOG, [$columns=Info, $path="ntlm"]); + } + +function set_session(c: connection) + { + if ( ! 
c?$ntlm ) + c$ntlm = NTLM::Info($ts=network_time(), $uid=c$uid, $id=c$id); + } + +event ntlm_negotiate(c: connection, request: NTLM::Negotiate) &priority=5 + { + set_session(c); + } + +event ntlm_challenge(c: connection, challenge: NTLM::Challenge) &priority=5 + { + set_session(c); + + if ( challenge?$target_info ) + { + local ti = challenge$target_info; + if ( ti?$nb_domain_name ) + c$ntlm$server_nb_computer_name = ti$nb_computer_name; + if ( ti?$dns_domain_name ) + c$ntlm$server_dns_computer_name = ti$dns_computer_name; + if ( ti?$dns_tree_name ) + c$ntlm$server_tree_name = ti$dns_tree_name; + } + } + +event ntlm_authenticate(c: connection, request: NTLM::Authenticate) &priority=5 + { + set_session(c); + + if ( request?$domain_name ) + c$ntlm$domainname = request$domain_name; + if ( request?$workstation ) + c$ntlm$hostname = request$workstation; + if ( request?$user_name ) + c$ntlm$username = request$user_name; + } + +event gssapi_neg_result(c: connection, state: count) &priority=3 + { + # Ignore "incomplete" replies (state==1) + if ( c?$ntlm && state != 1 ) + c$ntlm$success = (state == 0); + } + +event gssapi_neg_result(c: connection, state: count) &priority=-3 + { + if ( c?$ntlm && ! c$ntlm$done ) + { + # Only write if success is actually set to something... + if ( c$ntlm?$success ) + { + Log::write(NTLM::LOG, c$ntlm); + c$ntlm$done = T; + } + } + } + +event connection_state_remove(c: connection) &priority=-5 + { + if ( c?$ntlm && ! c$ntlm$done ) + { + Log::write(NTLM::LOG, c$ntlm); + } + } diff --git a/scripts/base/protocols/ntp/__load__.zeek b/scripts/base/protocols/ntp/__load__.zeek new file mode 100644 index 0000000000..af9669a968 --- /dev/null +++ b/scripts/base/protocols/ntp/__load__.zeek @@ -0,0 +1,2 @@ +@load ./main +@load ./consts diff --git a/scripts/base/protocols/ntp/consts.zeek b/scripts/base/protocols/ntp/consts.zeek new file mode 100644 index 0000000000..034ae73418 --- /dev/null +++ b/scripts/base/protocols/ntp/consts.zeek @@ -0,0 +1,15 @@ +module NTP; + +export { + ## The descriptions of the NTP mode value, as described + ## in :rfc:`5905`, Figure 1 + const modes: table[count] of string = { + [1] = "symmetric active", + [2] = "symmetric passive", + [3] = "client", + [4] = "server", + [5] = "broadcast server", + [6] = "broadcast client", + [7] = "reserved", + } &default=function(i: count):string { return fmt("unknown-%d", i); } &redef; +} diff --git a/scripts/base/protocols/ntp/main.zeek b/scripts/base/protocols/ntp/main.zeek new file mode 100644 index 0000000000..ed55ac4ee4 --- /dev/null +++ b/scripts/base/protocols/ntp/main.zeek @@ -0,0 +1,107 @@ +module NTP; + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + ## Timestamp for when the event happened. + ts: time &log; + ## Unique ID for the connection. + uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. + id: conn_id &log; + ## The NTP version number (1, 2, 3, 4). + version: count &log; + ## The NTP mode being used. + mode: count &log; + ## The stratum (primary server, secondary server, etc.). + stratum: count &log; + ## The maximum interval between successive messages. + poll: interval &log; + ## The precision of the system clock. + precision: interval &log; + ## Total round-trip delay to the reference clock. + root_delay: interval &log; + ## Total dispersion to the reference clock. + root_disp: interval &log; + ## For stratum 0, 4 character string used for debugging. + ## For stratum 1, ID assigned to the reference clock by IANA. 
+ ## Above stratum 1, when using IPv4, the IP address of the reference + ## clock. Note that the NTP protocol did not originally specify a + ## large enough field to represent IPv6 addresses, so they use + ## the first four bytes of the MD5 hash of the reference clock's + ## IPv6 address (i.e. an IPv4 address here is not necessarily IPv4). + ref_id: string &log; + ## Time when the system clock was last set or correct. + ref_time: time &log; + ## Time at the client when the request departed for the NTP server. + org_time: time &log; + ## Time at the server when the request arrived from the NTP client. + rec_time: time &log; + ## Time at the server when the response departed for the NTP client. + xmt_time: time &log; + ## Number of extension fields (which are not currently parsed). + num_exts: count &default=0 &log; + }; + + ## Event that can be handled to access the NTP record as it is sent on + ## to the logging framework. + global log_ntp: event(rec: Info); +} + +redef record connection += { + ntp: Info &optional; +}; + +const ports = { 123/udp }; +redef likely_server_ports += { ports }; + +event ntp_message(c: connection, is_orig: bool, msg: NTP::Message) &priority=5 + { + local info: Info; + info$ts = network_time(); + info$uid = c$uid; + info$id = c$id; + info$version = msg$version; + info$mode = msg$mode; + + if ( msg$mode < 6 ) + { + info$stratum = msg$std_msg$stratum; + info$poll = msg$std_msg$poll; + info$precision = msg$std_msg$precision; + info$root_delay = msg$std_msg$root_delay; + info$root_disp = msg$std_msg$root_disp; + + if ( msg$std_msg?$kiss_code ) + info$ref_id = msg$std_msg$kiss_code; + else if ( msg$std_msg?$ref_id ) + info$ref_id = msg$std_msg$ref_id; + else if ( msg$std_msg?$ref_addr ) + info$ref_id= cat(msg$std_msg$ref_addr); + + info$ref_time = msg$std_msg$ref_time; + info$org_time = msg$std_msg$org_time; + info$rec_time = msg$std_msg$rec_time; + info$xmt_time = msg$std_msg$xmt_time; + + info$num_exts = msg$std_msg$num_exts; + } + + # Copy the present packet info into the connection record + # If more ntp packets are sent on the same connection, the newest one + # will overwrite the previous + c$ntp = info; + } + +event ntp_message(c: connection, is_orig: bool, msg: NTP::Message) &priority=-5 + { + if ( msg$mode < 6 ) + Log::write(NTP::LOG, c$ntp); + } + +event zeek_init() &priority=5 + { + Analyzer::register_for_ports(Analyzer::ANALYZER_NTP, ports); + Log::create_stream(NTP::LOG, [$columns = Info, $ev = log_ntp]); + } diff --git a/scripts/base/protocols/pop3/__load__.bro b/scripts/base/protocols/pop3/__load__.zeek similarity index 100% rename from scripts/base/protocols/pop3/__load__.bro rename to scripts/base/protocols/pop3/__load__.zeek diff --git a/scripts/base/protocols/mysql/__load__.bro b/scripts/base/protocols/radius/__load__.zeek similarity index 100% rename from scripts/base/protocols/mysql/__load__.bro rename to scripts/base/protocols/radius/__load__.zeek diff --git a/scripts/base/protocols/radius/consts.bro b/scripts/base/protocols/radius/consts.zeek similarity index 100% rename from scripts/base/protocols/radius/consts.bro rename to scripts/base/protocols/radius/consts.zeek diff --git a/scripts/base/protocols/radius/main.bro b/scripts/base/protocols/radius/main.bro deleted file mode 100644 index ea30b27911..0000000000 --- a/scripts/base/protocols/radius/main.bro +++ /dev/null @@ -1,147 +0,0 @@ -##! Implements base functionality for RADIUS analysis. Generates the radius.log file. 
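# Illustrative aside, not part of the patched scripts: the NTP main.zeek and
# consts.zeek added above expose a log_ntp event and an NTP::modes table. A
# hypothetical handler such as this one could combine them to flag traffic
# that is not plain client/server NTP (modes other than 3 and 4).
event NTP::log_ntp(rec: NTP::Info)
	{
	if ( rec$mode != 3 && rec$mode != 4 )
		print fmt("unusual NTP mode '%s' between %s and %s",
		          NTP::modes[rec$mode], rec$id$orig_h, rec$id$resp_h);
	}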
- -module RADIUS; - -@load ./consts.bro -@load base/utils/addrs - -export { - redef enum Log::ID += { LOG }; - - type Info: record { - ## Timestamp for when the event happened. - ts : time &log; - ## Unique ID for the connection. - uid : string &log; - ## The connection's 4-tuple of endpoint addresses/ports. - id : conn_id &log; - ## The username, if present. - username : string &log &optional; - ## MAC address, if present. - mac : string &log &optional; - ## The address given to the network access server, if - ## present. This is only a hint from the RADIUS server - ## and the network access server is not required to honor - ## the address. - framed_addr : addr &log &optional; - ## Remote IP address, if present. This is collected - ## from the Tunnel-Client-Endpoint attribute. - remote_ip : addr &log &optional; - ## Connect info, if present. - connect_info : string &log &optional; - ## Reply message from the server challenge. This is - ## frequently shown to the user authenticating. - reply_msg : string &log &optional; - ## Successful or failed authentication. - result : string &log &optional; - ## The duration between the first request and - ## either the "Access-Accept" message or an error. - ## If the field is empty, it means that either - ## the request or response was not seen. - ttl : interval &log &optional; - - ## Whether this has already been logged and can be ignored. - logged : bool &default=F; - }; - - ## Event that can be handled to access the RADIUS record as it is sent on - ## to the logging framework. - global log_radius: event(rec: Info); -} - -redef record connection += { - radius: Info &optional; -}; - -const ports = { 1812/udp }; -redef likely_server_ports += { ports }; - -event bro_init() &priority=5 - { - Log::create_stream(RADIUS::LOG, [$columns=Info, $ev=log_radius, $path="radius"]); - Analyzer::register_for_ports(Analyzer::ANALYZER_RADIUS, ports); - } - -event radius_message(c: connection, result: RADIUS::Message) &priority=5 - { - if ( ! c?$radius ) - { - c$radius = Info($ts = network_time(), - $uid = c$uid, - $id = c$id); - } - - switch ( RADIUS::msg_types[result$code] ) - { - case "Access-Request": - if ( result?$attributes ) - { - # User-Name - if ( ! c$radius?$username && 1 in result$attributes ) - c$radius$username = result$attributes[1][0]; - - # Calling-Station-Id (we expect this to be a MAC) - if ( ! c$radius?$mac && 31 in result$attributes ) - c$radius$mac = normalize_mac(result$attributes[31][0]); - - # Tunnel-Client-EndPoint (useful for VPNs) - if ( ! c$radius?$remote_ip && 66 in result$attributes ) - c$radius$remote_ip = to_addr(result$attributes[66][0]); - - # Connect-Info - if ( ! c$radius?$connect_info && 77 in result$attributes ) - c$radius$connect_info = result$attributes[77][0]; - } - break; - - case "Access-Challenge": - if ( result?$attributes ) - { - # Framed-IP-Address - if ( ! c$radius?$framed_addr && 8 in result$attributes ) - c$radius$framed_addr = raw_bytes_to_v4_addr(result$attributes[8][0]); - - if ( ! c$radius?$reply_msg && 18 in result$attributes ) - c$radius$reply_msg = result$attributes[18][0]; - } - break; - - case "Access-Accept": - c$radius$result = "success"; - break; - - case "Access-Reject": - c$radius$result = "failed"; - break; - - # TODO: Support RADIUS accounting. 
(add port 1813/udp above too) - #case "Accounting-Request": - # break; - # - #case "Accounting-Response": - # break; - } - } - -event radius_message(c: connection, result: RADIUS::Message) &priority=-5 - { - if ( c$radius?$result ) - { - local ttl = network_time() - c$radius$ts; - if ( ttl != 0secs ) - c$radius$ttl = ttl; - - Log::write(RADIUS::LOG, c$radius); - - delete c$radius; - } - } - -event connection_state_remove(c: connection) &priority=-5 - { - if ( c?$radius && ! c$radius$logged ) - { - c$radius$result = "unknown"; - Log::write(RADIUS::LOG, c$radius); - } - } diff --git a/scripts/base/protocols/radius/main.zeek b/scripts/base/protocols/radius/main.zeek new file mode 100644 index 0000000000..bffe996402 --- /dev/null +++ b/scripts/base/protocols/radius/main.zeek @@ -0,0 +1,148 @@ +##! Implements base functionality for RADIUS analysis. Generates the radius.log file. + +module RADIUS; + +@load ./consts +@load base/utils/addrs + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + ## Timestamp for when the event happened. + ts : time &log; + ## Unique ID for the connection. + uid : string &log; + ## The connection's 4-tuple of endpoint addresses/ports. + id : conn_id &log; + ## The username, if present. + username : string &log &optional; + ## MAC address, if present. + mac : string &log &optional; + ## The address given to the network access server, if + ## present. This is only a hint from the RADIUS server + ## and the network access server is not required to honor + ## the address. + framed_addr : addr &log &optional; + ## Address (IPv4, IPv6, or FQDN) of the initiator end of the tunnel, + ## if present. This is collected from the Tunnel-Client-Endpoint + ## attribute. + tunnel_client: string &log &optional; + ## Connect info, if present. + connect_info : string &log &optional; + ## Reply message from the server challenge. This is + ## frequently shown to the user authenticating. + reply_msg : string &log &optional; + ## Successful or failed authentication. + result : string &log &optional; + ## The duration between the first request and + ## either the "Access-Accept" message or an error. + ## If the field is empty, it means that either + ## the request or response was not seen. + ttl : interval &log &optional; + + ## Whether this has already been logged and can be ignored. + logged : bool &default=F; + }; + + ## Event that can be handled to access the RADIUS record as it is sent on + ## to the logging framework. + global log_radius: event(rec: Info); +} + +redef record connection += { + radius: Info &optional; +}; + +const ports = { 1812/udp }; +redef likely_server_ports += { ports }; + +event zeek_init() &priority=5 + { + Log::create_stream(RADIUS::LOG, [$columns=Info, $ev=log_radius, $path="radius"]); + Analyzer::register_for_ports(Analyzer::ANALYZER_RADIUS, ports); + } + +event radius_message(c: connection, result: RADIUS::Message) &priority=5 + { + if ( ! c?$radius ) + { + c$radius = Info($ts = network_time(), + $uid = c$uid, + $id = c$id); + } + + switch ( RADIUS::msg_types[result$code] ) + { + case "Access-Request": + if ( result?$attributes ) + { + # User-Name + if ( ! c$radius?$username && 1 in result$attributes ) + c$radius$username = result$attributes[1][0]; + + # Calling-Station-Id (we expect this to be a MAC) + if ( ! c$radius?$mac && 31 in result$attributes ) + c$radius$mac = normalize_mac(result$attributes[31][0]); + + # Tunnel-Client-EndPoint (useful for VPNs) + if ( ! 
c$radius?$tunnel_client && 66 in result$attributes ) + c$radius$tunnel_client = result$attributes[66][0]; + + # Connect-Info + if ( ! c$radius?$connect_info && 77 in result$attributes ) + c$radius$connect_info = result$attributes[77][0]; + } + break; + + case "Access-Challenge": + if ( result?$attributes ) + { + # Framed-IP-Address + if ( ! c$radius?$framed_addr && 8 in result$attributes ) + c$radius$framed_addr = raw_bytes_to_v4_addr(result$attributes[8][0]); + + if ( ! c$radius?$reply_msg && 18 in result$attributes ) + c$radius$reply_msg = result$attributes[18][0]; + } + break; + + case "Access-Accept": + c$radius$result = "success"; + break; + + case "Access-Reject": + c$radius$result = "failed"; + break; + + # TODO: Support RADIUS accounting. (add port 1813/udp above too) + #case "Accounting-Request": + # break; + # + #case "Accounting-Response": + # break; + } + } + +event radius_message(c: connection, result: RADIUS::Message) &priority=-5 + { + if ( c$radius?$result ) + { + local ttl = network_time() - c$radius$ts; + if ( ttl != 0secs ) + c$radius$ttl = ttl; + + Log::write(RADIUS::LOG, c$radius); + + delete c$radius; + } + } + +event connection_state_remove(c: connection) &priority=-5 + { + if ( c?$radius && ! c$radius$logged ) + { + c$radius$result = "unknown"; + Log::write(RADIUS::LOG, c$radius); + } + } diff --git a/scripts/base/protocols/rdp/__load__.bro b/scripts/base/protocols/rdp/__load__.zeek similarity index 100% rename from scripts/base/protocols/rdp/__load__.bro rename to scripts/base/protocols/rdp/__load__.zeek diff --git a/scripts/base/protocols/rdp/consts.bro b/scripts/base/protocols/rdp/consts.zeek similarity index 100% rename from scripts/base/protocols/rdp/consts.bro rename to scripts/base/protocols/rdp/consts.zeek diff --git a/scripts/base/protocols/rdp/main.bro b/scripts/base/protocols/rdp/main.bro deleted file mode 100644 index 30d5764ce3..0000000000 --- a/scripts/base/protocols/rdp/main.bro +++ /dev/null @@ -1,265 +0,0 @@ -##! Implements base functionality for RDP analysis. Generates the rdp.log file. - -@load ./consts - -module RDP; - -export { - redef enum Log::ID += { LOG }; - - type Info: record { - ## Timestamp for when the event happened. - ts: time &log; - ## Unique ID for the connection. - uid: string &log; - ## The connection's 4-tuple of endpoint addresses/ports. - id: conn_id &log; - ## Cookie value used by the client machine. - ## This is typically a username. - cookie: string &log &optional; - ## Status result for the connection. It's a mix between - ## RDP negotation failure messages and GCC server create - ## response messages. - result: string &log &optional; - ## Security protocol chosen by the server. - security_protocol: string &log &optional; - - ## Keyboard layout (language) of the client machine. - keyboard_layout: string &log &optional; - ## RDP client version used by the client machine. - client_build: string &log &optional; - ## Name of the client machine. - client_name: string &log &optional; - ## Product ID of the client machine. - client_dig_product_id: string &log &optional; - ## Desktop width of the client machine. - desktop_width: count &log &optional; - ## Desktop height of the client machine. - desktop_height: count &log &optional; - ## The color depth requested by the client in - ## the high_color_depth field. - requested_color_depth: string &log &optional; - - ## If the connection is being encrypted with native - ## RDP encryption, this is the type of cert - ## being used. 
- cert_type: string &log &optional; - ## The number of certs seen. X.509 can transfer an - ## entire certificate chain. - cert_count: count &log &default=0; - ## Indicates if the provided certificate or certificate - ## chain is permanent or temporary. - cert_permanent: bool &log &optional; - ## Encryption level of the connection. - encryption_level: string &log &optional; - ## Encryption method of the connection. - encryption_method: string &log &optional; - }; - - ## If true, detach the RDP analyzer from the connection to prevent - ## continuing to process encrypted traffic. - option disable_analyzer_after_detection = F; - - ## The amount of time to monitor an RDP session from when it is first - ## identified. When this interval is reached, the session is logged. - option rdp_check_interval = 10secs; - - ## Event that can be handled to access the rdp record as it is sent on - ## to the logging framework. - global log_rdp: event(rec: Info); -} - -# Internal fields that aren't useful externally -redef record Info += { - ## The analyzer ID used for the analyzer instance attached - ## to each connection. It is not used for logging since it's a - ## meaningless arbitrary number. - analyzer_id: count &optional; - ## Track status of logging RDP connections. - done: bool &default=F; -}; - -redef record connection += { - rdp: Info &optional; -}; - -const ports = { 3389/tcp }; -redef likely_server_ports += { ports }; - -event bro_init() &priority=5 - { - Log::create_stream(RDP::LOG, [$columns=RDP::Info, $ev=log_rdp, $path="rdp"]); - Analyzer::register_for_ports(Analyzer::ANALYZER_RDP, ports); - } - -function write_log(c: connection) - { - local info = c$rdp; - - if ( info$done ) - return; - - # Mark this record as fully logged and finished. - info$done = T; - - # Verify that the RDP session contains - # RDP data before writing it to the log. - if ( info?$cookie || info?$keyboard_layout || info?$result ) - Log::write(RDP::LOG, info); - } - -event check_record(c: connection) - { - # If the record was logged, then stop processing. - if ( c$rdp$done ) - return; - - # If the value rdp_check_interval has passed since the - # RDP session was started, then log the record. - local diff = network_time() - c$rdp$ts; - if ( diff > rdp_check_interval ) - { - write_log(c); - - # Remove the analyzer if it is still attached. - if ( disable_analyzer_after_detection && - connection_exists(c$id) && - c$rdp?$analyzer_id ) - { - disable_analyzer(c$id, c$rdp$analyzer_id); - } - - return; - } - else - { - # If the analyzer is attached and the duration - # to monitor the RDP session was not met, then - # reschedule the logging event. - schedule rdp_check_interval { check_record(c) }; - } - } - -function set_session(c: connection) - { - if ( ! c?$rdp ) - { - c$rdp = [$ts=network_time(),$id=c$id,$uid=c$uid]; - # The RDP session is scheduled to be logged from - # the time it is first initiated. 
- schedule rdp_check_interval { check_record(c) }; - } - } - -event rdp_connect_request(c: connection, cookie: string) &priority=5 - { - set_session(c); - - c$rdp$cookie = cookie; - } - -event rdp_negotiation_response(c: connection, security_protocol: count) &priority=5 - { - set_session(c); - - c$rdp$security_protocol = security_protocols[security_protocol]; - } - -event rdp_negotiation_failure(c: connection, failure_code: count) &priority=5 - { - set_session(c); - - c$rdp$result = failure_codes[failure_code]; - } - -event rdp_client_core_data(c: connection, data: RDP::ClientCoreData) &priority=5 - { - set_session(c); - - c$rdp$keyboard_layout = RDP::languages[data$keyboard_layout]; - c$rdp$client_build = RDP::builds[data$client_build]; - c$rdp$client_name = data$client_name; - c$rdp$client_dig_product_id = data$dig_product_id; - c$rdp$desktop_width = data$desktop_width; - c$rdp$desktop_height = data$desktop_height; - - if ( data?$ec_flags && data$ec_flags$want_32bpp_session ) - c$rdp$requested_color_depth = "32bit"; - else - c$rdp$requested_color_depth = RDP::high_color_depths[data$high_color_depth]; - } - -event rdp_gcc_server_create_response(c: connection, result: count) &priority=5 - { - set_session(c); - - c$rdp$result = RDP::results[result]; - } - -event rdp_server_security(c: connection, encryption_method: count, encryption_level: count) &priority=5 - { - set_session(c); - - c$rdp$encryption_method = RDP::encryption_methods[encryption_method]; - c$rdp$encryption_level = RDP::encryption_levels[encryption_level]; - } - -event rdp_server_certificate(c: connection, cert_type: count, permanently_issued: bool) &priority=5 - { - set_session(c); - - c$rdp$cert_type = RDP::cert_types[cert_type]; - - # There are no events for proprietary/RSA certs right - # now so we manually count this one. - if ( c$rdp$cert_type == "RSA" ) - ++c$rdp$cert_count; - - c$rdp$cert_permanent = permanently_issued; - } - -event rdp_begin_encryption(c: connection, security_protocol: count) &priority=5 - { - set_session(c); - - if ( ! c$rdp?$result ) - { - c$rdp$result = "encrypted"; - } - - c$rdp$security_protocol = security_protocols[security_protocol]; - } - -event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5 - { - if ( c?$rdp && f$source == "RDP" ) - { - # Count up X509 certs. - ++c$rdp$cert_count; - } - } - -event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=5 - { - if ( atype == Analyzer::ANALYZER_RDP ) - { - set_session(c); - c$rdp$analyzer_id = aid; - } - } - -event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, reason: string) &priority=5 - { - # If a protocol violation occurs, then log the record immediately. - if ( c?$rdp ) - write_log(c); - } - -event connection_state_remove(c: connection) &priority=-5 - { - # If the connection is removed, then log the record immediately. - if ( c?$rdp ) - { - write_log(c); - } - } diff --git a/scripts/base/protocols/rdp/main.zeek b/scripts/base/protocols/rdp/main.zeek new file mode 100644 index 0000000000..11148de27c --- /dev/null +++ b/scripts/base/protocols/rdp/main.zeek @@ -0,0 +1,282 @@ +##! Implements base functionality for RDP analysis. Generates the rdp.log file. + +@load ./consts + +module RDP; + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + ## Timestamp for when the event happened. + ts: time &log; + ## Unique ID for the connection. + uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. 
+ id: conn_id &log; + ## Cookie value used by the client machine. + ## This is typically a username. + cookie: string &log &optional; + ## Status result for the connection. It's a mix between + ## RDP negotation failure messages and GCC server create + ## response messages. + result: string &log &optional; + ## Security protocol chosen by the server. + security_protocol: string &log &optional; + ## The channels requested by the client + client_channels: vector of string &log &optional; + + ## Keyboard layout (language) of the client machine. + keyboard_layout: string &log &optional; + ## RDP client version used by the client machine. + client_build: string &log &optional; + ## Name of the client machine. + client_name: string &log &optional; + ## Product ID of the client machine. + client_dig_product_id: string &log &optional; + ## Desktop width of the client machine. + desktop_width: count &log &optional; + ## Desktop height of the client machine. + desktop_height: count &log &optional; + ## The color depth requested by the client in + ## the high_color_depth field. + requested_color_depth: string &log &optional; + + ## If the connection is being encrypted with native + ## RDP encryption, this is the type of cert + ## being used. + cert_type: string &log &optional; + ## The number of certs seen. X.509 can transfer an + ## entire certificate chain. + cert_count: count &log &default=0; + ## Indicates if the provided certificate or certificate + ## chain is permanent or temporary. + cert_permanent: bool &log &optional; + ## Encryption level of the connection. + encryption_level: string &log &optional; + ## Encryption method of the connection. + encryption_method: string &log &optional; + }; + + ## If true, detach the RDP analyzer from the connection to prevent + ## continuing to process encrypted traffic. + option disable_analyzer_after_detection = F; + + ## The amount of time to monitor an RDP session from when it is first + ## identified. When this interval is reached, the session is logged. + option rdp_check_interval = 10secs; + + ## Event that can be handled to access the rdp record as it is sent on + ## to the logging framework. + global log_rdp: event(rec: Info); +} + +# Internal fields that aren't useful externally +redef record Info += { + ## The analyzer ID used for the analyzer instance attached + ## to each connection. It is not used for logging since it's a + ## meaningless arbitrary number. + analyzer_id: count &optional; + ## Track status of logging RDP connections. + done: bool &default=F; +}; + +redef record connection += { + rdp: Info &optional; +}; + +const ports = { 3389/tcp }; +redef likely_server_ports += { ports }; + +event zeek_init() &priority=5 + { + Log::create_stream(RDP::LOG, [$columns=RDP::Info, $ev=log_rdp, $path="rdp"]); + Analyzer::register_for_ports(Analyzer::ANALYZER_RDP, ports); + } + +function write_log(c: connection) + { + local info = c$rdp; + + if ( info$done ) + return; + + # Mark this record as fully logged and finished. + info$done = T; + + # Verify that the RDP session contains + # RDP data before writing it to the log. + if ( info?$cookie || info?$keyboard_layout || info?$result ) + Log::write(RDP::LOG, info); + } + +event check_record(c: connection) + { + # If the record was logged, then stop processing. + if ( c$rdp$done ) + return; + + # If the value rdp_check_interval has passed since the + # RDP session was started, then log the record. 
+ local diff = network_time() - c$rdp$ts; + if ( diff > rdp_check_interval ) + { + write_log(c); + + # Remove the analyzer if it is still attached. + if ( disable_analyzer_after_detection && + connection_exists(c$id) && + c$rdp?$analyzer_id ) + { + disable_analyzer(c$id, c$rdp$analyzer_id); + } + + return; + } + else + { + # If the analyzer is attached and the duration + # to monitor the RDP session was not met, then + # reschedule the logging event. + schedule rdp_check_interval { check_record(c) }; + } + } + +function set_session(c: connection) + { + if ( ! c?$rdp ) + { + c$rdp = [$ts=network_time(),$id=c$id,$uid=c$uid]; + # The RDP session is scheduled to be logged from + # the time it is first initiated. + schedule rdp_check_interval { check_record(c) }; + } + } + +event rdp_connect_request(c: connection, cookie: string) &priority=5 + { + set_session(c); + + c$rdp$cookie = cookie; + } + +event rdp_negotiation_response(c: connection, security_protocol: count) &priority=5 + { + set_session(c); + + c$rdp$security_protocol = security_protocols[security_protocol]; + } + +event rdp_negotiation_failure(c: connection, failure_code: count) &priority=5 + { + set_session(c); + + c$rdp$result = failure_codes[failure_code]; + } + +event rdp_client_core_data(c: connection, data: RDP::ClientCoreData) &priority=5 + { + set_session(c); + + c$rdp$keyboard_layout = RDP::languages[data$keyboard_layout]; + c$rdp$client_build = RDP::builds[data$client_build]; + c$rdp$client_name = data$client_name; + c$rdp$client_dig_product_id = data$dig_product_id; + c$rdp$desktop_width = data$desktop_width; + c$rdp$desktop_height = data$desktop_height; + + if ( data?$ec_flags && data$ec_flags$want_32bpp_session ) + c$rdp$requested_color_depth = "32bit"; + else + c$rdp$requested_color_depth = RDP::high_color_depths[data$high_color_depth]; + } + +event rdp_client_network_data(c: connection, channels: ClientChannelList) + { + set_session(c); + + if ( ! c$rdp?$client_channels ) + c$rdp$client_channels = vector(); + + for ( i in channels ) + # Remove the NULs at the end + c$rdp$client_channels[i] = gsub(channels[i]$name, /\x00+$/, ""); + + if ( |channels| > 31 ) + Reporter::conn_weird("RDP_channels_requested_exceeds_max", c, fmt("%s", |channels|)); + } + +event rdp_gcc_server_create_response(c: connection, result: count) &priority=5 + { + set_session(c); + + c$rdp$result = RDP::results[result]; + } + +event rdp_server_security(c: connection, encryption_method: count, encryption_level: count) &priority=5 + { + set_session(c); + + c$rdp$encryption_method = RDP::encryption_methods[encryption_method]; + c$rdp$encryption_level = RDP::encryption_levels[encryption_level]; + } + +event rdp_server_certificate(c: connection, cert_type: count, permanently_issued: bool) &priority=5 + { + set_session(c); + + c$rdp$cert_type = RDP::cert_types[cert_type]; + + # There are no events for proprietary/RSA certs right + # now so we manually count this one. + if ( c$rdp$cert_type == "RSA" ) + ++c$rdp$cert_count; + + c$rdp$cert_permanent = permanently_issued; + } + +event rdp_begin_encryption(c: connection, security_protocol: count) &priority=5 + { + set_session(c); + + if ( ! c$rdp?$result ) + { + c$rdp$result = "encrypted"; + } + + c$rdp$security_protocol = security_protocols[security_protocol]; + } + +event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5 + { + if ( c?$rdp && f$source == "RDP" ) + { + # Count up X509 certs. 
+ ++c$rdp$cert_count; + } + } + +event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=5 + { + if ( atype == Analyzer::ANALYZER_RDP ) + { + set_session(c); + c$rdp$analyzer_id = aid; + } + } + +event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, reason: string) &priority=5 + { + # If a protocol violation occurs, then log the record immediately. + if ( c?$rdp ) + write_log(c); + } + +event connection_state_remove(c: connection) &priority=-5 + { + # If the connection is removed, then log the record immediately. + if ( c?$rdp ) + { + write_log(c); + } + } diff --git a/scripts/base/protocols/rfb/__load__.bro b/scripts/base/protocols/rfb/__load__.zeek similarity index 100% rename from scripts/base/protocols/rfb/__load__.bro rename to scripts/base/protocols/rfb/__load__.zeek diff --git a/scripts/base/protocols/rfb/main.bro b/scripts/base/protocols/rfb/main.bro deleted file mode 100644 index ff05063538..0000000000 --- a/scripts/base/protocols/rfb/main.bro +++ /dev/null @@ -1,165 +0,0 @@ -module RFB; - -export { - redef enum Log::ID += { LOG }; - - ## The record type which contains the fields of the RFB log. - type Info: record { - ## Timestamp for when the event happened. - ts: time &log; - ## Unique ID for the connection. - uid: string &log; - ## The connection's 4-tuple of endpoint addresses/ports. - id: conn_id &log; - - ## Major version of the client. - client_major_version: string &log &optional; - ## Minor version of the client. - client_minor_version: string &log &optional; - ## Major version of the server. - server_major_version: string &log &optional; - ## Minor version of the server. - server_minor_version: string &log &optional; - - ## Identifier of authentication method used. - authentication_method: string &log &optional; - ## Whether or not authentication was successful. - auth: bool &log &optional; - - ## Whether the client has an exclusive or a shared session. - share_flag: bool &log &optional; - ## Name of the screen that is being shared. - desktop_name: string &log &optional; - ## Width of the screen that is being shared. - width: count &log &optional; - ## Height of the screen that is being shared. - height: count &log &optional; - - ## Internally used value to determine if this connection - ## has already been logged. - done: bool &default=F; - }; - - global log_rfb: event(rec: Info); -} - -function friendly_auth_name(auth: count): string - { - switch (auth) { - case 0: - return "Invalid"; - case 1: - return "None"; - case 2: - return "VNC"; - case 16: - return "Tight"; - case 17: - return "Ultra"; - case 18: - return "TLS"; - case 19: - return "VeNCrypt"; - case 20: - return "GTK-VNC SASL"; - case 21: - return "MD5 hash authentication"; - case 22: - return "Colin Dean xvp"; - case 30: - return "Apple Remote Desktop"; - } - return "RealVNC"; -} - -redef record connection += { - rfb: Info &optional; -}; - -event bro_init() &priority=5 - { - Log::create_stream(RFB::LOG, [$columns=Info, $ev=log_rfb, $path="rfb"]); - } - -function write_log(c:connection) - { - local state = c$rfb; - if ( state$done ) - { - return; - } - - Log::write(RFB::LOG, c$rfb); - c$rfb$done = T; - } - -function set_session(c: connection) - { - if ( ! 
c?$rfb ) - { - local info: Info; - info$ts = network_time(); - info$uid = c$uid; - info$id = c$id; - - c$rfb = info; - } - } - -event rfb_event(c: connection) &priority=5 - { - set_session(c); - } - -event rfb_client_version(c: connection, major_version: string, minor_version: string) &priority=5 - { - set_session(c); - c$rfb$client_major_version = major_version; - c$rfb$client_minor_version = minor_version; - } - -event rfb_server_version(c: connection, major_version: string, minor_version: string) &priority=5 - { - set_session(c); - c$rfb$server_major_version = major_version; - c$rfb$server_minor_version = minor_version; - } - -event rfb_authentication_type(c: connection, authtype: count) &priority=5 - { - set_session(c); - - c$rfb$authentication_method = friendly_auth_name(authtype); - } - -event rfb_server_parameters(c: connection, name: string, width: count, height: count) &priority=5 - { - set_session(c); - - c$rfb$desktop_name = name; - c$rfb$width = width; - c$rfb$height = height; - } - -event rfb_server_parameters(c: connection, name: string, width: count, height: count) &priority=-5 - { - write_log(c); - } - -event rfb_auth_result(c: connection, result: bool) &priority=5 - { - c$rfb$auth = !result; - } - -event rfb_share_flag(c: connection, flag: bool) &priority=5 - { - c$rfb$share_flag = flag; - } - -event connection_state_remove(c: connection) &priority=-5 - { - if ( c?$rfb ) - { - write_log(c); - } - } diff --git a/scripts/base/protocols/rfb/main.zeek b/scripts/base/protocols/rfb/main.zeek new file mode 100644 index 0000000000..ae9d3ca508 --- /dev/null +++ b/scripts/base/protocols/rfb/main.zeek @@ -0,0 +1,165 @@ +module RFB; + +export { + redef enum Log::ID += { LOG }; + + ## The record type which contains the fields of the RFB log. + type Info: record { + ## Timestamp for when the event happened. + ts: time &log; + ## Unique ID for the connection. + uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. + id: conn_id &log; + + ## Major version of the client. + client_major_version: string &log &optional; + ## Minor version of the client. + client_minor_version: string &log &optional; + ## Major version of the server. + server_major_version: string &log &optional; + ## Minor version of the server. + server_minor_version: string &log &optional; + + ## Identifier of authentication method used. + authentication_method: string &log &optional; + ## Whether or not authentication was successful. + auth: bool &log &optional; + + ## Whether the client has an exclusive or a shared session. + share_flag: bool &log &optional; + ## Name of the screen that is being shared. + desktop_name: string &log &optional; + ## Width of the screen that is being shared. + width: count &log &optional; + ## Height of the screen that is being shared. + height: count &log &optional; + + ## Internally used value to determine if this connection + ## has already been logged. 
+ done: bool &default=F; + }; + + global log_rfb: event(rec: Info); +} + +function friendly_auth_name(auth: count): string + { + switch (auth) { + case 0: + return "Invalid"; + case 1: + return "None"; + case 2: + return "VNC"; + case 16: + return "Tight"; + case 17: + return "Ultra"; + case 18: + return "TLS"; + case 19: + return "VeNCrypt"; + case 20: + return "GTK-VNC SASL"; + case 21: + return "MD5 hash authentication"; + case 22: + return "Colin Dean xvp"; + case 30: + return "Apple Remote Desktop"; + } + return "RealVNC"; +} + +redef record connection += { + rfb: Info &optional; +}; + +event zeek_init() &priority=5 + { + Log::create_stream(RFB::LOG, [$columns=Info, $ev=log_rfb, $path="rfb"]); + } + +function write_log(c:connection) + { + local state = c$rfb; + if ( state$done ) + { + return; + } + + Log::write(RFB::LOG, c$rfb); + c$rfb$done = T; + } + +function set_session(c: connection) + { + if ( ! c?$rfb ) + { + local info: Info; + info$ts = network_time(); + info$uid = c$uid; + info$id = c$id; + + c$rfb = info; + } + } + +event rfb_event(c: connection) &priority=5 + { + set_session(c); + } + +event rfb_client_version(c: connection, major_version: string, minor_version: string) &priority=5 + { + set_session(c); + c$rfb$client_major_version = major_version; + c$rfb$client_minor_version = minor_version; + } + +event rfb_server_version(c: connection, major_version: string, minor_version: string) &priority=5 + { + set_session(c); + c$rfb$server_major_version = major_version; + c$rfb$server_minor_version = minor_version; + } + +event rfb_authentication_type(c: connection, authtype: count) &priority=5 + { + set_session(c); + + c$rfb$authentication_method = friendly_auth_name(authtype); + } + +event rfb_server_parameters(c: connection, name: string, width: count, height: count) &priority=5 + { + set_session(c); + + c$rfb$desktop_name = name; + c$rfb$width = width; + c$rfb$height = height; + } + +event rfb_server_parameters(c: connection, name: string, width: count, height: count) &priority=-5 + { + write_log(c); + } + +event rfb_auth_result(c: connection, result: bool) &priority=5 + { + c$rfb$auth = !result; + } + +event rfb_share_flag(c: connection, flag: bool) &priority=5 + { + c$rfb$share_flag = flag; + } + +event connection_state_remove(c: connection) &priority=-5 + { + if ( c?$rfb ) + { + write_log(c); + } + } diff --git a/scripts/base/protocols/sip/__load__.bro b/scripts/base/protocols/sip/__load__.zeek similarity index 100% rename from scripts/base/protocols/sip/__load__.bro rename to scripts/base/protocols/sip/__load__.zeek diff --git a/scripts/base/protocols/sip/main.bro b/scripts/base/protocols/sip/main.bro deleted file mode 100644 index 68ebb9b222..0000000000 --- a/scripts/base/protocols/sip/main.bro +++ /dev/null @@ -1,302 +0,0 @@ -##! Implements base functionality for SIP analysis. The logging model is -##! to log request/response pairs and all relevant metadata together in -##! a single record. - -@load base/utils/numbers -@load base/utils/files - -module SIP; - -export { - redef enum Log::ID += { LOG }; - - ## The record type which contains the fields of the SIP log. - type Info: record { - ## Timestamp for when the request happened. - ts: time &log; - ## Unique ID for the connection. - uid: string &log; - ## The connection's 4-tuple of endpoint addresses/ports. - id: conn_id &log; - ## Represents the pipelined depth into the connection of this - ## request/response transaction. - trans_depth: count &log; - ## Verb used in the SIP request (INVITE, REGISTER etc.). 
- method: string &log &optional; - ## URI used in the request. - uri: string &log &optional; - ## Contents of the Date: header from the client - date: string &log &optional; - ## Contents of the request From: header - ## Note: The tag= value that's usually appended to the sender - ## is stripped off and not logged. - request_from: string &log &optional; - ## Contents of the To: header - request_to: string &log &optional; - ## Contents of the response From: header - ## Note: The ``tag=`` value that's usually appended to the sender - ## is stripped off and not logged. - response_from: string &log &optional; - ## Contents of the response To: header - response_to: string &log &optional; - - ## Contents of the Reply-To: header - reply_to: string &log &optional; - ## Contents of the Call-ID: header from the client - call_id: string &log &optional; - ## Contents of the CSeq: header from the client - seq: string &log &optional; - ## Contents of the Subject: header from the client - subject: string &log &optional; - ## The client message transmission path, as extracted from the headers. - request_path: vector of string &log &optional; - ## The server message transmission path, as extracted from the headers. - response_path: vector of string &log &optional; - ## Contents of the User-Agent: header from the client - user_agent: string &log &optional; - ## Status code returned by the server. - status_code: count &log &optional; - ## Status message returned by the server. - status_msg: string &log &optional; - ## Contents of the Warning: header - warning: string &log &optional; - ## Contents of the Content-Length: header from the client - request_body_len: count &log &optional; - ## Contents of the Content-Length: header from the server - response_body_len: count &log &optional; - ## Contents of the Content-Type: header from the server - content_type: string &log &optional; - }; - - type State: record { - ## Pending requests. - pending: table[count] of Info; - ## Current request in the pending queue. - current_request: count &default=0; - ## Current response in the pending queue. - current_response: count &default=0; - }; - - ## A list of SIP methods. Other methods will generate a weird. Note - ## that the SIP analyzer will only accept methods consisting solely - ## of letters ``[A-Za-z]``. - option sip_methods: set[string] = { - "REGISTER", "INVITE", "ACK", "CANCEL", "BYE", "OPTIONS", "NOTIFY", "SUBSCRIBE" - }; - - ## Event that can be handled to access the SIP record as it is sent on - ## to the logging framework. - global log_sip: event(rec: Info); -} - -# Add the sip state tracking fields to the connection record. -redef record connection += { - sip: Info &optional; - sip_state: State &optional; -}; - -const ports = { 5060/udp }; -redef likely_server_ports += { ports }; - -event bro_init() &priority=5 - { - Log::create_stream(SIP::LOG, [$columns=Info, $ev=log_sip, $path="sip"]); - Analyzer::register_for_ports(Analyzer::ANALYZER_SIP, ports); - } - -function new_sip_session(c: connection): Info - { - local tmp: Info; - tmp$ts=network_time(); - tmp$uid=c$uid; - tmp$id=c$id; - # $current_request is set prior to the Info record creation so we - # can use the value directly here. - tmp$trans_depth = c$sip_state$current_request; - - tmp$request_path = vector(); - tmp$response_path = vector(); - - return tmp; - } - -function set_state(c: connection, is_request: bool) - { - if ( ! 
c?$sip_state ) - { - local s: State; - c$sip_state = s; - } - - if ( is_request ) - { - if ( c$sip_state$current_request !in c$sip_state$pending ) - c$sip_state$pending[c$sip_state$current_request] = new_sip_session(c); - - c$sip = c$sip_state$pending[c$sip_state$current_request]; - } - else - { - if ( c$sip_state$current_response !in c$sip_state$pending ) - c$sip_state$pending[c$sip_state$current_response] = new_sip_session(c); - - c$sip = c$sip_state$pending[c$sip_state$current_response]; - } - } - -function flush_pending(c: connection) - { - # Flush all pending but incomplete request/response pairs. - if ( c?$sip_state ) - { - for ( r, info in c$sip_state$pending ) - { - # We don't use pending elements at index 0. - if ( r == 0 ) - next; - - Log::write(SIP::LOG, info); - } - } - } - -event sip_request(c: connection, method: string, original_URI: string, version: string) &priority=5 - { - set_state(c, T); - - c$sip$method = method; - c$sip$uri = original_URI; - - if ( method !in sip_methods ) - Reporter::conn_weird("unknown_SIP_method", c, method); - } - -event sip_reply(c: connection, version: string, code: count, reason: string) &priority=5 - { - set_state(c, F); - - if ( c$sip_state$current_response !in c$sip_state$pending && - (code < 100 && 200 <= code) ) - ++c$sip_state$current_response; - - c$sip$status_code = code; - c$sip$status_msg = reason; - } - -event sip_header(c: connection, is_request: bool, name: string, value: string) &priority=5 - { - if ( ! c?$sip_state ) - { - local s: State; - c$sip_state = s; - } - - if ( is_request ) # from client - { - if ( c$sip_state$current_request !in c$sip_state$pending ) - ++c$sip_state$current_request; - set_state(c, is_request); - switch ( name ) - { - case "CALL-ID": - c$sip$call_id = value; - break; - case "CONTENT-LENGTH", "L": - c$sip$request_body_len = to_count(value); - break; - case "CSEQ": - c$sip$seq = value; - break; - case "DATE": - c$sip$date = value; - break; - case "FROM", "F": - c$sip$request_from = split_string1(value, /;[ ]?tag=/)[0]; - break; - case "REPLY-TO": - c$sip$reply_to = value; - break; - case "SUBJECT", "S": - c$sip$subject = value; - break; - case "TO", "T": - c$sip$request_to = value; - break; - case "USER-AGENT": - c$sip$user_agent = value; - break; - case "VIA", "V": - c$sip$request_path += split_string1(value, /;[ ]?branch/)[0]; - break; - } - - c$sip_state$pending[c$sip_state$current_request] = c$sip; - } - else # from server - { - if ( c$sip_state$current_response !in c$sip_state$pending ) - ++c$sip_state$current_response; - - set_state(c, is_request); - switch ( name ) - { - case "CONTENT-LENGTH", "L": - c$sip$response_body_len = to_count(value); - break; - case "CONTENT-TYPE", "C": - c$sip$content_type = value; - break; - case "WARNING": - c$sip$warning = value; - break; - case "FROM", "F": - c$sip$response_from = split_string1(value, /;[ ]?tag=/)[0]; - break; - case "TO", "T": - c$sip$response_to = value; - break; - case "VIA", "V": - c$sip$response_path += split_string1(value, /;[ ]?branch/)[0]; - break; - } - - c$sip_state$pending[c$sip_state$current_response] = c$sip; - } - } - -event sip_end_entity(c: connection, is_request: bool) &priority = 5 - { - set_state(c, is_request); - } - -event sip_end_entity(c: connection, is_request: bool) &priority = -5 - { - # The reply body is done so we're ready to log. - if ( ! is_request ) - { - Log::write(SIP::LOG, c$sip); - - if ( c$sip$status_code < 100 || 200 <= c$sip$status_code ) - delete c$sip_state$pending[c$sip_state$current_response]; - - if ( ! 
c$sip?$method || ( c$sip$method == "BYE" && - c$sip$status_code >= 200 && c$sip$status_code < 300 ) ) - { - flush_pending(c); - delete c$sip; - delete c$sip_state; - } - } - } - -event connection_state_remove(c: connection) &priority=-5 - { - if ( c?$sip_state ) - { - for ( r, info in c$sip_state$pending ) - { - Log::write(SIP::LOG, info); - } - } - } - diff --git a/scripts/base/protocols/sip/main.zeek b/scripts/base/protocols/sip/main.zeek new file mode 100644 index 0000000000..e0647e6494 --- /dev/null +++ b/scripts/base/protocols/sip/main.zeek @@ -0,0 +1,302 @@ +##! Implements base functionality for SIP analysis. The logging model is +##! to log request/response pairs and all relevant metadata together in +##! a single record. + +@load base/utils/numbers +@load base/utils/files + +module SIP; + +export { + redef enum Log::ID += { LOG }; + + ## The record type which contains the fields of the SIP log. + type Info: record { + ## Timestamp for when the request happened. + ts: time &log; + ## Unique ID for the connection. + uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. + id: conn_id &log; + ## Represents the pipelined depth into the connection of this + ## request/response transaction. + trans_depth: count &log; + ## Verb used in the SIP request (INVITE, REGISTER etc.). + method: string &log &optional; + ## URI used in the request. + uri: string &log &optional; + ## Contents of the Date: header from the client + date: string &log &optional; + ## Contents of the request From: header + ## Note: The tag= value that's usually appended to the sender + ## is stripped off and not logged. + request_from: string &log &optional; + ## Contents of the To: header + request_to: string &log &optional; + ## Contents of the response From: header + ## Note: The ``tag=`` value that's usually appended to the sender + ## is stripped off and not logged. + response_from: string &log &optional; + ## Contents of the response To: header + response_to: string &log &optional; + + ## Contents of the Reply-To: header + reply_to: string &log &optional; + ## Contents of the Call-ID: header from the client + call_id: string &log &optional; + ## Contents of the CSeq: header from the client + seq: string &log &optional; + ## Contents of the Subject: header from the client + subject: string &log &optional; + ## The client message transmission path, as extracted from the headers. + request_path: vector of string &log &optional; + ## The server message transmission path, as extracted from the headers. + response_path: vector of string &log &optional; + ## Contents of the User-Agent: header from the client + user_agent: string &log &optional; + ## Status code returned by the server. + status_code: count &log &optional; + ## Status message returned by the server. + status_msg: string &log &optional; + ## Contents of the Warning: header + warning: string &log &optional; + ## Contents of the Content-Length: header from the client + request_body_len: count &log &optional; + ## Contents of the Content-Length: header from the server + response_body_len: count &log &optional; + ## Contents of the Content-Type: header from the server + content_type: string &log &optional; + }; + + type State: record { + ## Pending requests. + pending: table[count] of Info; + ## Current request in the pending queue. + current_request: count &default=0; + ## Current response in the pending queue. + current_response: count &default=0; + }; + + ## A list of SIP methods. Other methods will generate a weird. 
Note + ## that the SIP analyzer will only accept methods consisting solely + ## of letters ``[A-Za-z]``. + option sip_methods: set[string] = { + "REGISTER", "INVITE", "ACK", "CANCEL", "BYE", "OPTIONS", "NOTIFY", "SUBSCRIBE" + }; + + ## Event that can be handled to access the SIP record as it is sent on + ## to the logging framework. + global log_sip: event(rec: Info); +} + +# Add the sip state tracking fields to the connection record. +redef record connection += { + sip: Info &optional; + sip_state: State &optional; +}; + +const ports = { 5060/udp }; +redef likely_server_ports += { ports }; + +event zeek_init() &priority=5 + { + Log::create_stream(SIP::LOG, [$columns=Info, $ev=log_sip, $path="sip"]); + Analyzer::register_for_ports(Analyzer::ANALYZER_SIP, ports); + } + +function new_sip_session(c: connection): Info + { + local tmp: Info; + tmp$ts=network_time(); + tmp$uid=c$uid; + tmp$id=c$id; + # $current_request is set prior to the Info record creation so we + # can use the value directly here. + tmp$trans_depth = c$sip_state$current_request; + + tmp$request_path = vector(); + tmp$response_path = vector(); + + return tmp; + } + +function set_state(c: connection, is_request: bool) + { + if ( ! c?$sip_state ) + { + local s: State; + c$sip_state = s; + } + + if ( is_request ) + { + if ( c$sip_state$current_request !in c$sip_state$pending ) + c$sip_state$pending[c$sip_state$current_request] = new_sip_session(c); + + c$sip = c$sip_state$pending[c$sip_state$current_request]; + } + else + { + if ( c$sip_state$current_response !in c$sip_state$pending ) + c$sip_state$pending[c$sip_state$current_response] = new_sip_session(c); + + c$sip = c$sip_state$pending[c$sip_state$current_response]; + } + } + +function flush_pending(c: connection) + { + # Flush all pending but incomplete request/response pairs. + if ( c?$sip_state ) + { + for ( r, info in c$sip_state$pending ) + { + # We don't use pending elements at index 0. + if ( r == 0 ) + next; + + Log::write(SIP::LOG, info); + } + } + } + +event sip_request(c: connection, method: string, original_URI: string, version: string) &priority=5 + { + set_state(c, T); + + c$sip$method = method; + c$sip$uri = original_URI; + + if ( method !in sip_methods ) + Reporter::conn_weird("unknown_SIP_method", c, method); + } + +event sip_reply(c: connection, version: string, code: count, reason: string) &priority=5 + { + set_state(c, F); + + if ( c$sip_state$current_response !in c$sip_state$pending && + (code < 100 && 200 <= code) ) + ++c$sip_state$current_response; + + c$sip$status_code = code; + c$sip$status_msg = reason; + } + +event sip_header(c: connection, is_request: bool, name: string, value: string) &priority=5 + { + if ( ! 
c?$sip_state ) + { + local s: State; + c$sip_state = s; + } + + if ( is_request ) # from client + { + if ( c$sip_state$current_request !in c$sip_state$pending ) + ++c$sip_state$current_request; + set_state(c, is_request); + switch ( name ) + { + case "CALL-ID": + c$sip$call_id = value; + break; + case "CONTENT-LENGTH", "L": + c$sip$request_body_len = to_count(value); + break; + case "CSEQ": + c$sip$seq = value; + break; + case "DATE": + c$sip$date = value; + break; + case "FROM", "F": + c$sip$request_from = split_string1(value, /;[ ]?tag=/)[0]; + break; + case "REPLY-TO": + c$sip$reply_to = value; + break; + case "SUBJECT", "S": + c$sip$subject = value; + break; + case "TO", "T": + c$sip$request_to = value; + break; + case "USER-AGENT": + c$sip$user_agent = value; + break; + case "VIA", "V": + c$sip$request_path += split_string1(value, /;[ ]?branch/)[0]; + break; + } + + c$sip_state$pending[c$sip_state$current_request] = c$sip; + } + else # from server + { + if ( c$sip_state$current_response !in c$sip_state$pending ) + ++c$sip_state$current_response; + + set_state(c, is_request); + switch ( name ) + { + case "CONTENT-LENGTH", "L": + c$sip$response_body_len = to_count(value); + break; + case "CONTENT-TYPE", "C": + c$sip$content_type = value; + break; + case "WARNING": + c$sip$warning = value; + break; + case "FROM", "F": + c$sip$response_from = split_string1(value, /;[ ]?tag=/)[0]; + break; + case "TO", "T": + c$sip$response_to = value; + break; + case "VIA", "V": + c$sip$response_path += split_string1(value, /;[ ]?branch/)[0]; + break; + } + + c$sip_state$pending[c$sip_state$current_response] = c$sip; + } + } + +event sip_end_entity(c: connection, is_request: bool) &priority = 5 + { + set_state(c, is_request); + } + +event sip_end_entity(c: connection, is_request: bool) &priority = -5 + { + # The reply body is done so we're ready to log. + if ( ! is_request ) + { + Log::write(SIP::LOG, c$sip); + + if ( c$sip$status_code < 100 || 200 <= c$sip$status_code ) + delete c$sip_state$pending[c$sip_state$current_response]; + + if ( ! 
c$sip?$method || ( c$sip$method == "BYE" && + c$sip$status_code >= 200 && c$sip$status_code < 300 ) ) + { + flush_pending(c); + delete c$sip; + delete c$sip_state; + } + } + } + +event connection_state_remove(c: connection) &priority=-5 + { + if ( c?$sip_state ) + { + for ( r, info in c$sip_state$pending ) + { + Log::write(SIP::LOG, info); + } + } + } + diff --git a/scripts/base/protocols/smb/__load__.bro b/scripts/base/protocols/smb/__load__.zeek similarity index 100% rename from scripts/base/protocols/smb/__load__.bro rename to scripts/base/protocols/smb/__load__.zeek diff --git a/scripts/base/protocols/smb/const-dos-error.bro b/scripts/base/protocols/smb/const-dos-error.zeek similarity index 100% rename from scripts/base/protocols/smb/const-dos-error.bro rename to scripts/base/protocols/smb/const-dos-error.zeek diff --git a/scripts/base/protocols/smb/const-nt-status.bro b/scripts/base/protocols/smb/const-nt-status.zeek similarity index 100% rename from scripts/base/protocols/smb/const-nt-status.bro rename to scripts/base/protocols/smb/const-nt-status.zeek diff --git a/scripts/base/protocols/smb/consts.bro b/scripts/base/protocols/smb/consts.bro deleted file mode 100644 index f36d029be9..0000000000 --- a/scripts/base/protocols/smb/consts.bro +++ /dev/null @@ -1,271 +0,0 @@ -module SMB; - -export { - type StatusCode: record { - id: string; - desc: string; - }; - - const statuses: table[count] of StatusCode = { - [0x00000000] = [$id="SUCCESS", $desc="The operation completed successfully."], - } &redef &default=function(i: count):StatusCode { local unknown=fmt("unknown-%d", i); return [$id=unknown, $desc=unknown]; }; - - ## Heuristic detection of named pipes when the pipe - ## mapping isn't seen. This variable is defined in - ## init-bare.bro. - redef SMB::pipe_filenames = { - "spoolss", - "winreg", - "samr", - "srvsvc", - "netdfs", - "lsarpc", - "wkssvc", - "MsFteWds", - }; - - ## The UUIDs used by the various RPC endpoints. - const rpc_uuids: table[string] of string = { - ["4b324fc8-1670-01d3-1278-5a47bf6ee188"] = "Server Service", - ["6bffd098-a112-3610-9833-46c3f87e345a"] = "Workstation Service", - } &redef &default=function(i: string):string { return fmt("unknown-uuid-%s", i); }; - - ## Server service sub commands. 
- const srv_cmds: table[count] of string = { - [8] = "NetrConnectionEnum", - [9] = "NetrFileEnum", - [10] = "NetrFileGetInfo", - [11] = "NetrFileClose", - [12] = "NetrSessionEnum", - [13] = "NetrSessionDel", - [14] = "NetrShareAdd", - [15] = "NetrShareEnum", - [16] = "NetrShareGetInfo", - [17] = "NetrShareSetInfo", - [18] = "NetrShareDel", - [19] = "NetrShareDelSticky", - [20] = "NetrShareCheck", - [21] = "NetrServerGetInfo", - [22] = "NetrServerSetInfo", - [23] = "NetrServerDiskEnum", - [24] = "NetrServerStatisticsGet", - [25] = "NetrServerTransportAdd", - [26] = "NetrServerTransportEnum", - [27] = "NetrServerTransportDel", - [28] = "NetrRemoteTOD", - [30] = "NetprPathType", - [31] = "NetprPathCanonicalize", - [32] = "NetprPathCompare", - [33] = "NetprNameValidate", - [34] = "NetprNameCanonicalize", - [35] = "NetprNameCompare", - [36] = "NetrShareEnumSticky", - [37] = "NetrShareDelStart", - [38] = "NetrShareDelCommit", - [39] = "NetrGetFileSecurity", - [40] = "NetrSetFileSecurity", - [41] = "NetrServerTransportAddEx", - [43] = "NetrDfsGetVersion", - [44] = "NetrDfsCreateLocalPartition", - [45] = "NetrDfsDeleteLocalPartition", - [46] = "NetrDfsSetLocalVolumeState", - [48] = "NetrDfsCreateExitPoint", - [49] = "NetrDfsDeleteExitPoint", - [50] = "NetrDfsModifyPrefix", - [51] = "NetrDfsFixLocalVolume", - [52] = "NetrDfsManagerReportSiteInfo", - [53] = "NetrServerTransportDelEx", - [54] = "NetrServerAliasAdd", - [55] = "NetrServerAliasEnum", - [56] = "NetrServerAliasDel", - [57] = "NetrShareDelEx", - } &redef &default=function(i: count):string { return fmt("unknown-srv-command-%d", i); }; - - ## Workstation service sub commands. - const wksta_cmds: table[count] of string = { - [0] = "NetrWkstaGetInfo", - [1] = "NetrWkstaSetInfo", - [2] = "NetrWkstaUserEnum", - [5] = "NetrWkstaTransportEnum", - [6] = "NetrWkstaTransportAdd", - [7] = "NetrWkstaTransportDel", - [8] = "NetrUseAdd", - [9] = "NetrUseGetInfo", - [10] = "NetrUseDel", - [11] = "NetrUseEnum", - [13] = "NetrWorkstationStatisticsGet", - [20] = "NetrGetJoinInformation", - [22] = "NetrJoinDomain2", - [23] = "NetrUnjoinDomain2", - [24] = "NetrRenameMachineInDomain2", - [25] = "NetrValidateName2", - [26] = "NetrGetJoinableOUs2", - [27] = "NetrAddAlternateComputerName", - [28] = "NetrRemoveAlternateComputerName", - [29] = "NetrSetPrimaryComputerName", - [30] = "NetrEnumerateComputerNames", - } &redef &default=function(i: count):string { return fmt("unknown-wksta-command-%d", i); }; - - type rpc_cmd_table: table[count] of string; - - ## The subcommands for RPC endpoints. 
- const rpc_sub_cmds: table[string] of rpc_cmd_table = { - ["4b324fc8-1670-01d3-1278-5a47bf6ee188"] = srv_cmds, - ["6bffd098-a112-3610-9833-46c3f87e345a"] = wksta_cmds, - } &redef &default=function(i: string):rpc_cmd_table { return table() &default=function(j: string):string { return fmt("unknown-uuid-%s", j); }; }; - -} - -module SMB1; - -export { - const commands: table[count] of string = { - [0x00] = "CREATE_DIRECTORY", - [0x01] = "DELETE_DIRECTORY", - [0x02] = "OPEN", - [0x03] = "CREATE", - [0x04] = "CLOSE", - [0x05] = "FLUSH", - [0x06] = "DELETE", - [0x07] = "RENAME", - [0x08] = "QUERY_INFORMATION", - [0x09] = "SET_INFORMATION", - [0x0A] = "READ", - [0x0B] = "WRITE", - [0x0C] = "LOCK_BYTE_RANGE", - [0x0D] = "UNLOCK_BYTE_RANGE", - [0x0E] = "CREATE_TEMPORARY", - [0x0F] = "CREATE_NEW", - [0x10] = "CHECK_DIRECTORY", - [0x11] = "PROCESS_EXIT", - [0x12] = "SEEK", - [0x13] = "LOCK_AND_READ", - [0x14] = "WRITE_AND_UNLOCK", - [0x1A] = "READ_RAW", - [0x1B] = "READ_MPX", - [0x1C] = "READ_MPX_SECONDARY", - [0x1D] = "WRITE_RAW", - [0x1E] = "WRITE_MPX", - [0x1F] = "WRITE_MPX_SECONDARY", - [0x20] = "WRITE_COMPLETE", - [0x21] = "QUERY_SERVER", - [0x22] = "SET_INFORMATION2", - [0x23] = "QUERY_INFORMATION2", - [0x24] = "LOCKING_ANDX", - [0x25] = "TRANSACTION", - [0x26] = "TRANSACTION_SECONDARY", - [0x27] = "IOCTL", - [0x28] = "IOCTL_SECONDARY", - [0x29] = "COPY", - [0x2A] = "MOVE", - [0x2B] = "ECHO", - [0x2C] = "WRITE_AND_CLOSE", - [0x2D] = "OPEN_ANDX", - [0x2E] = "READ_ANDX", - [0x2F] = "WRITE_ANDX", - [0x30] = "NEW_FILE_SIZE", - [0x31] = "CLOSE_AND_TREE_DISC", - [0x32] = "TRANSACTION2", - [0x33] = "TRANSACTION2_SECONDARY", - [0x34] = "FIND_CLOSE2", - [0x35] = "FIND_NOTIFY_CLOSE", - [0x70] = "TREE_CONNECT", - [0x71] = "TREE_DISCONNECT", - [0x72] = "NEGOTIATE", - [0x73] = "SESSION_SETUP_ANDX", - [0x74] = "LOGOFF_ANDX", - [0x75] = "TREE_CONNECT_ANDX", - [0x80] = "QUERY_INFORMATION_DISK", - [0x81] = "SEARCH", - [0x82] = "FIND", - [0x83] = "FIND_UNIQUE", - [0x84] = "FIND_CLOSE", - [0xA0] = "NT_TRANSACT", - [0xA1] = "NT_TRANSACT_SECONDARY", - [0xA2] = "NT_CREATE_ANDX", - [0xA4] = "NT_CANCEL", - [0xA5] = "NT_RENAME", - [0xC0] = "OPEN_PRINT_FILE", - [0xC1] = "WRITE_PRINT_FILE", - [0xC2] = "CLOSE_PRINT_FILE", - [0xC3] = "GET_PRINT_QUEUE", - [0xD8] = "READ_BULK", - [0xD9] = "WRITE_BULK", - [0xDA] = "WRITE_BULK_DATA", - } &default=function(i: count):string { return fmt("unknown-%d", i); }; - - const trans2_sub_commands: table[count] of string = { - [0x00] = "OPEN2", - [0x01] = "FIND_FIRST2", - [0x02] = "FIND_NEXT2", - [0x03] = "QUERY_FS_INFORMATION", - [0x04] = "SET_FS_INFORMATION", - [0x05] = "QUERY_PATH_INFORMATION", - [0x06] = "SET_PATH_INFORMATION", - [0x07] = "QUERY_FILE_INFORMATION", - [0x08] = "SET_FILE_INFORMATION", - [0x09] = "FSCTL", - [0x0A] = "IOCTL", - [0x0B] = "FIND_NOTIFY_FIRST", - [0x0C] = "FIND_NOTIFY_NEXT", - [0x0D] = "CREATE_DIRECTORY", - [0x0E] = "SESSION_SETUP", - [0x10] = "GET_DFS_REFERRAL", - [0x11] = "REPORT_DFS_INCONSISTENCY", - } &default=function(i: count):string { return fmt("unknown-trans2-sub-cmd-%d", i); }; - - const trans_sub_commands: table[count] of string = { - [0x01] = "SET_NMPIPE_STATE", - [0x11] = "RAW_READ_NMPIPE", - [0x21] = "QUERY_NMPIPE_STATE", - [0x22] = "QUERY_NMPIPE_INFO", - [0x23] = "PEEK_NMPIPE", - [0x26] = "TRANSACT_NMPIPE", - [0x31] = "RAW_WRITE_NMPIPE", - [0x36] = "READ_NMPIPE", - [0x37] = "WRITE_NMPIPE", - [0x53] = "WAIT_NMPIPE", - [0x54] = "CALL_NMPIPE", - } &default=function(i: count):string { return fmt("unknown-trans-sub-cmd-%d", i); }; -} - -module SMB2; - 
-export { - const commands: table[count] of string = { - [0] = "NEGOTIATE_PROTOCOL", - [1] = "SESSION_SETUP", - [2] = "LOGOFF", - [3] = "TREE_CONNECT", - [4] = "TREE_DISCONNECT", - [5] = "CREATE", - [6] = "CLOSE", - [7] = "FLUSH", - [8] = "READ", - [9] = "WRITE", - [10] = "LOCK", - [11] = "IOCTL", - [12] = "CANCEL", - [13] = "ECHO", - [14] = "QUERY_DIRECTORY", - [15] = "CHANGE_NOTIFY", - [16] = "QUERY_INFO", - [17] = "SET_INFO", - [18] = "OPLOCK_BREAK" - } &default=function(i: count): string { return fmt("unknown-%d", i); }; - - const dialects: table[count] of string = { - [0x0202] = "2.0.2", - [0x0210] = "2.1", - [0x0300] = "3.0", - [0x0302] = "3.0.2", - [0x0311] = "3.1.1", - [0x02FF] = "2.1+", - } &default=function(i: count): string { return fmt("unknown-%d", i); }; - - const share_types: table[count] of string = { - [1] = "DISK", - [2] = "PIPE", - [3] = "PRINT", - } &default=function(i: count): string { return fmt("unknown-%d", i); }; -} diff --git a/scripts/base/protocols/smb/consts.zeek b/scripts/base/protocols/smb/consts.zeek new file mode 100644 index 0000000000..32a03dd17d --- /dev/null +++ b/scripts/base/protocols/smb/consts.zeek @@ -0,0 +1,271 @@ +module SMB; + +export { + type StatusCode: record { + id: string; + desc: string; + }; + + const statuses: table[count] of StatusCode = { + [0x00000000] = [$id="SUCCESS", $desc="The operation completed successfully."], + } &redef &default=function(i: count):StatusCode { local unknown=fmt("unknown-%d", i); return [$id=unknown, $desc=unknown]; }; + + ## Heuristic detection of named pipes when the pipe + ## mapping isn't seen. This variable is defined in + ## init-bare.zeek. + redef SMB::pipe_filenames = { + "spoolss", + "winreg", + "samr", + "srvsvc", + "netdfs", + "lsarpc", + "wkssvc", + "MsFteWds", + }; + + ## The UUIDs used by the various RPC endpoints. + const rpc_uuids: table[string] of string = { + ["4b324fc8-1670-01d3-1278-5a47bf6ee188"] = "Server Service", + ["6bffd098-a112-3610-9833-46c3f87e345a"] = "Workstation Service", + } &redef &default=function(i: string):string { return fmt("unknown-uuid-%s", i); }; + + ## Server service sub commands. 
+ const srv_cmds: table[count] of string = { + [8] = "NetrConnectionEnum", + [9] = "NetrFileEnum", + [10] = "NetrFileGetInfo", + [11] = "NetrFileClose", + [12] = "NetrSessionEnum", + [13] = "NetrSessionDel", + [14] = "NetrShareAdd", + [15] = "NetrShareEnum", + [16] = "NetrShareGetInfo", + [17] = "NetrShareSetInfo", + [18] = "NetrShareDel", + [19] = "NetrShareDelSticky", + [20] = "NetrShareCheck", + [21] = "NetrServerGetInfo", + [22] = "NetrServerSetInfo", + [23] = "NetrServerDiskEnum", + [24] = "NetrServerStatisticsGet", + [25] = "NetrServerTransportAdd", + [26] = "NetrServerTransportEnum", + [27] = "NetrServerTransportDel", + [28] = "NetrRemoteTOD", + [30] = "NetprPathType", + [31] = "NetprPathCanonicalize", + [32] = "NetprPathCompare", + [33] = "NetprNameValidate", + [34] = "NetprNameCanonicalize", + [35] = "NetprNameCompare", + [36] = "NetrShareEnumSticky", + [37] = "NetrShareDelStart", + [38] = "NetrShareDelCommit", + [39] = "NetrGetFileSecurity", + [40] = "NetrSetFileSecurity", + [41] = "NetrServerTransportAddEx", + [43] = "NetrDfsGetVersion", + [44] = "NetrDfsCreateLocalPartition", + [45] = "NetrDfsDeleteLocalPartition", + [46] = "NetrDfsSetLocalVolumeState", + [48] = "NetrDfsCreateExitPoint", + [49] = "NetrDfsDeleteExitPoint", + [50] = "NetrDfsModifyPrefix", + [51] = "NetrDfsFixLocalVolume", + [52] = "NetrDfsManagerReportSiteInfo", + [53] = "NetrServerTransportDelEx", + [54] = "NetrServerAliasAdd", + [55] = "NetrServerAliasEnum", + [56] = "NetrServerAliasDel", + [57] = "NetrShareDelEx", + } &redef &default=function(i: count):string { return fmt("unknown-srv-command-%d", i); }; + + ## Workstation service sub commands. + const wksta_cmds: table[count] of string = { + [0] = "NetrWkstaGetInfo", + [1] = "NetrWkstaSetInfo", + [2] = "NetrWkstaUserEnum", + [5] = "NetrWkstaTransportEnum", + [6] = "NetrWkstaTransportAdd", + [7] = "NetrWkstaTransportDel", + [8] = "NetrUseAdd", + [9] = "NetrUseGetInfo", + [10] = "NetrUseDel", + [11] = "NetrUseEnum", + [13] = "NetrWorkstationStatisticsGet", + [20] = "NetrGetJoinInformation", + [22] = "NetrJoinDomain2", + [23] = "NetrUnjoinDomain2", + [24] = "NetrRenameMachineInDomain2", + [25] = "NetrValidateName2", + [26] = "NetrGetJoinableOUs2", + [27] = "NetrAddAlternateComputerName", + [28] = "NetrRemoveAlternateComputerName", + [29] = "NetrSetPrimaryComputerName", + [30] = "NetrEnumerateComputerNames", + } &redef &default=function(i: count):string { return fmt("unknown-wksta-command-%d", i); }; + + type rpc_cmd_table: table[count] of string; + + ## The subcommands for RPC endpoints. 
+ const rpc_sub_cmds: table[string] of rpc_cmd_table = { + ["4b324fc8-1670-01d3-1278-5a47bf6ee188"] = srv_cmds, + ["6bffd098-a112-3610-9833-46c3f87e345a"] = wksta_cmds, + } &redef &default=function(i: string):rpc_cmd_table { return table() &default=function(j: string):string { return fmt("unknown-uuid-%s", j); }; }; + +} + +module SMB1; + +export { + const commands: table[count] of string = { + [0x00] = "CREATE_DIRECTORY", + [0x01] = "DELETE_DIRECTORY", + [0x02] = "OPEN", + [0x03] = "CREATE", + [0x04] = "CLOSE", + [0x05] = "FLUSH", + [0x06] = "DELETE", + [0x07] = "RENAME", + [0x08] = "QUERY_INFORMATION", + [0x09] = "SET_INFORMATION", + [0x0A] = "READ", + [0x0B] = "WRITE", + [0x0C] = "LOCK_BYTE_RANGE", + [0x0D] = "UNLOCK_BYTE_RANGE", + [0x0E] = "CREATE_TEMPORARY", + [0x0F] = "CREATE_NEW", + [0x10] = "CHECK_DIRECTORY", + [0x11] = "PROCESS_EXIT", + [0x12] = "SEEK", + [0x13] = "LOCK_AND_READ", + [0x14] = "WRITE_AND_UNLOCK", + [0x1A] = "READ_RAW", + [0x1B] = "READ_MPX", + [0x1C] = "READ_MPX_SECONDARY", + [0x1D] = "WRITE_RAW", + [0x1E] = "WRITE_MPX", + [0x1F] = "WRITE_MPX_SECONDARY", + [0x20] = "WRITE_COMPLETE", + [0x21] = "QUERY_SERVER", + [0x22] = "SET_INFORMATION2", + [0x23] = "QUERY_INFORMATION2", + [0x24] = "LOCKING_ANDX", + [0x25] = "TRANSACTION", + [0x26] = "TRANSACTION_SECONDARY", + [0x27] = "IOCTL", + [0x28] = "IOCTL_SECONDARY", + [0x29] = "COPY", + [0x2A] = "MOVE", + [0x2B] = "ECHO", + [0x2C] = "WRITE_AND_CLOSE", + [0x2D] = "OPEN_ANDX", + [0x2E] = "READ_ANDX", + [0x2F] = "WRITE_ANDX", + [0x30] = "NEW_FILE_SIZE", + [0x31] = "CLOSE_AND_TREE_DISC", + [0x32] = "TRANSACTION2", + [0x33] = "TRANSACTION2_SECONDARY", + [0x34] = "FIND_CLOSE2", + [0x35] = "FIND_NOTIFY_CLOSE", + [0x70] = "TREE_CONNECT", + [0x71] = "TREE_DISCONNECT", + [0x72] = "NEGOTIATE", + [0x73] = "SESSION_SETUP_ANDX", + [0x74] = "LOGOFF_ANDX", + [0x75] = "TREE_CONNECT_ANDX", + [0x80] = "QUERY_INFORMATION_DISK", + [0x81] = "SEARCH", + [0x82] = "FIND", + [0x83] = "FIND_UNIQUE", + [0x84] = "FIND_CLOSE", + [0xA0] = "NT_TRANSACT", + [0xA1] = "NT_TRANSACT_SECONDARY", + [0xA2] = "NT_CREATE_ANDX", + [0xA4] = "NT_CANCEL", + [0xA5] = "NT_RENAME", + [0xC0] = "OPEN_PRINT_FILE", + [0xC1] = "WRITE_PRINT_FILE", + [0xC2] = "CLOSE_PRINT_FILE", + [0xC3] = "GET_PRINT_QUEUE", + [0xD8] = "READ_BULK", + [0xD9] = "WRITE_BULK", + [0xDA] = "WRITE_BULK_DATA", + } &default=function(i: count):string { return fmt("unknown-%d", i); }; + + const trans2_sub_commands: table[count] of string = { + [0x00] = "OPEN2", + [0x01] = "FIND_FIRST2", + [0x02] = "FIND_NEXT2", + [0x03] = "QUERY_FS_INFORMATION", + [0x04] = "SET_FS_INFORMATION", + [0x05] = "QUERY_PATH_INFORMATION", + [0x06] = "SET_PATH_INFORMATION", + [0x07] = "QUERY_FILE_INFORMATION", + [0x08] = "SET_FILE_INFORMATION", + [0x09] = "FSCTL", + [0x0A] = "IOCTL", + [0x0B] = "FIND_NOTIFY_FIRST", + [0x0C] = "FIND_NOTIFY_NEXT", + [0x0D] = "CREATE_DIRECTORY", + [0x0E] = "SESSION_SETUP", + [0x10] = "GET_DFS_REFERRAL", + [0x11] = "REPORT_DFS_INCONSISTENCY", + } &default=function(i: count):string { return fmt("unknown-trans2-sub-cmd-%d", i); }; + + const trans_sub_commands: table[count] of string = { + [0x01] = "SET_NMPIPE_STATE", + [0x11] = "RAW_READ_NMPIPE", + [0x21] = "QUERY_NMPIPE_STATE", + [0x22] = "QUERY_NMPIPE_INFO", + [0x23] = "PEEK_NMPIPE", + [0x26] = "TRANSACT_NMPIPE", + [0x31] = "RAW_WRITE_NMPIPE", + [0x36] = "READ_NMPIPE", + [0x37] = "WRITE_NMPIPE", + [0x53] = "WAIT_NMPIPE", + [0x54] = "CALL_NMPIPE", + } &default=function(i: count):string { return fmt("unknown-trans-sub-cmd-%d", i); }; +} + +module SMB2; + 
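The SMB1 constant tables above all rely on Zeek's &default attribute, so a lookup never fails: a code that is present returns its mapped name, while an unknown code is routed through the default function, which synthesizes a placeholder string. A minimal sketch of that behaviour, using only the SMB1 tables defined above (illustrative editor's aside, not part of the patched file):

event zeek_init()
	{
	# Known command code: direct table hit.
	print SMB1::commands[0x72];             # "NEGOTIATE"
	# Unknown command code: the &default function builds a fallback name.
	print SMB1::commands[0xFF];             # "unknown-255"
	print SMB1::trans2_sub_commands[0x20];  # "unknown-trans2-sub-cmd-32"
	}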
+export { + const commands: table[count] of string = { + [0] = "NEGOTIATE_PROTOCOL", + [1] = "SESSION_SETUP", + [2] = "LOGOFF", + [3] = "TREE_CONNECT", + [4] = "TREE_DISCONNECT", + [5] = "CREATE", + [6] = "CLOSE", + [7] = "FLUSH", + [8] = "READ", + [9] = "WRITE", + [10] = "LOCK", + [11] = "IOCTL", + [12] = "CANCEL", + [13] = "ECHO", + [14] = "QUERY_DIRECTORY", + [15] = "CHANGE_NOTIFY", + [16] = "QUERY_INFO", + [17] = "SET_INFO", + [18] = "OPLOCK_BREAK" + } &default=function(i: count): string { return fmt("unknown-%d", i); }; + + const dialects: table[count] of string = { + [0x0202] = "2.0.2", + [0x0210] = "2.1", + [0x0300] = "3.0", + [0x0302] = "3.0.2", + [0x0311] = "3.1.1", + [0x02FF] = "2.1+", + } &default=function(i: count): string { return fmt("unknown-%d", i); }; + + const share_types: table[count] of string = { + [1] = "DISK", + [2] = "PIPE", + [3] = "PRINT", + } &default=function(i: count): string { return fmt("unknown-%d", i); }; +} diff --git a/scripts/base/protocols/smb/files.bro b/scripts/base/protocols/smb/files.bro deleted file mode 100644 index 5916624941..0000000000 --- a/scripts/base/protocols/smb/files.bro +++ /dev/null @@ -1,69 +0,0 @@ -@load base/frameworks/files -@load ./main - -module SMB; - -export { - ## Default file handle provider for SMB. - global get_file_handle: function(c: connection, is_orig: bool): string; - - ## Default file describer for SMB. - global describe_file: function(f: fa_file): string; -} - -function get_file_handle(c: connection, is_orig: bool): string - { - if ( ! (c$smb_state?$current_file && - (c$smb_state$current_file?$name || - c$smb_state$current_file?$path)) ) - { - # TODO - figure out what are the cases where this happens. - return ""; - } - local current_file = c$smb_state$current_file; - local path_name = current_file?$path ? current_file$path : ""; - local file_name = current_file?$name ? current_file$name : ""; - # Include last_mod time if available because if a file has been modified it - # should be considered a new file. - local last_mod = cat(current_file?$times ? current_file$times$modified : double_to_time(0.0)); - # TODO: This is doing hexdump to avoid problems due to file analysis handling - # using CheckString which is not immune to encapsulated null bytes. - # This needs to be fixed lower in the file analysis code later. - return hexdump(cat(Analyzer::ANALYZER_SMB, c$id$orig_h, c$id$resp_h, path_name, file_name, last_mod)); - } - -function describe_file(f: fa_file): string - { - # This shouldn't be needed, but just in case... 
- if ( f$source != "SMB" ) - return ""; - - for ( cid, c in f$conns ) - { - if ( c?$smb_state && c$smb_state?$current_file && c$smb_state$current_file?$name ) - return c$smb_state$current_file$name; - } - return ""; - } - -event bro_init() &priority=5 - { - Files::register_protocol(Analyzer::ANALYZER_SMB, - [$get_file_handle = SMB::get_file_handle, - $describe = SMB::describe_file]); - } - -event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5 - { - if ( c?$smb_state && c$smb_state?$current_file ) - { - c$smb_state$current_file$fuid = f$id; - - if ( c$smb_state$current_file$size > 0 ) - f$total_bytes = c$smb_state$current_file$size; - - if ( c$smb_state$current_file?$name ) - f$info$filename = c$smb_state$current_file$name; - write_file_log(c$smb_state); - } - } diff --git a/scripts/base/protocols/smb/files.zeek b/scripts/base/protocols/smb/files.zeek new file mode 100644 index 0000000000..ac719d728f --- /dev/null +++ b/scripts/base/protocols/smb/files.zeek @@ -0,0 +1,69 @@ +@load base/frameworks/files +@load ./main + +module SMB; + +export { + ## Default file handle provider for SMB. + global get_file_handle: function(c: connection, is_orig: bool): string; + + ## Default file describer for SMB. + global describe_file: function(f: fa_file): string; +} + +function get_file_handle(c: connection, is_orig: bool): string + { + if ( ! (c$smb_state?$current_file && + (c$smb_state$current_file?$name || + c$smb_state$current_file?$path)) ) + { + # TODO - figure out what are the cases where this happens. + return ""; + } + local current_file = c$smb_state$current_file; + local path_name = current_file?$path ? current_file$path : ""; + local file_name = current_file?$name ? current_file$name : ""; + # Include last_mod time if available because if a file has been modified it + # should be considered a new file. + local last_mod = cat(current_file?$times ? current_file$times$modified : double_to_time(0.0)); + # TODO: This is doing hexdump to avoid problems due to file analysis handling + # using CheckString which is not immune to encapsulated null bytes. + # This needs to be fixed lower in the file analysis code later. + return hexdump(cat(Analyzer::ANALYZER_SMB, c$id$orig_h, c$id$resp_h, path_name, file_name, last_mod)); + } + +function describe_file(f: fa_file): string + { + # This shouldn't be needed, but just in case... 
+ if ( f$source != "SMB" ) + return ""; + + for ( cid, c in f$conns ) + { + if ( c?$smb_state && c$smb_state?$current_file && c$smb_state$current_file?$name ) + return c$smb_state$current_file$name; + } + return ""; + } + +event zeek_init() &priority=5 + { + Files::register_protocol(Analyzer::ANALYZER_SMB, + [$get_file_handle = SMB::get_file_handle, + $describe = SMB::describe_file]); + } + +event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5 + { + if ( c?$smb_state && c$smb_state?$current_file ) + { + c$smb_state$current_file$fuid = f$id; + + if ( c$smb_state$current_file$size > 0 ) + f$total_bytes = c$smb_state$current_file$size; + + if ( c$smb_state$current_file?$name ) + f$info$filename = c$smb_state$current_file$name; + write_file_log(c$smb_state); + } + } diff --git a/scripts/base/protocols/smb/main.bro b/scripts/base/protocols/smb/main.bro deleted file mode 100644 index 07225548be..0000000000 --- a/scripts/base/protocols/smb/main.bro +++ /dev/null @@ -1,249 +0,0 @@ -@load ./consts -@load ./const-dos-error -@load ./const-nt-status - -module SMB; - -export { - redef enum Log::ID += { - AUTH_LOG, - MAPPING_LOG, - FILES_LOG - }; - - ## Abstracted actions for SMB file actions. - type Action: enum { - FILE_READ, - FILE_WRITE, - FILE_OPEN, - FILE_CLOSE, - FILE_DELETE, - FILE_RENAME, - FILE_SET_ATTRIBUTE, - - PIPE_READ, - PIPE_WRITE, - PIPE_OPEN, - PIPE_CLOSE, - - PRINT_READ, - PRINT_WRITE, - PRINT_OPEN, - PRINT_CLOSE, - }; - - ## The file actions which are logged. - option logged_file_actions: set[Action] = { - FILE_OPEN, - FILE_RENAME, - FILE_DELETE, - - PRINT_OPEN, - PRINT_CLOSE, - }; - - ## This record is for the smb_files.log - type FileInfo: record { - ## Time when the file was first discovered. - ts : time &log; - ## Unique ID of the connection the file was sent over. - uid : string &log; - ## ID of the connection the file was sent over. - id : conn_id &log; - ## Unique ID of the file. - fuid : string &log &optional; - - ## Action this log record represents. - action : Action &log &optional; - ## Path pulled from the tree this file was transferred to or from. - path : string &log &optional; - ## Filename if one was seen. - name : string &log &optional; - ## Total size of the file. - size : count &log &default=0; - ## If the rename action was seen, this will be - ## the file's previous name. - prev_name : string &log &optional; - ## Last time this file was modified. - times : SMB::MACTimes &log &optional; - }; - - ## This record is for the smb_mapping.log - type TreeInfo: record { - ## Time when the tree was mapped. - ts : time &log &optional; - ## Unique ID of the connection the tree was mapped over. - uid : string &log; - ## ID of the connection the tree was mapped over. - id : conn_id &log; - - ## Name of the tree path. - path : string &log &optional; - ## The type of resource of the tree (disk share, printer share, named pipe, etc.). - service : string &log &optional; - ## File system of the tree. - native_file_system : string &log &optional; - ## If this is SMB2, a share type will be included. For SMB1, - ## the type of share will be deduced and included as well. - share_type : string &log &default="DISK"; - }; - - ## This record is for the smb_cmd.log - type CmdInfo: record { - ## Timestamp of the command request. - ts : time &log; - ## Unique ID of the connection the request was sent over. - uid : string &log; - ## ID of the connection the request was sent over. - id : conn_id &log; - - ## The command sent by the client. 
- command : string &log; - ## The subcommand sent by the client, if present. - sub_command : string &log &optional; - ## Command argument sent by the client, if any. - argument : string &log &optional; - - ## Server reply to the client's command. - status : string &log &optional; - ## Round trip time from the request to the response. - rtt : interval &log &optional; - ## Version of SMB for the command. - version : string &log; - - ## Authenticated username, if available. - username : string &log &optional; - - ## If this is related to a tree, this is the tree - ## that was used for the current command. - tree : string &log &optional; - ## The type of tree (disk share, printer share, named pipe, etc.). - tree_service : string &log &optional; - - ## If the command referenced a file, store it here. - referenced_file : FileInfo &log &optional; - ## If the command referenced a tree, store it here. - referenced_tree : TreeInfo &optional; - }; - - ## This record stores the SMB state of in-flight commands, - ## the file and tree map of the connection. - type State: record { - ## A reference to the current command. - current_cmd : CmdInfo &optional; - ## A reference to the current file. - current_file : FileInfo &optional; - ## A reference to the current tree. - current_tree : TreeInfo &optional; - - ## Indexed on MID to map responses to requests. - pending_cmds : table[count] of CmdInfo &optional; - ## File map to retrieve file information based on the file ID. - fid_map : table[count] of FileInfo &optional; - ## Tree map to retrieve tree information based on the tree ID. - tid_map : table[count] of TreeInfo &optional; - ## User map to retrieve user name based on the user ID. - uid_map : table[count] of string &optional; - ## Pipe map to retrieve UUID based on the file ID of a pipe. - pipe_map : table[count] of string &optional; - - ## A set of recent files to avoid logging the same - ## files over and over in the smb files log. - ## This only applies to files seen in a single connection. - recent_files : set[string] &default=string_set() &read_expire=3min; - }; - - ## Everything below here is used internally in the SMB scripts. - - redef record connection += { - smb_state : State &optional; - }; - - ## This is an internally used function. - const set_current_file: function(smb_state: State, file_id: count) &redef; - - ## This is an internally used function. - const write_file_log: function(state: State) &redef; -} - -redef record FileInfo += { - ## ID referencing this file. - fid : count &optional; - - ## UUID referencing this file if DCE/RPC. 
- uuid : string &optional; -}; - -const ports = { 139/tcp, 445/tcp }; -redef likely_server_ports += { ports }; - -event bro_init() &priority=5 - { - Log::create_stream(SMB::FILES_LOG, [$columns=SMB::FileInfo, $path="smb_files"]); - Log::create_stream(SMB::MAPPING_LOG, [$columns=SMB::TreeInfo, $path="smb_mapping"]); - - Analyzer::register_for_ports(Analyzer::ANALYZER_SMB, ports); - } - -function set_current_file(smb_state: State, file_id: count) - { - if ( file_id !in smb_state$fid_map ) - { - smb_state$fid_map[file_id] = smb_state$current_cmd$referenced_file; - smb_state$fid_map[file_id]$fid = file_id; - } - - smb_state$current_cmd$referenced_file = smb_state$fid_map[file_id]; - smb_state$current_file = smb_state$current_cmd$referenced_file; - } - -function write_file_log(state: State) - { - local f = state$current_file; - if ( f?$name && - f$action in logged_file_actions ) - { - # Everything in this if statement is to avoid overlogging - # of the same data from a single connection based on recently - # seen files in the SMB::State $recent_files field. - if ( f?$times ) - { - local file_ident = cat(f$action, - f?$fuid ? f$fuid : "", - f?$name ? f$name : "", - f?$path ? f$path : "", - f$size, - f$times); - if ( file_ident in state$recent_files ) - { - # We've already seen this file and don't want to log it again. - return; - } - else - add state$recent_files[file_ident]; - } - - Log::write(FILES_LOG, f); - } - } - -event smb_pipe_connect_heuristic(c: connection) &priority=5 - { - c$smb_state$current_tree$path = ""; - c$smb_state$current_tree$share_type = "PIPE"; - } - -event file_state_remove(f: fa_file) &priority=-5 - { - if ( f$source != "SMB" ) - return; - - for ( id, c in f$conns ) - { - if ( c?$smb_state && c$smb_state?$current_file) - { - write_file_log(c$smb_state); - } - return; - } - } diff --git a/scripts/base/protocols/smb/main.zeek b/scripts/base/protocols/smb/main.zeek new file mode 100644 index 0000000000..5524bde4f0 --- /dev/null +++ b/scripts/base/protocols/smb/main.zeek @@ -0,0 +1,249 @@ +@load ./consts +@load ./const-dos-error +@load ./const-nt-status + +module SMB; + +export { + redef enum Log::ID += { + AUTH_LOG, + MAPPING_LOG, + FILES_LOG + }; + + ## Abstracted actions for SMB file actions. + type Action: enum { + FILE_READ, + FILE_WRITE, + FILE_OPEN, + FILE_CLOSE, + FILE_DELETE, + FILE_RENAME, + FILE_SET_ATTRIBUTE, + + PIPE_READ, + PIPE_WRITE, + PIPE_OPEN, + PIPE_CLOSE, + + PRINT_READ, + PRINT_WRITE, + PRINT_OPEN, + PRINT_CLOSE, + }; + + ## The file actions which are logged. + option logged_file_actions: set[Action] = { + FILE_OPEN, + FILE_RENAME, + FILE_DELETE, + + PRINT_OPEN, + PRINT_CLOSE, + }; + + ## This record is for the smb_files.log + type FileInfo: record { + ## Time when the file was first discovered. + ts : time &log; + ## Unique ID of the connection the file was sent over. + uid : string &log; + ## ID of the connection the file was sent over. + id : conn_id &log; + ## Unique ID of the file. + fuid : string &log &optional; + + ## Action this log record represents. + action : Action &log &optional; + ## Path pulled from the tree this file was transferred to or from. + path : string &log &optional; + ## Filename if one was seen. + name : string &log &optional; + ## Total size of the file. + size : count &log &default=0; + ## If the rename action was seen, this will be + ## the file's previous name. + prev_name : string &log &optional; + ## Last time this file was modified. 
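# Illustrative note, not part of this patch: SMB::MACTimes bundles the
# modified/accessed/created/changed timestamps reported over SMB, and
# files.zeek folds the modified timestamp into the file handle so that a file
# which has been rewritten is treated as a new file.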
+ times : SMB::MACTimes &log &optional; + }; + + ## This record is for the smb_mapping.log + type TreeInfo: record { + ## Time when the tree was mapped. + ts : time &log &optional; + ## Unique ID of the connection the tree was mapped over. + uid : string &log; + ## ID of the connection the tree was mapped over. + id : conn_id &log; + + ## Name of the tree path. + path : string &log &optional; + ## The type of resource of the tree (disk share, printer share, named pipe, etc.). + service : string &log &optional; + ## File system of the tree. + native_file_system : string &log &optional; + ## If this is SMB2, a share type will be included. For SMB1, + ## the type of share will be deduced and included as well. + share_type : string &log &default="DISK"; + }; + + ## This record is for the smb_cmd.log + type CmdInfo: record { + ## Timestamp of the command request. + ts : time &log; + ## Unique ID of the connection the request was sent over. + uid : string &log; + ## ID of the connection the request was sent over. + id : conn_id &log; + + ## The command sent by the client. + command : string &log; + ## The subcommand sent by the client, if present. + sub_command : string &log &optional; + ## Command argument sent by the client, if any. + argument : string &log &optional; + + ## Server reply to the client's command. + status : string &log &optional; + ## Round trip time from the request to the response. + rtt : interval &log &optional; + ## Version of SMB for the command. + version : string &log; + + ## Authenticated username, if available. + username : string &log &optional; + + ## If this is related to a tree, this is the tree + ## that was used for the current command. + tree : string &log &optional; + ## The type of tree (disk share, printer share, named pipe, etc.). + tree_service : string &log &optional; + + ## If the command referenced a file, store it here. + referenced_file : FileInfo &log &optional; + ## If the command referenced a tree, store it here. + referenced_tree : TreeInfo &optional; + }; + + ## This record stores the SMB state of in-flight commands, + ## the file and tree map of the connection. + type State: record { + ## A reference to the current command. + current_cmd : CmdInfo &optional; + ## A reference to the current file. + current_file : FileInfo &optional; + ## A reference to the current tree. + current_tree : TreeInfo &optional; + + ## Indexed on MID to map responses to requests. + pending_cmds : table[count] of CmdInfo &optional; + ## File map to retrieve file information based on the file ID. + fid_map : table[count] of FileInfo &optional; + ## Tree map to retrieve tree information based on the tree ID. + tid_map : table[count] of TreeInfo &optional; + ## User map to retrieve user name based on the user ID. + uid_map : table[count] of string &optional; + ## Pipe map to retrieve UUID based on the file ID of a pipe. + pipe_map : table[count] of string &optional; + + ## A set of recent files to avoid logging the same + ## files over and over in the smb files log. + ## This only applies to files seen in a single connection. + recent_files : set[string] &default=string_set() &read_expire=3min; + }; + + ## Everything below here is used internally in the SMB scripts. + + redef record connection += { + smb_state : State &optional; + }; + + ## This is an internally used function. + const set_current_file: function(smb_state: State, file_id: count) &redef; + + ## This is an internally used function. 
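# Illustrative note, not part of this patch: these internal hooks are declared
# as "const ... &redef" function values, which lets a site replace the
# implementation from a local policy script, roughly:
#     redef SMB::write_file_log = function(state: SMB::State) { ... };
# without editing this base script.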
+ const write_file_log: function(state: State) &redef; +} + +redef record FileInfo += { + ## ID referencing this file. + fid : count &optional; + + ## UUID referencing this file if DCE/RPC. + uuid : string &optional; +}; + +const ports = { 139/tcp, 445/tcp }; +redef likely_server_ports += { ports }; + +event zeek_init() &priority=5 + { + Log::create_stream(SMB::FILES_LOG, [$columns=SMB::FileInfo, $path="smb_files"]); + Log::create_stream(SMB::MAPPING_LOG, [$columns=SMB::TreeInfo, $path="smb_mapping"]); + + Analyzer::register_for_ports(Analyzer::ANALYZER_SMB, ports); + } + +function set_current_file(smb_state: State, file_id: count) + { + if ( file_id !in smb_state$fid_map ) + { + smb_state$fid_map[file_id] = smb_state$current_cmd$referenced_file; + smb_state$fid_map[file_id]$fid = file_id; + } + + smb_state$current_cmd$referenced_file = smb_state$fid_map[file_id]; + smb_state$current_file = smb_state$current_cmd$referenced_file; + } + +function write_file_log(state: State) + { + local f = state$current_file; + if ( f?$name && + f$action in logged_file_actions ) + { + # Everything in this if statement is to avoid overlogging + # of the same data from a single connection based on recently + # seen files in the SMB::State $recent_files field. + if ( f?$times ) + { + local file_ident = cat(f$action, + f?$fuid ? f$fuid : "", + f?$name ? f$name : "", + f?$path ? f$path : "", + f$size, + f$times); + if ( file_ident in state$recent_files ) + { + # We've already seen this file and don't want to log it again. + return; + } + else + add state$recent_files[file_ident]; + } + + Log::write(FILES_LOG, f); + } + } + +event smb_pipe_connect_heuristic(c: connection) &priority=5 + { + c$smb_state$current_tree$path = ""; + c$smb_state$current_tree$share_type = "PIPE"; + } + +event file_state_remove(f: fa_file) &priority=-5 + { + if ( f$source != "SMB" ) + return; + + for ( id, c in f$conns ) + { + if ( c?$smb_state && c$smb_state?$current_file) + { + write_file_log(c$smb_state); + } + return; + } + } diff --git a/scripts/base/protocols/smb/smb1-main.bro b/scripts/base/protocols/smb/smb1-main.zeek similarity index 100% rename from scripts/base/protocols/smb/smb1-main.bro rename to scripts/base/protocols/smb/smb1-main.zeek diff --git a/scripts/base/protocols/smb/smb2-main.bro b/scripts/base/protocols/smb/smb2-main.zeek similarity index 100% rename from scripts/base/protocols/smb/smb2-main.bro rename to scripts/base/protocols/smb/smb2-main.zeek diff --git a/scripts/base/protocols/smtp/__load__.bro b/scripts/base/protocols/smtp/__load__.zeek similarity index 100% rename from scripts/base/protocols/smtp/__load__.bro rename to scripts/base/protocols/smtp/__load__.zeek diff --git a/scripts/base/protocols/smtp/entities.bro b/scripts/base/protocols/smtp/entities.zeek similarity index 100% rename from scripts/base/protocols/smtp/entities.bro rename to scripts/base/protocols/smtp/entities.zeek diff --git a/scripts/base/protocols/smtp/files.bro b/scripts/base/protocols/smtp/files.bro deleted file mode 100644 index bf410fa201..0000000000 --- a/scripts/base/protocols/smtp/files.bro +++ /dev/null @@ -1,52 +0,0 @@ -@load ./main -@load ./entities -@load base/utils/conn-ids -@load base/frameworks/files - -module SMTP; - -export { - redef record Info += { - ## An ordered vector of file unique IDs seen attached to - ## the message. - fuids: vector of string &log &default=string_vec(); - }; - - ## Default file handle provider for SMTP. 
- global get_file_handle: function(c: connection, is_orig: bool): string; - - ## Default file describer for SMTP. - global describe_file: function(f: fa_file): string; -} - -function get_file_handle(c: connection, is_orig: bool): string - { - return cat(Analyzer::ANALYZER_SMTP, c$start_time, c$smtp$trans_depth, - c$smtp_state$mime_depth); - } - -function describe_file(f: fa_file): string - { - # This shouldn't be needed, but just in case... - if ( f$source != "SMTP" ) - return ""; - - for ( cid, c in f$conns ) - { - return SMTP::describe(c$smtp); - } - return ""; - } - -event bro_init() &priority=5 - { - Files::register_protocol(Analyzer::ANALYZER_SMTP, - [$get_file_handle = SMTP::get_file_handle, - $describe = SMTP::describe_file]); - } - -event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5 - { - if ( c?$smtp && !c$smtp$tls ) - c$smtp$fuids += f$id; - } diff --git a/scripts/base/protocols/smtp/files.zeek b/scripts/base/protocols/smtp/files.zeek new file mode 100644 index 0000000000..cb38c27c97 --- /dev/null +++ b/scripts/base/protocols/smtp/files.zeek @@ -0,0 +1,52 @@ +@load ./main +@load ./entities +@load base/utils/conn-ids +@load base/frameworks/files + +module SMTP; + +export { + redef record Info += { + ## An ordered vector of file unique IDs seen attached to + ## the message. + fuids: vector of string &log &default=string_vec(); + }; + + ## Default file handle provider for SMTP. + global get_file_handle: function(c: connection, is_orig: bool): string; + + ## Default file describer for SMTP. + global describe_file: function(f: fa_file): string; +} + +function get_file_handle(c: connection, is_orig: bool): string + { + return cat(Analyzer::ANALYZER_SMTP, c$start_time, c$smtp$trans_depth, + c$smtp_state$mime_depth); + } + +function describe_file(f: fa_file): string + { + # This shouldn't be needed, but just in case... + if ( f$source != "SMTP" ) + return ""; + + for ( cid, c in f$conns ) + { + return SMTP::describe(c$smtp); + } + return ""; + } + +event zeek_init() &priority=5 + { + Files::register_protocol(Analyzer::ANALYZER_SMTP, + [$get_file_handle = SMTP::get_file_handle, + $describe = SMTP::describe_file]); + } + +event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5 + { + if ( c?$smtp && !c$smtp$tls ) + c$smtp$fuids += f$id; + } diff --git a/scripts/base/protocols/smtp/main.bro b/scripts/base/protocols/smtp/main.bro deleted file mode 100644 index faa73d2412..0000000000 --- a/scripts/base/protocols/smtp/main.bro +++ /dev/null @@ -1,340 +0,0 @@ -@load base/utils/addrs -@load base/utils/directions-and-hosts -@load base/utils/email - -module SMTP; - -export { - redef enum Log::ID += { LOG }; - - type Info: record { - ## Time when the message was first seen. - ts: time &log; - ## Unique ID for the connection. - uid: string &log; - ## The connection's 4-tuple of endpoint addresses/ports. - id: conn_id &log; - ## A count to represent the depth of this message transaction in - ## a single connection where multiple messages were transferred. - trans_depth: count &log; - ## Contents of the Helo header. - helo: string &log &optional; - ## Email addresses found in the From header. - mailfrom: string &log &optional; - ## Email addresses found in the Rcpt header. - rcptto: set[string] &log &optional; - ## Contents of the Date header. - date: string &log &optional; - ## Contents of the From header. - from: string &log &optional; - ## Contents of the To header. - to: set[string] &log &optional; - ## Contents of the CC header. 
- cc: set[string] &log &optional; - ## Contents of the ReplyTo header. - reply_to: string &log &optional; - ## Contents of the MsgID header. - msg_id: string &log &optional; - ## Contents of the In-Reply-To header. - in_reply_to: string &log &optional; - ## Contents of the Subject header. - subject: string &log &optional; - ## Contents of the X-Originating-IP header. - x_originating_ip: addr &log &optional; - ## Contents of the first Received header. - first_received: string &log &optional; - ## Contents of the second Received header. - second_received: string &log &optional; - ## The last message that the server sent to the client. - last_reply: string &log &optional; - ## The message transmission path, as extracted from the headers. - path: vector of addr &log &optional; - ## Value of the User-Agent header from the client. - user_agent: string &log &optional; - - ## Indicates that the connection has switched to using TLS. - tls: bool &log &default=F; - ## Indicates if the "Received: from" headers should still be - ## processed. - process_received_from: bool &default=T; - ## Indicates if client activity has been seen, but not yet logged. - has_client_activity: bool &default=F; - }; - - type State: record { - helo: string &optional; - ## Count the number of individual messages transmitted during - ## this SMTP session. Note, this is not the number of - ## recipients, but the number of message bodies transferred. - messages_transferred: count &default=0; - - pending_messages: set[Info] &optional; - }; - - ## Direction to capture the full "Received from" path. - ## REMOTE_HOSTS - only capture the path until an internal host is found. - ## LOCAL_HOSTS - only capture the path until the external host is discovered. - ## ALL_HOSTS - always capture the entire path. - ## NO_HOSTS - never capture the path. - option mail_path_capture = ALL_HOSTS; - - ## Create an extremely shortened representation of a log line. - global describe: function(rec: Info): string; - - global log_smtp: event(rec: Info); -} - -redef record connection += { - smtp: Info &optional; - smtp_state: State &optional; -}; - -const ports = { 25/tcp, 587/tcp }; -redef likely_server_ports += { ports }; - -event bro_init() &priority=5 - { - Log::create_stream(SMTP::LOG, [$columns=SMTP::Info, $ev=log_smtp, $path="smtp"]); - Analyzer::register_for_ports(Analyzer::ANALYZER_SMTP, ports); - } - -function find_address_in_smtp_header(header: string): string - { - local ips = extract_ip_addresses(header); - # If there are more than one IP address found, return the second. - if ( |ips| > 1 ) - return ips[1]; - # Otherwise, return the first. - else if ( |ips| > 0 ) - return ips[0]; - # Otherwise, there wasn't an IP address found. - else - return ""; - } - -function new_smtp_log(c: connection): Info - { - local l: Info; - l$ts=network_time(); - l$uid=c$uid; - l$id=c$id; - # The messages_transferred count isn't incremented until the message is - # finished so we need to increment the count by 1 here. - l$trans_depth = c$smtp_state$messages_transferred+1; - - if ( c$smtp_state?$helo ) - l$helo = c$smtp_state$helo; - - # The path will always end with the hosts involved in this connection. - # The lower values in the vector are the end of the path. - l$path = vector(c$id$resp_h, c$id$orig_h); - - return l; - } - -function set_smtp_session(c: connection) - { - if ( ! c?$smtp_state ) - c$smtp_state = []; - - if ( ! 
c?$smtp ) - c$smtp = new_smtp_log(c); - } - -function smtp_message(c: connection) - { - if ( c$smtp$has_client_activity ) - { - Log::write(SMTP::LOG, c$smtp); - c$smtp = new_smtp_log(c); - } - } - -event smtp_request(c: connection, is_orig: bool, command: string, arg: string) &priority=5 - { - set_smtp_session(c); - local upper_command = to_upper(command); - - if ( upper_command == "HELO" || upper_command == "EHLO" ) - { - c$smtp_state$helo = arg; - c$smtp$helo = arg; - } - - else if ( upper_command == "RCPT" && /^[tT][oO]:/ in arg ) - { - if ( ! c$smtp?$rcptto ) - c$smtp$rcptto = set(); - - local rcptto_addrs = extract_email_addrs_set(arg); - for ( rcptto_addr in rcptto_addrs ) - { - rcptto_addr = gsub(rcptto_addr, /ORCPT=rfc822;?/, ""); - add c$smtp$rcptto[rcptto_addr]; - } - - c$smtp$has_client_activity = T; - } - - else if ( upper_command == "MAIL" && /^[fF][rR][oO][mM]:/ in arg ) - { - # Flush last message in case we didn't see the server's acknowledgement. - smtp_message(c); - - local mailfrom = extract_first_email_addr(arg); - if ( mailfrom != "" ) - c$smtp$mailfrom = mailfrom; - c$smtp$has_client_activity = T; - } - } - -event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string, - msg: string, cont_resp: bool) &priority=5 - { - set_smtp_session(c); - - # This continually overwrites, but we want the last reply, - # so this actually works fine. - c$smtp$last_reply = fmt("%d %s", code, msg); - } - -event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string, - msg: string, cont_resp: bool) &priority=-5 - { - if ( cmd == "." ) - { - # Track the number of messages seen in this session. - ++c$smtp_state$messages_transferred; - smtp_message(c); - c$smtp = new_smtp_log(c); - } - } - -event mime_one_header(c: connection, h: mime_header_rec) &priority=5 - { - if ( ! c?$smtp ) return; - - if ( h$name == "MESSAGE-ID" ) - c$smtp$msg_id = h$value; - - else if ( h$name == "RECEIVED" ) - { - if ( c$smtp?$first_received ) - c$smtp$second_received = c$smtp$first_received; - c$smtp$first_received = h$value; - } - - else if ( h$name == "IN-REPLY-TO" ) - c$smtp$in_reply_to = h$value; - - else if ( h$name == "SUBJECT" ) - c$smtp$subject = h$value; - - else if ( h$name == "FROM" ) - c$smtp$from = h$value; - - else if ( h$name == "REPLY-TO" ) - c$smtp$reply_to = h$value; - - else if ( h$name == "DATE" ) - c$smtp$date = h$value; - - else if ( h$name == "TO" ) - { - if ( ! c$smtp?$to ) - c$smtp$to = set(); - - local to_email_addrs = split_mime_email_addresses(h$value); - for ( to_email_addr in to_email_addrs ) - { - add c$smtp$to[to_email_addr]; - } - } - - else if ( h$name == "CC" ) - { - if ( ! c$smtp?$cc ) - c$smtp$cc = set(); - - local cc_parts = split_mime_email_addresses(h$value); - for ( cc_part in cc_parts ) - add c$smtp$cc[cc_part]; - } - - else if ( h$name == "X-ORIGINATING-IP" ) - { - local addresses = extract_ip_addresses(h$value); - if ( 0 in addresses ) - c$smtp$x_originating_ip = to_addr(addresses[0]); - } - - else if ( h$name == "X-MAILER" || - h$name == "USER-AGENT" || - h$name == "X-USER-AGENT" ) - c$smtp$user_agent = h$value; - } - -# This event handler builds the "Received From" path by reading the -# headers in the mail -event mime_one_header(c: connection, h: mime_header_rec) &priority=3 - { - # If we've decided that we're done watching the received headers for - # whatever reason, we're done. Could be due to only watching until - # local addresses are seen in the received from headers. - if ( ! c?$smtp || h$name != "RECEIVED" || ! 
c$smtp$process_received_from ) - return; - - local text_ip = find_address_in_smtp_header(h$value); - if ( text_ip == "" ) - return; - local ip = to_addr(text_ip); - - if ( ! addr_matches_host(ip, mail_path_capture) && - ! Site::is_private_addr(ip) ) - { - c$smtp$process_received_from = F; - } - if ( c$smtp$path[|c$smtp$path|-1] != ip ) - c$smtp$path += ip; - } - -event connection_state_remove(c: connection) &priority=-5 - { - if ( c?$smtp ) - smtp_message(c); - } - -event smtp_starttls(c: connection) &priority=5 - { - if ( c?$smtp ) - { - c$smtp$tls = T; - c$smtp$has_client_activity = T; - } - } - -function describe(rec: Info): string - { - if ( rec?$mailfrom && rec?$rcptto ) - { - local one_to = ""; - for ( email in rec$rcptto ) - { - one_to = email; - break; - } - local abbrev_subject = ""; - if ( rec?$subject ) - { - if ( |rec$subject| > 20 ) - { - abbrev_subject = rec$subject[0:21] + "..."; - } - } - - return fmt("%s -> %s%s%s", rec$mailfrom, one_to, - (|rec$rcptto|>1 ? fmt(" (plus %d others)", |rec$rcptto|-1) : ""), - (abbrev_subject != "" ? fmt(": %s", abbrev_subject) : "")); - } - return ""; - } diff --git a/scripts/base/protocols/smtp/main.zeek b/scripts/base/protocols/smtp/main.zeek new file mode 100644 index 0000000000..b13bbadb8d --- /dev/null +++ b/scripts/base/protocols/smtp/main.zeek @@ -0,0 +1,340 @@ +@load base/utils/addrs +@load base/utils/directions-and-hosts +@load base/utils/email + +module SMTP; + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + ## Time when the message was first seen. + ts: time &log; + ## Unique ID for the connection. + uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. + id: conn_id &log; + ## A count to represent the depth of this message transaction in + ## a single connection where multiple messages were transferred. + trans_depth: count &log; + ## Contents of the Helo header. + helo: string &log &optional; + ## Email addresses found in the From header. + mailfrom: string &log &optional; + ## Email addresses found in the Rcpt header. + rcptto: set[string] &log &optional; + ## Contents of the Date header. + date: string &log &optional; + ## Contents of the From header. + from: string &log &optional; + ## Contents of the To header. + to: set[string] &log &optional; + ## Contents of the CC header. + cc: set[string] &log &optional; + ## Contents of the ReplyTo header. + reply_to: string &log &optional; + ## Contents of the MsgID header. + msg_id: string &log &optional; + ## Contents of the In-Reply-To header. + in_reply_to: string &log &optional; + ## Contents of the Subject header. + subject: string &log &optional; + ## Contents of the X-Originating-IP header. + x_originating_ip: addr &log &optional; + ## Contents of the first Received header. + first_received: string &log &optional; + ## Contents of the second Received header. + second_received: string &log &optional; + ## The last message that the server sent to the client. + last_reply: string &log &optional; + ## The message transmission path, as extracted from the headers. + path: vector of addr &log &optional; + ## Value of the User-Agent header from the client. + user_agent: string &log &optional; + + ## Indicates that the connection has switched to using TLS. + tls: bool &log &default=F; + ## Indicates if the "Received: from" headers should still be + ## processed. + process_received_from: bool &default=T; + ## Indicates if client activity has been seen, but not yet logged. 
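# Illustrative note, not part of this patch: this flag is what gates the
# Log::write() call in smtp_message() below, so transactions in which the
# client never issued a command are not logged.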
+ has_client_activity: bool &default=F; + }; + + type State: record { + helo: string &optional; + ## Count the number of individual messages transmitted during + ## this SMTP session. Note, this is not the number of + ## recipients, but the number of message bodies transferred. + messages_transferred: count &default=0; + + pending_messages: set[Info] &optional; + }; + + ## Direction to capture the full "Received from" path. + ## REMOTE_HOSTS - only capture the path until an internal host is found. + ## LOCAL_HOSTS - only capture the path until the external host is discovered. + ## ALL_HOSTS - always capture the entire path. + ## NO_HOSTS - never capture the path. + option mail_path_capture = ALL_HOSTS; + + ## Create an extremely shortened representation of a log line. + global describe: function(rec: Info): string; + + global log_smtp: event(rec: Info); +} + +redef record connection += { + smtp: Info &optional; + smtp_state: State &optional; +}; + +const ports = { 25/tcp, 587/tcp }; +redef likely_server_ports += { ports }; + +event zeek_init() &priority=5 + { + Log::create_stream(SMTP::LOG, [$columns=SMTP::Info, $ev=log_smtp, $path="smtp"]); + Analyzer::register_for_ports(Analyzer::ANALYZER_SMTP, ports); + } + +function find_address_in_smtp_header(header: string): string + { + local ips = extract_ip_addresses(header); + # If there are more than one IP address found, return the second. + if ( |ips| > 1 ) + return ips[1]; + # Otherwise, return the first. + else if ( |ips| > 0 ) + return ips[0]; + # Otherwise, there wasn't an IP address found. + else + return ""; + } + +function new_smtp_log(c: connection): Info + { + local l: Info; + l$ts=network_time(); + l$uid=c$uid; + l$id=c$id; + # The messages_transferred count isn't incremented until the message is + # finished so we need to increment the count by 1 here. + l$trans_depth = c$smtp_state$messages_transferred+1; + + if ( c$smtp_state?$helo ) + l$helo = c$smtp_state$helo; + + # The path will always end with the hosts involved in this connection. + # The lower values in the vector are the end of the path. + l$path = vector(c$id$resp_h, c$id$orig_h); + + return l; + } + +function set_smtp_session(c: connection) + { + if ( ! c?$smtp_state ) + c$smtp_state = []; + + if ( ! c?$smtp ) + c$smtp = new_smtp_log(c); + } + +function smtp_message(c: connection) + { + if ( c$smtp$has_client_activity ) + { + Log::write(SMTP::LOG, c$smtp); + c$smtp = new_smtp_log(c); + } + } + +event smtp_request(c: connection, is_orig: bool, command: string, arg: string) &priority=5 + { + set_smtp_session(c); + local upper_command = to_upper(command); + + if ( upper_command == "HELO" || upper_command == "EHLO" ) + { + c$smtp_state$helo = arg; + c$smtp$helo = arg; + } + + else if ( upper_command == "RCPT" && /^[tT][oO]:/ in arg ) + { + if ( ! c$smtp?$rcptto ) + c$smtp$rcptto = set(); + + local rcptto_addrs = extract_email_addrs_set(arg); + for ( rcptto_addr in rcptto_addrs ) + { + rcptto_addr = gsub(rcptto_addr, /ORCPT=rfc822;?/, ""); + add c$smtp$rcptto[rcptto_addr]; + } + + c$smtp$has_client_activity = T; + } + + else if ( upper_command == "MAIL" && /^[fF][rR][oO][mM]:/ in arg ) + { + # Flush last message in case we didn't see the server's acknowledgement. 
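# Illustrative note, not part of this patch: smtp_message() only writes a log
# entry when has_client_activity is set, so calling it on a fresh transaction
# is a harmless no-op.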
+ smtp_message(c); + + local mailfrom = extract_first_email_addr(arg); + if ( mailfrom != "" ) + c$smtp$mailfrom = mailfrom; + c$smtp$has_client_activity = T; + } + } + +event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string, + msg: string, cont_resp: bool) &priority=5 + { + set_smtp_session(c); + + # This continually overwrites, but we want the last reply, + # so this actually works fine. + c$smtp$last_reply = fmt("%d %s", code, msg); + } + +event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string, + msg: string, cont_resp: bool) &priority=-5 + { + if ( cmd == "." ) + { + # Track the number of messages seen in this session. + ++c$smtp_state$messages_transferred; + smtp_message(c); + c$smtp = new_smtp_log(c); + } + } + +event mime_one_header(c: connection, h: mime_header_rec) &priority=5 + { + if ( ! c?$smtp ) return; + + if ( h$name == "MESSAGE-ID" ) + c$smtp$msg_id = h$value; + + else if ( h$name == "RECEIVED" ) + { + if ( c$smtp?$first_received ) + c$smtp$second_received = c$smtp$first_received; + c$smtp$first_received = h$value; + } + + else if ( h$name == "IN-REPLY-TO" ) + c$smtp$in_reply_to = h$value; + + else if ( h$name == "SUBJECT" ) + c$smtp$subject = h$value; + + else if ( h$name == "FROM" ) + c$smtp$from = h$value; + + else if ( h$name == "REPLY-TO" ) + c$smtp$reply_to = h$value; + + else if ( h$name == "DATE" ) + c$smtp$date = h$value; + + else if ( h$name == "TO" ) + { + if ( ! c$smtp?$to ) + c$smtp$to = set(); + + local to_email_addrs = split_mime_email_addresses(h$value); + for ( to_email_addr in to_email_addrs ) + { + add c$smtp$to[to_email_addr]; + } + } + + else if ( h$name == "CC" ) + { + if ( ! c$smtp?$cc ) + c$smtp$cc = set(); + + local cc_parts = split_mime_email_addresses(h$value); + for ( cc_part in cc_parts ) + add c$smtp$cc[cc_part]; + } + + else if ( h$name == "X-ORIGINATING-IP" ) + { + local addresses = extract_ip_addresses(h$value); + if ( 0 in addresses ) + c$smtp$x_originating_ip = to_addr(addresses[0]); + } + + else if ( h$name == "X-MAILER" || + h$name == "USER-AGENT" || + h$name == "X-USER-AGENT" ) + c$smtp$user_agent = h$value; + } + +# This event handler builds the "Received From" path by reading the +# headers in the mail +event mime_one_header(c: connection, h: mime_header_rec) &priority=3 + { + # If we've decided that we're done watching the received headers for + # whatever reason, we're done. Could be due to only watching until + # local addresses are seen in the received from headers. + if ( ! c?$smtp || h$name != "RECEIVED" || ! c$smtp$process_received_from ) + return; + + local text_ip = find_address_in_smtp_header(h$value); + if ( text_ip == "" ) + return; + local ip = to_addr(text_ip); + + if ( ! addr_matches_host(ip, mail_path_capture) && + ! Site::is_private_addr(ip) ) + { + c$smtp$process_received_from = F; + } + if ( c$smtp$path[|c$smtp$path|-1] != ip ) + c$smtp$path += ip; + } + +event connection_state_remove(c: connection) &priority=-5 + { + if ( c?$smtp ) + smtp_message(c); + } + +event smtp_starttls(c: connection) &priority=5 + { + if ( c?$smtp ) + { + c$smtp$tls = T; + c$smtp$has_client_activity = T; + } + } + +function describe(rec: Info): string + { + if ( rec?$mailfrom && rec?$rcptto ) + { + local one_to = ""; + for ( email in rec$rcptto ) + { + one_to = email; + break; + } + local abbrev_subject = ""; + if ( rec?$subject ) + { + if ( |rec$subject| > 20 ) + { + abbrev_subject = rec$subject[0:21] + "..."; + } + } + + return fmt("%s -> %s%s%s", rec$mailfrom, one_to, + (|rec$rcptto|>1 ? 
fmt(" (plus %d others)", |rec$rcptto|-1) : ""), + (abbrev_subject != "" ? fmt(": %s", abbrev_subject) : "")); + } + return ""; + } diff --git a/scripts/base/protocols/radius/__load__.bro b/scripts/base/protocols/snmp/__load__.zeek similarity index 100% rename from scripts/base/protocols/radius/__load__.bro rename to scripts/base/protocols/snmp/__load__.zeek diff --git a/scripts/base/protocols/snmp/main.bro b/scripts/base/protocols/snmp/main.bro deleted file mode 100644 index ec45d59440..0000000000 --- a/scripts/base/protocols/snmp/main.bro +++ /dev/null @@ -1,182 +0,0 @@ -##! Enables analysis and logging of SNMP datagrams. - -module SNMP; - -export { - redef enum Log::ID += { LOG }; - - ## Information tracked per SNMP session. - type Info: record { - ## Timestamp of first packet belonging to the SNMP session. - ts: time &log; - ## The unique ID for the connection. - uid: string &log; - ## The connection's 5-tuple of addresses/ports (ports inherently - ## include transport protocol information) - id: conn_id &log; - ## The amount of time between the first packet beloning to - ## the SNMP session and the latest one seen. - duration: interval &log &default=0secs; - ## The version of SNMP being used. - version: string &log; - ## The community string of the first SNMP packet associated with - ## the session. This is used as part of SNMP's (v1 and v2c) - ## administrative/security framework. See :rfc:`1157` or :rfc:`1901`. - community: string &log &optional; - - ## The number of variable bindings in GetRequest/GetNextRequest PDUs - ## seen for the session. - get_requests: count &log &default=0; - ## The number of variable bindings in GetBulkRequest PDUs seen for - ## the session. - get_bulk_requests: count &log &default=0; - ## The number of variable bindings in GetResponse/Response PDUs seen - ## for the session. - get_responses: count &log &default=0; - ## The number of variable bindings in SetRequest PDUs seen for - ## the session. - set_requests: count &log &default=0; - - ## A system description of the SNMP responder endpoint. - display_string: string &log &optional; - ## The time at which the SNMP responder endpoint claims it's been - ## up since. - up_since: time &log &optional; - }; - - ## Maps an SNMP version integer to a human readable string. - const version_map: table[count] of string = { - [0] = "1", - [1] = "2c", - [3] = "3", - } &redef &default="unknown"; - - ## Event that can be handled to access the SNMP record as it is sent on - ## to the logging framework. - global log_snmp: event(rec: Info); -} - -redef record connection += { - snmp: SNMP::Info &optional; -}; - -const ports = { 161/udp, 162/udp }; -redef likely_server_ports += { ports }; - -event bro_init() &priority=5 - { - Analyzer::register_for_ports(Analyzer::ANALYZER_SNMP, ports); - Log::create_stream(SNMP::LOG, [$columns=SNMP::Info, $ev=log_snmp, $path="snmp"]); - } - -function init_state(c: connection, h: SNMP::Header): Info - { - if ( ! c?$snmp ) - { - c$snmp = Info($ts=network_time(), - $uid=c$uid, $id=c$id, - $version=version_map[h$version]); - } - - local s = c$snmp; - - if ( ! 
s?$community ) - { - if ( h?$v1 ) - s$community = h$v1$community; - else if ( h?$v2 ) - s$community = h$v2$community; - } - - s$duration = network_time() - s$ts; - return s; - } - - -event connection_state_remove(c: connection) &priority=-5 - { - if ( c?$snmp ) - Log::write(LOG, c$snmp); - } - -event snmp_get_request(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5 - { - local s = init_state(c, header); - s$get_requests += |pdu$bindings|; - } - -event snmp_get_bulk_request(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::BulkPDU) &priority=5 - { - local s = init_state(c, header); - s$get_bulk_requests += |pdu$bindings|; - } - -event snmp_get_next_request(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5 - { - local s = init_state(c, header); - s$get_requests += |pdu$bindings|; - } - -event snmp_response(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5 - { - local s = init_state(c, header); - s$get_responses += |pdu$bindings|; - - for ( i in pdu$bindings ) - { - local binding = pdu$bindings[i]; - - if ( binding$oid == "1.3.6.1.2.1.1.1.0" && binding$value?$octets ) - c$snmp$display_string = binding$value$octets; - else if ( binding$oid == "1.3.6.1.2.1.1.3.0" && binding$value?$unsigned ) - { - local up_seconds = binding$value$unsigned / 100.0; - s$up_since = network_time() - double_to_interval(up_seconds); - } - } - } - -event snmp_set_request(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5 - { - local s = init_state(c, header); - s$set_requests += |pdu$bindings|; - } - -event snmp_trap(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::TrapPDU) &priority=5 - { - init_state(c, header); - } - -event snmp_inform_request(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5 - { - init_state(c, header); - } - -event snmp_trapV2(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5 - { - init_state(c, header); - } - -event snmp_report(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5 - { - init_state(c, header); - } - -event snmp_unknown_pdu(c: connection, is_orig: bool, header: SNMP::Header, tag: count) &priority=5 - { - init_state(c, header); - } - -event snmp_unknown_scoped_pdu(c: connection, is_orig: bool, header: SNMP::Header, tag: count) &priority=5 - { - init_state(c, header); - } - -event snmp_encrypted_pdu(c: connection, is_orig: bool, header: SNMP::Header) &priority=5 - { - init_state(c, header); - } - -#event snmp_unknown_header_version(c: connection, is_orig: bool, version: count) &priority=5 -# { -# } diff --git a/scripts/base/protocols/snmp/main.zeek b/scripts/base/protocols/snmp/main.zeek new file mode 100644 index 0000000000..606d3e9c76 --- /dev/null +++ b/scripts/base/protocols/snmp/main.zeek @@ -0,0 +1,182 @@ +##! Enables analysis and logging of SNMP datagrams. + +module SNMP; + +export { + redef enum Log::ID += { LOG }; + + ## Information tracked per SNMP session. + type Info: record { + ## Timestamp of first packet belonging to the SNMP session. + ts: time &log; + ## The unique ID for the connection. + uid: string &log; + ## The connection's 5-tuple of addresses/ports (ports inherently + ## include transport protocol information) + id: conn_id &log; + ## The amount of time between the first packet beloning to + ## the SNMP session and the latest one seen. + duration: interval &log &default=0secs; + ## The version of SNMP being used. 
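# Illustrative note, not part of this patch: this is the human-readable form
# produced by SNMP::version_map below ("1", "2c" or "3", with "unknown" as
# the table's &default for unrecognized header versions).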
+ version: string &log; + ## The community string of the first SNMP packet associated with + ## the session. This is used as part of SNMP's (v1 and v2c) + ## administrative/security framework. See :rfc:`1157` or :rfc:`1901`. + community: string &log &optional; + + ## The number of variable bindings in GetRequest/GetNextRequest PDUs + ## seen for the session. + get_requests: count &log &default=0; + ## The number of variable bindings in GetBulkRequest PDUs seen for + ## the session. + get_bulk_requests: count &log &default=0; + ## The number of variable bindings in GetResponse/Response PDUs seen + ## for the session. + get_responses: count &log &default=0; + ## The number of variable bindings in SetRequest PDUs seen for + ## the session. + set_requests: count &log &default=0; + + ## A system description of the SNMP responder endpoint. + display_string: string &log &optional; + ## The time at which the SNMP responder endpoint claims it's been + ## up since. + up_since: time &log &optional; + }; + + ## Maps an SNMP version integer to a human readable string. + const version_map: table[count] of string = { + [0] = "1", + [1] = "2c", + [3] = "3", + } &redef &default="unknown"; + + ## Event that can be handled to access the SNMP record as it is sent on + ## to the logging framework. + global log_snmp: event(rec: Info); +} + +redef record connection += { + snmp: SNMP::Info &optional; +}; + +const ports = { 161/udp, 162/udp }; +redef likely_server_ports += { ports }; + +event zeek_init() &priority=5 + { + Analyzer::register_for_ports(Analyzer::ANALYZER_SNMP, ports); + Log::create_stream(SNMP::LOG, [$columns=SNMP::Info, $ev=log_snmp, $path="snmp"]); + } + +function init_state(c: connection, h: SNMP::Header): Info + { + if ( ! c?$snmp ) + { + c$snmp = Info($ts=network_time(), + $uid=c$uid, $id=c$id, + $version=version_map[h$version]); + } + + local s = c$snmp; + + if ( ! 
s?$community ) + { + if ( h?$v1 ) + s$community = h$v1$community; + else if ( h?$v2 ) + s$community = h$v2$community; + } + + s$duration = network_time() - s$ts; + return s; + } + + +event connection_state_remove(c: connection) &priority=-5 + { + if ( c?$snmp ) + Log::write(LOG, c$snmp); + } + +event snmp_get_request(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5 + { + local s = init_state(c, header); + s$get_requests += |pdu$bindings|; + } + +event snmp_get_bulk_request(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::BulkPDU) &priority=5 + { + local s = init_state(c, header); + s$get_bulk_requests += |pdu$bindings|; + } + +event snmp_get_next_request(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5 + { + local s = init_state(c, header); + s$get_requests += |pdu$bindings|; + } + +event snmp_response(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5 + { + local s = init_state(c, header); + s$get_responses += |pdu$bindings|; + + for ( i in pdu$bindings ) + { + local binding = pdu$bindings[i]; + + if ( binding$oid == "1.3.6.1.2.1.1.1.0" && binding$value?$octets ) + c$snmp$display_string = binding$value$octets; + else if ( binding$oid == "1.3.6.1.2.1.1.3.0" && binding$value?$unsigned ) + { + local up_seconds = binding$value$unsigned / 100.0; + s$up_since = network_time() - double_to_interval(up_seconds); + } + } + } + +event snmp_set_request(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5 + { + local s = init_state(c, header); + s$set_requests += |pdu$bindings|; + } + +event snmp_trap(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::TrapPDU) &priority=5 + { + init_state(c, header); + } + +event snmp_inform_request(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5 + { + init_state(c, header); + } + +event snmp_trapV2(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5 + { + init_state(c, header); + } + +event snmp_report(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5 + { + init_state(c, header); + } + +event snmp_unknown_pdu(c: connection, is_orig: bool, header: SNMP::Header, tag: count) &priority=5 + { + init_state(c, header); + } + +event snmp_unknown_scoped_pdu(c: connection, is_orig: bool, header: SNMP::Header, tag: count) &priority=5 + { + init_state(c, header); + } + +event snmp_encrypted_pdu(c: connection, is_orig: bool, header: SNMP::Header) &priority=5 + { + init_state(c, header); + } + +#event snmp_unknown_header_version(c: connection, is_orig: bool, version: count) &priority=5 +# { +# } diff --git a/scripts/base/protocols/socks/__load__.bro b/scripts/base/protocols/socks/__load__.zeek similarity index 100% rename from scripts/base/protocols/socks/__load__.bro rename to scripts/base/protocols/socks/__load__.zeek diff --git a/scripts/base/protocols/socks/consts.bro b/scripts/base/protocols/socks/consts.zeek similarity index 100% rename from scripts/base/protocols/socks/consts.bro rename to scripts/base/protocols/socks/consts.zeek diff --git a/scripts/base/protocols/socks/main.bro b/scripts/base/protocols/socks/main.bro deleted file mode 100644 index 341b6bbc84..0000000000 --- a/scripts/base/protocols/socks/main.bro +++ /dev/null @@ -1,120 +0,0 @@ -@load base/frameworks/tunnels -@load ./consts - -module SOCKS; - -export { - redef enum Log::ID += { LOG }; - - ## Whether passwords are captured or not. 
- option default_capture_password = F; - - ## The record type which contains the fields of the SOCKS log. - type Info: record { - ## Time when the proxy connection was first detected. - ts: time &log; - ## Unique ID for the tunnel - may correspond to connection uid - ## or be non-existent. - uid: string &log; - ## The connection's 4-tuple of endpoint addresses/ports. - id: conn_id &log; - ## Protocol version of SOCKS. - version: count &log; - ## Username used to request a login to the proxy. - user: string &log &optional; - ## Password used to request a login to the proxy. - password: string &log &optional; - ## Server status for the attempt at using the proxy. - status: string &log &optional; - ## Client requested SOCKS address. Could be an address, a name - ## or both. - request: SOCKS::Address &log &optional; - ## Client requested port. - request_p: port &log &optional; - ## Server bound address. Could be an address, a name or both. - bound: SOCKS::Address &log &optional; - ## Server bound port. - bound_p: port &log &optional; - ## Determines if the password will be captured for this request. - capture_password: bool &default=default_capture_password; - }; - - ## Event that can be handled to access the SOCKS - ## record as it is sent on to the logging framework. - global log_socks: event(rec: Info); -} - -const ports = { 1080/tcp }; -redef likely_server_ports += { ports }; - -event bro_init() &priority=5 - { - Log::create_stream(SOCKS::LOG, [$columns=Info, $ev=log_socks, $path="socks"]); - Analyzer::register_for_ports(Analyzer::ANALYZER_SOCKS, ports); - } - -redef record connection += { - socks: SOCKS::Info &optional; -}; - -function set_session(c: connection, version: count) - { - if ( ! c?$socks ) - c$socks = [$ts=network_time(), $id=c$id, $uid=c$uid, $version=version]; - } - -event socks_request(c: connection, version: count, request_type: count, - sa: SOCKS::Address, p: port, user: string) &priority=5 - { - set_session(c, version); - - c$socks$request = sa; - c$socks$request_p = p; - - # Copy this conn_id and set the orig_p to zero because in the case of SOCKS proxies there will - # be potentially many source ports since a new proxy connection is established for each - # proxied connection. We treat this as a singular "tunnel". - local cid = copy(c$id); - cid$orig_p = 0/tcp; - Tunnel::register([$cid=cid, $tunnel_type=Tunnel::SOCKS]); - } - -event socks_reply(c: connection, version: count, reply: count, sa: SOCKS::Address, p: port) &priority=5 - { - set_session(c, version); - - if ( version == 5 ) - c$socks$status = v5_status[reply]; - else if ( version == 4 ) - c$socks$status = v4_status[reply]; - - c$socks$bound = sa; - c$socks$bound_p = p; - } - -event socks_login_userpass_request(c: connection, user: string, password: string) &priority=5 - { - # Authentication only possible with the version 5. - set_session(c, 5); - - c$socks$user = user; - - if ( c$socks$capture_password ) - c$socks$password = password; - } - -event socks_login_userpass_reply(c: connection, code: count) &priority=5 - { - # Authentication only possible with the version 5. - set_session(c, 5); - - c$socks$status = v5_status[code]; - } - -event connection_state_remove(c: connection) - { - # This will handle the case where the analyzer failed in some way and was - # removed. We probably don't want to log these connections. 
- if ( "SOCKS" in c$service ) - Log::write(SOCKS::LOG, c$socks); - } diff --git a/scripts/base/protocols/socks/main.zeek b/scripts/base/protocols/socks/main.zeek new file mode 100644 index 0000000000..2ca9dfc175 --- /dev/null +++ b/scripts/base/protocols/socks/main.zeek @@ -0,0 +1,120 @@ +@load base/frameworks/tunnels +@load ./consts + +module SOCKS; + +export { + redef enum Log::ID += { LOG }; + + ## Whether passwords are captured or not. + option default_capture_password = F; + + ## The record type which contains the fields of the SOCKS log. + type Info: record { + ## Time when the proxy connection was first detected. + ts: time &log; + ## Unique ID for the tunnel - may correspond to connection uid + ## or be non-existent. + uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. + id: conn_id &log; + ## Protocol version of SOCKS. + version: count &log; + ## Username used to request a login to the proxy. + user: string &log &optional; + ## Password used to request a login to the proxy. + password: string &log &optional; + ## Server status for the attempt at using the proxy. + status: string &log &optional; + ## Client requested SOCKS address. Could be an address, a name + ## or both. + request: SOCKS::Address &log &optional; + ## Client requested port. + request_p: port &log &optional; + ## Server bound address. Could be an address, a name or both. + bound: SOCKS::Address &log &optional; + ## Server bound port. + bound_p: port &log &optional; + ## Determines if the password will be captured for this request. + capture_password: bool &default=default_capture_password; + }; + + ## Event that can be handled to access the SOCKS + ## record as it is sent on to the logging framework. + global log_socks: event(rec: Info); +} + +const ports = { 1080/tcp }; +redef likely_server_ports += { ports }; + +event zeek_init() &priority=5 + { + Log::create_stream(SOCKS::LOG, [$columns=Info, $ev=log_socks, $path="socks"]); + Analyzer::register_for_ports(Analyzer::ANALYZER_SOCKS, ports); + } + +redef record connection += { + socks: SOCKS::Info &optional; +}; + +function set_session(c: connection, version: count) + { + if ( ! c?$socks ) + c$socks = [$ts=network_time(), $id=c$id, $uid=c$uid, $version=version]; + } + +event socks_request(c: connection, version: count, request_type: count, + sa: SOCKS::Address, p: port, user: string) &priority=5 + { + set_session(c, version); + + c$socks$request = sa; + c$socks$request_p = p; + + # Copy this conn_id and set the orig_p to zero because in the case of SOCKS proxies there will + # be potentially many source ports since a new proxy connection is established for each + # proxied connection. We treat this as a singular "tunnel". + local cid = copy(c$id); + cid$orig_p = 0/tcp; + Tunnel::register([$cid=cid, $tunnel_type=Tunnel::SOCKS]); + } + +event socks_reply(c: connection, version: count, reply: count, sa: SOCKS::Address, p: port) &priority=5 + { + set_session(c, version); + + if ( version == 5 ) + c$socks$status = v5_status[reply]; + else if ( version == 4 ) + c$socks$status = v4_status[reply]; + + c$socks$bound = sa; + c$socks$bound_p = p; + } + +event socks_login_userpass_request(c: connection, user: string, password: string) &priority=5 + { + # Authentication only possible with the version 5. + set_session(c, 5); + + c$socks$user = user; + + if ( c$socks$capture_password ) + c$socks$password = password; + } + +event socks_login_userpass_reply(c: connection, code: count) &priority=5 + { + # Authentication only possible with the version 5. 
+ set_session(c, 5); + + c$socks$status = v5_status[code]; + } + +event connection_state_remove(c: connection) + { + # This will handle the case where the analyzer failed in some way and was + # removed. We probably don't want to log these connections. + if ( "SOCKS" in c$service ) + Log::write(SOCKS::LOG, c$socks); + } diff --git a/scripts/base/protocols/ssh/__load__.bro b/scripts/base/protocols/ssh/__load__.zeek similarity index 100% rename from scripts/base/protocols/ssh/__load__.bro rename to scripts/base/protocols/ssh/__load__.zeek diff --git a/scripts/base/protocols/ssh/main.bro b/scripts/base/protocols/ssh/main.bro deleted file mode 100644 index 4452424512..0000000000 --- a/scripts/base/protocols/ssh/main.bro +++ /dev/null @@ -1,313 +0,0 @@ -##! Implements base functionality for SSH analysis. Generates the ssh.log file. - -@load base/utils/directions-and-hosts - -module SSH; - -export { - ## The SSH protocol logging stream identifier. - redef enum Log::ID += { LOG }; - - ## The record type which contains the fields of the SSH log. - type Info: record { - ## Time when the SSH connection began. - ts: time &log; - ## Unique ID for the connection. - uid: string &log; - ## The connection's 4-tuple of endpoint addresses/ports. - id: conn_id &log; - ## SSH major version (1 or 2) - version: count &log; - ## Authentication result (T=success, F=failure, unset=unknown) - auth_success: bool &log &optional; - ## The number of authentication attemps we observed. There's always - ## at least one, since some servers might support no authentication at all. - ## It's important to note that not all of these are failures, since - ## some servers require two-factor auth (e.g. password AND pubkey) - auth_attempts: count &log &default=0; - ## Direction of the connection. If the client was a local host - ## logging into an external host, this would be OUTBOUND. INBOUND - ## would be set for the opposite situation. - # TODO - handle local-local and remote-remote better. - direction: Direction &log &optional; - ## The client's version string - client: string &log &optional; - ## The server's version string - server: string &log &optional; - ## The encryption algorithm in use - cipher_alg: string &log &optional; - ## The signing (MAC) algorithm in use - mac_alg: string &log &optional; - ## The compression algorithm in use - compression_alg: string &log &optional; - ## The key exchange algorithm in use - kex_alg: string &log &optional; - ## The server host key's algorithm - host_key_alg: string &log &optional; - ## The server's key fingerprint - host_key: string &log &optional; - }; - - ## The set of compression algorithms. We can't accurately determine - ## authentication success or failure when compression is enabled. - option compression_algorithms = set("zlib", "zlib@openssh.com"); - - ## If true, after detection detach the SSH analyzer from the connection - ## to prevent continuing to process encrypted traffic. Helps with performance - ## (especially with large file transfers). - option disable_analyzer_after_detection = T; - - ## Event that can be handled to access the SSH record as it is sent on - ## to the logging framework. - global log_ssh: event(rec: Info); -} - -module GLOBAL; -export { - ## This event is generated when an :abbr:`SSH (Secure Shell)` - ## connection was determined to have had a failed authentication. 
This - ## determination is based on packet size analysis, and errs on the - ## side of caution - that is, if there's any doubt about the - ## authentication failure, this event is *not* raised. - ## - ## This event is only raised once per connection. - ## - ## c: The connection over which the :abbr:`SSH (Secure Shell)` - ## connection took place. - ## - ## .. bro:see:: ssh_server_version ssh_client_version - ## ssh_auth_successful ssh_auth_result ssh_auth_attempted - ## ssh_capabilities ssh2_server_host_key ssh1_server_host_key - ## ssh_server_host_key ssh_encrypted_packet ssh2_dh_server_params - ## ssh2_gss_error ssh2_ecc_key - global ssh_auth_failed: event(c: connection); - - ## This event is generated when a determination has been made about - ## the final authentication result of an :abbr:`SSH (Secure Shell)` - ## connection. This determination is based on packet size analysis, - ## and errs on the side of caution - that is, if there's any doubt - ## about the result of the authentication, this event is *not* raised. - ## - ## This event is only raised once per connection. - ## - ## c: The connection over which the :abbr:`SSH (Secure Shell)` - ## connection took place. - ## - ## result: True if the authentication was successful, false if not. - ## - ## auth_attempts: The number of authentication attempts that were - ## observed. - ## - ## .. bro:see:: ssh_server_version ssh_client_version - ## ssh_auth_successful ssh_auth_failed ssh_auth_attempted - ## ssh_capabilities ssh2_server_host_key ssh1_server_host_key - ## ssh_server_host_key ssh_encrypted_packet ssh2_dh_server_params - ## ssh2_gss_error ssh2_ecc_key - global ssh_auth_result: event(c: connection, result: bool, auth_attempts: count); - - ## Event that can be handled when the analyzer sees an SSH server host - ## key. This abstracts :bro:id:`ssh1_server_host_key` and - ## :bro:id:`ssh2_server_host_key`. - ## - ## .. bro:see:: ssh_server_version ssh_client_version - ## ssh_auth_successful ssh_auth_failed ssh_auth_result - ## ssh_auth_attempted ssh_capabilities ssh2_server_host_key - ## ssh1_server_host_key ssh_encrypted_packet ssh2_dh_server_params - ## ssh2_gss_error ssh2_ecc_key - global ssh_server_host_key: event(c: connection, hash: string); -} - -module SSH; - -redef record Info += { - # This connection has been logged (internal use) - logged: bool &default=F; - # Store capabilities from the first host for - # comparison with the second (internal use) - capabilities: Capabilities &optional; - ## Analzyer ID - analyzer_id: count &optional; -}; - -redef record connection += { - ssh: Info &optional; -}; - -const ports = { 22/tcp }; -redef likely_server_ports += { ports }; - -event bro_init() &priority=5 - { - Analyzer::register_for_ports(Analyzer::ANALYZER_SSH, ports); - Log::create_stream(SSH::LOG, [$columns=Info, $ev=log_ssh, $path="ssh"]); - } - -function set_session(c: connection) - { - if ( ! c?$ssh ) - { - local info: SSH::Info; - info$ts = network_time(); - info$uid = c$uid; - info$id = c$id; - - # If both hosts are local or non-local, we can't reliably set a direction. - if ( Site::is_local_addr(c$id$orig_h) != Site::is_local_addr(c$id$resp_h) ) - info$direction = Site::is_local_addr(c$id$orig_h) ? 
OUTBOUND: INBOUND; - c$ssh = info; - } - } - -event ssh_server_version(c: connection, version: string) - { - set_session(c); - c$ssh$server = version; - } - -event ssh_client_version(c: connection, version: string) - { - set_session(c); - c$ssh$client = version; - - if ( ( |version| > 3 ) && ( version[4] == "1" ) ) - c$ssh$version = 1; - if ( ( |version| > 3 ) && ( version[4] == "2" ) ) - c$ssh$version = 2; - } - -event ssh_auth_attempted(c: connection, authenticated: bool) &priority=5 - { - if ( !c?$ssh || ( c$ssh?$auth_success && c$ssh$auth_success ) ) - return; - - # We can't accurately tell for compressed streams - if ( c$ssh?$compression_alg && ( c$ssh$compression_alg in compression_algorithms ) ) - return; - - c$ssh$auth_success = authenticated; - c$ssh$auth_attempts += 1; - - if ( authenticated && disable_analyzer_after_detection ) - disable_analyzer(c$id, c$ssh$analyzer_id); - } - -event ssh_auth_attempted(c: connection, authenticated: bool) &priority=-5 - { - if ( authenticated && c?$ssh && !c$ssh$logged ) - { - event ssh_auth_result(c, authenticated, c$ssh$auth_attempts); - c$ssh$logged = T; - Log::write(SSH::LOG, c$ssh); - } - } - -# Determine the negotiated algorithm -function find_alg(client_algorithms: vector of string, server_algorithms: vector of string): string - { - for ( i in client_algorithms ) - for ( j in server_algorithms ) - if ( client_algorithms[i] == server_algorithms[j] ) - return client_algorithms[i]; - return "Algorithm negotiation failed"; - } - -# This is a simple wrapper around find_alg for cases where client to server and server to client -# negotiate different algorithms. This is rare, but provided for completeness. -function find_bidirectional_alg(client_prefs: Algorithm_Prefs, server_prefs: Algorithm_Prefs): string - { - local c_to_s = find_alg(client_prefs$client_to_server, server_prefs$client_to_server); - local s_to_c = find_alg(client_prefs$server_to_client, server_prefs$server_to_client); - - # Usually these are the same, but if they're not, return the details - return c_to_s == s_to_c ? c_to_s : fmt("To server: %s, to client: %s", c_to_s, s_to_c); - } - -event ssh_capabilities(c: connection, cookie: string, capabilities: Capabilities) - { - if ( !c?$ssh || ( c$ssh?$capabilities && c$ssh$capabilities$is_server == capabilities$is_server ) ) - return; - - if ( !c$ssh?$capabilities ) - { - c$ssh$capabilities = capabilities; - return; - } - - local client_caps = capabilities$is_server ? c$ssh$capabilities : capabilities; - local server_caps = capabilities$is_server ? capabilities : c$ssh$capabilities; - - c$ssh$cipher_alg = find_bidirectional_alg(client_caps$encryption_algorithms, - server_caps$encryption_algorithms); - c$ssh$mac_alg = find_bidirectional_alg(client_caps$mac_algorithms, - server_caps$mac_algorithms); - c$ssh$compression_alg = find_bidirectional_alg(client_caps$compression_algorithms, - server_caps$compression_algorithms); - c$ssh$kex_alg = find_alg(client_caps$kex_algorithms, server_caps$kex_algorithms); - c$ssh$host_key_alg = find_alg(client_caps$server_host_key_algorithms, - server_caps$server_host_key_algorithms); - } - -event connection_state_remove(c: connection) - { - if ( c?$ssh && !c$ssh$logged ) - { - # Do we have enough information to make a determination about auth success? - if ( c$ssh?$client && c$ssh?$server && c$ssh?$auth_success ) - { - # Successes get logged immediately. 
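As an illustrative aside (not part of the patch): the ssh_auth_result event documented earlier in this file is the natural hook for site-specific detection logic; a hypothetical handler with an arbitrary threshold might look like:

event ssh_auth_result(c: connection, result: bool, auth_attempts: count)
	{
	if ( ! result && auth_attempts >= 5 )
		print fmt("possible SSH brute forcing: %s -> %s", c$id$orig_h, c$id$resp_h);
	}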
To protect against a race condition, we'll double check: - if ( c$ssh$auth_success ) - return; - - # Now that we know it's a failure, we'll raise the event. - event ssh_auth_failed(c); - } - # If not, we'll just log what we have - else - { - c$ssh$logged = T; - Log::write(SSH::LOG, c$ssh); - } - } - } - -event ssh_auth_failed(c: connection) &priority=-5 - { - # This should not happen; prevent double-logging just in case - if ( ! c?$ssh || c$ssh$logged ) - return; - - c$ssh$logged = T; - Log::write(SSH::LOG, c$ssh); - - event ssh_auth_result(c, F, c$ssh$auth_attempts); - } - - -function generate_fingerprint(c: connection, key: string) - { - if ( !c?$ssh ) - return; - - local lx = str_split(md5_hash(key), vector(2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30)); - lx[0] = ""; - c$ssh$host_key = sub(join_string_vec(lx, ":"), /:/, ""); - } - -event ssh1_server_host_key(c: connection, p: string, e: string) &priority=5 - { - generate_fingerprint(c, e + p); - } - -event ssh2_server_host_key(c: connection, key: string) &priority=5 - { - generate_fingerprint(c, key); - } - -event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=20 - { - if ( atype == Analyzer::ANALYZER_SSH ) - { - set_session(c); - c$ssh$analyzer_id = aid; - } - } diff --git a/scripts/base/protocols/ssh/main.zeek b/scripts/base/protocols/ssh/main.zeek new file mode 100644 index 0000000000..293c529b6d --- /dev/null +++ b/scripts/base/protocols/ssh/main.zeek @@ -0,0 +1,313 @@ +##! Implements base functionality for SSH analysis. Generates the ssh.log file. + +@load base/utils/directions-and-hosts + +module SSH; + +export { + ## The SSH protocol logging stream identifier. + redef enum Log::ID += { LOG }; + + ## The record type which contains the fields of the SSH log. + type Info: record { + ## Time when the SSH connection began. + ts: time &log; + ## Unique ID for the connection. + uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. + id: conn_id &log; + ## SSH major version (1 or 2) + version: count &log; + ## Authentication result (T=success, F=failure, unset=unknown) + auth_success: bool &log &optional; + ## The number of authentication attemps we observed. There's always + ## at least one, since some servers might support no authentication at all. + ## It's important to note that not all of these are failures, since + ## some servers require two-factor auth (e.g. password AND pubkey) + auth_attempts: count &log &default=0; + ## Direction of the connection. If the client was a local host + ## logging into an external host, this would be OUTBOUND. INBOUND + ## would be set for the opposite situation. + # TODO - handle local-local and remote-remote better. + direction: Direction &log &optional; + ## The client's version string + client: string &log &optional; + ## The server's version string + server: string &log &optional; + ## The encryption algorithm in use + cipher_alg: string &log &optional; + ## The signing (MAC) algorithm in use + mac_alg: string &log &optional; + ## The compression algorithm in use + compression_alg: string &log &optional; + ## The key exchange algorithm in use + kex_alg: string &log &optional; + ## The server host key's algorithm + host_key_alg: string &log &optional; + ## The server's key fingerprint + host_key: string &log &optional; + }; + + ## The set of compression algorithms. We can't accurately determine + ## authentication success or failure when compression is enabled. 
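Illustrative aside (not part of the patch): because the two tunables below are declared as options, a site policy such as local.zeek can adjust them at parse time without editing this file, and the configuration framework can change them at runtime. A minimal sketch, assuming the defaults declared just below:

# Keep the SSH analyzer attached even after authentication has been detected.
redef SSH::disable_analyzer_after_detection = F;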
+ option compression_algorithms = set("zlib", "zlib@openssh.com"); + + ## If true, after detection detach the SSH analyzer from the connection + ## to prevent continuing to process encrypted traffic. Helps with performance + ## (especially with large file transfers). + option disable_analyzer_after_detection = T; + + ## Event that can be handled to access the SSH record as it is sent on + ## to the logging framework. + global log_ssh: event(rec: Info); +} + +module GLOBAL; +export { + ## This event is generated when an :abbr:`SSH (Secure Shell)` + ## connection was determined to have had a failed authentication. This + ## determination is based on packet size analysis, and errs on the + ## side of caution - that is, if there's any doubt about the + ## authentication failure, this event is *not* raised. + ## + ## This event is only raised once per connection. + ## + ## c: The connection over which the :abbr:`SSH (Secure Shell)` + ## connection took place. + ## + ## .. zeek:see:: ssh_server_version ssh_client_version + ## ssh_auth_successful ssh_auth_result ssh_auth_attempted + ## ssh_capabilities ssh2_server_host_key ssh1_server_host_key + ## ssh_server_host_key ssh_encrypted_packet ssh2_dh_server_params + ## ssh2_gss_error ssh2_ecc_key + global ssh_auth_failed: event(c: connection); + + ## This event is generated when a determination has been made about + ## the final authentication result of an :abbr:`SSH (Secure Shell)` + ## connection. This determination is based on packet size analysis, + ## and errs on the side of caution - that is, if there's any doubt + ## about the result of the authentication, this event is *not* raised. + ## + ## This event is only raised once per connection. + ## + ## c: The connection over which the :abbr:`SSH (Secure Shell)` + ## connection took place. + ## + ## result: True if the authentication was successful, false if not. + ## + ## auth_attempts: The number of authentication attempts that were + ## observed. + ## + ## .. zeek:see:: ssh_server_version ssh_client_version + ## ssh_auth_successful ssh_auth_failed ssh_auth_attempted + ## ssh_capabilities ssh2_server_host_key ssh1_server_host_key + ## ssh_server_host_key ssh_encrypted_packet ssh2_dh_server_params + ## ssh2_gss_error ssh2_ecc_key + global ssh_auth_result: event(c: connection, result: bool, auth_attempts: count); + + ## Event that can be handled when the analyzer sees an SSH server host + ## key. This abstracts :zeek:id:`ssh1_server_host_key` and + ## :zeek:id:`ssh2_server_host_key`. + ## + ## .. zeek:see:: ssh_server_version ssh_client_version + ## ssh_auth_successful ssh_auth_failed ssh_auth_result + ## ssh_auth_attempted ssh_capabilities ssh2_server_host_key + ## ssh1_server_host_key ssh_encrypted_packet ssh2_dh_server_params + ## ssh2_gss_error ssh2_ecc_key + global ssh_server_host_key: event(c: connection, hash: string); +} + +module SSH; + +redef record Info += { + # This connection has been logged (internal use) + logged: bool &default=F; + # Store capabilities from the first host for + # comparison with the second (internal use) + capabilities: Capabilities &optional; + ## Analzyer ID + analyzer_id: count &optional; +}; + +redef record connection += { + ssh: Info &optional; +}; + +const ports = { 22/tcp }; +redef likely_server_ports += { ports }; + +event zeek_init() &priority=5 + { + Analyzer::register_for_ports(Analyzer::ANALYZER_SSH, ports); + Log::create_stream(SSH::LOG, [$columns=Info, $ev=log_ssh, $path="ssh"]); + } + +function set_session(c: connection) + { + if ( ! 
c?$ssh ) + { + local info: SSH::Info; + info$ts = network_time(); + info$uid = c$uid; + info$id = c$id; + + # If both hosts are local or non-local, we can't reliably set a direction. + if ( Site::is_local_addr(c$id$orig_h) != Site::is_local_addr(c$id$resp_h) ) + info$direction = Site::is_local_addr(c$id$orig_h) ? OUTBOUND: INBOUND; + c$ssh = info; + } + } + +event ssh_server_version(c: connection, version: string) + { + set_session(c); + c$ssh$server = version; + } + +event ssh_client_version(c: connection, version: string) + { + set_session(c); + c$ssh$client = version; + + if ( ( |version| > 3 ) && ( version[4] == "1" ) ) + c$ssh$version = 1; + if ( ( |version| > 3 ) && ( version[4] == "2" ) ) + c$ssh$version = 2; + } + +event ssh_auth_attempted(c: connection, authenticated: bool) &priority=5 + { + if ( !c?$ssh || ( c$ssh?$auth_success && c$ssh$auth_success ) ) + return; + + # We can't accurately tell for compressed streams + if ( c$ssh?$compression_alg && ( c$ssh$compression_alg in compression_algorithms ) ) + return; + + c$ssh$auth_success = authenticated; + c$ssh$auth_attempts += 1; + + if ( authenticated && disable_analyzer_after_detection ) + disable_analyzer(c$id, c$ssh$analyzer_id); + } + +event ssh_auth_attempted(c: connection, authenticated: bool) &priority=-5 + { + if ( authenticated && c?$ssh && !c$ssh$logged ) + { + event ssh_auth_result(c, authenticated, c$ssh$auth_attempts); + c$ssh$logged = T; + Log::write(SSH::LOG, c$ssh); + } + } + +# Determine the negotiated algorithm +function find_alg(client_algorithms: vector of string, server_algorithms: vector of string): string + { + for ( i in client_algorithms ) + for ( j in server_algorithms ) + if ( client_algorithms[i] == server_algorithms[j] ) + return client_algorithms[i]; + return "Algorithm negotiation failed"; + } + +# This is a simple wrapper around find_alg for cases where client to server and server to client +# negotiate different algorithms. This is rare, but provided for completeness. +function find_bidirectional_alg(client_prefs: Algorithm_Prefs, server_prefs: Algorithm_Prefs): string + { + local c_to_s = find_alg(client_prefs$client_to_server, server_prefs$client_to_server); + local s_to_c = find_alg(client_prefs$server_to_client, server_prefs$server_to_client); + + # Usually these are the same, but if they're not, return the details + return c_to_s == s_to_c ? c_to_s : fmt("To server: %s, to client: %s", c_to_s, s_to_c); + } + +event ssh_capabilities(c: connection, cookie: string, capabilities: Capabilities) + { + if ( !c?$ssh || ( c$ssh?$capabilities && c$ssh$capabilities$is_server == capabilities$is_server ) ) + return; + + if ( !c$ssh?$capabilities ) + { + c$ssh$capabilities = capabilities; + return; + } + + local client_caps = capabilities$is_server ? c$ssh$capabilities : capabilities; + local server_caps = capabilities$is_server ? 
capabilities : c$ssh$capabilities; + + c$ssh$cipher_alg = find_bidirectional_alg(client_caps$encryption_algorithms, + server_caps$encryption_algorithms); + c$ssh$mac_alg = find_bidirectional_alg(client_caps$mac_algorithms, + server_caps$mac_algorithms); + c$ssh$compression_alg = find_bidirectional_alg(client_caps$compression_algorithms, + server_caps$compression_algorithms); + c$ssh$kex_alg = find_alg(client_caps$kex_algorithms, server_caps$kex_algorithms); + c$ssh$host_key_alg = find_alg(client_caps$server_host_key_algorithms, + server_caps$server_host_key_algorithms); + } + +event connection_state_remove(c: connection) + { + if ( c?$ssh && !c$ssh$logged ) + { + # Do we have enough information to make a determination about auth success? + if ( c$ssh?$client && c$ssh?$server && c$ssh?$auth_success ) + { + # Successes get logged immediately. To protect against a race condition, we'll double check: + if ( c$ssh$auth_success ) + return; + + # Now that we know it's a failure, we'll raise the event. + event ssh_auth_failed(c); + } + # If not, we'll just log what we have + else + { + c$ssh$logged = T; + Log::write(SSH::LOG, c$ssh); + } + } + } + +event ssh_auth_failed(c: connection) &priority=-5 + { + # This should not happen; prevent double-logging just in case + if ( ! c?$ssh || c$ssh$logged ) + return; + + c$ssh$logged = T; + Log::write(SSH::LOG, c$ssh); + + event ssh_auth_result(c, F, c$ssh$auth_attempts); + } + + +function generate_fingerprint(c: connection, key: string) + { + if ( !c?$ssh ) + return; + + local lx = str_split(md5_hash(key), vector(2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30)); + lx[0] = ""; + c$ssh$host_key = sub(join_string_vec(lx, ":"), /:/, ""); + } + +event ssh1_server_host_key(c: connection, p: string, e: string) &priority=5 + { + generate_fingerprint(c, e + p); + } + +event ssh2_server_host_key(c: connection, key: string) &priority=5 + { + generate_fingerprint(c, key); + } + +event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=20 + { + if ( atype == Analyzer::ANALYZER_SSH ) + { + set_session(c); + c$ssh$analyzer_id = aid; + } + } diff --git a/scripts/base/protocols/ssl/__load__.bro b/scripts/base/protocols/ssl/__load__.zeek similarity index 100% rename from scripts/base/protocols/ssl/__load__.bro rename to scripts/base/protocols/ssl/__load__.zeek diff --git a/scripts/base/protocols/ssl/consts.bro b/scripts/base/protocols/ssl/consts.bro deleted file mode 100644 index aaac5aab84..0000000000 --- a/scripts/base/protocols/ssl/consts.bro +++ /dev/null @@ -1,1010 +0,0 @@ -module SSL; - -export { - const SSLv2 = 0x0002; - const SSLv3 = 0x0300; - const TLSv10 = 0x0301; - const TLSv11 = 0x0302; - const TLSv12 = 0x0303; - const TLSv13 = 0x0304; - - const DTLSv10 = 0xFEFF; - # DTLSv11 does not exist - const DTLSv12 = 0xFEFD; - - ## Mapping between the constants and string values for SSL/TLS versions. 
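Illustrative aside (not part of the patch; the same table is carried over into the new consts.zeek): the version_strings table declared just below resolves registered versions directly and TLS 1.3 draft versions through its &default function, for example:

event zeek_init()
	{
	print SSL::version_strings[0x0303];  # "TLSv12"
	print SSL::version_strings[0x7F1C];  # "TLSv13-draft28", via the &default function
	}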
- const version_strings: table[count] of string = { - [SSLv2] = "SSLv2", - [SSLv3] = "SSLv3", - [TLSv10] = "TLSv10", - [TLSv11] = "TLSv11", - [TLSv12] = "TLSv12", - [TLSv13] = "TLSv13", - [DTLSv10] = "DTLSv10", - [DTLSv12] = "DTLSv12" - } &default=function(i: count):string - { - if ( i/0xFF == 0x7F ) # TLS 1.3 draft - return fmt("TLSv13-draft%d", i % 0x7F ); - - return fmt("unknown-%d", i); - }; - - # TLS content types: - const CHANGE_CIPHER_SPEC = 20; - const ALERT = 21; - const HANDSHAKE = 22; - const APPLICATION_DATA = 23; - const HEARTBEAT = 24; - const V2_ERROR = 300; - const V2_CLIENT_HELLO = 301; - const V2_CLIENT_MASTER_KEY = 302; - const V2_SERVER_HELLO = 304; - - # TLS Handshake types: - const HELLO_REQUEST = 0; - const CLIENT_HELLO = 1; - const SERVER_HELLO = 2; - const HELLO_VERIFY_REQUEST = 3; # RFC 6347 - const SESSION_TICKET = 4; # RFC 5077 - const HELLO_RETRY_REQUEST = 6; # draft-ietf-tls-tls13-16 - const ENCRYPTED_EXTENSIONS = 8; # draft-ietf-tls-tls13-16 - const CERTIFICATE = 11; - const SERVER_KEY_EXCHANGE = 12; - const CERTIFICATE_REQUEST = 13; - const SERVER_HELLO_DONE = 14; - const CERTIFICATE_VERIFY = 15; - const CLIENT_KEY_EXCHANGE = 16; - const FINISHED = 20; - const CERTIFICATE_URL = 21; # RFC 3546 - const CERTIFICATE_STATUS = 22; # RFC 3546 - const SUPPLEMENTAL_DATA = 23; # RFC 4680 - const KEY_UPDATE = 24; # draft-ietf-tls-tls13-16 - - ## Mapping between numeric codes and human readable strings for alert - ## levels. - const alert_levels: table[count] of string = { - [1] = "warning", - [2] = "fatal", - } &default=function(i: count):string { return fmt("unknown-%d", i); }; - - ## Mapping between numeric codes and human readable strings for hash - ## algorithms. - const hash_algorithms: table[count] of string = { - [0] = "none", - [1] = "md5", - [2] = "sha1", - [3] = "sha224", - [4] = "sha256", - [5] = "sha384", - [6] = "sha512", - } &default=function(i: count):string { return fmt("unknown-%d", i); }; - - ## Mapping between numeric codes and human readable strings for signature - ## algorithms. - const signature_algorithms: table[count] of string = { - [0] = "anonymous", - [1] = "rsa", - [2] = "dsa", - [3] = "ecdsa", - } &default=function(i: count):string { return fmt("unknown-%d", i); }; - - ## Mapping between numeric codes and human readable strings for alert - ## descriptions. 
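One more illustrative aside (not part of the patch): the alert level and description tables in this file are typically consulted when handling the ssl_alert event to render the numeric codes, roughly like:

event ssl_alert(c: connection, is_orig: bool, level: count, desc: count)
	{
	print fmt("TLS alert: %s (%s)", SSL::alert_descriptions[desc], SSL::alert_levels[level]);
	}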
- const alert_descriptions: table[count] of string = { - [0] = "close_notify", - [10] = "unexpected_message", - [20] = "bad_record_mac", - [21] = "decryption_failed", - [22] = "record_overflow", - [30] = "decompression_failure", - [40] = "handshake_failure", - [41] = "no_certificate", - [42] = "bad_certificate", - [43] = "unsupported_certificate", - [44] = "certificate_revoked", - [45] = "certificate_expired", - [46] = "certificate_unknown", - [47] = "illegal_parameter", - [48] = "unknown_ca", - [49] = "access_denied", - [50] = "decode_error", - [51] = "decrypt_error", - [60] = "export_restriction", - [70] = "protocol_version", - [71] = "insufficient_security", - [80] = "internal_error", - [86] = "inappropriate_fallback", - [90] = "user_canceled", - [100] = "no_renegotiation", - [110] = "unsupported_extension", - [111] = "certificate_unobtainable", - [112] = "unrecognized_name", - [113] = "bad_certificate_status_response", - [114] = "bad_certificate_hash_value", - [115] = "unknown_psk_identity", - [120] = "no_application_protocol", - } &default=function(i: count):string { return fmt("unknown-%d", i); }; - - ## Mapping between numeric codes and human readable strings for SSL/TLS - ## extensions. - # More information can be found here: - # http://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xml - const extensions: table[count] of string = { - [0] = "server_name", - [1] = "max_fragment_length", - [2] = "client_certificate_url", - [3] = "trusted_ca_keys", - [4] = "truncated_hmac", - [5] = "status_request", - [6] = "user_mapping", - [7] = "client_authz", - [8] = "server_authz", - [9] = "cert_type", - [10] = "supported_groups", # old name: elliptic_curves - draft-ietf-tls-negotiated-ff-dhe - [11] = "ec_point_formats", - [12] = "srp", - [13] = "signature_algorithms", - [14] = "use_srtp", - [15] = "heartbeat", - [16] = "application_layer_protocol_negotiation", - [17] = "status_request_v2", - [18] = "signed_certificate_timestamp", - [19] = "client_certificate_type", - [20] = "server_certificate_type", - [21] = "padding", - [22] = "encrypt_then_mac", - [23] = "extended_master_secret", - [24] = "token_binding", # temporary till 2017-03-06 - draft-ietf-tokbind-negotiation - [25] = "cached_info", - [35] = "SessionTicket TLS", - [40] = "key_share", # new for TLS 1.3; was used for extended_random before. State as of TLS 1.3 draft 16 - [41] = "pre_shared_key", # new for 1.3, state of draft-16 - [42] = "early_data", # new for 1.3, state of draft-16 - [43] = "supported_versions", # new for 1.3, state of draft-16 - [44] = "cookie", # new for 1.3, state of draft-16 - [45] = "psk_key_exchange_modes", # new for 1.3, state of draft-18 - [46] = "TicketEarlyDataInfo", # new for 1.3, state of draft-16 - [47] = "certificate_authorities", # new for 1.3, state of draft-18 - [48] = "oid_filters", # new for 1.3, state of draft-18 - [13172] = "next_protocol_negotiation", - [13175] = "origin_bound_certificates", - [13180] = "encrypted_client_certificates", - [30031] = "channel_id", - [30032] = "channel_id_new", - [35655] = "padding", - [65281] = "renegotiation_info" - } &default=function(i: count):string { return fmt("unknown-%d", i); }; - - ## Mapping between numeric codes and human readable string for SSL/TLS elliptic curves. 
- # See http://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-8 - const ec_curves: table[count] of string = { - [1] = "sect163k1", # 1-23 are TLS 1.3 obsoleted - [2] = "sect163r1", - [3] = "sect163r2", - [4] = "sect193r1", - [5] = "sect193r2", - [6] = "sect233k1", - [7] = "sect233r1", - [8] = "sect239k1", - [9] = "sect283k1", - [10] = "sect283r1", - [11] = "sect409k1", - [12] = "sect409r1", - [13] = "sect571k1", - [14] = "sect571r1", - [15] = "secp160k1", - [16] = "secp160r1", - [17] = "secp160r2", - [18] = "secp192k1", - [19] = "secp192r1", - [20] = "secp224k1", - [21] = "secp224r1", - [22] = "secp256k1", - [23] = "secp256r1", # TLS 1.3 valid - [24] = "secp384r1", # TLS 1.3 valid - [25] = "secp521r1", # TLS 1.3 valid - [26] = "brainpoolP256r1", # 26-28 are TLS 1.3 obsoleted - [27] = "brainpoolP384r1", - [28] = "brainpoolP512r1", - # Temporary till 2017-01-09 - draft-ietf-tls-rfc4492bis - [29] = "x25519", # TLS 1.3 valid - [30] = "x448", # TLS 1.3 valid - # draft-ietf-tls-negotiated-ff-dhe-10 - [256] = "ffdhe2048", # 256-260 are TLS 1.3 valid - [257] = "ffdhe3072", - [258] = "ffdhe4096", - [259] = "ffdhe6144", - [260] = "ffdhe8192", - [0xFF01] = "arbitrary_explicit_prime_curves", - [0xFF02] = "arbitrary_explicit_char2_curves" - } &default=function(i: count):string { return fmt("unknown-%d", i); }; - - ## Mapping between numeric codes and human readable string for SSL/TLS EC point formats. - # See http://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-9 - const ec_point_formats: table[count] of string = { - [0] = "uncompressed", - [1] = "ansiX962_compressed_prime", - [2] = "ansiX962_compressed_char2" - } &default=function(i: count):string { return fmt("unknown-%d", i); }; - - # SSLv2 - const SSLv20_CK_RC4_128_WITH_MD5 = 0x010080; - const SSLv20_CK_RC4_128_EXPORT40_WITH_MD5 = 0x020080; - const SSLv20_CK_RC2_128_CBC_WITH_MD5 = 0x030080; - const SSLv20_CK_RC2_128_CBC_EXPORT40_WITH_MD5 = 0x040080; - const SSLv20_CK_IDEA_128_CBC_WITH_MD5 = 0x050080; - const SSLv20_CK_DES_64_CBC_WITH_MD5 = 0x060040; - const SSLv20_CK_DES_192_EDE3_CBC_WITH_MD5 = 0x0700C0; - - # TLS - const TLS_NULL_WITH_NULL_NULL = 0x0000; - const TLS_RSA_WITH_NULL_MD5 = 0x0001; - const TLS_RSA_WITH_NULL_SHA = 0x0002; - const TLS_RSA_EXPORT_WITH_RC4_40_MD5 = 0x0003; - const TLS_RSA_WITH_RC4_128_MD5 = 0x0004; - const TLS_RSA_WITH_RC4_128_SHA = 0x0005; - const TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 = 0x0006; - const TLS_RSA_WITH_IDEA_CBC_SHA = 0x0007; - const TLS_RSA_EXPORT_WITH_DES40_CBC_SHA = 0x0008; - const TLS_RSA_WITH_DES_CBC_SHA = 0x0009; - const TLS_RSA_WITH_3DES_EDE_CBC_SHA = 0x000A; - const TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA = 0x000B; - const TLS_DH_DSS_WITH_DES_CBC_SHA = 0x000C; - const TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA = 0x000D; - const TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA = 0x000E; - const TLS_DH_RSA_WITH_DES_CBC_SHA = 0x000F; - const TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA = 0x0010; - const TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA = 0x0011; - const TLS_DHE_DSS_WITH_DES_CBC_SHA = 0x0012; - const TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA = 0x0013; - const TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA = 0x0014; - const TLS_DHE_RSA_WITH_DES_CBC_SHA = 0x0015; - const TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA = 0x0016; - const TLS_DH_ANON_EXPORT_WITH_RC4_40_MD5 = 0x0017; - const TLS_DH_ANON_WITH_RC4_128_MD5 = 0x0018; - const TLS_DH_ANON_EXPORT_WITH_DES40_CBC_SHA = 0x0019; - const TLS_DH_ANON_WITH_DES_CBC_SHA = 0x001A; - const TLS_DH_ANON_WITH_3DES_EDE_CBC_SHA = 0x001B; - const 
SSL_FORTEZZA_KEA_WITH_NULL_SHA = 0x001C; - const SSL_FORTEZZA_KEA_WITH_FORTEZZA_CBC_SHA = 0x001D; - const TLS_KRB5_WITH_DES_CBC_SHA = 0x001E; - const TLS_KRB5_WITH_3DES_EDE_CBC_SHA = 0x001F; - const TLS_KRB5_WITH_RC4_128_SHA = 0x0020; - const TLS_KRB5_WITH_IDEA_CBC_SHA = 0x0021; - const TLS_KRB5_WITH_DES_CBC_MD5 = 0x0022; - const TLS_KRB5_WITH_3DES_EDE_CBC_MD5 = 0x0023; - const TLS_KRB5_WITH_RC4_128_MD5 = 0x0024; - const TLS_KRB5_WITH_IDEA_CBC_MD5 = 0x0025; - const TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA = 0x0026; - const TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA = 0x0027; - const TLS_KRB5_EXPORT_WITH_RC4_40_SHA = 0x0028; - const TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 = 0x0029; - const TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 = 0x002A; - const TLS_KRB5_EXPORT_WITH_RC4_40_MD5 = 0x002B; - const TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F; - const TLS_DH_DSS_WITH_AES_128_CBC_SHA = 0x0030; - const TLS_DH_RSA_WITH_AES_128_CBC_SHA = 0x0031; - const TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032; - const TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033; - const TLS_DH_ANON_WITH_AES_128_CBC_SHA = 0x0034; - const TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035; - const TLS_DH_DSS_WITH_AES_256_CBC_SHA = 0x0036; - const TLS_DH_RSA_WITH_AES_256_CBC_SHA = 0x0037; - const TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038; - const TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039; - const TLS_DH_ANON_WITH_AES_256_CBC_SHA = 0x003A; - const TLS_RSA_WITH_NULL_SHA256 = 0x003B; - const TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C; - const TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D; - const TLS_DH_DSS_WITH_AES_128_CBC_SHA256 = 0x003E; - const TLS_DH_RSA_WITH_AES_128_CBC_SHA256 = 0x003F; - const TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040; - const TLS_RSA_WITH_CAMELLIA_128_CBC_SHA = 0x0041; - const TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA = 0x0042; - const TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA = 0x0043; - const TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA = 0x0044; - const TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA = 0x0045; - const TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA = 0x0046; - const TLS_RSA_EXPORT1024_WITH_RC4_56_MD5 = 0x0060; - const TLS_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5 = 0x0061; - const TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA = 0x0062; - const TLS_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA = 0x0063; - const TLS_RSA_EXPORT1024_WITH_RC4_56_SHA = 0x0064; - const TLS_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA = 0x0065; - const TLS_DHE_DSS_WITH_RC4_128_SHA = 0x0066; - const TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067; - const TLS_DH_DSS_WITH_AES_256_CBC_SHA256 = 0x0068; - const TLS_DH_RSA_WITH_AES_256_CBC_SHA256 = 0x0069; - const TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A; - const TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B; - const TLS_DH_ANON_WITH_AES_128_CBC_SHA256 = 0x006C; - const TLS_DH_ANON_WITH_AES_256_CBC_SHA256 = 0x006D; - # draft-ietf-tls-openpgp-keys-06 - const TLS_DHE_DSS_WITH_3DES_EDE_CBC_RMD = 0x0072; - const TLS_DHE_DSS_WITH_AES_128_CBC_RMD = 0x0073; - const TLS_DHE_DSS_WITH_AES_256_CBC_RMD = 0x0074; - const TLS_DHE_RSA_WITH_3DES_EDE_CBC_RMD = 0x0077; - const TLS_DHE_RSA_WITH_AES_128_CBC_RMD = 0x0078; - const TLS_DHE_RSA_WITH_AES_256_CBC_RMD = 0x0079; - const TLS_RSA_WITH_3DES_EDE_CBC_RMD = 0x007C; - const TLS_RSA_WITH_AES_128_CBC_RMD = 0x007D; - const TLS_RSA_WITH_AES_256_CBC_RMD = 0x007E; - # draft-chudov-cryptopro-cptls-04 - const TLS_GOSTR341094_WITH_28147_CNT_IMIT = 0x0080; - const TLS_GOSTR341001_WITH_28147_CNT_IMIT = 0x0081; - const TLS_GOSTR341094_WITH_NULL_GOSTR3411 = 0x0082; - const TLS_GOSTR341001_WITH_NULL_GOSTR3411 = 0x0083; - const TLS_RSA_WITH_CAMELLIA_256_CBC_SHA = 0x0084; 
- const TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA = 0x0085; - const TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA = 0x0086; - const TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA = 0x0087; - const TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA = 0x0088; - const TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA = 0x0089; - const TLS_PSK_WITH_RC4_128_SHA = 0x008A; - const TLS_PSK_WITH_3DES_EDE_CBC_SHA = 0x008B; - const TLS_PSK_WITH_AES_128_CBC_SHA = 0x008C; - const TLS_PSK_WITH_AES_256_CBC_SHA = 0x008D; - const TLS_DHE_PSK_WITH_RC4_128_SHA = 0x008E; - const TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA = 0x008F; - const TLS_DHE_PSK_WITH_AES_128_CBC_SHA = 0x0090; - const TLS_DHE_PSK_WITH_AES_256_CBC_SHA = 0x0091; - const TLS_RSA_PSK_WITH_RC4_128_SHA = 0x0092; - const TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA = 0x0093; - const TLS_RSA_PSK_WITH_AES_128_CBC_SHA = 0x0094; - const TLS_RSA_PSK_WITH_AES_256_CBC_SHA = 0x0095; - const TLS_RSA_WITH_SEED_CBC_SHA = 0x0096; - const TLS_DH_DSS_WITH_SEED_CBC_SHA = 0x0097; - const TLS_DH_RSA_WITH_SEED_CBC_SHA = 0x0098; - const TLS_DHE_DSS_WITH_SEED_CBC_SHA = 0x0099; - const TLS_DHE_RSA_WITH_SEED_CBC_SHA = 0x009A; - const TLS_DH_ANON_WITH_SEED_CBC_SHA = 0x009B; - const TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C; - const TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D; - const TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E; - const TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F; - const TLS_DH_RSA_WITH_AES_128_GCM_SHA256 = 0x00A0; - const TLS_DH_RSA_WITH_AES_256_GCM_SHA384 = 0x00A1; - const TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2; - const TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3; - const TLS_DH_DSS_WITH_AES_128_GCM_SHA256 = 0x00A4; - const TLS_DH_DSS_WITH_AES_256_GCM_SHA384 = 0x00A5; - const TLS_DH_ANON_WITH_AES_128_GCM_SHA256 = 0x00A6; - const TLS_DH_ANON_WITH_AES_256_GCM_SHA384 = 0x00A7; - const TLS_PSK_WITH_AES_128_GCM_SHA256 = 0x00A8; - const TLS_PSK_WITH_AES_256_GCM_SHA384 = 0x00A9; - const TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 = 0x00AA; - const TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 = 0x00AB; - const TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 = 0x00AC; - const TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 = 0x00AD; - const TLS_PSK_WITH_AES_128_CBC_SHA256 = 0x00AE; - const TLS_PSK_WITH_AES_256_CBC_SHA384 = 0x00AF; - const TLS_PSK_WITH_NULL_SHA256 = 0x00B0; - const TLS_PSK_WITH_NULL_SHA384 = 0x00B1; - const TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 = 0x00B2; - const TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 = 0x00B3; - const TLS_DHE_PSK_WITH_NULL_SHA256 = 0x00B4; - const TLS_DHE_PSK_WITH_NULL_SHA384 = 0x00B5; - const TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 = 0x00B6; - const TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 = 0x00B7; - const TLS_RSA_PSK_WITH_NULL_SHA256 = 0x00B8; - const TLS_RSA_PSK_WITH_NULL_SHA384 = 0x00B9; - const TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BA; - const TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BB; - const TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BC; - const TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BD; - const TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BE; - const TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BF; - const TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C0; - const TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C1; - const TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C2; - const TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C3; - const TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C4; - const TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C5; - # draft-ietf-tls-tls13-16 - const TLS_AES_128_GCM_SHA256 = 0x1301; - const TLS_AES_256_GCM_SHA384 = 0x1302; - const TLS_CHACHA20_POLY1305_SHA256 = 0x1303; - const 
TLS_AES_128_CCM_SHA256 = 0x1304; - const TLS_AES_128_CCM_8_SHA256 = 0x1305; - # Google... - const TLS_CECPQ1_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0x16b7; - const TLS_CECPQ1_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0x16b8; - const TLS_CECPQ1_RSA_WITH_AES_256_GCM_SHA384 = 0x16b9; - const TLS_CECPQ1_ECDSA_WITH_AES_256_GCM_SHA384 = 0x16ba; - # draft-bmoeller-tls-downgrade-scsv-01 - const TLS_FALLBACK_SCSV = 0x5600; - # RFC 4492 - const TLS_ECDH_ECDSA_WITH_NULL_SHA = 0xC001; - const TLS_ECDH_ECDSA_WITH_RC4_128_SHA = 0xC002; - const TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA = 0xC003; - const TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA = 0xC004; - const TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA = 0xC005; - const TLS_ECDHE_ECDSA_WITH_NULL_SHA = 0xC006; - const TLS_ECDHE_ECDSA_WITH_RC4_128_SHA = 0xC007; - const TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA = 0xC008; - const TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009; - const TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A; - const TLS_ECDH_RSA_WITH_NULL_SHA = 0xC00B; - const TLS_ECDH_RSA_WITH_RC4_128_SHA = 0xC00C; - const TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA = 0xC00D; - const TLS_ECDH_RSA_WITH_AES_128_CBC_SHA = 0xC00E; - const TLS_ECDH_RSA_WITH_AES_256_CBC_SHA = 0xC00F; - const TLS_ECDHE_RSA_WITH_NULL_SHA = 0xC010; - const TLS_ECDHE_RSA_WITH_RC4_128_SHA = 0xC011; - const TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA = 0xC012; - const TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013; - const TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014; - const TLS_ECDH_ANON_WITH_NULL_SHA = 0xC015; - const TLS_ECDH_ANON_WITH_RC4_128_SHA = 0xC016; - const TLS_ECDH_ANON_WITH_3DES_EDE_CBC_SHA = 0xC017; - const TLS_ECDH_ANON_WITH_AES_128_CBC_SHA = 0xC018; - const TLS_ECDH_ANON_WITH_AES_256_CBC_SHA = 0xC019; - const TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA = 0xC01A; - const TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA = 0xC01B; - const TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA = 0xC01C; - const TLS_SRP_SHA_WITH_AES_128_CBC_SHA = 0xC01D; - const TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA = 0xC01E; - const TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA = 0xC01F; - const TLS_SRP_SHA_WITH_AES_256_CBC_SHA = 0xC020; - const TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA = 0xC021; - const TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA = 0xC022; - const TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023; - const TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024; - const TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC025; - const TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC026; - const TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027; - const TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028; - const TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 = 0xC029; - const TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 = 0xC02A; - const TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B; - const TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C; - const TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02D; - const TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02E; - const TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F; - const TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030; - const TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 = 0xC031; - const TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 = 0xC032; - const TLS_ECDHE_PSK_WITH_RC4_128_SHA = 0xC033; - const TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA = 0xC034; - const TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA = 0xC035; - const TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA = 0xC036; - const TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 = 0xC037; - const TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 = 0xC038; - const TLS_ECDHE_PSK_WITH_NULL_SHA = 0xC039; - const TLS_ECDHE_PSK_WITH_NULL_SHA256 = 0xC03A; - const TLS_ECDHE_PSK_WITH_NULL_SHA384 
= 0xC03B; - # RFC 6209 - const TLS_RSA_WITH_ARIA_128_CBC_SHA256 = 0xC03C; - const TLS_RSA_WITH_ARIA_256_CBC_SHA384 = 0xC03D; - const TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 = 0xC03E; - const TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 = 0xC03F; - const TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 = 0xC040; - const TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 = 0xC041; - const TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 = 0xC042; - const TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 = 0xC043; - const TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 = 0xC044; - const TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 = 0xC045; - const TLS_DH_ANON_WITH_ARIA_128_CBC_SHA256 = 0xC046; - const TLS_DH_ANON_WITH_ARIA_256_CBC_SHA384 = 0xC047; - const TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 = 0xC048; - const TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 = 0xC049; - const TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 = 0xC04A; - const TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 = 0xC04B; - const TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 = 0xC04C; - const TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 = 0xC04D; - const TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 = 0xC04E; - const TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 = 0xC04F; - const TLS_RSA_WITH_ARIA_128_GCM_SHA256 = 0xC050; - const TLS_RSA_WITH_ARIA_256_GCM_SHA384 = 0xC051; - const TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 = 0xC052; - const TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 = 0xC053; - const TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 = 0xC054; - const TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 = 0xC055; - const TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 = 0xC056; - const TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 = 0xC057; - const TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 = 0xC058; - const TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 = 0xC059; - const TLS_DH_ANON_WITH_ARIA_128_GCM_SHA256 = 0xC05A; - const TLS_DH_ANON_WITH_ARIA_256_GCM_SHA384 = 0xC05B; - const TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 = 0xC05C; - const TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 = 0xC05D; - const TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 = 0xC05E; - const TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 = 0xC05F; - const TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 = 0xC060; - const TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 = 0xC061; - const TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 = 0xC062; - const TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 = 0xC063; - const TLS_PSK_WITH_ARIA_128_CBC_SHA256 = 0xC064; - const TLS_PSK_WITH_ARIA_256_CBC_SHA384 = 0xC065; - const TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 = 0xC066; - const TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 = 0xC067; - const TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 = 0xC068; - const TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 = 0xC069; - const TLS_PSK_WITH_ARIA_128_GCM_SHA256 = 0xC06A; - const TLS_PSK_WITH_ARIA_256_GCM_SHA384 = 0xC06B; - const TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 = 0xC06C; - const TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 = 0xC06D; - const TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 = 0xC06E; - const TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 = 0xC06F; - const TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 = 0xC070; - const TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 = 0xC071; - # RFC 6367 - const TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = 0xC072; - const TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = 0xC073; - const TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = 0xC074; - const TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = 0xC075; - const TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 0xC076; - const TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 = 0xC077; - const TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 0xC078; - const TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 = 0xC079; - const TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 = 
0xC07A; - const TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC07B; - const TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 = 0xC07C; - const TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC07D; - const TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 = 0xC07E; - const TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC07F; - const TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 = 0xC080; - const TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 = 0xC081; - const TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 = 0xC082; - const TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 = 0xC083; - const TLS_DH_ANON_WITH_CAMELLIA_128_GCM_SHA256 = 0xC084; - const TLS_DH_ANON_WITH_CAMELLIA_256_GCM_SHA384 = 0xC085; - const TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 = 0xC086; - const TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC087; - const TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 = 0xC088; - const TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC089; - const TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 = 0xC08A; - const TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC08B; - const TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 = 0xC08C; - const TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC08D; - const TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 = 0xC08E; - const TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 = 0xC08F; - const TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 = 0xC090; - const TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 = 0xC091; - const TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 = 0xC092; - const TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 = 0xC093; - const TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 = 0xC094; - const TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 = 0xC095; - const TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = 0xC096; - const TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = 0xC097; - const TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 = 0xC098; - const TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 = 0xC099; - const TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = 0xC09A; - const TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = 0xC09B; - # RFC 6655 - const TLS_RSA_WITH_AES_128_CCM = 0xC09C; - const TLS_RSA_WITH_AES_256_CCM = 0xC09D; - const TLS_DHE_RSA_WITH_AES_128_CCM = 0xC09E; - const TLS_DHE_RSA_WITH_AES_256_CCM = 0xC09F; - const TLS_RSA_WITH_AES_128_CCM_8 = 0xC0A0; - const TLS_RSA_WITH_AES_256_CCM_8 = 0xC0A1; - const TLS_DHE_RSA_WITH_AES_128_CCM_8 = 0xC0A2; - const TLS_DHE_RSA_WITH_AES_256_CCM_8 = 0xC0A3; - const TLS_PSK_WITH_AES_128_CCM = 0xC0A4; - const TLS_PSK_WITH_AES_256_CCM = 0xC0A5; - const TLS_DHE_PSK_WITH_AES_128_CCM = 0xC0A6; - const TLS_DHE_PSK_WITH_AES_256_CCM = 0xC0A7; - const TLS_PSK_WITH_AES_128_CCM_8 = 0xC0A8; - const TLS_PSK_WITH_AES_256_CCM_8 = 0xC0A9; - const TLS_PSK_DHE_WITH_AES_128_CCM_8 = 0xC0AA; - const TLS_PSK_DHE_WITH_AES_256_CCM_8 = 0xC0AB; - const TLS_ECDHE_ECDSA_WITH_AES_128_CCM = 0xC0AC; - const TLS_ECDHE_ECDSA_WITH_AES_256_CCM = 0xC0AD; - const TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 = 0xC0AE; - const TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 = 0xC0AF; - # draft-agl-tls-chacha20poly1305-02 - const TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256_OLD = 0xCC13; - const TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256_OLD = 0xCC14; - const TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256_OLD = 0xCC15; - # RFC 7905 - const TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA8; - const TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA9; - const TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCAA; - const TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 = 0xCCAB; - const TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = 0xCCAC; - const TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = 0xCCAD; - const 
TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 = 0xCCAE; - # draft-ietf-tls-ecdhe-psk-aead-05 - const TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256 = 0xD001; - const TLS_ECDHE_PSK_WITH_AES_256_GCM_SHA384 = 0xD002; - const TLS_ECDHE_PSK_WITH_AES_128_CCM_8_SHA256 = 0xD003; - const TLS_ECDHE_PSK_WITH_AES_128_CCM_SHA256 = 0xD004; - - const SSL_RSA_FIPS_WITH_DES_CBC_SHA = 0xFEFE; - const SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA = 0xFEFF; - const SSL_RSA_FIPS_WITH_DES_CBC_SHA_2 = 0xFFE1; - const SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA_2 = 0xFFE0; - const SSL_RSA_WITH_RC2_CBC_MD5 = 0xFF80; - const SSL_RSA_WITH_IDEA_CBC_MD5 = 0xFF81; - const SSL_RSA_WITH_DES_CBC_MD5 = 0xFF82; - const SSL_RSA_WITH_3DES_EDE_CBC_MD5 = 0xFF83; - const TLS_EMPTY_RENEGOTIATION_INFO_SCSV = 0x00FF; - - ## This is a table of all known cipher specs. It can be used for - ## detecting unknown ciphers and for converting the cipher spec - ## constants into a human readable format. - const cipher_desc: table[count] of string = { - [SSLv20_CK_RC4_128_EXPORT40_WITH_MD5] = - "SSLv20_CK_RC4_128_EXPORT40_WITH_MD5", - [SSLv20_CK_RC4_128_WITH_MD5] = "SSLv20_CK_RC4_128_WITH_MD5", - [SSLv20_CK_RC2_128_CBC_WITH_MD5] = "SSLv20_CK_RC2_128_CBC_WITH_MD5", - [SSLv20_CK_RC2_128_CBC_EXPORT40_WITH_MD5] = - "SSLv20_CK_RC2_128_CBC_EXPORT40_WITH_MD5", - [SSLv20_CK_IDEA_128_CBC_WITH_MD5] = "SSLv20_CK_IDEA_128_CBC_WITH_MD5", - [SSLv20_CK_DES_192_EDE3_CBC_WITH_MD5] = - "SSLv20_CK_DES_192_EDE3_CBC_WITH_MD5", - [SSLv20_CK_DES_64_CBC_WITH_MD5] = "SSLv20_CK_DES_64_CBC_WITH_MD5", - - [TLS_NULL_WITH_NULL_NULL] = "TLS_NULL_WITH_NULL_NULL", - [TLS_RSA_WITH_NULL_MD5] = "TLS_RSA_WITH_NULL_MD5", - [TLS_RSA_WITH_NULL_SHA] = "TLS_RSA_WITH_NULL_SHA", - [TLS_RSA_EXPORT_WITH_RC4_40_MD5] = "TLS_RSA_EXPORT_WITH_RC4_40_MD5", - [TLS_RSA_WITH_RC4_128_MD5] = "TLS_RSA_WITH_RC4_128_MD5", - [TLS_RSA_WITH_RC4_128_SHA] = "TLS_RSA_WITH_RC4_128_SHA", - [TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5] = "TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5", - [TLS_RSA_WITH_IDEA_CBC_SHA] = "TLS_RSA_WITH_IDEA_CBC_SHA", - [TLS_RSA_EXPORT_WITH_DES40_CBC_SHA] = "TLS_RSA_EXPORT_WITH_DES40_CBC_SHA", - [TLS_RSA_WITH_DES_CBC_SHA] = "TLS_RSA_WITH_DES_CBC_SHA", - [TLS_RSA_WITH_3DES_EDE_CBC_SHA] = "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - [TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA] = "TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA", - [TLS_DH_DSS_WITH_DES_CBC_SHA] = "TLS_DH_DSS_WITH_DES_CBC_SHA", - [TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA] = "TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA", - [TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA] = "TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA", - [TLS_DH_RSA_WITH_DES_CBC_SHA] = "TLS_DH_RSA_WITH_DES_CBC_SHA", - [TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA] = "TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA", - [TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA] = "TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA", - [TLS_DHE_DSS_WITH_DES_CBC_SHA] = "TLS_DHE_DSS_WITH_DES_CBC_SHA", - [TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA] = "TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA", - [TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA] = "TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA", - [TLS_DHE_RSA_WITH_DES_CBC_SHA] = "TLS_DHE_RSA_WITH_DES_CBC_SHA", - [TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA] = "TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA", - [TLS_DH_ANON_EXPORT_WITH_RC4_40_MD5] = "TLS_DH_ANON_EXPORT_WITH_RC4_40_MD5", - [TLS_DH_ANON_WITH_RC4_128_MD5] = "TLS_DH_ANON_WITH_RC4_128_MD5", - [TLS_DH_ANON_EXPORT_WITH_DES40_CBC_SHA] = "TLS_DH_ANON_EXPORT_WITH_DES40_CBC_SHA", - [TLS_DH_ANON_WITH_DES_CBC_SHA] = "TLS_DH_ANON_WITH_DES_CBC_SHA", - [TLS_DH_ANON_WITH_3DES_EDE_CBC_SHA] = "TLS_DH_ANON_WITH_3DES_EDE_CBC_SHA", - [SSL_FORTEZZA_KEA_WITH_NULL_SHA] = 
"SSL_FORTEZZA_KEA_WITH_NULL_SHA", - [SSL_FORTEZZA_KEA_WITH_FORTEZZA_CBC_SHA] = "SSL_FORTEZZA_KEA_WITH_FORTEZZA_CBC_SHA", - [TLS_KRB5_WITH_DES_CBC_SHA] = "TLS_KRB5_WITH_DES_CBC_SHA", - [TLS_KRB5_WITH_3DES_EDE_CBC_SHA] = "TLS_KRB5_WITH_3DES_EDE_CBC_SHA", - [TLS_KRB5_WITH_RC4_128_SHA] = "TLS_KRB5_WITH_RC4_128_SHA", - [TLS_KRB5_WITH_IDEA_CBC_SHA] = "TLS_KRB5_WITH_IDEA_CBC_SHA", - [TLS_KRB5_WITH_DES_CBC_MD5] = "TLS_KRB5_WITH_DES_CBC_MD5", - [TLS_KRB5_WITH_3DES_EDE_CBC_MD5] = "TLS_KRB5_WITH_3DES_EDE_CBC_MD5", - [TLS_KRB5_WITH_RC4_128_MD5] = "TLS_KRB5_WITH_RC4_128_MD5", - [TLS_KRB5_WITH_IDEA_CBC_MD5] = "TLS_KRB5_WITH_IDEA_CBC_MD5", - [TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA] = "TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA", - [TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA] = "TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA", - [TLS_KRB5_EXPORT_WITH_RC4_40_SHA] = "TLS_KRB5_EXPORT_WITH_RC4_40_SHA", - [TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5] = "TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5", - [TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5] = "TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5", - [TLS_KRB5_EXPORT_WITH_RC4_40_MD5] = "TLS_KRB5_EXPORT_WITH_RC4_40_MD5", - [TLS_RSA_WITH_AES_128_CBC_SHA] = "TLS_RSA_WITH_AES_128_CBC_SHA", - [TLS_DH_DSS_WITH_AES_128_CBC_SHA] = "TLS_DH_DSS_WITH_AES_128_CBC_SHA", - [TLS_DH_RSA_WITH_AES_128_CBC_SHA] = "TLS_DH_RSA_WITH_AES_128_CBC_SHA", - [TLS_DHE_DSS_WITH_AES_128_CBC_SHA] = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA", - [TLS_DHE_RSA_WITH_AES_128_CBC_SHA] = "TLS_DHE_RSA_WITH_AES_128_CBC_SHA", - [TLS_DH_ANON_WITH_AES_128_CBC_SHA] = "TLS_DH_ANON_WITH_AES_128_CBC_SHA", - [TLS_RSA_WITH_AES_256_CBC_SHA] = "TLS_RSA_WITH_AES_256_CBC_SHA", - [TLS_DH_DSS_WITH_AES_256_CBC_SHA] = "TLS_DH_DSS_WITH_AES_256_CBC_SHA", - [TLS_DH_RSA_WITH_AES_256_CBC_SHA] = "TLS_DH_RSA_WITH_AES_256_CBC_SHA", - [TLS_DHE_DSS_WITH_AES_256_CBC_SHA] = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA", - [TLS_DHE_RSA_WITH_AES_256_CBC_SHA] = "TLS_DHE_RSA_WITH_AES_256_CBC_SHA", - [TLS_DH_ANON_WITH_AES_256_CBC_SHA] = "TLS_DH_ANON_WITH_AES_256_CBC_SHA", - [TLS_RSA_WITH_NULL_SHA256] = "TLS_RSA_WITH_NULL_SHA256", - [TLS_RSA_WITH_AES_128_CBC_SHA256] = "TLS_RSA_WITH_AES_128_CBC_SHA256", - [TLS_RSA_WITH_AES_256_CBC_SHA256] = "TLS_RSA_WITH_AES_256_CBC_SHA256", - [TLS_DH_DSS_WITH_AES_128_CBC_SHA256] = "TLS_DH_DSS_WITH_AES_128_CBC_SHA256", - [TLS_DH_RSA_WITH_AES_128_CBC_SHA256] = "TLS_DH_RSA_WITH_AES_128_CBC_SHA256", - [TLS_DHE_DSS_WITH_AES_128_CBC_SHA256] = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA256", - [TLS_RSA_WITH_CAMELLIA_128_CBC_SHA] = "TLS_RSA_WITH_CAMELLIA_128_CBC_SHA", - [TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA] = "TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA", - [TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA] = "TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA", - [TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA] = "TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA", - [TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA] = "TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA", - [TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA] = "TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA", - [TLS_RSA_EXPORT1024_WITH_RC4_56_MD5] = "TLS_RSA_EXPORT1024_WITH_RC4_56_MD5", - [TLS_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5] = "TLS_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5", - [TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA] = "TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA", - [TLS_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA] = "TLS_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA", - [TLS_RSA_EXPORT1024_WITH_RC4_56_SHA] = "TLS_RSA_EXPORT1024_WITH_RC4_56_SHA", - [TLS_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA] = "TLS_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA", - [TLS_DHE_DSS_WITH_RC4_128_SHA] = "TLS_DHE_DSS_WITH_RC4_128_SHA", - [TLS_DHE_RSA_WITH_AES_128_CBC_SHA256] = 
"TLS_DHE_RSA_WITH_AES_128_CBC_SHA256", - [TLS_DH_DSS_WITH_AES_256_CBC_SHA256] = "TLS_DH_DSS_WITH_AES_256_CBC_SHA256", - [TLS_DH_RSA_WITH_AES_256_CBC_SHA256] = "TLS_DH_RSA_WITH_AES_256_CBC_SHA256", - [TLS_DHE_DSS_WITH_AES_256_CBC_SHA256] = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA256", - [TLS_DHE_RSA_WITH_AES_256_CBC_SHA256] = "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256", - [TLS_DH_ANON_WITH_AES_128_CBC_SHA256] = "TLS_DH_ANON_WITH_AES_128_CBC_SHA256", - [TLS_DH_ANON_WITH_AES_256_CBC_SHA256] = "TLS_DH_ANON_WITH_AES_256_CBC_SHA256", - [TLS_DHE_DSS_WITH_3DES_EDE_CBC_RMD] = "TLS_DHE_DSS_WITH_3DES_EDE_CBC_RMD", - [TLS_DHE_DSS_WITH_AES_128_CBC_RMD] = "TLS_DHE_DSS_WITH_AES_128_CBC_RMD", - [TLS_DHE_DSS_WITH_AES_256_CBC_RMD] = "TLS_DHE_DSS_WITH_AES_256_CBC_RMD", - [TLS_DHE_RSA_WITH_3DES_EDE_CBC_RMD] = "TLS_DHE_RSA_WITH_3DES_EDE_CBC_RMD", - [TLS_DHE_RSA_WITH_AES_128_CBC_RMD] = "TLS_DHE_RSA_WITH_AES_128_CBC_RMD", - [TLS_DHE_RSA_WITH_AES_256_CBC_RMD] = "TLS_DHE_RSA_WITH_AES_256_CBC_RMD", - [TLS_RSA_WITH_3DES_EDE_CBC_RMD] = "TLS_RSA_WITH_3DES_EDE_CBC_RMD", - [TLS_RSA_WITH_AES_128_CBC_RMD] = "TLS_RSA_WITH_AES_128_CBC_RMD", - [TLS_RSA_WITH_AES_256_CBC_RMD] = "TLS_RSA_WITH_AES_256_CBC_RMD", - [TLS_GOSTR341094_WITH_28147_CNT_IMIT] = "TLS_GOSTR341094_WITH_28147_CNT_IMIT", - [TLS_GOSTR341001_WITH_28147_CNT_IMIT] = "TLS_GOSTR341001_WITH_28147_CNT_IMIT", - [TLS_GOSTR341094_WITH_NULL_GOSTR3411] = "TLS_GOSTR341094_WITH_NULL_GOSTR3411", - [TLS_GOSTR341001_WITH_NULL_GOSTR3411] = "TLS_GOSTR341001_WITH_NULL_GOSTR3411", - [TLS_RSA_WITH_CAMELLIA_256_CBC_SHA] = "TLS_RSA_WITH_CAMELLIA_256_CBC_SHA", - [TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA] = "TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA", - [TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA] = "TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA", - [TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA] = "TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA", - [TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA] = "TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA", - [TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA] = "TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA", - [TLS_PSK_WITH_RC4_128_SHA] = "TLS_PSK_WITH_RC4_128_SHA", - [TLS_PSK_WITH_3DES_EDE_CBC_SHA] = "TLS_PSK_WITH_3DES_EDE_CBC_SHA", - [TLS_PSK_WITH_AES_128_CBC_SHA] = "TLS_PSK_WITH_AES_128_CBC_SHA", - [TLS_PSK_WITH_AES_256_CBC_SHA] = "TLS_PSK_WITH_AES_256_CBC_SHA", - [TLS_DHE_PSK_WITH_RC4_128_SHA] = "TLS_DHE_PSK_WITH_RC4_128_SHA", - [TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA] = "TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA", - [TLS_DHE_PSK_WITH_AES_128_CBC_SHA] = "TLS_DHE_PSK_WITH_AES_128_CBC_SHA", - [TLS_DHE_PSK_WITH_AES_256_CBC_SHA] = "TLS_DHE_PSK_WITH_AES_256_CBC_SHA", - [TLS_RSA_PSK_WITH_RC4_128_SHA] = "TLS_RSA_PSK_WITH_RC4_128_SHA", - [TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA] = "TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA", - [TLS_RSA_PSK_WITH_AES_128_CBC_SHA] = "TLS_RSA_PSK_WITH_AES_128_CBC_SHA", - [TLS_RSA_PSK_WITH_AES_256_CBC_SHA] = "TLS_RSA_PSK_WITH_AES_256_CBC_SHA", - [TLS_RSA_WITH_SEED_CBC_SHA] = "TLS_RSA_WITH_SEED_CBC_SHA", - [TLS_DH_DSS_WITH_SEED_CBC_SHA] = "TLS_DH_DSS_WITH_SEED_CBC_SHA", - [TLS_DH_RSA_WITH_SEED_CBC_SHA] = "TLS_DH_RSA_WITH_SEED_CBC_SHA", - [TLS_DHE_DSS_WITH_SEED_CBC_SHA] = "TLS_DHE_DSS_WITH_SEED_CBC_SHA", - [TLS_DHE_RSA_WITH_SEED_CBC_SHA] = "TLS_DHE_RSA_WITH_SEED_CBC_SHA", - [TLS_DH_ANON_WITH_SEED_CBC_SHA] = "TLS_DH_ANON_WITH_SEED_CBC_SHA", - [TLS_RSA_WITH_AES_128_GCM_SHA256] = "TLS_RSA_WITH_AES_128_GCM_SHA256", - [TLS_RSA_WITH_AES_256_GCM_SHA384] = "TLS_RSA_WITH_AES_256_GCM_SHA384", - [TLS_DHE_RSA_WITH_AES_128_GCM_SHA256] = "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", - [TLS_DHE_RSA_WITH_AES_256_GCM_SHA384] = 
"TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", - [TLS_DH_RSA_WITH_AES_128_GCM_SHA256] = "TLS_DH_RSA_WITH_AES_128_GCM_SHA256", - [TLS_DH_RSA_WITH_AES_256_GCM_SHA384] = "TLS_DH_RSA_WITH_AES_256_GCM_SHA384", - [TLS_DHE_DSS_WITH_AES_128_GCM_SHA256] = "TLS_DHE_DSS_WITH_AES_128_GCM_SHA256", - [TLS_DHE_DSS_WITH_AES_256_GCM_SHA384] = "TLS_DHE_DSS_WITH_AES_256_GCM_SHA384", - [TLS_DH_DSS_WITH_AES_128_GCM_SHA256] = "TLS_DH_DSS_WITH_AES_128_GCM_SHA256", - [TLS_DH_DSS_WITH_AES_256_GCM_SHA384] = "TLS_DH_DSS_WITH_AES_256_GCM_SHA384", - [TLS_DH_ANON_WITH_AES_128_GCM_SHA256] = "TLS_DH_ANON_WITH_AES_128_GCM_SHA256", - [TLS_DH_ANON_WITH_AES_256_GCM_SHA384] = "TLS_DH_ANON_WITH_AES_256_GCM_SHA384", - [TLS_PSK_WITH_AES_128_GCM_SHA256] = "TLS_PSK_WITH_AES_128_GCM_SHA256", - [TLS_PSK_WITH_AES_256_GCM_SHA384] = "TLS_PSK_WITH_AES_256_GCM_SHA384", - [TLS_DHE_PSK_WITH_AES_128_GCM_SHA256] = "TLS_DHE_PSK_WITH_AES_128_GCM_SHA256", - [TLS_DHE_PSK_WITH_AES_256_GCM_SHA384] = "TLS_DHE_PSK_WITH_AES_256_GCM_SHA384", - [TLS_RSA_PSK_WITH_AES_128_GCM_SHA256] = "TLS_RSA_PSK_WITH_AES_128_GCM_SHA256", - [TLS_RSA_PSK_WITH_AES_256_GCM_SHA384] = "TLS_RSA_PSK_WITH_AES_256_GCM_SHA384", - [TLS_PSK_WITH_AES_128_CBC_SHA256] = "TLS_PSK_WITH_AES_128_CBC_SHA256", - [TLS_PSK_WITH_AES_256_CBC_SHA384] = "TLS_PSK_WITH_AES_256_CBC_SHA384", - [TLS_PSK_WITH_NULL_SHA256] = "TLS_PSK_WITH_NULL_SHA256", - [TLS_PSK_WITH_NULL_SHA384] = "TLS_PSK_WITH_NULL_SHA384", - [TLS_DHE_PSK_WITH_AES_128_CBC_SHA256] = "TLS_DHE_PSK_WITH_AES_128_CBC_SHA256", - [TLS_DHE_PSK_WITH_AES_256_CBC_SHA384] = "TLS_DHE_PSK_WITH_AES_256_CBC_SHA384", - [TLS_DHE_PSK_WITH_NULL_SHA256] = "TLS_DHE_PSK_WITH_NULL_SHA256", - [TLS_DHE_PSK_WITH_NULL_SHA384] = "TLS_DHE_PSK_WITH_NULL_SHA384", - [TLS_RSA_PSK_WITH_AES_128_CBC_SHA256] = "TLS_RSA_PSK_WITH_AES_128_CBC_SHA256", - [TLS_RSA_PSK_WITH_AES_256_CBC_SHA384] = "TLS_RSA_PSK_WITH_AES_256_CBC_SHA384", - [TLS_RSA_PSK_WITH_NULL_SHA256] = "TLS_RSA_PSK_WITH_NULL_SHA256", - [TLS_RSA_PSK_WITH_NULL_SHA384] = "TLS_RSA_PSK_WITH_NULL_SHA384", - [TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256", - [TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256", - [TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256", - [TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256", - [TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256", - [TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA256", - [TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256", - [TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256", - [TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256", - [TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256", - [TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256", - [TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA256", - [TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256", - [TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384", - [TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256", - [TLS_AES_128_CCM_SHA256] = "TLS_AES_128_CCM_SHA256", - [TLS_AES_128_CCM_8_SHA256] = "TLS_AES_128_CCM_8_SHA256", - [TLS_CECPQ1_RSA_WITH_CHACHA20_POLY1305_SHA256] = "TLS_CECPQ1_RSA_WITH_CHACHA20_POLY1305_SHA256", - [TLS_CECPQ1_ECDSA_WITH_CHACHA20_POLY1305_SHA256] = 
"TLS_CECPQ1_ECDSA_WITH_CHACHA20_POLY1305_SHA256", - [TLS_CECPQ1_RSA_WITH_AES_256_GCM_SHA384] = "TLS_CECPQ1_RSA_WITH_AES_256_GCM_SHA384", - [TLS_CECPQ1_ECDSA_WITH_AES_256_GCM_SHA384] = "TLS_CECPQ1_ECDSA_WITH_AES_256_GCM_SHA384", - [TLS_FALLBACK_SCSV] = "TLS_FALLBACK_SCSV", - [TLS_ECDH_ECDSA_WITH_NULL_SHA] = "TLS_ECDH_ECDSA_WITH_NULL_SHA", - [TLS_ECDH_ECDSA_WITH_RC4_128_SHA] = "TLS_ECDH_ECDSA_WITH_RC4_128_SHA", - [TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA] = "TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA", - [TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA] = "TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA", - [TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA] = "TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA", - [TLS_ECDHE_ECDSA_WITH_NULL_SHA] = "TLS_ECDHE_ECDSA_WITH_NULL_SHA", - [TLS_ECDHE_ECDSA_WITH_RC4_128_SHA] = "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - [TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA] = "TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA", - [TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA] = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - [TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA] = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - [TLS_ECDH_RSA_WITH_NULL_SHA] = "TLS_ECDH_RSA_WITH_NULL_SHA", - [TLS_ECDH_RSA_WITH_RC4_128_SHA] = "TLS_ECDH_RSA_WITH_RC4_128_SHA", - [TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA] = "TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA", - [TLS_ECDH_RSA_WITH_AES_128_CBC_SHA] = "TLS_ECDH_RSA_WITH_AES_128_CBC_SHA", - [TLS_ECDH_RSA_WITH_AES_256_CBC_SHA] = "TLS_ECDH_RSA_WITH_AES_256_CBC_SHA", - [TLS_ECDHE_RSA_WITH_NULL_SHA] = "TLS_ECDHE_RSA_WITH_NULL_SHA", - [TLS_ECDHE_RSA_WITH_RC4_128_SHA] = "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - [TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA] = "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - [TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA] = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - [TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - [TLS_ECDH_ANON_WITH_NULL_SHA] = "TLS_ECDH_ANON_WITH_NULL_SHA", - [TLS_ECDH_ANON_WITH_RC4_128_SHA] = "TLS_ECDH_ANON_WITH_RC4_128_SHA", - [TLS_ECDH_ANON_WITH_3DES_EDE_CBC_SHA] = "TLS_ECDH_ANON_WITH_3DES_EDE_CBC_SHA", - [TLS_ECDH_ANON_WITH_AES_128_CBC_SHA] = "TLS_ECDH_ANON_WITH_AES_128_CBC_SHA", - [TLS_ECDH_ANON_WITH_AES_256_CBC_SHA] = "TLS_ECDH_ANON_WITH_AES_256_CBC_SHA", - [TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA] = "TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA", - [TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA] = "TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA", - [TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA] = "TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA", - [TLS_SRP_SHA_WITH_AES_128_CBC_SHA] = "TLS_SRP_SHA_WITH_AES_128_CBC_SHA", - [TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA] = "TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA", - [TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA] = "TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA", - [TLS_SRP_SHA_WITH_AES_256_CBC_SHA] = "TLS_SRP_SHA_WITH_AES_256_CBC_SHA", - [TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA] = "TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA", - [TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA] = "TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA", - [TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - [TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384] = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", - [TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256", - [TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384] = "TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384", - [TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - [TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384] = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", - [TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256", - [TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384] = 
"TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384", - [TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256] = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - [TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384] = "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - [TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256] = "TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256", - [TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384] = "TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384", - [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256] = "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - [TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384] = "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - [TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256] = "TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256", - [TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384] = "TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384", - [TLS_ECDHE_PSK_WITH_RC4_128_SHA] = "TLS_ECDHE_PSK_WITH_RC4_128_SHA", - [TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA] = "TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA", - [TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA] = "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA", - [TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA] = "TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA", - [TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256", - [TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384] = "TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384", - [TLS_ECDHE_PSK_WITH_NULL_SHA] = "TLS_ECDHE_PSK_WITH_NULL_SHA", - [TLS_ECDHE_PSK_WITH_NULL_SHA256] = "TLS_ECDHE_PSK_WITH_NULL_SHA256", - [TLS_ECDHE_PSK_WITH_NULL_SHA384] = "TLS_ECDHE_PSK_WITH_NULL_SHA384", - [TLS_RSA_WITH_ARIA_128_CBC_SHA256] = "TLS_RSA_WITH_ARIA_128_CBC_SHA256", - [TLS_RSA_WITH_ARIA_256_CBC_SHA384] = "TLS_RSA_WITH_ARIA_256_CBC_SHA384", - [TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256] = "TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256", - [TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384] = "TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384", - [TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256] = "TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256", - [TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384] = "TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384", - [TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256] = "TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256", - [TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384] = "TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384", - [TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256] = "TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256", - [TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384] = "TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384", - [TLS_DH_ANON_WITH_ARIA_128_CBC_SHA256] = "TLS_DH_ANON_WITH_ARIA_128_CBC_SHA256", - [TLS_DH_ANON_WITH_ARIA_256_CBC_SHA384] = "TLS_DH_ANON_WITH_ARIA_256_CBC_SHA384", - [TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256] = "TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256", - [TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384] = "TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384", - [TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256] = "TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256", - [TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384] = "TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384", - [TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256] = "TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256", - [TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384] = "TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384", - [TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256] = "TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256", - [TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384] = "TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384", - [TLS_RSA_WITH_ARIA_128_GCM_SHA256] = "TLS_RSA_WITH_ARIA_128_GCM_SHA256", - [TLS_RSA_WITH_ARIA_256_GCM_SHA384] = "TLS_RSA_WITH_ARIA_256_GCM_SHA384", - [TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256] = "TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256", - [TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384] = "TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384", - [TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256] = "TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256", - [TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384] = 
"TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384", - [TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256] = "TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256", - [TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384] = "TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384", - [TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256] = "TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256", - [TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384] = "TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384", - [TLS_DH_ANON_WITH_ARIA_128_GCM_SHA256] = "TLS_DH_ANON_WITH_ARIA_128_GCM_SHA256", - [TLS_DH_ANON_WITH_ARIA_256_GCM_SHA384] = "TLS_DH_ANON_WITH_ARIA_256_GCM_SHA384", - [TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256] = "TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256", - [TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384] = "TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384", - [TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256] = "TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256", - [TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384] = "TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384", - [TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256] = "TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256", - [TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384] = "TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384", - [TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256] = "TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256", - [TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384] = "TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384", - [TLS_PSK_WITH_ARIA_128_CBC_SHA256] = "TLS_PSK_WITH_ARIA_128_CBC_SHA256", - [TLS_PSK_WITH_ARIA_256_CBC_SHA384] = "TLS_PSK_WITH_ARIA_256_CBC_SHA384", - [TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256] = "TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256", - [TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384] = "TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384", - [TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256] = "TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256", - [TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384] = "TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384", - [TLS_PSK_WITH_ARIA_128_GCM_SHA256] = "TLS_PSK_WITH_ARIA_128_GCM_SHA256", - [TLS_PSK_WITH_ARIA_256_GCM_SHA384] = "TLS_PSK_WITH_ARIA_256_GCM_SHA384", - [TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256] = "TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256", - [TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384] = "TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384", - [TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256] = "TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256", - [TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384] = "TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384", - [TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256] = "TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256", - [TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384] = "TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384", - [TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256", - [TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384] = "TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384", - [TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256", - [TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384] = "TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384", - [TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256", - [TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384] = "TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384", - [TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256", - [TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384] = "TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384", - [TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256", - [TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384", - [TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256", - [TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384", - [TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256] = 
"TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256", - [TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384", - [TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256", - [TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384", - [TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256", - [TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384", - [TLS_DH_ANON_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_DH_ANON_WITH_CAMELLIA_128_GCM_SHA256", - [TLS_DH_ANON_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_DH_ANON_WITH_CAMELLIA_256_GCM_SHA384", - [TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256", - [TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384", - [TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256", - [TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384", - [TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256", - [TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384", - [TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256", - [TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384", - [TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256", - [TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384", - [TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256", - [TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384", - [TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256", - [TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384", - [TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256", - [TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384] = "TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384", - [TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256", - [TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384] = "TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384", - [TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256", - [TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384] = "TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384", - [TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256", - [TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384] = "TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384", - [TLS_RSA_WITH_AES_128_CCM] = "TLS_RSA_WITH_AES_128_CCM", - [TLS_RSA_WITH_AES_256_CCM] = "TLS_RSA_WITH_AES_256_CCM", - [TLS_DHE_RSA_WITH_AES_128_CCM] = "TLS_DHE_RSA_WITH_AES_128_CCM", - [TLS_DHE_RSA_WITH_AES_256_CCM] = "TLS_DHE_RSA_WITH_AES_256_CCM", - [TLS_RSA_WITH_AES_128_CCM_8] = "TLS_RSA_WITH_AES_128_CCM_8", - [TLS_RSA_WITH_AES_256_CCM_8] = "TLS_RSA_WITH_AES_256_CCM_8", - [TLS_DHE_RSA_WITH_AES_128_CCM_8] = "TLS_DHE_RSA_WITH_AES_128_CCM_8", - [TLS_DHE_RSA_WITH_AES_256_CCM_8] = "TLS_DHE_RSA_WITH_AES_256_CCM_8", - [TLS_PSK_WITH_AES_128_CCM] = "TLS_PSK_WITH_AES_128_CCM", - [TLS_PSK_WITH_AES_256_CCM] = "TLS_PSK_WITH_AES_256_CCM", - [TLS_DHE_PSK_WITH_AES_128_CCM] = "TLS_DHE_PSK_WITH_AES_128_CCM", - [TLS_DHE_PSK_WITH_AES_256_CCM] = "TLS_DHE_PSK_WITH_AES_256_CCM", - [TLS_PSK_WITH_AES_128_CCM_8] = "TLS_PSK_WITH_AES_128_CCM_8", - 
[TLS_PSK_WITH_AES_256_CCM_8] = "TLS_PSK_WITH_AES_256_CCM_8", - [TLS_PSK_DHE_WITH_AES_128_CCM_8] = "TLS_PSK_DHE_WITH_AES_128_CCM_8", - [TLS_PSK_DHE_WITH_AES_256_CCM_8] = "TLS_PSK_DHE_WITH_AES_256_CCM_8", - [TLS_ECDHE_ECDSA_WITH_AES_128_CCM] = "TLS_ECDHE_ECDSA_WITH_AES_128_CCM", - [TLS_ECDHE_ECDSA_WITH_AES_256_CCM] = "TLS_ECDHE_ECDSA_WITH_AES_256_CCM", - [TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8] = "TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8", - [TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8] = "TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8", - [TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256_OLD] = "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256_OLD", - [TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256_OLD] = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256_OLD", - [TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256_OLD] = "TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256_OLD", - [TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256] = "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", - [TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256] = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", - [TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256] = "TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256", - [TLS_PSK_WITH_CHACHA20_POLY1305_SHA256] = "TLS_PSK_WITH_CHACHA20_POLY1305_SHA256", - [TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256] = "TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256", - [TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256] = "TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256", - [TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256] = "TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256", - [TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256] = "TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256", - [TLS_ECDHE_PSK_WITH_AES_256_GCM_SHA384] = "TLS_ECDHE_PSK_WITH_AES_256_GCM_SHA384", - [TLS_ECDHE_PSK_WITH_AES_128_CCM_8_SHA256] = "TLS_ECDHE_PSK_WITH_AES_128_CCM_8_SHA256", - [TLS_ECDHE_PSK_WITH_AES_128_CCM_SHA256] = "TLS_ECDHE_PSK_WITH_AES_128_CCM_SHA256", - [SSL_RSA_FIPS_WITH_DES_CBC_SHA] = "SSL_RSA_FIPS_WITH_DES_CBC_SHA", - [SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA] = "SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA", - [SSL_RSA_FIPS_WITH_DES_CBC_SHA_2] = "SSL_RSA_FIPS_WITH_DES_CBC_SHA_2", - [SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA_2] = "SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA_2", - [SSL_RSA_WITH_RC2_CBC_MD5] = "SSL_RSA_WITH_RC2_CBC_MD5", - [SSL_RSA_WITH_IDEA_CBC_MD5] = "SSL_RSA_WITH_IDEA_CBC_MD5", - [SSL_RSA_WITH_DES_CBC_MD5] = "SSL_RSA_WITH_DES_CBC_MD5", - [SSL_RSA_WITH_3DES_EDE_CBC_MD5] = "SSL_RSA_WITH_3DES_EDE_CBC_MD5", - [TLS_EMPTY_RENEGOTIATION_INFO_SCSV] = "TLS_EMPTY_RENEGOTIATION_INFO_SCSV", - } &default=function(i: count):string { return fmt("unknown-%d", i); }; - -} diff --git a/scripts/base/protocols/ssl/consts.zeek b/scripts/base/protocols/ssl/consts.zeek new file mode 100644 index 0000000000..dc4f72674b --- /dev/null +++ b/scripts/base/protocols/ssl/consts.zeek @@ -0,0 +1,1021 @@ +module SSL; + +export { + const SSLv2 = 0x0002; + const SSLv3 = 0x0300; + const TLSv10 = 0x0301; + const TLSv11 = 0x0302; + const TLSv12 = 0x0303; + const TLSv13 = 0x0304; + + const DTLSv10 = 0xFEFF; + # DTLSv11 does not exist + const DTLSv12 = 0xFEFD; + + ## Mapping between the constants and string values for SSL/TLS versions. 
+ const version_strings: table[count] of string = { + [SSLv2] = "SSLv2", + [SSLv3] = "SSLv3", + [TLSv10] = "TLSv10", + [TLSv11] = "TLSv11", + [TLSv12] = "TLSv12", + [TLSv13] = "TLSv13", + [DTLSv10] = "DTLSv10", + [DTLSv12] = "DTLSv12" + } &default=function(i: count):string + { + if ( i/0xFF == 0x7F ) # TLS 1.3 draft + return fmt("TLSv13-draft%d", i % 0x7F ); + + return fmt("unknown-%d", i); + }; + + # TLS content types: + const CHANGE_CIPHER_SPEC = 20; + const ALERT = 21; + const HANDSHAKE = 22; + const APPLICATION_DATA = 23; + const HEARTBEAT = 24; + const V2_ERROR = 300; + const V2_CLIENT_HELLO = 301; + const V2_CLIENT_MASTER_KEY = 302; + const V2_SERVER_HELLO = 304; + + # TLS Handshake types: + const HELLO_REQUEST = 0; + const CLIENT_HELLO = 1; + const SERVER_HELLO = 2; + const HELLO_VERIFY_REQUEST = 3; # RFC 6347 + const SESSION_TICKET = 4; # RFC 5077 + const HELLO_RETRY_REQUEST = 6; # draft-ietf-tls-tls13-16 + const ENCRYPTED_EXTENSIONS = 8; # draft-ietf-tls-tls13-16 + const CERTIFICATE = 11; + const SERVER_KEY_EXCHANGE = 12; + const CERTIFICATE_REQUEST = 13; + const SERVER_HELLO_DONE = 14; + const CERTIFICATE_VERIFY = 15; + const CLIENT_KEY_EXCHANGE = 16; + const FINISHED = 20; + const CERTIFICATE_URL = 21; # RFC 3546 + const CERTIFICATE_STATUS = 22; # RFC 3546 + const SUPPLEMENTAL_DATA = 23; # RFC 4680 + const KEY_UPDATE = 24; # draft-ietf-tls-tls13-16 + + ## Mapping between numeric codes and human readable strings for alert + ## levels. + const alert_levels: table[count] of string = { + [1] = "warning", + [2] = "fatal", + } &default=function(i: count):string { return fmt("unknown-%d", i); }; + + ## Mapping between numeric codes and human readable strings for hash + ## algorithms. + const hash_algorithms: table[count] of string = { + [0] = "none", + [1] = "md5", + [2] = "sha1", + [3] = "sha224", + [4] = "sha256", + [5] = "sha384", + [6] = "sha512", + [8] = "Intrinsic", + } &default=function(i: count):string { return fmt("unknown-%d", i); }; + + ## Mapping between numeric codes and human readable strings for signature + ## algorithms. + const signature_algorithms: table[count] of string = { + [0] = "anonymous", + [1] = "rsa", + [2] = "dsa", + [3] = "ecdsa", + [4] = "rsa_pss_sha256", + [5] = "rsa_pss_sha384", + [6] = "rsa_pss_sha512", + [7] = "ed25519", + [8] = "ed448", + [9] = "rsa_pss_sha256", + [10] = "rsa_pss_sha384", + [11] = "rsa_pss_sha512", + [64] = "gostr34102012_256", + [65] = "gostr34102012_256", + } &default=function(i: count):string { return fmt("unknown-%d", i); }; + + ## Mapping between numeric codes and human readable strings for alert + ## descriptions. 
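As an illustrative sketch rather than part of the committed change: the version_strings table above resolves raw SSL/TLS version codes to names, and its &default function maps TLS 1.3 draft codes (0x7Fxx) to "TLSv13-draftNN" while any other miss becomes "unknown-N". A minimal example of how such lookups behave, assuming a standard Zeek setup with the SSL scripts loaded and the zeek_init event (bro_init on older releases):

	event zeek_init()
		{
		# Known codes hit the table directly ...
		print SSL::version_strings[0x0303];  # "TLSv12"
		print SSL::version_strings[0xFEFD];  # "DTLSv12"
		# ... while misses go through the &default function:
		print SSL::version_strings[0x7F12];  # "TLSv13-draft18" (draft-style code)
		print SSL::version_strings[0x9999];  # "unknown-39321"
		}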
+ const alert_descriptions: table[count] of string = { + [0] = "close_notify", + [10] = "unexpected_message", + [20] = "bad_record_mac", + [21] = "decryption_failed", + [22] = "record_overflow", + [30] = "decompression_failure", + [40] = "handshake_failure", + [41] = "no_certificate", + [42] = "bad_certificate", + [43] = "unsupported_certificate", + [44] = "certificate_revoked", + [45] = "certificate_expired", + [46] = "certificate_unknown", + [47] = "illegal_parameter", + [48] = "unknown_ca", + [49] = "access_denied", + [50] = "decode_error", + [51] = "decrypt_error", + [60] = "export_restriction", + [70] = "protocol_version", + [71] = "insufficient_security", + [80] = "internal_error", + [86] = "inappropriate_fallback", + [90] = "user_canceled", + [100] = "no_renegotiation", + [110] = "unsupported_extension", + [111] = "certificate_unobtainable", + [112] = "unrecognized_name", + [113] = "bad_certificate_status_response", + [114] = "bad_certificate_hash_value", + [115] = "unknown_psk_identity", + [120] = "no_application_protocol", + } &default=function(i: count):string { return fmt("unknown-%d", i); }; + + ## Mapping between numeric codes and human readable strings for SSL/TLS + ## extensions. + # More information can be found here: + # http://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xml + const extensions: table[count] of string = { + [0] = "server_name", + [1] = "max_fragment_length", + [2] = "client_certificate_url", + [3] = "trusted_ca_keys", + [4] = "truncated_hmac", + [5] = "status_request", + [6] = "user_mapping", + [7] = "client_authz", + [8] = "server_authz", + [9] = "cert_type", + [10] = "supported_groups", # old name: elliptic_curves - draft-ietf-tls-negotiated-ff-dhe + [11] = "ec_point_formats", + [12] = "srp", + [13] = "signature_algorithms", + [14] = "use_srtp", + [15] = "heartbeat", + [16] = "application_layer_protocol_negotiation", + [17] = "status_request_v2", + [18] = "signed_certificate_timestamp", + [19] = "client_certificate_type", + [20] = "server_certificate_type", + [21] = "padding", + [22] = "encrypt_then_mac", + [23] = "extended_master_secret", + [24] = "token_binding", # temporary till 2017-03-06 - draft-ietf-tokbind-negotiation + [25] = "cached_info", + [35] = "SessionTicket TLS", + [40] = "key_share", # new for TLS 1.3; was used for extended_random before. State as of TLS 1.3 draft 16 + [41] = "pre_shared_key", # new for 1.3, state of draft-16 + [42] = "early_data", # new for 1.3, state of draft-16 + [43] = "supported_versions", # new for 1.3, state of draft-16 + [44] = "cookie", # new for 1.3, state of draft-16 + [45] = "psk_key_exchange_modes", # new for 1.3, state of draft-18 + [46] = "TicketEarlyDataInfo", # new for 1.3, state of draft-16 + [47] = "certificate_authorities", # new for 1.3, state of draft-18 + [48] = "oid_filters", # new for 1.3, state of draft-18 + [13172] = "next_protocol_negotiation", + [13175] = "origin_bound_certificates", + [13180] = "encrypted_client_certificates", + [30031] = "channel_id", + [30032] = "channel_id_new", + [35655] = "padding", + [65281] = "renegotiation_info" + } &default=function(i: count):string { return fmt("unknown-%d", i); }; + + ## Mapping between numeric codes and human readable string for SSL/TLS elliptic curves. 
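Another hedged illustration, not part of the change itself: the alert_descriptions and extensions tables above are typically consulted from SSL event handlers to turn numeric codes into log-friendly names. The sketch below assumes the usual ssl_extension and ssl_alert event signatures.

	# Sketch: report human-readable names for observed extensions and alerts.
	event ssl_extension(c: connection, is_orig: bool, code: count, val: string)
		{
		# Unlisted codes fall through to the table's &default ("unknown-<code>").
		print fmt("%s sent extension %s", is_orig ? "client" : "server", SSL::extensions[code]);
		}

	event ssl_alert(c: connection, is_orig: bool, level: count, desc: count)
		{
		print fmt("TLS alert %s (%s)", SSL::alert_descriptions[desc], SSL::alert_levels[level]);
		}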
+ # See http://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-8 + const ec_curves: table[count] of string = { + [1] = "sect163k1", # 1-23 are TLS 1.3 obsoleted + [2] = "sect163r1", + [3] = "sect163r2", + [4] = "sect193r1", + [5] = "sect193r2", + [6] = "sect233k1", + [7] = "sect233r1", + [8] = "sect239k1", + [9] = "sect283k1", + [10] = "sect283r1", + [11] = "sect409k1", + [12] = "sect409r1", + [13] = "sect571k1", + [14] = "sect571r1", + [15] = "secp160k1", + [16] = "secp160r1", + [17] = "secp160r2", + [18] = "secp192k1", + [19] = "secp192r1", + [20] = "secp224k1", + [21] = "secp224r1", + [22] = "secp256k1", + [23] = "secp256r1", # TLS 1.3 valid + [24] = "secp384r1", # TLS 1.3 valid + [25] = "secp521r1", # TLS 1.3 valid + [26] = "brainpoolP256r1", # 26-28 are TLS 1.3 obsoleted + [27] = "brainpoolP384r1", + [28] = "brainpoolP512r1", + # Temporary till 2017-01-09 - draft-ietf-tls-rfc4492bis + [29] = "x25519", # TLS 1.3 valid + [30] = "x448", # TLS 1.3 valid + # draft-ietf-tls-negotiated-ff-dhe-10 + [256] = "ffdhe2048", # 256-260 are TLS 1.3 valid + [257] = "ffdhe3072", + [258] = "ffdhe4096", + [259] = "ffdhe6144", + [260] = "ffdhe8192", + [0xFF01] = "arbitrary_explicit_prime_curves", + [0xFF02] = "arbitrary_explicit_char2_curves" + } &default=function(i: count):string { return fmt("unknown-%d", i); }; + + ## Mapping between numeric codes and human readable string for SSL/TLS EC point formats. + # See http://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-9 + const ec_point_formats: table[count] of string = { + [0] = "uncompressed", + [1] = "ansiX962_compressed_prime", + [2] = "ansiX962_compressed_char2" + } &default=function(i: count):string { return fmt("unknown-%d", i); }; + + # SSLv2 + const SSLv20_CK_RC4_128_WITH_MD5 = 0x010080; + const SSLv20_CK_RC4_128_EXPORT40_WITH_MD5 = 0x020080; + const SSLv20_CK_RC2_128_CBC_WITH_MD5 = 0x030080; + const SSLv20_CK_RC2_128_CBC_EXPORT40_WITH_MD5 = 0x040080; + const SSLv20_CK_IDEA_128_CBC_WITH_MD5 = 0x050080; + const SSLv20_CK_DES_64_CBC_WITH_MD5 = 0x060040; + const SSLv20_CK_DES_192_EDE3_CBC_WITH_MD5 = 0x0700C0; + + # TLS + const TLS_NULL_WITH_NULL_NULL = 0x0000; + const TLS_RSA_WITH_NULL_MD5 = 0x0001; + const TLS_RSA_WITH_NULL_SHA = 0x0002; + const TLS_RSA_EXPORT_WITH_RC4_40_MD5 = 0x0003; + const TLS_RSA_WITH_RC4_128_MD5 = 0x0004; + const TLS_RSA_WITH_RC4_128_SHA = 0x0005; + const TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 = 0x0006; + const TLS_RSA_WITH_IDEA_CBC_SHA = 0x0007; + const TLS_RSA_EXPORT_WITH_DES40_CBC_SHA = 0x0008; + const TLS_RSA_WITH_DES_CBC_SHA = 0x0009; + const TLS_RSA_WITH_3DES_EDE_CBC_SHA = 0x000A; + const TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA = 0x000B; + const TLS_DH_DSS_WITH_DES_CBC_SHA = 0x000C; + const TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA = 0x000D; + const TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA = 0x000E; + const TLS_DH_RSA_WITH_DES_CBC_SHA = 0x000F; + const TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA = 0x0010; + const TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA = 0x0011; + const TLS_DHE_DSS_WITH_DES_CBC_SHA = 0x0012; + const TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA = 0x0013; + const TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA = 0x0014; + const TLS_DHE_RSA_WITH_DES_CBC_SHA = 0x0015; + const TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA = 0x0016; + const TLS_DH_ANON_EXPORT_WITH_RC4_40_MD5 = 0x0017; + const TLS_DH_ANON_WITH_RC4_128_MD5 = 0x0018; + const TLS_DH_ANON_EXPORT_WITH_DES40_CBC_SHA = 0x0019; + const TLS_DH_ANON_WITH_DES_CBC_SHA = 0x001A; + const TLS_DH_ANON_WITH_3DES_EDE_CBC_SHA = 0x001B; + const 
SSL_FORTEZZA_KEA_WITH_NULL_SHA = 0x001C; + const SSL_FORTEZZA_KEA_WITH_FORTEZZA_CBC_SHA = 0x001D; + const TLS_KRB5_WITH_DES_CBC_SHA = 0x001E; + const TLS_KRB5_WITH_3DES_EDE_CBC_SHA = 0x001F; + const TLS_KRB5_WITH_RC4_128_SHA = 0x0020; + const TLS_KRB5_WITH_IDEA_CBC_SHA = 0x0021; + const TLS_KRB5_WITH_DES_CBC_MD5 = 0x0022; + const TLS_KRB5_WITH_3DES_EDE_CBC_MD5 = 0x0023; + const TLS_KRB5_WITH_RC4_128_MD5 = 0x0024; + const TLS_KRB5_WITH_IDEA_CBC_MD5 = 0x0025; + const TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA = 0x0026; + const TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA = 0x0027; + const TLS_KRB5_EXPORT_WITH_RC4_40_SHA = 0x0028; + const TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 = 0x0029; + const TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 = 0x002A; + const TLS_KRB5_EXPORT_WITH_RC4_40_MD5 = 0x002B; + const TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F; + const TLS_DH_DSS_WITH_AES_128_CBC_SHA = 0x0030; + const TLS_DH_RSA_WITH_AES_128_CBC_SHA = 0x0031; + const TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032; + const TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033; + const TLS_DH_ANON_WITH_AES_128_CBC_SHA = 0x0034; + const TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035; + const TLS_DH_DSS_WITH_AES_256_CBC_SHA = 0x0036; + const TLS_DH_RSA_WITH_AES_256_CBC_SHA = 0x0037; + const TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038; + const TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039; + const TLS_DH_ANON_WITH_AES_256_CBC_SHA = 0x003A; + const TLS_RSA_WITH_NULL_SHA256 = 0x003B; + const TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C; + const TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D; + const TLS_DH_DSS_WITH_AES_128_CBC_SHA256 = 0x003E; + const TLS_DH_RSA_WITH_AES_128_CBC_SHA256 = 0x003F; + const TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040; + const TLS_RSA_WITH_CAMELLIA_128_CBC_SHA = 0x0041; + const TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA = 0x0042; + const TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA = 0x0043; + const TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA = 0x0044; + const TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA = 0x0045; + const TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA = 0x0046; + const TLS_RSA_EXPORT1024_WITH_RC4_56_MD5 = 0x0060; + const TLS_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5 = 0x0061; + const TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA = 0x0062; + const TLS_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA = 0x0063; + const TLS_RSA_EXPORT1024_WITH_RC4_56_SHA = 0x0064; + const TLS_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA = 0x0065; + const TLS_DHE_DSS_WITH_RC4_128_SHA = 0x0066; + const TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067; + const TLS_DH_DSS_WITH_AES_256_CBC_SHA256 = 0x0068; + const TLS_DH_RSA_WITH_AES_256_CBC_SHA256 = 0x0069; + const TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A; + const TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B; + const TLS_DH_ANON_WITH_AES_128_CBC_SHA256 = 0x006C; + const TLS_DH_ANON_WITH_AES_256_CBC_SHA256 = 0x006D; + # draft-ietf-tls-openpgp-keys-06 + const TLS_DHE_DSS_WITH_3DES_EDE_CBC_RMD = 0x0072; + const TLS_DHE_DSS_WITH_AES_128_CBC_RMD = 0x0073; + const TLS_DHE_DSS_WITH_AES_256_CBC_RMD = 0x0074; + const TLS_DHE_RSA_WITH_3DES_EDE_CBC_RMD = 0x0077; + const TLS_DHE_RSA_WITH_AES_128_CBC_RMD = 0x0078; + const TLS_DHE_RSA_WITH_AES_256_CBC_RMD = 0x0079; + const TLS_RSA_WITH_3DES_EDE_CBC_RMD = 0x007C; + const TLS_RSA_WITH_AES_128_CBC_RMD = 0x007D; + const TLS_RSA_WITH_AES_256_CBC_RMD = 0x007E; + # draft-chudov-cryptopro-cptls-04 + const TLS_GOSTR341094_WITH_28147_CNT_IMIT = 0x0080; + const TLS_GOSTR341001_WITH_28147_CNT_IMIT = 0x0081; + const TLS_GOSTR341094_WITH_NULL_GOSTR3411 = 0x0082; + const TLS_GOSTR341001_WITH_NULL_GOSTR3411 = 0x0083; + const TLS_RSA_WITH_CAMELLIA_256_CBC_SHA = 0x0084; 
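A final illustrative sketch, again not part of the committed change: the numeric cipher suite constants being defined here are normally consumed either by comparing a cipher code against a named constant, or by resolving it through the cipher_desc table that appears further below in this file. The helper name below is hypothetical.

	# Hypothetical helper: classify a numeric cipher suite code.
	function describe_cipher(cipher: count): string
		{
		if ( cipher == SSL::TLS_RSA_WITH_3DES_EDE_CBC_SHA )
			return "legacy 3DES suite";

		# Assuming cipher_desc, like the other tables in this file, keeps an
		# "unknown-%d" &default for unassigned code points.
		return SSL::cipher_desc[cipher];
		}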
+ const TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA = 0x0085; + const TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA = 0x0086; + const TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA = 0x0087; + const TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA = 0x0088; + const TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA = 0x0089; + const TLS_PSK_WITH_RC4_128_SHA = 0x008A; + const TLS_PSK_WITH_3DES_EDE_CBC_SHA = 0x008B; + const TLS_PSK_WITH_AES_128_CBC_SHA = 0x008C; + const TLS_PSK_WITH_AES_256_CBC_SHA = 0x008D; + const TLS_DHE_PSK_WITH_RC4_128_SHA = 0x008E; + const TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA = 0x008F; + const TLS_DHE_PSK_WITH_AES_128_CBC_SHA = 0x0090; + const TLS_DHE_PSK_WITH_AES_256_CBC_SHA = 0x0091; + const TLS_RSA_PSK_WITH_RC4_128_SHA = 0x0092; + const TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA = 0x0093; + const TLS_RSA_PSK_WITH_AES_128_CBC_SHA = 0x0094; + const TLS_RSA_PSK_WITH_AES_256_CBC_SHA = 0x0095; + const TLS_RSA_WITH_SEED_CBC_SHA = 0x0096; + const TLS_DH_DSS_WITH_SEED_CBC_SHA = 0x0097; + const TLS_DH_RSA_WITH_SEED_CBC_SHA = 0x0098; + const TLS_DHE_DSS_WITH_SEED_CBC_SHA = 0x0099; + const TLS_DHE_RSA_WITH_SEED_CBC_SHA = 0x009A; + const TLS_DH_ANON_WITH_SEED_CBC_SHA = 0x009B; + const TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C; + const TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D; + const TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E; + const TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F; + const TLS_DH_RSA_WITH_AES_128_GCM_SHA256 = 0x00A0; + const TLS_DH_RSA_WITH_AES_256_GCM_SHA384 = 0x00A1; + const TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2; + const TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3; + const TLS_DH_DSS_WITH_AES_128_GCM_SHA256 = 0x00A4; + const TLS_DH_DSS_WITH_AES_256_GCM_SHA384 = 0x00A5; + const TLS_DH_ANON_WITH_AES_128_GCM_SHA256 = 0x00A6; + const TLS_DH_ANON_WITH_AES_256_GCM_SHA384 = 0x00A7; + const TLS_PSK_WITH_AES_128_GCM_SHA256 = 0x00A8; + const TLS_PSK_WITH_AES_256_GCM_SHA384 = 0x00A9; + const TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 = 0x00AA; + const TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 = 0x00AB; + const TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 = 0x00AC; + const TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 = 0x00AD; + const TLS_PSK_WITH_AES_128_CBC_SHA256 = 0x00AE; + const TLS_PSK_WITH_AES_256_CBC_SHA384 = 0x00AF; + const TLS_PSK_WITH_NULL_SHA256 = 0x00B0; + const TLS_PSK_WITH_NULL_SHA384 = 0x00B1; + const TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 = 0x00B2; + const TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 = 0x00B3; + const TLS_DHE_PSK_WITH_NULL_SHA256 = 0x00B4; + const TLS_DHE_PSK_WITH_NULL_SHA384 = 0x00B5; + const TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 = 0x00B6; + const TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 = 0x00B7; + const TLS_RSA_PSK_WITH_NULL_SHA256 = 0x00B8; + const TLS_RSA_PSK_WITH_NULL_SHA384 = 0x00B9; + const TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BA; + const TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BB; + const TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BC; + const TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BD; + const TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BE; + const TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA256 = 0x00BF; + const TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C0; + const TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C1; + const TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C2; + const TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C3; + const TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C4; + const TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA256 = 0x00C5; + # draft-ietf-tls-tls13-16 + const TLS_AES_128_GCM_SHA256 = 0x1301; + const TLS_AES_256_GCM_SHA384 = 0x1302; + const TLS_CHACHA20_POLY1305_SHA256 = 0x1303; + const 
TLS_AES_128_CCM_SHA256 = 0x1304; + const TLS_AES_128_CCM_8_SHA256 = 0x1305; + # Google... + const TLS_CECPQ1_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0x16b7; + const TLS_CECPQ1_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0x16b8; + const TLS_CECPQ1_RSA_WITH_AES_256_GCM_SHA384 = 0x16b9; + const TLS_CECPQ1_ECDSA_WITH_AES_256_GCM_SHA384 = 0x16ba; + # draft-bmoeller-tls-downgrade-scsv-01 + const TLS_FALLBACK_SCSV = 0x5600; + # RFC 4492 + const TLS_ECDH_ECDSA_WITH_NULL_SHA = 0xC001; + const TLS_ECDH_ECDSA_WITH_RC4_128_SHA = 0xC002; + const TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA = 0xC003; + const TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA = 0xC004; + const TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA = 0xC005; + const TLS_ECDHE_ECDSA_WITH_NULL_SHA = 0xC006; + const TLS_ECDHE_ECDSA_WITH_RC4_128_SHA = 0xC007; + const TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA = 0xC008; + const TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009; + const TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A; + const TLS_ECDH_RSA_WITH_NULL_SHA = 0xC00B; + const TLS_ECDH_RSA_WITH_RC4_128_SHA = 0xC00C; + const TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA = 0xC00D; + const TLS_ECDH_RSA_WITH_AES_128_CBC_SHA = 0xC00E; + const TLS_ECDH_RSA_WITH_AES_256_CBC_SHA = 0xC00F; + const TLS_ECDHE_RSA_WITH_NULL_SHA = 0xC010; + const TLS_ECDHE_RSA_WITH_RC4_128_SHA = 0xC011; + const TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA = 0xC012; + const TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013; + const TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014; + const TLS_ECDH_ANON_WITH_NULL_SHA = 0xC015; + const TLS_ECDH_ANON_WITH_RC4_128_SHA = 0xC016; + const TLS_ECDH_ANON_WITH_3DES_EDE_CBC_SHA = 0xC017; + const TLS_ECDH_ANON_WITH_AES_128_CBC_SHA = 0xC018; + const TLS_ECDH_ANON_WITH_AES_256_CBC_SHA = 0xC019; + const TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA = 0xC01A; + const TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA = 0xC01B; + const TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA = 0xC01C; + const TLS_SRP_SHA_WITH_AES_128_CBC_SHA = 0xC01D; + const TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA = 0xC01E; + const TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA = 0xC01F; + const TLS_SRP_SHA_WITH_AES_256_CBC_SHA = 0xC020; + const TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA = 0xC021; + const TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA = 0xC022; + const TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023; + const TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024; + const TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC025; + const TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC026; + const TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027; + const TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028; + const TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 = 0xC029; + const TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 = 0xC02A; + const TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B; + const TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C; + const TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02D; + const TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02E; + const TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F; + const TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030; + const TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 = 0xC031; + const TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 = 0xC032; + const TLS_ECDHE_PSK_WITH_RC4_128_SHA = 0xC033; + const TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA = 0xC034; + const TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA = 0xC035; + const TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA = 0xC036; + const TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 = 0xC037; + const TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 = 0xC038; + const TLS_ECDHE_PSK_WITH_NULL_SHA = 0xC039; + const TLS_ECDHE_PSK_WITH_NULL_SHA256 = 0xC03A; + const TLS_ECDHE_PSK_WITH_NULL_SHA384 
= 0xC03B; + # RFC 6209 + const TLS_RSA_WITH_ARIA_128_CBC_SHA256 = 0xC03C; + const TLS_RSA_WITH_ARIA_256_CBC_SHA384 = 0xC03D; + const TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 = 0xC03E; + const TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 = 0xC03F; + const TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 = 0xC040; + const TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 = 0xC041; + const TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 = 0xC042; + const TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 = 0xC043; + const TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 = 0xC044; + const TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 = 0xC045; + const TLS_DH_ANON_WITH_ARIA_128_CBC_SHA256 = 0xC046; + const TLS_DH_ANON_WITH_ARIA_256_CBC_SHA384 = 0xC047; + const TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 = 0xC048; + const TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 = 0xC049; + const TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 = 0xC04A; + const TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 = 0xC04B; + const TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 = 0xC04C; + const TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 = 0xC04D; + const TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 = 0xC04E; + const TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 = 0xC04F; + const TLS_RSA_WITH_ARIA_128_GCM_SHA256 = 0xC050; + const TLS_RSA_WITH_ARIA_256_GCM_SHA384 = 0xC051; + const TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 = 0xC052; + const TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 = 0xC053; + const TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 = 0xC054; + const TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 = 0xC055; + const TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 = 0xC056; + const TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 = 0xC057; + const TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 = 0xC058; + const TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 = 0xC059; + const TLS_DH_ANON_WITH_ARIA_128_GCM_SHA256 = 0xC05A; + const TLS_DH_ANON_WITH_ARIA_256_GCM_SHA384 = 0xC05B; + const TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 = 0xC05C; + const TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 = 0xC05D; + const TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 = 0xC05E; + const TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 = 0xC05F; + const TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 = 0xC060; + const TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 = 0xC061; + const TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 = 0xC062; + const TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 = 0xC063; + const TLS_PSK_WITH_ARIA_128_CBC_SHA256 = 0xC064; + const TLS_PSK_WITH_ARIA_256_CBC_SHA384 = 0xC065; + const TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 = 0xC066; + const TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 = 0xC067; + const TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 = 0xC068; + const TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 = 0xC069; + const TLS_PSK_WITH_ARIA_128_GCM_SHA256 = 0xC06A; + const TLS_PSK_WITH_ARIA_256_GCM_SHA384 = 0xC06B; + const TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 = 0xC06C; + const TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 = 0xC06D; + const TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 = 0xC06E; + const TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 = 0xC06F; + const TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 = 0xC070; + const TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 = 0xC071; + # RFC 6367 + const TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = 0xC072; + const TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = 0xC073; + const TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = 0xC074; + const TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = 0xC075; + const TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 0xC076; + const TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 = 0xC077; + const TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 0xC078; + const TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 = 0xC079; + const TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 = 
0xC07A; + const TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC07B; + const TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 = 0xC07C; + const TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC07D; + const TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 = 0xC07E; + const TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC07F; + const TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 = 0xC080; + const TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 = 0xC081; + const TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 = 0xC082; + const TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 = 0xC083; + const TLS_DH_ANON_WITH_CAMELLIA_128_GCM_SHA256 = 0xC084; + const TLS_DH_ANON_WITH_CAMELLIA_256_GCM_SHA384 = 0xC085; + const TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 = 0xC086; + const TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC087; + const TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 = 0xC088; + const TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC089; + const TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 = 0xC08A; + const TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC08B; + const TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 = 0xC08C; + const TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 = 0xC08D; + const TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 = 0xC08E; + const TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 = 0xC08F; + const TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 = 0xC090; + const TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 = 0xC091; + const TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 = 0xC092; + const TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 = 0xC093; + const TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 = 0xC094; + const TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 = 0xC095; + const TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = 0xC096; + const TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = 0xC097; + const TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 = 0xC098; + const TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 = 0xC099; + const TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = 0xC09A; + const TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = 0xC09B; + # RFC 6655 + const TLS_RSA_WITH_AES_128_CCM = 0xC09C; + const TLS_RSA_WITH_AES_256_CCM = 0xC09D; + const TLS_DHE_RSA_WITH_AES_128_CCM = 0xC09E; + const TLS_DHE_RSA_WITH_AES_256_CCM = 0xC09F; + const TLS_RSA_WITH_AES_128_CCM_8 = 0xC0A0; + const TLS_RSA_WITH_AES_256_CCM_8 = 0xC0A1; + const TLS_DHE_RSA_WITH_AES_128_CCM_8 = 0xC0A2; + const TLS_DHE_RSA_WITH_AES_256_CCM_8 = 0xC0A3; + const TLS_PSK_WITH_AES_128_CCM = 0xC0A4; + const TLS_PSK_WITH_AES_256_CCM = 0xC0A5; + const TLS_DHE_PSK_WITH_AES_128_CCM = 0xC0A6; + const TLS_DHE_PSK_WITH_AES_256_CCM = 0xC0A7; + const TLS_PSK_WITH_AES_128_CCM_8 = 0xC0A8; + const TLS_PSK_WITH_AES_256_CCM_8 = 0xC0A9; + const TLS_PSK_DHE_WITH_AES_128_CCM_8 = 0xC0AA; + const TLS_PSK_DHE_WITH_AES_256_CCM_8 = 0xC0AB; + const TLS_ECDHE_ECDSA_WITH_AES_128_CCM = 0xC0AC; + const TLS_ECDHE_ECDSA_WITH_AES_256_CCM = 0xC0AD; + const TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 = 0xC0AE; + const TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 = 0xC0AF; + # draft-agl-tls-chacha20poly1305-02 + const TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256_OLD = 0xCC13; + const TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256_OLD = 0xCC14; + const TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256_OLD = 0xCC15; + # RFC 7905 + const TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA8; + const TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA9; + const TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCAA; + const TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 = 0xCCAB; + const TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = 0xCCAC; + const TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = 0xCCAD; + const 
TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 = 0xCCAE; + # draft-ietf-tls-ecdhe-psk-aead-05 + const TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256 = 0xD001; + const TLS_ECDHE_PSK_WITH_AES_256_GCM_SHA384 = 0xD002; + const TLS_ECDHE_PSK_WITH_AES_128_CCM_8_SHA256 = 0xD003; + const TLS_ECDHE_PSK_WITH_AES_128_CCM_SHA256 = 0xD004; + + const SSL_RSA_FIPS_WITH_DES_CBC_SHA = 0xFEFE; + const SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA = 0xFEFF; + const SSL_RSA_FIPS_WITH_DES_CBC_SHA_2 = 0xFFE1; + const SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA_2 = 0xFFE0; + const SSL_RSA_WITH_RC2_CBC_MD5 = 0xFF80; + const SSL_RSA_WITH_IDEA_CBC_MD5 = 0xFF81; + const SSL_RSA_WITH_DES_CBC_MD5 = 0xFF82; + const SSL_RSA_WITH_3DES_EDE_CBC_MD5 = 0xFF83; + const TLS_EMPTY_RENEGOTIATION_INFO_SCSV = 0x00FF; + + ## This is a table of all known cipher specs. It can be used for + ## detecting unknown ciphers and for converting the cipher spec + ## constants into a human readable format. + const cipher_desc: table[count] of string = { + [SSLv20_CK_RC4_128_EXPORT40_WITH_MD5] = + "SSLv20_CK_RC4_128_EXPORT40_WITH_MD5", + [SSLv20_CK_RC4_128_WITH_MD5] = "SSLv20_CK_RC4_128_WITH_MD5", + [SSLv20_CK_RC2_128_CBC_WITH_MD5] = "SSLv20_CK_RC2_128_CBC_WITH_MD5", + [SSLv20_CK_RC2_128_CBC_EXPORT40_WITH_MD5] = + "SSLv20_CK_RC2_128_CBC_EXPORT40_WITH_MD5", + [SSLv20_CK_IDEA_128_CBC_WITH_MD5] = "SSLv20_CK_IDEA_128_CBC_WITH_MD5", + [SSLv20_CK_DES_192_EDE3_CBC_WITH_MD5] = + "SSLv20_CK_DES_192_EDE3_CBC_WITH_MD5", + [SSLv20_CK_DES_64_CBC_WITH_MD5] = "SSLv20_CK_DES_64_CBC_WITH_MD5", + + [TLS_NULL_WITH_NULL_NULL] = "TLS_NULL_WITH_NULL_NULL", + [TLS_RSA_WITH_NULL_MD5] = "TLS_RSA_WITH_NULL_MD5", + [TLS_RSA_WITH_NULL_SHA] = "TLS_RSA_WITH_NULL_SHA", + [TLS_RSA_EXPORT_WITH_RC4_40_MD5] = "TLS_RSA_EXPORT_WITH_RC4_40_MD5", + [TLS_RSA_WITH_RC4_128_MD5] = "TLS_RSA_WITH_RC4_128_MD5", + [TLS_RSA_WITH_RC4_128_SHA] = "TLS_RSA_WITH_RC4_128_SHA", + [TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5] = "TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5", + [TLS_RSA_WITH_IDEA_CBC_SHA] = "TLS_RSA_WITH_IDEA_CBC_SHA", + [TLS_RSA_EXPORT_WITH_DES40_CBC_SHA] = "TLS_RSA_EXPORT_WITH_DES40_CBC_SHA", + [TLS_RSA_WITH_DES_CBC_SHA] = "TLS_RSA_WITH_DES_CBC_SHA", + [TLS_RSA_WITH_3DES_EDE_CBC_SHA] = "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + [TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA] = "TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA", + [TLS_DH_DSS_WITH_DES_CBC_SHA] = "TLS_DH_DSS_WITH_DES_CBC_SHA", + [TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA] = "TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA", + [TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA] = "TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA", + [TLS_DH_RSA_WITH_DES_CBC_SHA] = "TLS_DH_RSA_WITH_DES_CBC_SHA", + [TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA] = "TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA", + [TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA] = "TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA", + [TLS_DHE_DSS_WITH_DES_CBC_SHA] = "TLS_DHE_DSS_WITH_DES_CBC_SHA", + [TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA] = "TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA", + [TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA] = "TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA", + [TLS_DHE_RSA_WITH_DES_CBC_SHA] = "TLS_DHE_RSA_WITH_DES_CBC_SHA", + [TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA] = "TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA", + [TLS_DH_ANON_EXPORT_WITH_RC4_40_MD5] = "TLS_DH_ANON_EXPORT_WITH_RC4_40_MD5", + [TLS_DH_ANON_WITH_RC4_128_MD5] = "TLS_DH_ANON_WITH_RC4_128_MD5", + [TLS_DH_ANON_EXPORT_WITH_DES40_CBC_SHA] = "TLS_DH_ANON_EXPORT_WITH_DES40_CBC_SHA", + [TLS_DH_ANON_WITH_DES_CBC_SHA] = "TLS_DH_ANON_WITH_DES_CBC_SHA", + [TLS_DH_ANON_WITH_3DES_EDE_CBC_SHA] = "TLS_DH_ANON_WITH_3DES_EDE_CBC_SHA", + [SSL_FORTEZZA_KEA_WITH_NULL_SHA] = 
"SSL_FORTEZZA_KEA_WITH_NULL_SHA", + [SSL_FORTEZZA_KEA_WITH_FORTEZZA_CBC_SHA] = "SSL_FORTEZZA_KEA_WITH_FORTEZZA_CBC_SHA", + [TLS_KRB5_WITH_DES_CBC_SHA] = "TLS_KRB5_WITH_DES_CBC_SHA", + [TLS_KRB5_WITH_3DES_EDE_CBC_SHA] = "TLS_KRB5_WITH_3DES_EDE_CBC_SHA", + [TLS_KRB5_WITH_RC4_128_SHA] = "TLS_KRB5_WITH_RC4_128_SHA", + [TLS_KRB5_WITH_IDEA_CBC_SHA] = "TLS_KRB5_WITH_IDEA_CBC_SHA", + [TLS_KRB5_WITH_DES_CBC_MD5] = "TLS_KRB5_WITH_DES_CBC_MD5", + [TLS_KRB5_WITH_3DES_EDE_CBC_MD5] = "TLS_KRB5_WITH_3DES_EDE_CBC_MD5", + [TLS_KRB5_WITH_RC4_128_MD5] = "TLS_KRB5_WITH_RC4_128_MD5", + [TLS_KRB5_WITH_IDEA_CBC_MD5] = "TLS_KRB5_WITH_IDEA_CBC_MD5", + [TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA] = "TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA", + [TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA] = "TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA", + [TLS_KRB5_EXPORT_WITH_RC4_40_SHA] = "TLS_KRB5_EXPORT_WITH_RC4_40_SHA", + [TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5] = "TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5", + [TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5] = "TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5", + [TLS_KRB5_EXPORT_WITH_RC4_40_MD5] = "TLS_KRB5_EXPORT_WITH_RC4_40_MD5", + [TLS_RSA_WITH_AES_128_CBC_SHA] = "TLS_RSA_WITH_AES_128_CBC_SHA", + [TLS_DH_DSS_WITH_AES_128_CBC_SHA] = "TLS_DH_DSS_WITH_AES_128_CBC_SHA", + [TLS_DH_RSA_WITH_AES_128_CBC_SHA] = "TLS_DH_RSA_WITH_AES_128_CBC_SHA", + [TLS_DHE_DSS_WITH_AES_128_CBC_SHA] = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA", + [TLS_DHE_RSA_WITH_AES_128_CBC_SHA] = "TLS_DHE_RSA_WITH_AES_128_CBC_SHA", + [TLS_DH_ANON_WITH_AES_128_CBC_SHA] = "TLS_DH_ANON_WITH_AES_128_CBC_SHA", + [TLS_RSA_WITH_AES_256_CBC_SHA] = "TLS_RSA_WITH_AES_256_CBC_SHA", + [TLS_DH_DSS_WITH_AES_256_CBC_SHA] = "TLS_DH_DSS_WITH_AES_256_CBC_SHA", + [TLS_DH_RSA_WITH_AES_256_CBC_SHA] = "TLS_DH_RSA_WITH_AES_256_CBC_SHA", + [TLS_DHE_DSS_WITH_AES_256_CBC_SHA] = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA", + [TLS_DHE_RSA_WITH_AES_256_CBC_SHA] = "TLS_DHE_RSA_WITH_AES_256_CBC_SHA", + [TLS_DH_ANON_WITH_AES_256_CBC_SHA] = "TLS_DH_ANON_WITH_AES_256_CBC_SHA", + [TLS_RSA_WITH_NULL_SHA256] = "TLS_RSA_WITH_NULL_SHA256", + [TLS_RSA_WITH_AES_128_CBC_SHA256] = "TLS_RSA_WITH_AES_128_CBC_SHA256", + [TLS_RSA_WITH_AES_256_CBC_SHA256] = "TLS_RSA_WITH_AES_256_CBC_SHA256", + [TLS_DH_DSS_WITH_AES_128_CBC_SHA256] = "TLS_DH_DSS_WITH_AES_128_CBC_SHA256", + [TLS_DH_RSA_WITH_AES_128_CBC_SHA256] = "TLS_DH_RSA_WITH_AES_128_CBC_SHA256", + [TLS_DHE_DSS_WITH_AES_128_CBC_SHA256] = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA256", + [TLS_RSA_WITH_CAMELLIA_128_CBC_SHA] = "TLS_RSA_WITH_CAMELLIA_128_CBC_SHA", + [TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA] = "TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA", + [TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA] = "TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA", + [TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA] = "TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA", + [TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA] = "TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA", + [TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA] = "TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA", + [TLS_RSA_EXPORT1024_WITH_RC4_56_MD5] = "TLS_RSA_EXPORT1024_WITH_RC4_56_MD5", + [TLS_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5] = "TLS_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5", + [TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA] = "TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA", + [TLS_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA] = "TLS_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA", + [TLS_RSA_EXPORT1024_WITH_RC4_56_SHA] = "TLS_RSA_EXPORT1024_WITH_RC4_56_SHA", + [TLS_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA] = "TLS_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA", + [TLS_DHE_DSS_WITH_RC4_128_SHA] = "TLS_DHE_DSS_WITH_RC4_128_SHA", + [TLS_DHE_RSA_WITH_AES_128_CBC_SHA256] = 
"TLS_DHE_RSA_WITH_AES_128_CBC_SHA256", + [TLS_DH_DSS_WITH_AES_256_CBC_SHA256] = "TLS_DH_DSS_WITH_AES_256_CBC_SHA256", + [TLS_DH_RSA_WITH_AES_256_CBC_SHA256] = "TLS_DH_RSA_WITH_AES_256_CBC_SHA256", + [TLS_DHE_DSS_WITH_AES_256_CBC_SHA256] = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA256", + [TLS_DHE_RSA_WITH_AES_256_CBC_SHA256] = "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256", + [TLS_DH_ANON_WITH_AES_128_CBC_SHA256] = "TLS_DH_ANON_WITH_AES_128_CBC_SHA256", + [TLS_DH_ANON_WITH_AES_256_CBC_SHA256] = "TLS_DH_ANON_WITH_AES_256_CBC_SHA256", + [TLS_DHE_DSS_WITH_3DES_EDE_CBC_RMD] = "TLS_DHE_DSS_WITH_3DES_EDE_CBC_RMD", + [TLS_DHE_DSS_WITH_AES_128_CBC_RMD] = "TLS_DHE_DSS_WITH_AES_128_CBC_RMD", + [TLS_DHE_DSS_WITH_AES_256_CBC_RMD] = "TLS_DHE_DSS_WITH_AES_256_CBC_RMD", + [TLS_DHE_RSA_WITH_3DES_EDE_CBC_RMD] = "TLS_DHE_RSA_WITH_3DES_EDE_CBC_RMD", + [TLS_DHE_RSA_WITH_AES_128_CBC_RMD] = "TLS_DHE_RSA_WITH_AES_128_CBC_RMD", + [TLS_DHE_RSA_WITH_AES_256_CBC_RMD] = "TLS_DHE_RSA_WITH_AES_256_CBC_RMD", + [TLS_RSA_WITH_3DES_EDE_CBC_RMD] = "TLS_RSA_WITH_3DES_EDE_CBC_RMD", + [TLS_RSA_WITH_AES_128_CBC_RMD] = "TLS_RSA_WITH_AES_128_CBC_RMD", + [TLS_RSA_WITH_AES_256_CBC_RMD] = "TLS_RSA_WITH_AES_256_CBC_RMD", + [TLS_GOSTR341094_WITH_28147_CNT_IMIT] = "TLS_GOSTR341094_WITH_28147_CNT_IMIT", + [TLS_GOSTR341001_WITH_28147_CNT_IMIT] = "TLS_GOSTR341001_WITH_28147_CNT_IMIT", + [TLS_GOSTR341094_WITH_NULL_GOSTR3411] = "TLS_GOSTR341094_WITH_NULL_GOSTR3411", + [TLS_GOSTR341001_WITH_NULL_GOSTR3411] = "TLS_GOSTR341001_WITH_NULL_GOSTR3411", + [TLS_RSA_WITH_CAMELLIA_256_CBC_SHA] = "TLS_RSA_WITH_CAMELLIA_256_CBC_SHA", + [TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA] = "TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA", + [TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA] = "TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA", + [TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA] = "TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA", + [TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA] = "TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA", + [TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA] = "TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA", + [TLS_PSK_WITH_RC4_128_SHA] = "TLS_PSK_WITH_RC4_128_SHA", + [TLS_PSK_WITH_3DES_EDE_CBC_SHA] = "TLS_PSK_WITH_3DES_EDE_CBC_SHA", + [TLS_PSK_WITH_AES_128_CBC_SHA] = "TLS_PSK_WITH_AES_128_CBC_SHA", + [TLS_PSK_WITH_AES_256_CBC_SHA] = "TLS_PSK_WITH_AES_256_CBC_SHA", + [TLS_DHE_PSK_WITH_RC4_128_SHA] = "TLS_DHE_PSK_WITH_RC4_128_SHA", + [TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA] = "TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA", + [TLS_DHE_PSK_WITH_AES_128_CBC_SHA] = "TLS_DHE_PSK_WITH_AES_128_CBC_SHA", + [TLS_DHE_PSK_WITH_AES_256_CBC_SHA] = "TLS_DHE_PSK_WITH_AES_256_CBC_SHA", + [TLS_RSA_PSK_WITH_RC4_128_SHA] = "TLS_RSA_PSK_WITH_RC4_128_SHA", + [TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA] = "TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA", + [TLS_RSA_PSK_WITH_AES_128_CBC_SHA] = "TLS_RSA_PSK_WITH_AES_128_CBC_SHA", + [TLS_RSA_PSK_WITH_AES_256_CBC_SHA] = "TLS_RSA_PSK_WITH_AES_256_CBC_SHA", + [TLS_RSA_WITH_SEED_CBC_SHA] = "TLS_RSA_WITH_SEED_CBC_SHA", + [TLS_DH_DSS_WITH_SEED_CBC_SHA] = "TLS_DH_DSS_WITH_SEED_CBC_SHA", + [TLS_DH_RSA_WITH_SEED_CBC_SHA] = "TLS_DH_RSA_WITH_SEED_CBC_SHA", + [TLS_DHE_DSS_WITH_SEED_CBC_SHA] = "TLS_DHE_DSS_WITH_SEED_CBC_SHA", + [TLS_DHE_RSA_WITH_SEED_CBC_SHA] = "TLS_DHE_RSA_WITH_SEED_CBC_SHA", + [TLS_DH_ANON_WITH_SEED_CBC_SHA] = "TLS_DH_ANON_WITH_SEED_CBC_SHA", + [TLS_RSA_WITH_AES_128_GCM_SHA256] = "TLS_RSA_WITH_AES_128_GCM_SHA256", + [TLS_RSA_WITH_AES_256_GCM_SHA384] = "TLS_RSA_WITH_AES_256_GCM_SHA384", + [TLS_DHE_RSA_WITH_AES_128_GCM_SHA256] = "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", + [TLS_DHE_RSA_WITH_AES_256_GCM_SHA384] = 
"TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", + [TLS_DH_RSA_WITH_AES_128_GCM_SHA256] = "TLS_DH_RSA_WITH_AES_128_GCM_SHA256", + [TLS_DH_RSA_WITH_AES_256_GCM_SHA384] = "TLS_DH_RSA_WITH_AES_256_GCM_SHA384", + [TLS_DHE_DSS_WITH_AES_128_GCM_SHA256] = "TLS_DHE_DSS_WITH_AES_128_GCM_SHA256", + [TLS_DHE_DSS_WITH_AES_256_GCM_SHA384] = "TLS_DHE_DSS_WITH_AES_256_GCM_SHA384", + [TLS_DH_DSS_WITH_AES_128_GCM_SHA256] = "TLS_DH_DSS_WITH_AES_128_GCM_SHA256", + [TLS_DH_DSS_WITH_AES_256_GCM_SHA384] = "TLS_DH_DSS_WITH_AES_256_GCM_SHA384", + [TLS_DH_ANON_WITH_AES_128_GCM_SHA256] = "TLS_DH_ANON_WITH_AES_128_GCM_SHA256", + [TLS_DH_ANON_WITH_AES_256_GCM_SHA384] = "TLS_DH_ANON_WITH_AES_256_GCM_SHA384", + [TLS_PSK_WITH_AES_128_GCM_SHA256] = "TLS_PSK_WITH_AES_128_GCM_SHA256", + [TLS_PSK_WITH_AES_256_GCM_SHA384] = "TLS_PSK_WITH_AES_256_GCM_SHA384", + [TLS_DHE_PSK_WITH_AES_128_GCM_SHA256] = "TLS_DHE_PSK_WITH_AES_128_GCM_SHA256", + [TLS_DHE_PSK_WITH_AES_256_GCM_SHA384] = "TLS_DHE_PSK_WITH_AES_256_GCM_SHA384", + [TLS_RSA_PSK_WITH_AES_128_GCM_SHA256] = "TLS_RSA_PSK_WITH_AES_128_GCM_SHA256", + [TLS_RSA_PSK_WITH_AES_256_GCM_SHA384] = "TLS_RSA_PSK_WITH_AES_256_GCM_SHA384", + [TLS_PSK_WITH_AES_128_CBC_SHA256] = "TLS_PSK_WITH_AES_128_CBC_SHA256", + [TLS_PSK_WITH_AES_256_CBC_SHA384] = "TLS_PSK_WITH_AES_256_CBC_SHA384", + [TLS_PSK_WITH_NULL_SHA256] = "TLS_PSK_WITH_NULL_SHA256", + [TLS_PSK_WITH_NULL_SHA384] = "TLS_PSK_WITH_NULL_SHA384", + [TLS_DHE_PSK_WITH_AES_128_CBC_SHA256] = "TLS_DHE_PSK_WITH_AES_128_CBC_SHA256", + [TLS_DHE_PSK_WITH_AES_256_CBC_SHA384] = "TLS_DHE_PSK_WITH_AES_256_CBC_SHA384", + [TLS_DHE_PSK_WITH_NULL_SHA256] = "TLS_DHE_PSK_WITH_NULL_SHA256", + [TLS_DHE_PSK_WITH_NULL_SHA384] = "TLS_DHE_PSK_WITH_NULL_SHA384", + [TLS_RSA_PSK_WITH_AES_128_CBC_SHA256] = "TLS_RSA_PSK_WITH_AES_128_CBC_SHA256", + [TLS_RSA_PSK_WITH_AES_256_CBC_SHA384] = "TLS_RSA_PSK_WITH_AES_256_CBC_SHA384", + [TLS_RSA_PSK_WITH_NULL_SHA256] = "TLS_RSA_PSK_WITH_NULL_SHA256", + [TLS_RSA_PSK_WITH_NULL_SHA384] = "TLS_RSA_PSK_WITH_NULL_SHA384", + [TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256", + [TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256", + [TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256", + [TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256", + [TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256", + [TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_DH_ANON_WITH_CAMELLIA_128_CBC_SHA256", + [TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256", + [TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256", + [TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256", + [TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256", + [TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256", + [TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA256] = "TLS_DH_ANON_WITH_CAMELLIA_256_CBC_SHA256", + [TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256", + [TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384", + [TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256", + [TLS_AES_128_CCM_SHA256] = "TLS_AES_128_CCM_SHA256", + [TLS_AES_128_CCM_8_SHA256] = "TLS_AES_128_CCM_8_SHA256", + [TLS_CECPQ1_RSA_WITH_CHACHA20_POLY1305_SHA256] = "TLS_CECPQ1_RSA_WITH_CHACHA20_POLY1305_SHA256", + [TLS_CECPQ1_ECDSA_WITH_CHACHA20_POLY1305_SHA256] = 
"TLS_CECPQ1_ECDSA_WITH_CHACHA20_POLY1305_SHA256", + [TLS_CECPQ1_RSA_WITH_AES_256_GCM_SHA384] = "TLS_CECPQ1_RSA_WITH_AES_256_GCM_SHA384", + [TLS_CECPQ1_ECDSA_WITH_AES_256_GCM_SHA384] = "TLS_CECPQ1_ECDSA_WITH_AES_256_GCM_SHA384", + [TLS_FALLBACK_SCSV] = "TLS_FALLBACK_SCSV", + [TLS_ECDH_ECDSA_WITH_NULL_SHA] = "TLS_ECDH_ECDSA_WITH_NULL_SHA", + [TLS_ECDH_ECDSA_WITH_RC4_128_SHA] = "TLS_ECDH_ECDSA_WITH_RC4_128_SHA", + [TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA] = "TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA", + [TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA] = "TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA", + [TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA] = "TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA", + [TLS_ECDHE_ECDSA_WITH_NULL_SHA] = "TLS_ECDHE_ECDSA_WITH_NULL_SHA", + [TLS_ECDHE_ECDSA_WITH_RC4_128_SHA] = "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + [TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA] = "TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA", + [TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA] = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + [TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA] = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + [TLS_ECDH_RSA_WITH_NULL_SHA] = "TLS_ECDH_RSA_WITH_NULL_SHA", + [TLS_ECDH_RSA_WITH_RC4_128_SHA] = "TLS_ECDH_RSA_WITH_RC4_128_SHA", + [TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA] = "TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA", + [TLS_ECDH_RSA_WITH_AES_128_CBC_SHA] = "TLS_ECDH_RSA_WITH_AES_128_CBC_SHA", + [TLS_ECDH_RSA_WITH_AES_256_CBC_SHA] = "TLS_ECDH_RSA_WITH_AES_256_CBC_SHA", + [TLS_ECDHE_RSA_WITH_NULL_SHA] = "TLS_ECDHE_RSA_WITH_NULL_SHA", + [TLS_ECDHE_RSA_WITH_RC4_128_SHA] = "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + [TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA] = "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + [TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA] = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + [TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + [TLS_ECDH_ANON_WITH_NULL_SHA] = "TLS_ECDH_ANON_WITH_NULL_SHA", + [TLS_ECDH_ANON_WITH_RC4_128_SHA] = "TLS_ECDH_ANON_WITH_RC4_128_SHA", + [TLS_ECDH_ANON_WITH_3DES_EDE_CBC_SHA] = "TLS_ECDH_ANON_WITH_3DES_EDE_CBC_SHA", + [TLS_ECDH_ANON_WITH_AES_128_CBC_SHA] = "TLS_ECDH_ANON_WITH_AES_128_CBC_SHA", + [TLS_ECDH_ANON_WITH_AES_256_CBC_SHA] = "TLS_ECDH_ANON_WITH_AES_256_CBC_SHA", + [TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA] = "TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA", + [TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA] = "TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA", + [TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA] = "TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA", + [TLS_SRP_SHA_WITH_AES_128_CBC_SHA] = "TLS_SRP_SHA_WITH_AES_128_CBC_SHA", + [TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA] = "TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA", + [TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA] = "TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA", + [TLS_SRP_SHA_WITH_AES_256_CBC_SHA] = "TLS_SRP_SHA_WITH_AES_256_CBC_SHA", + [TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA] = "TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA", + [TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA] = "TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA", + [TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + [TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384] = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", + [TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256", + [TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384] = "TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384", + [TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + [TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384] = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", + [TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256", + [TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384] = 
"TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384", + [TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256] = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + [TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384] = "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + [TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256] = "TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256", + [TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384] = "TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384", + [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256] = "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + [TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384] = "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + [TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256] = "TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256", + [TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384] = "TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384", + [TLS_ECDHE_PSK_WITH_RC4_128_SHA] = "TLS_ECDHE_PSK_WITH_RC4_128_SHA", + [TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA] = "TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA", + [TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA] = "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA", + [TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA] = "TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA", + [TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256", + [TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384] = "TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384", + [TLS_ECDHE_PSK_WITH_NULL_SHA] = "TLS_ECDHE_PSK_WITH_NULL_SHA", + [TLS_ECDHE_PSK_WITH_NULL_SHA256] = "TLS_ECDHE_PSK_WITH_NULL_SHA256", + [TLS_ECDHE_PSK_WITH_NULL_SHA384] = "TLS_ECDHE_PSK_WITH_NULL_SHA384", + [TLS_RSA_WITH_ARIA_128_CBC_SHA256] = "TLS_RSA_WITH_ARIA_128_CBC_SHA256", + [TLS_RSA_WITH_ARIA_256_CBC_SHA384] = "TLS_RSA_WITH_ARIA_256_CBC_SHA384", + [TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256] = "TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256", + [TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384] = "TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384", + [TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256] = "TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256", + [TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384] = "TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384", + [TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256] = "TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256", + [TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384] = "TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384", + [TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256] = "TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256", + [TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384] = "TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384", + [TLS_DH_ANON_WITH_ARIA_128_CBC_SHA256] = "TLS_DH_ANON_WITH_ARIA_128_CBC_SHA256", + [TLS_DH_ANON_WITH_ARIA_256_CBC_SHA384] = "TLS_DH_ANON_WITH_ARIA_256_CBC_SHA384", + [TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256] = "TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256", + [TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384] = "TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384", + [TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256] = "TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256", + [TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384] = "TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384", + [TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256] = "TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256", + [TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384] = "TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384", + [TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256] = "TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256", + [TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384] = "TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384", + [TLS_RSA_WITH_ARIA_128_GCM_SHA256] = "TLS_RSA_WITH_ARIA_128_GCM_SHA256", + [TLS_RSA_WITH_ARIA_256_GCM_SHA384] = "TLS_RSA_WITH_ARIA_256_GCM_SHA384", + [TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256] = "TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256", + [TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384] = "TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384", + [TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256] = "TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256", + [TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384] = 
"TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384", + [TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256] = "TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256", + [TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384] = "TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384", + [TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256] = "TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256", + [TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384] = "TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384", + [TLS_DH_ANON_WITH_ARIA_128_GCM_SHA256] = "TLS_DH_ANON_WITH_ARIA_128_GCM_SHA256", + [TLS_DH_ANON_WITH_ARIA_256_GCM_SHA384] = "TLS_DH_ANON_WITH_ARIA_256_GCM_SHA384", + [TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256] = "TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256", + [TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384] = "TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384", + [TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256] = "TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256", + [TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384] = "TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384", + [TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256] = "TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256", + [TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384] = "TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384", + [TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256] = "TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256", + [TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384] = "TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384", + [TLS_PSK_WITH_ARIA_128_CBC_SHA256] = "TLS_PSK_WITH_ARIA_128_CBC_SHA256", + [TLS_PSK_WITH_ARIA_256_CBC_SHA384] = "TLS_PSK_WITH_ARIA_256_CBC_SHA384", + [TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256] = "TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256", + [TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384] = "TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384", + [TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256] = "TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256", + [TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384] = "TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384", + [TLS_PSK_WITH_ARIA_128_GCM_SHA256] = "TLS_PSK_WITH_ARIA_128_GCM_SHA256", + [TLS_PSK_WITH_ARIA_256_GCM_SHA384] = "TLS_PSK_WITH_ARIA_256_GCM_SHA384", + [TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256] = "TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256", + [TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384] = "TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384", + [TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256] = "TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256", + [TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384] = "TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384", + [TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256] = "TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256", + [TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384] = "TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384", + [TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256", + [TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384] = "TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384", + [TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256", + [TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384] = "TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384", + [TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256", + [TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384] = "TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384", + [TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256", + [TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384] = "TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384", + [TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256", + [TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384", + [TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256", + [TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384", + [TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256] = 
"TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256", + [TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384", + [TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256", + [TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384", + [TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256", + [TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384", + [TLS_DH_ANON_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_DH_ANON_WITH_CAMELLIA_128_GCM_SHA256", + [TLS_DH_ANON_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_DH_ANON_WITH_CAMELLIA_256_GCM_SHA384", + [TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256", + [TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384", + [TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256", + [TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384", + [TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256", + [TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384", + [TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256", + [TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384", + [TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256", + [TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384", + [TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256", + [TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384", + [TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256] = "TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256", + [TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384] = "TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384", + [TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256", + [TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384] = "TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384", + [TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256", + [TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384] = "TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384", + [TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256", + [TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384] = "TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384", + [TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256] = "TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256", + [TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384] = "TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384", + [TLS_RSA_WITH_AES_128_CCM] = "TLS_RSA_WITH_AES_128_CCM", + [TLS_RSA_WITH_AES_256_CCM] = "TLS_RSA_WITH_AES_256_CCM", + [TLS_DHE_RSA_WITH_AES_128_CCM] = "TLS_DHE_RSA_WITH_AES_128_CCM", + [TLS_DHE_RSA_WITH_AES_256_CCM] = "TLS_DHE_RSA_WITH_AES_256_CCM", + [TLS_RSA_WITH_AES_128_CCM_8] = "TLS_RSA_WITH_AES_128_CCM_8", + [TLS_RSA_WITH_AES_256_CCM_8] = "TLS_RSA_WITH_AES_256_CCM_8", + [TLS_DHE_RSA_WITH_AES_128_CCM_8] = "TLS_DHE_RSA_WITH_AES_128_CCM_8", + [TLS_DHE_RSA_WITH_AES_256_CCM_8] = "TLS_DHE_RSA_WITH_AES_256_CCM_8", + [TLS_PSK_WITH_AES_128_CCM] = "TLS_PSK_WITH_AES_128_CCM", + [TLS_PSK_WITH_AES_256_CCM] = "TLS_PSK_WITH_AES_256_CCM", + [TLS_DHE_PSK_WITH_AES_128_CCM] = "TLS_DHE_PSK_WITH_AES_128_CCM", + [TLS_DHE_PSK_WITH_AES_256_CCM] = "TLS_DHE_PSK_WITH_AES_256_CCM", + [TLS_PSK_WITH_AES_128_CCM_8] = "TLS_PSK_WITH_AES_128_CCM_8", + 
[TLS_PSK_WITH_AES_256_CCM_8] = "TLS_PSK_WITH_AES_256_CCM_8", + [TLS_PSK_DHE_WITH_AES_128_CCM_8] = "TLS_PSK_DHE_WITH_AES_128_CCM_8", + [TLS_PSK_DHE_WITH_AES_256_CCM_8] = "TLS_PSK_DHE_WITH_AES_256_CCM_8", + [TLS_ECDHE_ECDSA_WITH_AES_128_CCM] = "TLS_ECDHE_ECDSA_WITH_AES_128_CCM", + [TLS_ECDHE_ECDSA_WITH_AES_256_CCM] = "TLS_ECDHE_ECDSA_WITH_AES_256_CCM", + [TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8] = "TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8", + [TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8] = "TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8", + [TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256_OLD] = "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256_OLD", + [TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256_OLD] = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256_OLD", + [TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256_OLD] = "TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256_OLD", + [TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256] = "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", + [TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256] = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", + [TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256] = "TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256", + [TLS_PSK_WITH_CHACHA20_POLY1305_SHA256] = "TLS_PSK_WITH_CHACHA20_POLY1305_SHA256", + [TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256] = "TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256", + [TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256] = "TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256", + [TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256] = "TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256", + [TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256] = "TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256", + [TLS_ECDHE_PSK_WITH_AES_256_GCM_SHA384] = "TLS_ECDHE_PSK_WITH_AES_256_GCM_SHA384", + [TLS_ECDHE_PSK_WITH_AES_128_CCM_8_SHA256] = "TLS_ECDHE_PSK_WITH_AES_128_CCM_8_SHA256", + [TLS_ECDHE_PSK_WITH_AES_128_CCM_SHA256] = "TLS_ECDHE_PSK_WITH_AES_128_CCM_SHA256", + [SSL_RSA_FIPS_WITH_DES_CBC_SHA] = "SSL_RSA_FIPS_WITH_DES_CBC_SHA", + [SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA] = "SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA", + [SSL_RSA_FIPS_WITH_DES_CBC_SHA_2] = "SSL_RSA_FIPS_WITH_DES_CBC_SHA_2", + [SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA_2] = "SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA_2", + [SSL_RSA_WITH_RC2_CBC_MD5] = "SSL_RSA_WITH_RC2_CBC_MD5", + [SSL_RSA_WITH_IDEA_CBC_MD5] = "SSL_RSA_WITH_IDEA_CBC_MD5", + [SSL_RSA_WITH_DES_CBC_MD5] = "SSL_RSA_WITH_DES_CBC_MD5", + [SSL_RSA_WITH_3DES_EDE_CBC_MD5] = "SSL_RSA_WITH_3DES_EDE_CBC_MD5", + [TLS_EMPTY_RENEGOTIATION_INFO_SCSV] = "TLS_EMPTY_RENEGOTIATION_INFO_SCSV", + } &default=function(i: count):string { return fmt("unknown-%d", i); }; + +} diff --git a/scripts/base/protocols/ssl/ct-list.bro b/scripts/base/protocols/ssl/ct-list.zeek similarity index 100% rename from scripts/base/protocols/ssl/ct-list.bro rename to scripts/base/protocols/ssl/ct-list.zeek diff --git a/scripts/base/protocols/ssl/files.bro b/scripts/base/protocols/ssl/files.bro deleted file mode 100644 index ae13147d8e..0000000000 --- a/scripts/base/protocols/ssl/files.bro +++ /dev/null @@ -1,149 +0,0 @@ -@load ./main -@load base/utils/conn-ids -@load base/frameworks/files -@load base/files/x509 - -module SSL; - -export { - redef record Info += { - ## Chain of certificates offered by the server to validate its - ## complete signing chain. - cert_chain: vector of Files::Info &optional; - - ## An ordered vector of all certificate file unique IDs for the - ## certificates offered by the server. - cert_chain_fuids: vector of string &optional &log; - - ## Chain of certificates offered by the client to validate its - ## complete signing chain. 
- client_cert_chain: vector of Files::Info &optional; - - ## An ordered vector of all certificate file unique IDs for the - ## certificates offered by the client. - client_cert_chain_fuids: vector of string &optional &log; - - ## Subject of the X.509 certificate offered by the server. - subject: string &log &optional; - - ## Subject of the signer of the X.509 certificate offered by the - ## server. - issuer: string &log &optional; - - ## Subject of the X.509 certificate offered by the client. - client_subject: string &log &optional; - - ## Subject of the signer of the X.509 certificate offered by the - ## client. - client_issuer: string &log &optional; - - ## Current number of certificates seen from either side. Used - ## to create file handles. - server_depth: count &default=0; - client_depth: count &default=0; - }; - - ## Default file handle provider for SSL. - global get_file_handle: function(c: connection, is_orig: bool): string; - - ## Default file describer for SSL. - global describe_file: function(f: fa_file): string; -} - -function get_file_handle(c: connection, is_orig: bool): string - { - # Unused. File handles are generated in the analyzer. - return ""; - } - -function describe_file(f: fa_file): string - { - if ( f$source != "SSL" || ! f?$info || ! f$info?$x509 || ! f$info$x509?$certificate ) - return ""; - - # It is difficult to reliably describe a certificate - especially since - # we do not know when this function is called (hence, if the data structures - # are already populated). - # - # Just return a bit of our connection information and hope that that is good enough. - for ( cid, c in f$conns ) - { - if ( c?$ssl ) - { - return cat(c$id$resp_h, ":", c$id$resp_p); - } - } - - return cat("Serial: ", f$info$x509$certificate$serial, " Subject: ", - f$info$x509$certificate$subject, " Issuer: ", - f$info$x509$certificate$issuer); - } - -event bro_init() &priority=5 - { - Files::register_protocol(Analyzer::ANALYZER_SSL, - [$get_file_handle = SSL::get_file_handle, - $describe = SSL::describe_file]); - - Files::register_protocol(Analyzer::ANALYZER_DTLS, - [$get_file_handle = SSL::get_file_handle, - $describe = SSL::describe_file]); - } - -event file_sniff(f: fa_file, meta: fa_metadata) &priority=5 - { - if ( |f$conns| != 1 ) - return; - - if ( ! f?$info || ! f$info?$mime_type ) - return; - - if ( ! ( f$info$mime_type == "application/x-x509-ca-cert" || f$info$mime_type == "application/x-x509-user-cert" - || f$info$mime_type == "application/pkix-cert" ) ) - return; - - local c: connection; - - for ( cid, c in f$conns ) - { - if ( ! c?$ssl ) - return; - } - - if ( ! 
c$ssl?$cert_chain ) - { - c$ssl$cert_chain = vector(); - c$ssl$client_cert_chain = vector(); - c$ssl$cert_chain_fuids = string_vec(); - c$ssl$client_cert_chain_fuids = string_vec(); - } - - if ( f$is_orig ) - { - c$ssl$client_cert_chain += f$info; - c$ssl$client_cert_chain_fuids += f$id; - } - else - { - c$ssl$cert_chain += f$info; - c$ssl$cert_chain_fuids += f$id; - } - } - -event ssl_established(c: connection) &priority=6 - { - # update subject and issuer information - if ( c$ssl?$cert_chain && |c$ssl$cert_chain| > 0 && - c$ssl$cert_chain[0]?$x509 ) - { - c$ssl$subject = c$ssl$cert_chain[0]$x509$certificate$subject; - c$ssl$issuer = c$ssl$cert_chain[0]$x509$certificate$issuer; - } - - if ( c$ssl?$client_cert_chain && |c$ssl$client_cert_chain| > 0 && - c$ssl$client_cert_chain[0]?$x509 ) - { - c$ssl$client_subject = c$ssl$client_cert_chain[0]$x509$certificate$subject; - c$ssl$client_issuer = c$ssl$client_cert_chain[0]$x509$certificate$issuer; - } - } diff --git a/scripts/base/protocols/ssl/files.zeek b/scripts/base/protocols/ssl/files.zeek new file mode 100644 index 0000000000..fd3080b47d --- /dev/null +++ b/scripts/base/protocols/ssl/files.zeek @@ -0,0 +1,149 @@ +@load ./main +@load base/utils/conn-ids +@load base/frameworks/files +@load base/files/x509 + +module SSL; + +export { + redef record Info += { + ## Chain of certificates offered by the server to validate its + ## complete signing chain. + cert_chain: vector of Files::Info &optional; + + ## An ordered vector of all certificate file unique IDs for the + ## certificates offered by the server. + cert_chain_fuids: vector of string &optional &log; + + ## Chain of certificates offered by the client to validate its + ## complete signing chain. + client_cert_chain: vector of Files::Info &optional; + + ## An ordered vector of all certificate file unique IDs for the + ## certificates offered by the client. + client_cert_chain_fuids: vector of string &optional &log; + + ## Subject of the X.509 certificate offered by the server. + subject: string &log &optional; + + ## Subject of the signer of the X.509 certificate offered by the + ## server. + issuer: string &log &optional; + + ## Subject of the X.509 certificate offered by the client. + client_subject: string &log &optional; + + ## Subject of the signer of the X.509 certificate offered by the + ## client. + client_issuer: string &log &optional; + + ## Current number of certificates seen from either side. Used + ## to create file handles. + server_depth: count &default=0; + client_depth: count &default=0; + }; + + ## Default file handle provider for SSL. + global get_file_handle: function(c: connection, is_orig: bool): string; + + ## Default file describer for SSL. + global describe_file: function(f: fa_file): string; +} + +function get_file_handle(c: connection, is_orig: bool): string + { + # Unused. File handles are generated in the analyzer. + return ""; + } + +function describe_file(f: fa_file): string + { + if ( f$source != "SSL" || ! f?$info || ! f$info?$x509 || ! f$info$x509?$certificate ) + return ""; + + # It is difficult to reliably describe a certificate - especially since + # we do not know when this function is called (hence, if the data structures + # are already populated). + # + # Just return a bit of our connection information and hope that that is good enough. 
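Usage sketch (illustrative only, not a hunk of this patch): once ssl_established fires, the subject and issuer fields that files.zeek adds to SSL::Info above are already populated, because a handler at default priority runs after the &priority=6 handler later in this file. The handler body below is an assumption about how a user script might consume them; only the field and event names come from this file.

    event ssl_established(c: connection)
        {
        if ( c?$ssl && c$ssl?$subject )
            print fmt("%s:%s presented certificate subject %s", c$id$resp_h, c$id$resp_p, c$ssl$subject);
        }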
+ for ( cid, c in f$conns ) + { + if ( c?$ssl ) + { + return cat(c$id$resp_h, ":", c$id$resp_p); + } + } + + return cat("Serial: ", f$info$x509$certificate$serial, " Subject: ", + f$info$x509$certificate$subject, " Issuer: ", + f$info$x509$certificate$issuer); + } + +event zeek_init() &priority=5 + { + Files::register_protocol(Analyzer::ANALYZER_SSL, + [$get_file_handle = SSL::get_file_handle, + $describe = SSL::describe_file]); + + Files::register_protocol(Analyzer::ANALYZER_DTLS, + [$get_file_handle = SSL::get_file_handle, + $describe = SSL::describe_file]); + } + +event file_sniff(f: fa_file, meta: fa_metadata) &priority=5 + { + if ( |f$conns| != 1 ) + return; + + if ( ! f?$info || ! f$info?$mime_type ) + return; + + if ( ! ( f$info$mime_type == "application/x-x509-ca-cert" || f$info$mime_type == "application/x-x509-user-cert" + || f$info$mime_type == "application/pkix-cert" ) ) + return; + + local c: connection; + + for ( cid, c in f$conns ) + { + if ( ! c?$ssl ) + return; + } + + if ( ! c$ssl?$cert_chain ) + { + c$ssl$cert_chain = vector(); + c$ssl$client_cert_chain = vector(); + c$ssl$cert_chain_fuids = string_vec(); + c$ssl$client_cert_chain_fuids = string_vec(); + } + + if ( f$is_orig ) + { + c$ssl$client_cert_chain += f$info; + c$ssl$client_cert_chain_fuids += f$id; + } + else + { + c$ssl$cert_chain += f$info; + c$ssl$cert_chain_fuids += f$id; + } + } + +event ssl_established(c: connection) &priority=6 + { + # update subject and issuer information + if ( c$ssl?$cert_chain && |c$ssl$cert_chain| > 0 && + c$ssl$cert_chain[0]?$x509 ) + { + c$ssl$subject = c$ssl$cert_chain[0]$x509$certificate$subject; + c$ssl$issuer = c$ssl$cert_chain[0]$x509$certificate$issuer; + } + + if ( c$ssl?$client_cert_chain && |c$ssl$client_cert_chain| > 0 && + c$ssl$client_cert_chain[0]?$x509 ) + { + c$ssl$client_subject = c$ssl$client_cert_chain[0]$x509$certificate$subject; + c$ssl$client_issuer = c$ssl$client_cert_chain[0]$x509$certificate$issuer; + } + } diff --git a/scripts/base/protocols/ssl/main.bro b/scripts/base/protocols/ssl/main.bro deleted file mode 100644 index 8abb6e1d3f..0000000000 --- a/scripts/base/protocols/ssl/main.bro +++ /dev/null @@ -1,403 +0,0 @@ -##! Base SSL analysis script. This script logs information about the SSL/TLS -##! handshaking and encryption establishment process. - -@load base/frameworks/notice/weird -@load ./consts - -module SSL; - -export { - redef enum Log::ID += { LOG }; - - ## The record type which contains the fields of the SSL log. - type Info: record { - ## Time when the SSL connection was first detected. - ts: time &log; - ## Unique ID for the connection. - uid: string &log; - ## The connection's 4-tuple of endpoint addresses/ports. - id: conn_id &log; - ## Numeric SSL/TLS version that the server chose. - version_num: count &optional; - ## SSL/TLS version that the server chose. - version: string &log &optional; - ## SSL/TLS cipher suite that the server chose. - cipher: string &log &optional; - ## Elliptic curve the server chose when using ECDH/ECDHE. - curve: string &log &optional; - ## Value of the Server Name Indicator SSL/TLS extension. It - ## indicates the server name that the client was requesting. - server_name: string &log &optional; - ## Session ID offered by the client for session resumption. - ## Not used for logging. - session_id: string &optional; - ## Flag to indicate if the session was resumed reusing - ## the key material exchanged in an earlier connection. 
- resumed: bool &log &default=F; - ## Flag to indicate if we saw a non-empty session ticket being - ## sent by the client using an empty session ID. This value - ## is used to determine if a session is being resumed. It's - ## not logged. - client_ticket_empty_session_seen: bool &default=F; - ## Flag to indicate if we saw a client key exchange message sent - ## by the client. This value is used to determine if a session - ## is being resumed. It's not logged. - client_key_exchange_seen: bool &default=F; - ## Count to track if the server already sent an application data - ## packet for TLS 1.3. Used to track when a session was established. - server_appdata: count &default=0; - ## Flag to track if the client already sent an application data - ## packet for TLS 1.3. Used to track when a session was established. - client_appdata: bool &default=F; - - ## Last alert that was seen during the connection. - last_alert: string &log &optional; - ## Next protocol the server chose using the application layer - ## next protocol extension, if present. - next_protocol: string &log &optional; - - ## The analyzer ID used for the analyzer instance attached - ## to each connection. It is not used for logging since it's a - ## meaningless arbitrary number. - analyzer_id: count &optional; - - ## Flag to indicate if this ssl session has been established - ## successfully, or if it was aborted during the handshake. - established: bool &log &default=F; - ## Flag to indicate if this record already has been logged, to - ## prevent duplicates. - logged: bool &default=F; - }; - - ## The default root CA bundle. By default, the mozilla-ca-list.bro - ## script sets this to Mozilla's root CA list. - const root_certs: table[string] of string = {} &redef; - - ## The record type which contains the field for the Certificate - ## Transparency log bundle. - type CTInfo: record { - ## Description of the Log - description: string; - ## Operator of the Log - operator: string; - ## Public key of the Log. - key: string; - ## Maximum merge delay of the Log - maximum_merge_delay: count; - ## URL of the Log - url: string; - }; - - ## The Certificate Transparency log bundle. By default, the ct-list.bro - ## script sets this to the current list of known logs. Entries - ## are indexed by (binary) log-id. - option ct_logs: table[string] of CTInfo = {}; - - ## If true, detach the SSL analyzer from the connection to prevent - ## continuing to process encrypted traffic. Helps with performance - ## (especially with large file transfers). - option disable_analyzer_after_detection = T; - - ## Delays an SSL record for a specific token: the record will not be - ## logged as long as the token exists or until 15 seconds elapses. - global delay_log: function(info: Info, token: string); - - ## Undelays an SSL record for a previously inserted token, allowing the - ## record to be logged. - global undelay_log: function(info: Info, token: string); - - ## Event that can be handled to access the SSL - ## record as it is sent on to the logging framework. - global log_ssl: event(rec: Info); - - # Hook that can be used to perform actions right before the log record - # is written. - global ssl_finishing: hook(c: connection); -} - -redef record connection += { - ssl: Info &optional; -}; - -redef record Info += { - # Adding a string "token" to this set will cause the SSL script - # to delay logging the record until either the token has been removed or - # the record has been delayed. 
- delay_tokens: set[string] &optional; -}; - -const ssl_ports = { - 443/tcp, 563/tcp, 585/tcp, 614/tcp, 636/tcp, - 989/tcp, 990/tcp, 992/tcp, 993/tcp, 995/tcp, 5223/tcp -}; - -# There are no well known DTLS ports at the moment. Let's -# just add 443 for now for good measure - who knows :) -const dtls_ports = { 443/udp }; - -redef likely_server_ports += { ssl_ports, dtls_ports }; - -event bro_init() &priority=5 - { - Log::create_stream(SSL::LOG, [$columns=Info, $ev=log_ssl, $path="ssl"]); - Analyzer::register_for_ports(Analyzer::ANALYZER_SSL, ssl_ports); - Analyzer::register_for_ports(Analyzer::ANALYZER_DTLS, dtls_ports); - } - -function set_session(c: connection) - { - if ( ! c?$ssl ) - c$ssl = [$ts=network_time(), $uid=c$uid, $id=c$id]; - } - -function delay_log(info: Info, token: string) - { - if ( ! info?$delay_tokens ) - info$delay_tokens = set(); - add info$delay_tokens[token]; - } - -function undelay_log(info: Info, token: string) - { - if ( info?$delay_tokens && token in info$delay_tokens ) - delete info$delay_tokens[token]; - } - -function log_record(info: Info) - { - if ( info$logged ) - return; - - if ( ! info?$delay_tokens || |info$delay_tokens| == 0 ) - { - Log::write(SSL::LOG, info); - info$logged = T; - } - else - { - when ( |info$delay_tokens| == 0 ) - { - log_record(info); - } - timeout 15secs - { - # We are just going to log the record anyway. - delete info$delay_tokens; - log_record(info); - } - } - } - -# remove_analyzer flag is used to prevent disabling analyzer for finished -# connections. -function finish(c: connection, remove_analyzer: bool) - { - log_record(c$ssl); - if ( remove_analyzer && disable_analyzer_after_detection && c?$ssl && c$ssl?$analyzer_id ) - { - disable_analyzer(c$id, c$ssl$analyzer_id); - delete c$ssl$analyzer_id; - } - } - -event ssl_client_hello(c: connection, version: count, record_version: count, possible_ts: time, client_random: string, session_id: string, ciphers: index_vec, comp_methods: index_vec) &priority=5 - { - set_session(c); - - # Save the session_id if there is one set. - if ( |session_id| > 0 && session_id != /^\x00{32}$/ ) - { - c$ssl$session_id = bytestring_to_hexstr(session_id); - c$ssl$client_ticket_empty_session_seen = F; - } - } - -event ssl_server_hello(c: connection, version: count, record_version: count, possible_ts: time, server_random: string, session_id: string, cipher: count, comp_method: count) &priority=5 - { - set_session(c); - - # If it is already filled, we saw a supported_versions extensions which overrides this. - if ( ! 
c$ssl?$version_num ) - { - c$ssl$version_num = version; - c$ssl$version = version_strings[version]; - } - c$ssl$cipher = cipher_desc[cipher]; - - if ( c$ssl?$session_id && c$ssl$session_id == bytestring_to_hexstr(session_id) ) - c$ssl$resumed = T; - } - -event ssl_extension_supported_versions(c: connection, is_orig: bool, versions: index_vec) - { - if ( is_orig || |versions| != 1 ) - return; - - set_session(c); - - c$ssl$version_num = versions[0]; - c$ssl$version = version_strings[versions[0]]; - } - -event ssl_ecdh_server_params(c: connection, curve: count, point: string) &priority=5 - { - set_session(c); - - c$ssl$curve = ec_curves[curve]; - } - -event ssl_extension_key_share(c: connection, is_orig: bool, curves: index_vec) - { - if ( is_orig || |curves| != 1 ) - return; - - set_session(c); - c$ssl$curve = ec_curves[curves[0]]; - } - -event ssl_extension_server_name(c: connection, is_orig: bool, names: string_vec) &priority=5 - { - set_session(c); - - if ( is_orig && |names| > 0 ) - { - c$ssl$server_name = names[0]; - if ( |names| > 1 ) - Reporter::conn_weird("SSL_many_server_names", c, cat(names)); - } - } - -event ssl_extension_application_layer_protocol_negotiation(c: connection, is_orig: bool, protocols: string_vec) - { - set_session(c); - - if ( is_orig ) - return; - - if ( |protocols| > 0 ) - c$ssl$next_protocol = protocols[0]; - } - -event ssl_handshake_message(c: connection, is_orig: bool, msg_type: count, length: count) &priority=5 - { - set_session(c); - - if ( is_orig && msg_type == SSL::CLIENT_KEY_EXCHANGE ) - c$ssl$client_key_exchange_seen = T; - } - -# Extension event is fired _before_ the respective client or server hello. -# Important for client_ticket_empty_session_seen. -event ssl_extension(c: connection, is_orig: bool, code: count, val: string) &priority=5 - { - set_session(c); - - if ( is_orig && SSL::extensions[code] == "SessionTicket TLS" && |val| > 0 ) - # In this case, we might have an empty ID. Set back to F in client_hello event - # if it is not empty after all. - c$ssl$client_ticket_empty_session_seen = T; - } - -event ssl_change_cipher_spec(c: connection, is_orig: bool) &priority=5 - { - set_session(c); - - if ( is_orig && c$ssl$client_ticket_empty_session_seen && ! c$ssl$client_key_exchange_seen ) - c$ssl$resumed = T; - } - -event ssl_alert(c: connection, is_orig: bool, level: count, desc: count) &priority=5 - { - set_session(c); - - c$ssl$last_alert = alert_descriptions[desc]; - } - -event ssl_established(c: connection) &priority=7 - { - set_session(c); - c$ssl$established = T; - } - -event ssl_established(c: connection) &priority=20 - { - hook ssl_finishing(c); - } - -event ssl_established(c: connection) &priority=-5 - { - finish(c, T); - } - -event connection_state_remove(c: connection) &priority=20 - { - if ( c?$ssl && ! c$ssl$logged ) - hook ssl_finishing(c); - } - -event connection_state_remove(c: connection) &priority=-5 - { - if ( c?$ssl ) - # called in case a SSL connection that has not been established terminates - finish(c, F); - } - -event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=5 - { - if ( atype == Analyzer::ANALYZER_SSL || atype == Analyzer::ANALYZER_DTLS ) - { - set_session(c); - c$ssl$analyzer_id = aid; - } - } - -event ssl_plaintext_data(c: connection, is_orig: bool, record_version: count, content_type: count, length: count) &priority=5 - { - set_session(c); - - if ( ! 
c$ssl?$version || c$ssl$established || content_type != APPLICATION_DATA ) - return; - - if ( c$ssl$version_num/0xFF != 0x7F && c$ssl$version_num != TLSv13 ) - { - local wi = Weird::Info($ts=network_time(), $name="ssl_early_application_data", $uid=c$uid, $id=c$id); - Weird::weird(wi); - return; - } - - if ( is_orig ) - { - c$ssl$client_appdata = T; - return; - } - - if ( c$ssl$client_appdata && c$ssl$server_appdata == 0 ) - { - # something went wrong in the handshake here - we can't say if it was established. Just abort. - return; - } - else if ( ! c$ssl$client_appdata && c$ssl$server_appdata == 0 ) - { - c$ssl$server_appdata = 1; - return; - } - else if ( c$ssl$client_appdata && c$ssl$server_appdata == 1 ) - { - # wait for one more packet before we believe it was established. This one could be an encrypted alert. - c$ssl$server_appdata = 2; - return; - } - else if ( c$ssl$client_appdata && c$ssl$server_appdata == 2 ) - { - set_ssl_established(c); - event ssl_established(c); - return; - } - } - -event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, - reason: string) &priority=5 - { - if ( c?$ssl && ( atype == Analyzer::ANALYZER_SSL || atype == Analyzer::ANALYZER_DTLS ) ) - finish(c, T); - } diff --git a/scripts/base/protocols/ssl/main.zeek b/scripts/base/protocols/ssl/main.zeek new file mode 100644 index 0000000000..cc656d44b8 --- /dev/null +++ b/scripts/base/protocols/ssl/main.zeek @@ -0,0 +1,403 @@ +##! Base SSL analysis script. This script logs information about the SSL/TLS +##! handshaking and encryption establishment process. + +@load base/frameworks/notice/weird +@load ./consts + +module SSL; + +export { + redef enum Log::ID += { LOG }; + + ## The record type which contains the fields of the SSL log. + type Info: record { + ## Time when the SSL connection was first detected. + ts: time &log; + ## Unique ID for the connection. + uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. + id: conn_id &log; + ## Numeric SSL/TLS version that the server chose. + version_num: count &optional; + ## SSL/TLS version that the server chose. + version: string &log &optional; + ## SSL/TLS cipher suite that the server chose. + cipher: string &log &optional; + ## Elliptic curve the server chose when using ECDH/ECDHE. + curve: string &log &optional; + ## Value of the Server Name Indicator SSL/TLS extension. It + ## indicates the server name that the client was requesting. + server_name: string &log &optional; + ## Session ID offered by the client for session resumption. + ## Not used for logging. + session_id: string &optional; + ## Flag to indicate if the session was resumed reusing + ## the key material exchanged in an earlier connection. + resumed: bool &log &default=F; + ## Flag to indicate if we saw a non-empty session ticket being + ## sent by the client using an empty session ID. This value + ## is used to determine if a session is being resumed. It's + ## not logged. + client_ticket_empty_session_seen: bool &default=F; + ## Flag to indicate if we saw a client key exchange message sent + ## by the client. This value is used to determine if a session + ## is being resumed. It's not logged. + client_key_exchange_seen: bool &default=F; + ## Count to track if the server already sent an application data + ## packet for TLS 1.3. Used to track when a session was established. + server_appdata: count &default=0; + ## Flag to track if the client already sent an application data + ## packet for TLS 1.3. Used to track when a session was established. 
+ client_appdata: bool &default=F; + + ## Last alert that was seen during the connection. + last_alert: string &log &optional; + ## Next protocol the server chose using the application layer + ## next protocol extension, if present. + next_protocol: string &log &optional; + + ## The analyzer ID used for the analyzer instance attached + ## to each connection. It is not used for logging since it's a + ## meaningless arbitrary number. + analyzer_id: count &optional; + + ## Flag to indicate if this ssl session has been established + ## successfully, or if it was aborted during the handshake. + established: bool &log &default=F; + ## Flag to indicate if this record already has been logged, to + ## prevent duplicates. + logged: bool &default=F; + }; + + ## The default root CA bundle. By default, the mozilla-ca-list.zeek + ## script sets this to Mozilla's root CA list. + const root_certs: table[string] of string = {} &redef; + + ## The record type which contains the field for the Certificate + ## Transparency log bundle. + type CTInfo: record { + ## Description of the Log + description: string; + ## Operator of the Log + operator: string; + ## Public key of the Log. + key: string; + ## Maximum merge delay of the Log + maximum_merge_delay: count; + ## URL of the Log + url: string; + }; + + ## The Certificate Transparency log bundle. By default, the ct-list.zeek + ## script sets this to the current list of known logs. Entries + ## are indexed by (binary) log-id. + option ct_logs: table[string] of CTInfo = {}; + + ## If true, detach the SSL analyzer from the connection to prevent + ## continuing to process encrypted traffic. Helps with performance + ## (especially with large file transfers). + option disable_analyzer_after_detection = T; + + ## Delays an SSL record for a specific token: the record will not be + ## logged as long as the token exists or until 15 seconds elapses. + global delay_log: function(info: Info, token: string); + + ## Undelays an SSL record for a previously inserted token, allowing the + ## record to be logged. + global undelay_log: function(info: Info, token: string); + + ## Event that can be handled to access the SSL + ## record as it is sent on to the logging framework. + global log_ssl: event(rec: Info); + + # Hook that can be used to perform actions right before the log record + # is written. + global ssl_finishing: hook(c: connection); +} + +redef record connection += { + ssl: Info &optional; +}; + +redef record Info += { + # Adding a string "token" to this set will cause the SSL script + # to delay logging the record until either the token has been removed or + # the record has been delayed. + delay_tokens: set[string] &optional; +}; + +const ssl_ports = { + 443/tcp, 563/tcp, 585/tcp, 614/tcp, 636/tcp, + 989/tcp, 990/tcp, 992/tcp, 993/tcp, 995/tcp, 5223/tcp +}; + +# There are no well known DTLS ports at the moment. Let's +# just add 443 for now for good measure - who knows :) +const dtls_ports = { 443/udp }; + +redef likely_server_ports += { ssl_ports, dtls_ports }; + +event zeek_init() &priority=5 + { + Log::create_stream(SSL::LOG, [$columns=Info, $ev=log_ssl, $path="ssl"]); + Analyzer::register_for_ports(Analyzer::ANALYZER_SSL, ssl_ports); + Analyzer::register_for_ports(Analyzer::ANALYZER_DTLS, dtls_ports); + } + +function set_session(c: connection) + { + if ( ! c?$ssl ) + c$ssl = [$ts=network_time(), $uid=c$uid, $id=c$id]; + } + +function delay_log(info: Info, token: string) + { + if ( ! 
info?$delay_tokens ) + info$delay_tokens = set(); + add info$delay_tokens[token]; + } + +function undelay_log(info: Info, token: string) + { + if ( info?$delay_tokens && token in info$delay_tokens ) + delete info$delay_tokens[token]; + } + +function log_record(info: Info) + { + if ( info$logged ) + return; + + if ( ! info?$delay_tokens || |info$delay_tokens| == 0 ) + { + Log::write(SSL::LOG, info); + info$logged = T; + } + else + { + when ( |info$delay_tokens| == 0 ) + { + log_record(info); + } + timeout 15secs + { + # We are just going to log the record anyway. + delete info$delay_tokens; + log_record(info); + } + } + } + +# remove_analyzer flag is used to prevent disabling analyzer for finished +# connections. +function finish(c: connection, remove_analyzer: bool) + { + log_record(c$ssl); + if ( remove_analyzer && disable_analyzer_after_detection && c?$ssl && c$ssl?$analyzer_id ) + { + disable_analyzer(c$id, c$ssl$analyzer_id); + delete c$ssl$analyzer_id; + } + } + +event ssl_client_hello(c: connection, version: count, record_version: count, possible_ts: time, client_random: string, session_id: string, ciphers: index_vec, comp_methods: index_vec) &priority=5 + { + set_session(c); + + # Save the session_id if there is one set. + if ( |session_id| > 0 && session_id != /^\x00{32}$/ ) + { + c$ssl$session_id = bytestring_to_hexstr(session_id); + c$ssl$client_ticket_empty_session_seen = F; + } + } + +event ssl_server_hello(c: connection, version: count, record_version: count, possible_ts: time, server_random: string, session_id: string, cipher: count, comp_method: count) &priority=5 + { + set_session(c); + + # If it is already filled, we saw a supported_versions extensions which overrides this. + if ( ! c$ssl?$version_num ) + { + c$ssl$version_num = version; + c$ssl$version = version_strings[version]; + } + c$ssl$cipher = cipher_desc[cipher]; + + if ( c$ssl?$session_id && c$ssl$session_id == bytestring_to_hexstr(session_id) ) + c$ssl$resumed = T; + } + +event ssl_extension_supported_versions(c: connection, is_orig: bool, versions: index_vec) + { + if ( is_orig || |versions| != 1 ) + return; + + set_session(c); + + c$ssl$version_num = versions[0]; + c$ssl$version = version_strings[versions[0]]; + } + +event ssl_ecdh_server_params(c: connection, curve: count, point: string) &priority=5 + { + set_session(c); + + c$ssl$curve = ec_curves[curve]; + } + +event ssl_extension_key_share(c: connection, is_orig: bool, curves: index_vec) + { + if ( is_orig || |curves| != 1 ) + return; + + set_session(c); + c$ssl$curve = ec_curves[curves[0]]; + } + +event ssl_extension_server_name(c: connection, is_orig: bool, names: string_vec) &priority=5 + { + set_session(c); + + if ( is_orig && |names| > 0 ) + { + c$ssl$server_name = names[0]; + if ( |names| > 1 ) + Reporter::conn_weird("SSL_many_server_names", c, cat(names)); + } + } + +event ssl_extension_application_layer_protocol_negotiation(c: connection, is_orig: bool, protocols: string_vec) + { + set_session(c); + + if ( is_orig ) + return; + + if ( |protocols| > 0 ) + c$ssl$next_protocol = protocols[0]; + } + +event ssl_handshake_message(c: connection, is_orig: bool, msg_type: count, length: count) &priority=5 + { + set_session(c); + + if ( is_orig && msg_type == SSL::CLIENT_KEY_EXCHANGE ) + c$ssl$client_key_exchange_seen = T; + } + +# Extension event is fired _before_ the respective client or server hello. +# Important for client_ticket_empty_session_seen. 
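Usage sketch for the delay_log/undelay_log API declared earlier in this file (illustrative only, not a hunk of this patch): a user script can hold back the ssl.log entry until some asynchronous work finishes. The token string and the lookup_addr call are assumptions for the example; the 15 second fallback in log_record above still flushes the record if the lookup never returns.

    event ssl_established(c: connection)
        {
        # Runs at default priority, before the &priority=-5 handler that writes the log.
        SSL::delay_log(c$ssl, "dns-name");

        when ( local name = lookup_addr(c$id$resp_h) )
            {
            # ... use "name" here, e.g. attach it to a custom SSL::Info field ...
            SSL::undelay_log(c$ssl, "dns-name");
            }
        }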
+event ssl_extension(c: connection, is_orig: bool, code: count, val: string) &priority=5 + { + set_session(c); + + if ( is_orig && SSL::extensions[code] == "SessionTicket TLS" && |val| > 0 ) + # In this case, we might have an empty ID. Set back to F in client_hello event + # if it is not empty after all. + c$ssl$client_ticket_empty_session_seen = T; + } + +event ssl_change_cipher_spec(c: connection, is_orig: bool) &priority=5 + { + set_session(c); + + if ( is_orig && c$ssl$client_ticket_empty_session_seen && ! c$ssl$client_key_exchange_seen ) + c$ssl$resumed = T; + } + +event ssl_alert(c: connection, is_orig: bool, level: count, desc: count) &priority=5 + { + set_session(c); + + c$ssl$last_alert = alert_descriptions[desc]; + } + +event ssl_established(c: connection) &priority=7 + { + set_session(c); + c$ssl$established = T; + } + +event ssl_established(c: connection) &priority=20 + { + hook ssl_finishing(c); + } + +event ssl_established(c: connection) &priority=-5 + { + finish(c, T); + } + +event connection_state_remove(c: connection) &priority=20 + { + if ( c?$ssl && ! c$ssl$logged ) + hook ssl_finishing(c); + } + +event connection_state_remove(c: connection) &priority=-5 + { + if ( c?$ssl ) + # called in case a SSL connection that has not been established terminates + finish(c, F); + } + +event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=5 + { + if ( atype == Analyzer::ANALYZER_SSL || atype == Analyzer::ANALYZER_DTLS ) + { + set_session(c); + c$ssl$analyzer_id = aid; + } + } + +event ssl_plaintext_data(c: connection, is_orig: bool, record_version: count, content_type: count, length: count) &priority=5 + { + set_session(c); + + if ( ! c$ssl?$version || c$ssl$established || content_type != APPLICATION_DATA ) + return; + + if ( c$ssl$version_num/0xFF != 0x7F && c$ssl$version_num != TLSv13 ) + { + local wi = Weird::Info($ts=network_time(), $name="ssl_early_application_data", $uid=c$uid, $id=c$id); + Weird::weird(wi); + return; + } + + if ( is_orig ) + { + c$ssl$client_appdata = T; + return; + } + + if ( c$ssl$client_appdata && c$ssl$server_appdata == 0 ) + { + # something went wrong in the handshake here - we can't say if it was established. Just abort. + return; + } + else if ( ! c$ssl$client_appdata && c$ssl$server_appdata == 0 ) + { + c$ssl$server_appdata = 1; + return; + } + else if ( c$ssl$client_appdata && c$ssl$server_appdata == 1 ) + { + # wait for one more packet before we believe it was established. This one could be an encrypted alert. 
+ c$ssl$server_appdata = 2; + return; + } + else if ( c$ssl$client_appdata && c$ssl$server_appdata == 2 ) + { + set_ssl_established(c); + event ssl_established(c); + return; + } + } + +event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, + reason: string) &priority=5 + { + if ( c?$ssl && ( atype == Analyzer::ANALYZER_SSL || atype == Analyzer::ANALYZER_DTLS ) ) + finish(c, T); + } diff --git a/scripts/base/protocols/ssl/mozilla-ca-list.bro b/scripts/base/protocols/ssl/mozilla-ca-list.zeek similarity index 100% rename from scripts/base/protocols/ssl/mozilla-ca-list.bro rename to scripts/base/protocols/ssl/mozilla-ca-list.zeek diff --git a/scripts/base/protocols/syslog/__load__.bro b/scripts/base/protocols/syslog/__load__.zeek similarity index 100% rename from scripts/base/protocols/syslog/__load__.bro rename to scripts/base/protocols/syslog/__load__.zeek diff --git a/scripts/base/protocols/syslog/consts.bro b/scripts/base/protocols/syslog/consts.zeek similarity index 100% rename from scripts/base/protocols/syslog/consts.bro rename to scripts/base/protocols/syslog/consts.zeek diff --git a/scripts/base/protocols/syslog/main.bro b/scripts/base/protocols/syslog/main.bro deleted file mode 100644 index 6e74760225..0000000000 --- a/scripts/base/protocols/syslog/main.bro +++ /dev/null @@ -1,60 +0,0 @@ -##! Core script support for logging syslog messages. This script represents -##! one syslog message as one logged record. - -@load ./consts - -module Syslog; - -export { - redef enum Log::ID += { LOG }; - - ## The record type which contains the fields of the syslog log. - type Info: record { - ## Timestamp when the syslog message was seen. - ts: time &log; - ## Unique ID for the connection. - uid: string &log; - ## The connection's 4-tuple of endpoint addresses/ports. - id: conn_id &log; - ## Protocol over which the message was seen. - proto: transport_proto &log; - ## Syslog facility for the message. - facility: string &log; - ## Syslog severity for the message. - severity: string &log; - ## The plain text message. - message: string &log; - }; -} - -redef record connection += { - syslog: Info &optional; -}; - -const ports = { 514/udp }; -redef likely_server_ports += { ports }; - -event bro_init() &priority=5 - { - Log::create_stream(Syslog::LOG, [$columns=Info, $path="syslog"]); - Analyzer::register_for_ports(Analyzer::ANALYZER_SYSLOG, ports); - } - -event syslog_message(c: connection, facility: count, severity: count, msg: string) &priority=5 - { - local info: Info; - info$ts=network_time(); - info$uid=c$uid; - info$id=c$id; - info$proto=get_port_transport_proto(c$id$resp_p); - info$facility=facility_codes[facility]; - info$severity=severity_codes[severity]; - info$message=msg; - - c$syslog = info; - } - -event syslog_message(c: connection, facility: count, severity: count, msg: string) &priority=-5 - { - Log::write(Syslog::LOG, c$syslog); - } diff --git a/scripts/base/protocols/syslog/main.zeek b/scripts/base/protocols/syslog/main.zeek new file mode 100644 index 0000000000..6b8cc7fb77 --- /dev/null +++ b/scripts/base/protocols/syslog/main.zeek @@ -0,0 +1,60 @@ +##! Core script support for logging syslog messages. This script represents +##! one syslog message as one logged record. + +@load ./consts + +module Syslog; + +export { + redef enum Log::ID += { LOG }; + + ## The record type which contains the fields of the syslog log. + type Info: record { + ## Timestamp when the syslog message was seen. + ts: time &log; + ## Unique ID for the connection. 
+ uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. + id: conn_id &log; + ## Protocol over which the message was seen. + proto: transport_proto &log; + ## Syslog facility for the message. + facility: string &log; + ## Syslog severity for the message. + severity: string &log; + ## The plain text message. + message: string &log; + }; +} + +redef record connection += { + syslog: Info &optional; +}; + +const ports = { 514/udp }; +redef likely_server_ports += { ports }; + +event zeek_init() &priority=5 + { + Log::create_stream(Syslog::LOG, [$columns=Info, $path="syslog"]); + Analyzer::register_for_ports(Analyzer::ANALYZER_SYSLOG, ports); + } + +event syslog_message(c: connection, facility: count, severity: count, msg: string) &priority=5 + { + local info: Info; + info$ts=network_time(); + info$uid=c$uid; + info$id=c$id; + info$proto=get_port_transport_proto(c$id$resp_p); + info$facility=facility_codes[facility]; + info$severity=severity_codes[severity]; + info$message=msg; + + c$syslog = info; + } + +event syslog_message(c: connection, facility: count, severity: count, msg: string) &priority=-5 + { + Log::write(Syslog::LOG, c$syslog); + } diff --git a/scripts/base/protocols/tunnels/__load__.bro b/scripts/base/protocols/tunnels/__load__.zeek similarity index 100% rename from scripts/base/protocols/tunnels/__load__.bro rename to scripts/base/protocols/tunnels/__load__.zeek diff --git a/scripts/base/protocols/xmpp/__load__.bro b/scripts/base/protocols/xmpp/__load__.zeek similarity index 100% rename from scripts/base/protocols/xmpp/__load__.bro rename to scripts/base/protocols/xmpp/__load__.zeek diff --git a/scripts/base/protocols/xmpp/main.bro b/scripts/base/protocols/xmpp/main.bro deleted file mode 100644 index 3d7a4cbc37..0000000000 --- a/scripts/base/protocols/xmpp/main.bro +++ /dev/null @@ -1,11 +0,0 @@ - -module XMPP; - -const ports = { 5222/tcp, 5269/tcp }; -redef likely_server_ports += { ports }; - -event bro_init() &priority=5 - { - Analyzer::register_for_ports(Analyzer::ANALYZER_XMPP, ports); - } - diff --git a/scripts/base/protocols/xmpp/main.zeek b/scripts/base/protocols/xmpp/main.zeek new file mode 100644 index 0000000000..587432561f --- /dev/null +++ b/scripts/base/protocols/xmpp/main.zeek @@ -0,0 +1,11 @@ + +module XMPP; + +const ports = { 5222/tcp, 5269/tcp }; +redef likely_server_ports += { ports }; + +event zeek_init() &priority=5 + { + Analyzer::register_for_ports(Analyzer::ANALYZER_XMPP, ports); + } + diff --git a/scripts/base/utils/active-http.bro b/scripts/base/utils/active-http.bro deleted file mode 100644 index 8243a7a9a9..0000000000 --- a/scripts/base/utils/active-http.bro +++ /dev/null @@ -1,127 +0,0 @@ -##! A module for performing active HTTP requests and -##! getting the reply at runtime. - -@load ./exec - -module ActiveHTTP; - -export { - ## The default timeout for HTTP requests. - option default_max_time = 1min; - - ## The default HTTP method/verb to use for requests. - option default_method = "GET"; - - type Response: record { - ## Numeric response code from the server. - code: count; - ## String response message from the server. - msg: string; - ## Full body of the response. - body: string &optional; - ## All headers returned by the server. - headers: table[string] of string &optional; - }; - - type Request: record { - ## The URL being requested. - url: string; - ## The HTTP method/verb to use for the request. - method: string &default=default_method; - ## Data to send to the server in the client body. 
Keep in - ## mind that you will probably need to set the *method* field - ## to "POST" or "PUT". - client_data: string &optional; - - # Arbitrary headers to pass to the server. Some headers - # will be included by libCurl. - #custom_headers: table[string] of string &optional; - - ## Timeout for the request. - max_time: interval &default=default_max_time; - ## Additional curl command line arguments. Be very careful - ## with this option since shell injection could take place - ## if careful handling of untrusted data is not applied. - addl_curl_args: string &optional; - }; - - ## Perform an HTTP request according to the - ## :bro:type:`ActiveHTTP::Request` record. This is an asynchronous - ## function and must be called within a "when" statement. - ## - ## req: A record instance representing all options for an HTTP request. - ## - ## Returns: A record with the full response message. - global request: function(req: ActiveHTTP::Request): ActiveHTTP::Response; -} - -function request2curl(r: Request, bodyfile: string, headersfile: string): string - { - local cmd = fmt("curl -s -g -o %s -D %s -X %s", - safe_shell_quote(bodyfile), - safe_shell_quote(headersfile), - safe_shell_quote(r$method)); - - cmd = fmt("%s -m %.0f", cmd, r$max_time); - - if ( r?$client_data ) - cmd = fmt("%s -d @-", cmd); - - if ( r?$addl_curl_args ) - cmd = fmt("%s %s", cmd, r$addl_curl_args); - - cmd = fmt("%s %s", cmd, safe_shell_quote(r$url)); - # Make sure file will exist even if curl did not write one. - cmd = fmt("%s && touch %s", cmd, safe_shell_quote(bodyfile)); - return cmd; - } - -function request(req: Request): ActiveHTTP::Response - { - local tmpfile = "/tmp/bro-activehttp-" + unique_id(""); - local bodyfile = fmt("%s_body", tmpfile); - local headersfile = fmt("%s_headers", tmpfile); - - local cmd = request2curl(req, bodyfile, headersfile); - local stdin_data = req?$client_data ? req$client_data : ""; - - local resp: Response; - resp$code = 0; - resp$msg = ""; - resp$body = ""; - resp$headers = table(); - return when ( local result = Exec::run([$cmd=cmd, $stdin=stdin_data, $read_files=set(bodyfile, headersfile)]) ) - { - # If there is no response line then nothing else will work either. - if ( ! (result?$files && headersfile in result$files) ) - { - Reporter::error(fmt("There was a failure when requesting \"%s\" with ActiveHTTP.", req$url)); - return resp; - } - - local headers = result$files[headersfile]; - for ( i in headers ) - { - # The reply is the first line. - if ( i == 0 ) - { - local response_line = split_string_n(headers[0], /[[:blank:]]+/, F, 2); - if ( |response_line| != 3 ) - return resp; - - resp$code = to_count(response_line[1]); - resp$msg = response_line[2]; - resp$body = join_string_vec(result$files[bodyfile], ""); - } - else - { - local line = headers[i]; - local h = split_string1(line, /:/); - if ( |h| != 2 ) - next; - resp$headers[h[0]] = sub_bytes(h[1], 0, |h[1]|-1); - } - } - return resp; - } - } diff --git a/scripts/base/utils/active-http.zeek b/scripts/base/utils/active-http.zeek new file mode 100644 index 0000000000..4f84ebca71 --- /dev/null +++ b/scripts/base/utils/active-http.zeek @@ -0,0 +1,127 @@ +##! A module for performing active HTTP requests and +##! getting the reply at runtime. + +@load ./exec + +module ActiveHTTP; + +export { + ## The default timeout for HTTP requests. + option default_max_time = 1min; + + ## The default HTTP method/verb to use for requests. + option default_method = "GET"; + + type Response: record { + ## Numeric response code from the server. 
+ code: count; + ## String response message from the server. + msg: string; + ## Full body of the response. + body: string &optional; + ## All headers returned by the server. + headers: table[string] of string &optional; + }; + + type Request: record { + ## The URL being requested. + url: string; + ## The HTTP method/verb to use for the request. + method: string &default=default_method; + ## Data to send to the server in the client body. Keep in + ## mind that you will probably need to set the *method* field + ## to "POST" or "PUT". + client_data: string &optional; + + # Arbitrary headers to pass to the server. Some headers + # will be included by libCurl. + #custom_headers: table[string] of string &optional; + + ## Timeout for the request. + max_time: interval &default=default_max_time; + ## Additional curl command line arguments. Be very careful + ## with this option since shell injection could take place + ## if careful handling of untrusted data is not applied. + addl_curl_args: string &optional; + }; + + ## Perform an HTTP request according to the + ## :zeek:type:`ActiveHTTP::Request` record. This is an asynchronous + ## function and must be called within a "when" statement. + ## + ## req: A record instance representing all options for an HTTP request. + ## + ## Returns: A record with the full response message. + global request: function(req: ActiveHTTP::Request): ActiveHTTP::Response; +} + +function request2curl(r: Request, bodyfile: string, headersfile: string): string + { + local cmd = fmt("curl -s -g -o %s -D %s -X %s", + safe_shell_quote(bodyfile), + safe_shell_quote(headersfile), + safe_shell_quote(r$method)); + + cmd = fmt("%s -m %.0f", cmd, r$max_time); + + if ( r?$client_data ) + cmd = fmt("%s -d @-", cmd); + + if ( r?$addl_curl_args ) + cmd = fmt("%s %s", cmd, r$addl_curl_args); + + cmd = fmt("%s %s", cmd, safe_shell_quote(r$url)); + # Make sure file will exist even if curl did not write one. + cmd = fmt("%s && touch %s", cmd, safe_shell_quote(bodyfile)); + return cmd; + } + +function request(req: Request): ActiveHTTP::Response + { + local tmpfile = "/tmp/zeek-activehttp-" + unique_id(""); + local bodyfile = fmt("%s_body", tmpfile); + local headersfile = fmt("%s_headers", tmpfile); + + local cmd = request2curl(req, bodyfile, headersfile); + local stdin_data = req?$client_data ? req$client_data : ""; + + local resp: Response; + resp$code = 0; + resp$msg = ""; + resp$body = ""; + resp$headers = table(); + return when ( local result = Exec::run([$cmd=cmd, $stdin=stdin_data, $read_files=set(bodyfile, headersfile)]) ) + { + # If there is no response line then nothing else will work either. + if ( ! (result?$files && headersfile in result$files) ) + { + Reporter::error(fmt("There was a failure when requesting \"%s\" with ActiveHTTP.", req$url)); + return resp; + } + + local headers = result$files[headersfile]; + for ( i in headers ) + { + # The reply is the first line. 
+ if ( i == 0 ) + { + local response_line = split_string_n(headers[0], /[[:blank:]]+/, F, 2); + if ( |response_line| != 3 ) + return resp; + + resp$code = to_count(response_line[1]); + resp$msg = response_line[2]; + resp$body = join_string_vec(result$files[bodyfile], ""); + } + else + { + local line = headers[i]; + local h = split_string1(line, /:/); + if ( |h| != 2 ) + next; + resp$headers[h[0]] = sub_bytes(h[1], 0, |h[1]|-1); + } + } + return resp; + } + } diff --git a/scripts/base/utils/addrs.bro b/scripts/base/utils/addrs.bro deleted file mode 100644 index 9d165936ef..0000000000 --- a/scripts/base/utils/addrs.bro +++ /dev/null @@ -1,166 +0,0 @@ -##! Functions for parsing and manipulating IP and MAC addresses. - -# Regular expressions for matching IP addresses in strings. -const ipv4_addr_regex = /[[:digit:]]{1,3}\.[[:digit:]]{1,3}\.[[:digit:]]{1,3}\.[[:digit:]]{1,3}/; -const ipv6_8hex_regex = /([0-9A-Fa-f]{1,4}:){7}[0-9A-Fa-f]{1,4}/; -const ipv6_compressed_hex_regex = /(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)::(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)/; -const ipv6_hex4dec_regex = /(([0-9A-Fa-f]{1,4}:){6,6})([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)/; -const ipv6_compressed_hex4dec_regex = /(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)::(([0-9A-Fa-f]{1,4}:)*)([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)/; - -# These are commented out until patterns can be constructed this way at init time. -#const ipv6_addr_regex = ipv6_8hex_regex | -# ipv6_compressed_hex_regex | -# ipv6_hex4dec_regex | -# ipv6_compressed_hex4dec_regex; -#const ip_addr_regex = ipv4_addr_regex | ipv6_addr_regex; - -const ipv6_addr_regex = - /([0-9A-Fa-f]{1,4}:){7}[0-9A-Fa-f]{1,4}/ | - /(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)::(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)/ | # IPv6 Compressed Hex - /(([0-9A-Fa-f]{1,4}:){6,6})([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)/ | # 6Hex4Dec - /(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)::(([0-9A-Fa-f]{1,4}:)*)([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)/; # CompressedHex4Dec - -const ip_addr_regex = - /[[:digit:]]{1,3}\.[[:digit:]]{1,3}\.[[:digit:]]{1,3}\.[[:digit:]]{1,3}/ | - /([0-9A-Fa-f]{1,4}:){7}[0-9A-Fa-f]{1,4}/ | - /(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)::(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)/ | # IPv6 Compressed Hex - /(([0-9A-Fa-f]{1,4}:){6,6})([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)/ | # 6Hex4Dec - /(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)::(([0-9A-Fa-f]{1,4}:)*)([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)/; # CompressedHex4Dec - -## Checks if all elements of a string array are a valid octet value. -## -## octets: an array of strings to check for valid octet values. -## -## Returns: T if every element is between 0 and 255, inclusive, else F. -function has_valid_octets(octets: string_vec): bool - { - local num = 0; - for ( i in octets ) - { - num = to_count(octets[i]); - if ( num < 0 || 255 < num ) - return F; - } - return T; - } - -## Checks if a string appears to be a valid IPv4 or IPv6 address. -## -## ip_str: the string to check for valid IP formatting. -## -## Returns: T if the string is a valid IPv4 or IPv6 address format. 
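The asynchronous ActiveHTTP::request function re-added above can only be driven from a when-statement. A hedged usage sketch, with a placeholder URL:

@load base/utils/active-http

event zeek_init()
	{
	when ( local resp = ActiveHTTP::request([$url="http://example.com/"]) )
		{
		print fmt("HTTP %d %s", resp$code, resp$msg);
		if ( resp?$body )
			print fmt("body: %d bytes", |resp$body|);
		}
	}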
-function is_valid_ip(ip_str: string): bool - { - local octets: string_vec; - if ( ip_str == ipv4_addr_regex ) - { - octets = split_string(ip_str, /\./); - if ( |octets| != 4 ) - return F; - - return has_valid_octets(octets); - } - else if ( ip_str == ipv6_addr_regex ) - { - if ( ip_str == ipv6_hex4dec_regex || - ip_str == ipv6_compressed_hex4dec_regex ) - { - # the regexes for hybrid IPv6-IPv4 address formats don't for valid - # octets within the IPv4 part, so do that now - octets = split_string(ip_str, /\./); - if ( |octets| != 4 ) - return F; - - # get rid of remaining IPv6 stuff in first octet - local tmp = split_string(octets[0], /:/); - octets[0] = tmp[|tmp| - 1]; - - return has_valid_octets(octets); - } - else - { - # pure IPv6 address formats that only use hex digits don't need - # any additional checks -- the regexes should be complete - return T; - } - } - return F; - } - -## Extracts all IP (v4 or v6) address strings from a given string. -## -## input: a string that may contain an IP address anywhere within it. -## -## Returns: an array containing all valid IP address strings found in *input*. -function find_ip_addresses(input: string): string_array &deprecated - { - local parts = split_string_all(input, ip_addr_regex); - local output: string_array; - - for ( i in parts ) - { - if ( i % 2 == 1 && is_valid_ip(parts[i]) ) - output[|output|] = parts[i]; - } - return output; - } - -## Extracts all IP (v4 or v6) address strings from a given string. -## -## input: a string that may contain an IP address anywhere within it. -## -## Returns: an array containing all valid IP address strings found in *input*. -function extract_ip_addresses(input: string): string_vec - { - local parts = split_string_all(input, ip_addr_regex); - local output: string_vec; - - for ( i in parts ) - { - if ( i % 2 == 1 && is_valid_ip(parts[i]) ) - output += parts[i]; - } - return output; - } - -## Returns the string representation of an IP address suitable for inclusion -## in a URI. For IPv4, this does no special formatting, but for IPv6, the -## address is included in square brackets. -## -## a: the address to make suitable for URI inclusion. -## -## Returns: the string representation of the address suitable for URI inclusion. -function addr_to_uri(a: addr): string - { - if ( is_v4_addr(a) ) - return fmt("%s", a); - else - return fmt("[%s]", a); - } - -## Given a string, extracts the hex digits and returns a MAC address in -## the format: 00:a0:32:d7:81:8f. If the string doesn't contain 12 or 16 hex -## digits, an empty string is returned. -## -## a: the string to normalize. -## -## Returns: a normalized MAC address, or an empty string in the case of an error. -function normalize_mac(a: string): string - { - local result = to_lower(gsub(a, /[^A-Fa-f0-9]/, "")); - local octets: string_vec; - - if ( |result| == 12 ) - { - octets = str_split(result, vector(2, 4, 6, 8, 10)); - return fmt("%s:%s:%s:%s:%s:%s", octets[1], octets[2], octets[3], octets[4], octets[5], octets[6]); - } - - if ( |result| == 16 ) - { - octets = str_split(result, vector(2, 4, 6, 8, 10, 12, 14)); - return fmt("%s:%s:%s:%s:%s:%s:%s:%s", octets[1], octets[2], octets[3], octets[4], octets[5], octets[6], octets[7], octets[8]); - } - - return ""; - } diff --git a/scripts/base/utils/addrs.zeek b/scripts/base/utils/addrs.zeek new file mode 100644 index 0000000000..a4a4ef98e2 --- /dev/null +++ b/scripts/base/utils/addrs.zeek @@ -0,0 +1,141 @@ +##! Functions for parsing and manipulating IP and MAC addresses. 
+ +# Regular expressions for matching IP addresses in strings. + +const ipv4_decim = /[0-9]{1}|[0-9]{2}|0[0-9]{2}|1[0-9]{2}|2[0-4][0-9]|25[0-5]/; + +const ipv4_addr_regex = ipv4_decim & /\./ & ipv4_decim & /\./ & ipv4_decim & /\./ & ipv4_decim; + +const ipv6_hextet = /[0-9A-Fa-f]{1,4}/; + +const ipv6_8hex_regex = /([0-9A-Fa-f]{1,4}:){7}/ & ipv6_hextet; + +const ipv6_hex4dec_regex = /([0-9A-Fa-f]{1,4}:){6}/ & ipv4_addr_regex; + +const ipv6_compressed_lead_hextets0 = /::([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){0,6})?/; + +const ipv6_compressed_lead_hextets1 = /[0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){0}::([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){0,5})?/; + +const ipv6_compressed_lead_hextets2 = /[0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){1}::([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){0,4})?/; + +const ipv6_compressed_lead_hextets3 = /[0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){2}::([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){0,3})?/; + +const ipv6_compressed_lead_hextets4 = /[0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){3}::([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){0,2})?/; + +const ipv6_compressed_lead_hextets5 = /[0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){4}::([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){0,1})?/; + +const ipv6_compressed_lead_hextets6 = /[0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){5}::([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){0,0})?/; + +const ipv6_compressed_lead_hextets7 = /[0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){6}::/; + +const ipv6_compressed_hex_regex = ipv6_compressed_lead_hextets0 | + ipv6_compressed_lead_hextets1 | + ipv6_compressed_lead_hextets2 | + ipv6_compressed_lead_hextets3 | + ipv6_compressed_lead_hextets4 | + ipv6_compressed_lead_hextets5 | + ipv6_compressed_lead_hextets6 | + ipv6_compressed_lead_hextets7; + +const ipv6_compressed_hext4dec_lead_hextets0 = /::([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){0,4})?/ & ipv4_addr_regex; + +const ipv6_compressed_hext4dec_lead_hextets1 = /[0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){0}::([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){0,3})?/ & ipv4_addr_regex; + +const ipv6_compressed_hext4dec_lead_hextets2 = /[0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){1}::([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){0,2})?/ & ipv4_addr_regex; + +const ipv6_compressed_hext4dec_lead_hextets3 = /[0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){2}::([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){0,1})?/ & ipv4_addr_regex; + +const ipv6_compressed_hext4dec_lead_hextets4 = /[0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){3}::([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){0,0})?/ & ipv4_addr_regex; + +const ipv6_compressed_hext4dec_lead_hextets5 = /[0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){4}::/ & ipv4_addr_regex; + +const ipv6_compressed_hex4dec_regex = ipv6_compressed_hext4dec_lead_hextets0 | + ipv6_compressed_hext4dec_lead_hextets1 | + ipv6_compressed_hext4dec_lead_hextets2 | + ipv6_compressed_hext4dec_lead_hextets3 | + ipv6_compressed_hext4dec_lead_hextets4 | + ipv6_compressed_hext4dec_lead_hextets5; + +const ipv6_addr_regex = ipv6_8hex_regex | + ipv6_compressed_hex_regex | + ipv6_hex4dec_regex | + ipv6_compressed_hex4dec_regex; + +const ip_addr_regex = ipv4_addr_regex | ipv6_addr_regex; + +## Checks if all elements of a string array are a valid octet value. +## +## octets: an array of strings to check for valid octet values. +## +## Returns: T if every element is between 0 and 255, inclusive, else F. +function has_valid_octets(octets: string_vec): bool + { + local num = 0; + for ( i in octets ) + { + num = to_count(octets[i]); + if ( num < 0 || 255 < num ) + return F; + } + return T; + } + +## Extracts all IP (v4 or v6) address strings from a given string. 
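The rewritten addrs.zeek above builds ip_addr_regex out of smaller named constants using pattern concatenation (&) and alternation (|), something the old file's commented-out code could not do at parse time. A toy sketch of the same composition technique, with made-up patterns that are not part of this change:

const num_pat = /[0-9]+/;
const word_pat = /[A-Za-z]+/;
const word_dash_num = word_pat & /-/ & num_pat;   # concatenation
const word_or_num = word_pat | num_pat;           # alternation

event zeek_init()
	{
	print "abc-123" == word_dash_num;           # T (exact match)
	print (word_or_num in "see 42 here");       # T (embedded match)
	}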
+## +## input: a string that may contain an IP address anywhere within it. +## +## Returns: an array containing all valid IP address strings found in *input*. +function extract_ip_addresses(input: string): string_vec + { + local parts = split_string_all(input, ip_addr_regex); + local output: string_vec; + + for ( i in parts ) + { + if ( i % 2 == 1 && is_valid_ip(parts[i]) ) + output += parts[i]; + } + return output; + } + +## Returns the string representation of an IP address suitable for inclusion +## in a URI. For IPv4, this does no special formatting, but for IPv6, the +## address is included in square brackets. +## +## a: the address to make suitable for URI inclusion. +## +## Returns: the string representation of the address suitable for URI inclusion. +function addr_to_uri(a: addr): string + { + if ( is_v4_addr(a) ) + return fmt("%s", a); + else + return fmt("[%s]", a); + } + +## Given a string, extracts the hex digits and returns a MAC address in +## the format: 00:a0:32:d7:81:8f. If the string doesn't contain 12 or 16 hex +## digits, an empty string is returned. +## +## a: the string to normalize. +## +## Returns: a normalized MAC address, or an empty string in the case of an error. +function normalize_mac(a: string): string + { + local result = to_lower(gsub(a, /[^A-Fa-f0-9]/, "")); + local octets: string_vec; + + if ( |result| == 12 ) + { + octets = str_split(result, vector(2, 4, 6, 8, 10)); + return fmt("%s:%s:%s:%s:%s:%s", octets[1], octets[2], octets[3], octets[4], octets[5], octets[6]); + } + + if ( |result| == 16 ) + { + octets = str_split(result, vector(2, 4, 6, 8, 10, 12, 14)); + return fmt("%s:%s:%s:%s:%s:%s:%s:%s", octets[1], octets[2], octets[3], octets[4], octets[5], octets[6], octets[7], octets[8]); + } + + return ""; + } diff --git a/scripts/base/utils/conn-ids.bro b/scripts/base/utils/conn-ids.bro deleted file mode 100644 index 6601b665e5..0000000000 --- a/scripts/base/utils/conn-ids.bro +++ /dev/null @@ -1,38 +0,0 @@ -##! Simple functions for generating ASCII strings from connection IDs. - -module GLOBAL; - -export { - ## Takes a conn_id record and returns a string representation with the - ## general data flow appearing to be from the connection originator - ## on the left to the responder on the right. - global id_string: function(id: conn_id): string; - - ## Takes a conn_id record and returns a string representation with the - ## general data flow appearing to be from the connection responder - ## on the right to the originator on the left. - global reverse_id_string: function(id: conn_id): string; - - ## Calls :bro:id:`id_string` or :bro:id:`reverse_id_string` if the - ## second argument is T or F, respectively. - global directed_id_string: function(id: conn_id, is_orig: bool): string; -} - -function id_string(id: conn_id): string - { - return fmt("%s:%d > %s:%d", - id$orig_h, id$orig_p, - id$resp_h, id$resp_p); - } - -function reverse_id_string(id: conn_id): string - { - return fmt("%s:%d < %s:%d", - id$orig_h, id$orig_p, - id$resp_h, id$resp_p); - } - -function directed_id_string(id: conn_id, is_orig: bool): string - { - return is_orig ? id_string(id) : reverse_id_string(id); - } diff --git a/scripts/base/utils/conn-ids.zeek b/scripts/base/utils/conn-ids.zeek new file mode 100644 index 0000000000..b5d7fffd77 --- /dev/null +++ b/scripts/base/utils/conn-ids.zeek @@ -0,0 +1,38 @@ +##! Simple functions for generating ASCII strings from connection IDs. 
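A short sketch of how the helpers kept in the new addrs.zeek above might be called; the input strings and addresses are placeholders:

@load base/utils/addrs

event zeek_init()
	{
	# Pull every address-looking substring out of free text.
	local found = extract_ip_addresses("peers: 10.0.0.1 and 2001:db8::1");
	for ( i in found )
		print found[i];

	# Bracket IPv6 addresses for use inside URIs.
	print addr_to_uri([2001:db8::1]);          # "[2001:db8::1]"

	# Canonicalize MAC addresses to colon-separated lowercase octets.
	print normalize_mac("00-A0-32-D7-81-8F");  # "00:a0:32:d7:81:8f"
	}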
+ +module GLOBAL; + +export { + ## Takes a conn_id record and returns a string representation with the + ## general data flow appearing to be from the connection originator + ## on the left to the responder on the right. + global id_string: function(id: conn_id): string; + + ## Takes a conn_id record and returns a string representation with the + ## general data flow appearing to be from the connection responder + ## on the right to the originator on the left. + global reverse_id_string: function(id: conn_id): string; + + ## Calls :zeek:id:`id_string` or :zeek:id:`reverse_id_string` if the + ## second argument is T or F, respectively. + global directed_id_string: function(id: conn_id, is_orig: bool): string; +} + +function id_string(id: conn_id): string + { + return fmt("%s:%d > %s:%d", + id$orig_h, id$orig_p, + id$resp_h, id$resp_p); + } + +function reverse_id_string(id: conn_id): string + { + return fmt("%s:%d < %s:%d", + id$orig_h, id$orig_p, + id$resp_h, id$resp_p); + } + +function directed_id_string(id: conn_id, is_orig: bool): string + { + return is_orig ? id_string(id) : reverse_id_string(id); + } diff --git a/scripts/base/utils/dir.bro b/scripts/base/utils/dir.bro deleted file mode 100644 index eb5597a7b7..0000000000 --- a/scripts/base/utils/dir.bro +++ /dev/null @@ -1,65 +0,0 @@ -@load base/utils/exec -@load base/frameworks/reporter -@load base/utils/paths - -module Dir; - -export { - ## The default interval this module checks for files in directories when - ## using the :bro:see:`Dir::monitor` function. - option polling_interval = 30sec; - - ## Register a directory to monitor with a callback that is called - ## every time a previously unseen file is seen. If a file is deleted - ## and seen to be gone, then the file is available for being seen again - ## in the future. - ## - ## dir: The directory to monitor for files. - ## - ## callback: Callback that gets executed with each file name - ## that is found. Filenames are provided with the full path. - ## - ## poll_interval: An interval at which to check for new files. - global monitor: function(dir: string, callback: function(fname: string), - poll_interval: interval &default=polling_interval); -} - -event Dir::monitor_ev(dir: string, last_files: set[string], - callback: function(fname: string), - poll_interval: interval) - { - when ( local result = Exec::run([$cmd=fmt("ls -1 %s/", safe_shell_quote(dir))]) ) - { - if ( result$exit_code != 0 ) - { - Reporter::warning(fmt("Requested monitoring of non-existent directory (%s).", dir)); - return; - } - - local current_files: set[string] = set(); - local files: vector of string = vector(); - - if ( result?$stdout ) - files = result$stdout; - - for ( i in files ) - { - if ( files[i] !in last_files ) - callback(build_path_compressed(dir, files[i])); - add current_files[files[i]]; - } - - schedule poll_interval - { - Dir::monitor_ev(dir, current_files, callback, poll_interval) - }; - } - } - -function monitor(dir: string, callback: function(fname: string), - poll_interval: interval &default=polling_interval) - { - event Dir::monitor_ev(dir, set(), callback, poll_interval); - } - - diff --git a/scripts/base/utils/dir.zeek b/scripts/base/utils/dir.zeek new file mode 100644 index 0000000000..678e81d7ed --- /dev/null +++ b/scripts/base/utils/dir.zeek @@ -0,0 +1,65 @@ +@load base/utils/exec +@load base/frameworks/reporter +@load base/utils/paths + +module Dir; + +export { + ## The default interval this module checks for files in directories when + ## using the :zeek:see:`Dir::monitor` function. 
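For the conn-ids helpers renamed above, a brief sketch of typical use; connection_established is just one event that supplies a conn_id:

@load base/utils/conn-ids

event connection_established(c: connection)
	{
	print id_string(c$id);              # orig:port > resp:port
	print reverse_id_string(c$id);      # orig:port < resp:port
	print directed_id_string(c$id, F);  # picks one based on the flag
	}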
+ option polling_interval = 30sec; + + ## Register a directory to monitor with a callback that is called + ## every time a previously unseen file is seen. If a file is deleted + ## and seen to be gone, then the file is available for being seen again + ## in the future. + ## + ## dir: The directory to monitor for files. + ## + ## callback: Callback that gets executed with each file name + ## that is found. Filenames are provided with the full path. + ## + ## poll_interval: An interval at which to check for new files. + global monitor: function(dir: string, callback: function(fname: string), + poll_interval: interval &default=polling_interval); +} + +event Dir::monitor_ev(dir: string, last_files: set[string], + callback: function(fname: string), + poll_interval: interval) + { + when ( local result = Exec::run([$cmd=fmt("ls -1 %s/", safe_shell_quote(dir))]) ) + { + if ( result$exit_code != 0 ) + { + Reporter::warning(fmt("Requested monitoring of non-existent directory (%s).", dir)); + return; + } + + local current_files: set[string] = set(); + local files: vector of string = vector(); + + if ( result?$stdout ) + files = result$stdout; + + for ( i in files ) + { + if ( files[i] !in last_files ) + callback(build_path_compressed(dir, files[i])); + add current_files[files[i]]; + } + + schedule poll_interval + { + Dir::monitor_ev(dir, current_files, callback, poll_interval) + }; + } + } + +function monitor(dir: string, callback: function(fname: string), + poll_interval: interval &default=polling_interval) + { + event Dir::monitor_ev(dir, set(), callback, poll_interval); + } + + diff --git a/scripts/base/utils/directions-and-hosts.bro b/scripts/base/utils/directions-and-hosts.zeek similarity index 100% rename from scripts/base/utils/directions-and-hosts.bro rename to scripts/base/utils/directions-and-hosts.zeek diff --git a/scripts/base/utils/email.bro b/scripts/base/utils/email.zeek similarity index 100% rename from scripts/base/utils/email.bro rename to scripts/base/utils/email.zeek diff --git a/scripts/base/utils/exec.bro b/scripts/base/utils/exec.bro deleted file mode 100644 index 91053a1223..0000000000 --- a/scripts/base/utils/exec.bro +++ /dev/null @@ -1,195 +0,0 @@ -##! A module for executing external command line programs. - -@load base/frameworks/input - -module Exec; - -export { - type Command: record { - ## The command line to execute. Use care to avoid injection - ## attacks (i.e., if the command uses untrusted/variable data, - ## sanitize it with :bro:see:`safe_shell_quote`). - cmd: string; - ## Provide standard input to the program as a string. - stdin: string &default=""; - ## If additional files are required to be read in as part of the - ## output of the command they can be defined here. - read_files: set[string] &optional; - ## The unique id for tracking executors. - uid: string &default=unique_id(""); - }; - - type Result: record { - ## Exit code from the program. - exit_code: count &default=0; - ## True if the command was terminated with a signal. - signal_exit: bool &default=F; - ## Each line of standard output. - stdout: vector of string &optional; - ## Each line of standard error. - stderr: vector of string &optional; - ## If additional files were requested to be read in - ## the content of the files will be available here. - files: table[string] of string_vec &optional; - }; - - ## Function for running command line programs and getting - ## output. This is an asynchronous function which is meant - ## to be run with the `when` statement. - ## - ## cmd: The command to run. 
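A hedged usage sketch for Dir::monitor as defined above; the directory and poll interval are placeholders:

@load base/utils/dir

function on_new_file(fname: string)
	{
	# Invoked once per previously unseen file; fname includes the path.
	print fmt("new file: %s", fname);
	}

event zeek_init()
	{
	Dir::monitor("/tmp/watched", on_new_file, 10sec);
	}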
Use care to avoid injection attacks! - ## - ## Returns: A record representing the full results from the - ## external program execution. - global run: function(cmd: Command): Result; -} - -# Indexed by command uid. -global results: table[string] of Result; -global pending_commands: set[string]; -global pending_files: table[string] of set[string]; - -type OneLine: record { - s: string; - is_stderr: bool; -}; - -type FileLine: record { - s: string; -}; - -event Exec::line(description: Input::EventDescription, tpe: Input::Event, s: string, is_stderr: bool) - { - local result = results[description$name]; - if ( is_stderr ) - { - if ( ! result?$stderr ) - result$stderr = vector(s); - else - result$stderr += s; - } - else - { - if ( ! result?$stdout ) - result$stdout = vector(s); - else - result$stdout += s; - } - } - -event Exec::file_line(description: Input::EventDescription, tpe: Input::Event, s: string) - { - local parts = split_string1(description$name, /_/); - local name = parts[0]; - local track_file = parts[1]; - - local result = results[name]; - if ( ! result?$files ) - result$files = table(); - - if ( track_file !in result$files ) - result$files[track_file] = vector(s); - else - result$files[track_file] += s; - } - -event Input::end_of_data(orig_name: string, source:string) - { - local name = orig_name; - local parts = split_string1(name, /_/); - name = parts[0]; - - if ( name !in pending_commands || |parts| < 2 ) - return; - - local track_file = parts[1]; - - # If the file is empty, still add it to the result$files table. This is needed - # because it is expected that the file was read even if it was empty. - local result = results[name]; - if ( ! result?$files ) - result$files = table(); - - if ( track_file !in result$files ) - result$files[track_file] = vector(); - - Input::remove(orig_name); - - if ( name !in pending_files ) - delete pending_commands[name]; - else - { - delete pending_files[name][track_file]; - if ( |pending_files[name]| == 0 ) - delete pending_commands[name]; - system(fmt("rm %s", safe_shell_quote(track_file))); - } - } - -event InputRaw::process_finished(name: string, source:string, exit_code:count, signal_exit:bool) - { - if ( name !in pending_commands ) - return; - - # Upon the process exiting, the internal Raw reader code should take - # care of signalling that the stream needs to be disabled/removed. - #Input::remove(name); - results[name]$exit_code = exit_code; - results[name]$signal_exit = signal_exit; - - if ( name !in pending_files || |pending_files[name]| == 0 ) - # No extra files to read, command is done. 
- delete pending_commands[name]; - else - for ( read_file in pending_files[name] ) - Input::add_event([$source=fmt("%s", read_file), - $name=fmt("%s_%s", name, read_file), - $reader=Input::READER_RAW, - $want_record=F, - $fields=FileLine, - $ev=Exec::file_line]); - } - -function run(cmd: Command): Result - { - add pending_commands[cmd$uid]; - results[cmd$uid] = []; - - if ( cmd?$read_files ) - { - for ( read_file in cmd$read_files ) - { - if ( cmd$uid !in pending_files ) - pending_files[cmd$uid] = set(); - add pending_files[cmd$uid][read_file]; - } - } - - local config_strings: table[string] of string = { - ["stdin"] = cmd$stdin, - ["read_stderr"] = "1", - }; - Input::add_event([$name=cmd$uid, - $source=fmt("%s |", cmd$cmd), - $reader=Input::READER_RAW, - $mode=Input::STREAM, - $fields=Exec::OneLine, - $ev=Exec::line, - $want_record=F, - $config=config_strings]); - - return when ( cmd$uid !in pending_commands ) - { - local result = results[cmd$uid]; - delete results[cmd$uid]; - return result; - } - } - -event bro_done() - { - # We are punting here and just deleting any unprocessed files. - for ( uid in pending_files ) - for ( fname in pending_files[uid] ) - system(fmt("rm %s", safe_shell_quote(fname))); - } diff --git a/scripts/base/utils/exec.zeek b/scripts/base/utils/exec.zeek new file mode 100644 index 0000000000..85500bf9c2 --- /dev/null +++ b/scripts/base/utils/exec.zeek @@ -0,0 +1,195 @@ +##! A module for executing external command line programs. + +@load base/frameworks/input + +module Exec; + +export { + type Command: record { + ## The command line to execute. Use care to avoid injection + ## attacks (i.e., if the command uses untrusted/variable data, + ## sanitize it with :zeek:see:`safe_shell_quote`). + cmd: string; + ## Provide standard input to the program as a string. + stdin: string &default=""; + ## If additional files are required to be read in as part of the + ## output of the command they can be defined here. + read_files: set[string] &optional; + ## The unique id for tracking executors. + uid: string &default=unique_id(""); + }; + + type Result: record { + ## Exit code from the program. + exit_code: count &default=0; + ## True if the command was terminated with a signal. + signal_exit: bool &default=F; + ## Each line of standard output. + stdout: vector of string &optional; + ## Each line of standard error. + stderr: vector of string &optional; + ## If additional files were requested to be read in + ## the content of the files will be available here. + files: table[string] of string_vec &optional; + }; + + ## Function for running command line programs and getting + ## output. This is an asynchronous function which is meant + ## to be run with the `when` statement. + ## + ## cmd: The command to run. Use care to avoid injection attacks! + ## + ## Returns: A record representing the full results from the + ## external program execution. + global run: function(cmd: Command): Result; +} + +# Indexed by command uid. +global results: table[string] of Result; +global pending_commands: set[string]; +global pending_files: table[string] of set[string]; + +type OneLine: record { + s: string; + is_stderr: bool; +}; + +type FileLine: record { + s: string; +}; + +event Exec::line(description: Input::EventDescription, tpe: Input::Event, s: string, is_stderr: bool) + { + local result = results[description$name]; + if ( is_stderr ) + { + if ( ! result?$stderr ) + result$stderr = vector(s); + else + result$stderr += s; + } + else + { + if ( ! 
result?$stdout ) + result$stdout = vector(s); + else + result$stdout += s; + } + } + +event Exec::file_line(description: Input::EventDescription, tpe: Input::Event, s: string) + { + local parts = split_string1(description$name, /_/); + local name = parts[0]; + local track_file = parts[1]; + + local result = results[name]; + if ( ! result?$files ) + result$files = table(); + + if ( track_file !in result$files ) + result$files[track_file] = vector(s); + else + result$files[track_file] += s; + } + +event Input::end_of_data(orig_name: string, source:string) + { + local name = orig_name; + local parts = split_string1(name, /_/); + name = parts[0]; + + if ( name !in pending_commands || |parts| < 2 ) + return; + + local track_file = parts[1]; + + # If the file is empty, still add it to the result$files table. This is needed + # because it is expected that the file was read even if it was empty. + local result = results[name]; + if ( ! result?$files ) + result$files = table(); + + if ( track_file !in result$files ) + result$files[track_file] = vector(); + + Input::remove(orig_name); + + if ( name !in pending_files ) + delete pending_commands[name]; + else + { + delete pending_files[name][track_file]; + if ( |pending_files[name]| == 0 ) + delete pending_commands[name]; + system(fmt("rm %s", safe_shell_quote(track_file))); + } + } + +event InputRaw::process_finished(name: string, source:string, exit_code:count, signal_exit:bool) + { + if ( name !in pending_commands ) + return; + + # Upon the process exiting, the internal Raw reader code should take + # care of signalling that the stream needs to be disabled/removed. + #Input::remove(name); + results[name]$exit_code = exit_code; + results[name]$signal_exit = signal_exit; + + if ( name !in pending_files || |pending_files[name]| == 0 ) + # No extra files to read, command is done. + delete pending_commands[name]; + else + for ( read_file in pending_files[name] ) + Input::add_event([$source=fmt("%s", read_file), + $name=fmt("%s_%s", name, read_file), + $reader=Input::READER_RAW, + $want_record=F, + $fields=FileLine, + $ev=Exec::file_line]); + } + +function run(cmd: Command): Result + { + add pending_commands[cmd$uid]; + results[cmd$uid] = []; + + if ( cmd?$read_files ) + { + for ( read_file in cmd$read_files ) + { + if ( cmd$uid !in pending_files ) + pending_files[cmd$uid] = set(); + add pending_files[cmd$uid][read_file]; + } + } + + local config_strings: table[string] of string = { + ["stdin"] = cmd$stdin, + ["read_stderr"] = "1", + }; + Input::add_event([$name=cmd$uid, + $source=fmt("%s |", cmd$cmd), + $reader=Input::READER_RAW, + $mode=Input::STREAM, + $fields=Exec::OneLine, + $ev=Exec::line, + $want_record=F, + $config=config_strings]); + + return when ( cmd$uid !in pending_commands ) + { + local result = results[cmd$uid]; + delete results[cmd$uid]; + return result; + } + } + +event zeek_done() + { + # We are punting here and just deleting any unprocessed files. + for ( uid in pending_files ) + for ( fname in pending_files[uid] ) + system(fmt("rm %s", safe_shell_quote(fname))); + } diff --git a/scripts/base/utils/files.bro b/scripts/base/utils/files.zeek similarity index 100% rename from scripts/base/utils/files.bro rename to scripts/base/utils/files.zeek diff --git a/scripts/base/utils/geoip-distance.bro b/scripts/base/utils/geoip-distance.bro deleted file mode 100644 index 8d3149cb03..0000000000 --- a/scripts/base/utils/geoip-distance.bro +++ /dev/null @@ -1,26 +0,0 @@ -##! Functions to calculate distance between two locations, based on GeoIP data. 
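Like ActiveHTTP::request, the Exec::run function above is asynchronous and must be used inside a when-statement. A sketch with a placeholder command:

@load base/utils/exec

event zeek_init()
	{
	when ( local res = Exec::run([$cmd="ls -1 /tmp"]) )
		{
		print fmt("exit code: %d", res$exit_code);
		if ( res?$stdout )
			for ( i in res$stdout )
				print res$stdout[i];
		}
	}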
- -## Returns the distance between two IP addresses using the haversine formula, -## based on GeoIP database locations. Requires Bro to be built with GeoIP. -## -## a1: First IP address. -## -## a2: Second IP address. -## -## Returns: The distance between *a1* and *a2* in miles, or -1.0 if GeoIP data -## is not available for either of the IP addresses. -## -## .. bro:see:: haversine_distance lookup_location -function haversine_distance_ip(a1: addr, a2: addr): double - { - local loc1 = lookup_location(a1); - local loc2 = lookup_location(a2); - local miles: double; - - if ( loc1?$latitude && loc1?$longitude && loc2?$latitude && loc2?$longitude ) - miles = haversine_distance(loc1$latitude, loc1$longitude, loc2$latitude, loc2$longitude); - else - miles = -1.0; - - return miles; - } diff --git a/scripts/base/utils/geoip-distance.zeek b/scripts/base/utils/geoip-distance.zeek new file mode 100644 index 0000000000..241c274175 --- /dev/null +++ b/scripts/base/utils/geoip-distance.zeek @@ -0,0 +1,26 @@ +##! Functions to calculate distance between two locations, based on GeoIP data. + +## Returns the distance between two IP addresses using the haversine formula, +## based on GeoIP database locations. Requires Zeek to be built with GeoIP. +## +## a1: First IP address. +## +## a2: Second IP address. +## +## Returns: The distance between *a1* and *a2* in miles, or -1.0 if GeoIP data +## is not available for either of the IP addresses. +## +## .. zeek:see:: haversine_distance lookup_location +function haversine_distance_ip(a1: addr, a2: addr): double + { + local loc1 = lookup_location(a1); + local loc2 = lookup_location(a2); + local miles: double; + + if ( loc1?$latitude && loc1?$longitude && loc2?$latitude && loc2?$longitude ) + miles = haversine_distance(loc1$latitude, loc1$longitude, loc2$latitude, loc2$longitude); + else + miles = -1.0; + + return miles; + } diff --git a/scripts/base/utils/hash_hrw.bro b/scripts/base/utils/hash_hrw.zeek similarity index 100% rename from scripts/base/utils/hash_hrw.bro rename to scripts/base/utils/hash_hrw.zeek diff --git a/scripts/base/utils/json.bro b/scripts/base/utils/json.bro deleted file mode 100644 index ead214f93e..0000000000 --- a/scripts/base/utils/json.bro +++ /dev/null @@ -1,109 +0,0 @@ -##! Functions to assist with generating JSON data from Bro data scructures. -# We might want to implement this in core somtime, this looks... hacky at best. - -@load base/utils/strings - -## A function to convert arbitrary Bro data into a JSON string. -## -## v: The value to convert to JSON. Typically a record. -## -## only_loggable: If the v value is a record this will only cause -## fields with the &log attribute to be included in the JSON. -## -## returns: a JSON formatted string. -function to_json(v: any, only_loggable: bool &default=F, field_escape_pattern: pattern &default=/^_/): string - { - local tn = type_name(v); - switch ( tn ) - { - case "type": - return ""; - - case "string": - return cat("\"", gsub(gsub(clean(v), /\\/, "\\\\"), /\"/, "\\\""), "\""); - - case "port": - return cat(port_to_count(to_port(cat(v)))); - - case "enum": - fallthrough; - case "interval": - fallthrough; - case "addr": - fallthrough; - case "subnet": - return cat("\"", v, "\""); - - case "int": - fallthrough; - case "count": - fallthrough; - case "time": - return cat(v); - - case "double": - return fmt("%.16g", v); - - case "bool": - local bval: bool = v; - return bval ? 
"true" : "false"; - - default: - break; - } - - if ( /^record/ in tn ) - { - local rec_parts: string_vec = vector(); - - local ft = record_fields(v); - for ( field, field_desc in ft ) - { - # replace the escape pattern in the field. - if( field_escape_pattern in field ) - field = cat(sub(field, field_escape_pattern, "")); - if ( field_desc?$value && (!only_loggable || field_desc$log) ) - { - local onepart = cat("\"", field, "\": ", to_json(field_desc$value, only_loggable)); - rec_parts += onepart; - } - } - return cat("{", join_string_vec(rec_parts, ", "), "}"); - } - - # None of the following are supported. - else if ( /^set/ in tn ) - { - local set_parts: string_vec = vector(); - local sa: set[bool] = v; - for ( sv in sa ) - { - set_parts += to_json(sv, only_loggable); - } - return cat("[", join_string_vec(set_parts, ", "), "]"); - } - else if ( /^table/ in tn ) - { - local tab_parts: vector of string = vector(); - local ta: table[bool] of any = v; - for ( ti, tv in ta ) - { - local ts = to_json(ti); - local if_quotes = (ts[0] == "\"") ? "" : "\""; - tab_parts += cat(if_quotes, ts, if_quotes, ": ", to_json(tv, only_loggable)); - } - return cat("{", join_string_vec(tab_parts, ", "), "}"); - } - else if ( /^vector/ in tn ) - { - local vec_parts: string_vec = vector(); - local va: vector of any = v; - for ( vi in va ) - { - vec_parts += to_json(va[vi], only_loggable); - } - return cat("[", join_string_vec(vec_parts, ", "), "]"); - } - - return "\"\""; - } diff --git a/scripts/base/utils/json.zeek b/scripts/base/utils/json.zeek new file mode 100644 index 0000000000..5bce89d18b --- /dev/null +++ b/scripts/base/utils/json.zeek @@ -0,0 +1,109 @@ +##! Functions to assist with generating JSON data from Zeek data scructures. +# We might want to implement this in core somtime, this looks... hacky at best. + +@load base/utils/strings + +## A function to convert arbitrary Zeek data into a JSON string. +## +## v: The value to convert to JSON. Typically a record. +## +## only_loggable: If the v value is a record this will only cause +## fields with the &log attribute to be included in the JSON. +## +## returns: a JSON formatted string. +function to_json(v: any, only_loggable: bool &default=F, field_escape_pattern: pattern &default=/^_/): string + { + local tn = type_name(v); + switch ( tn ) + { + case "type": + return ""; + + case "string": + return cat("\"", gsub(gsub(clean(v), /\\/, "\\\\"), /\"/, "\\\""), "\""); + + case "port": + return cat(port_to_count(to_port(cat(v)))); + + case "enum": + fallthrough; + case "interval": + fallthrough; + case "addr": + fallthrough; + case "subnet": + return cat("\"", v, "\""); + + case "int": + fallthrough; + case "count": + fallthrough; + case "time": + return cat(v); + + case "double": + return fmt("%.16g", v); + + case "bool": + local bval: bool = v; + return bval ? "true" : "false"; + + default: + break; + } + + if ( /^record/ in tn ) + { + local rec_parts: string_vec = vector(); + + local ft = record_fields(v); + for ( field, field_desc in ft ) + { + # replace the escape pattern in the field. + if( field_escape_pattern in field ) + field = cat(sub(field, field_escape_pattern, "")); + if ( field_desc?$value && (!only_loggable || field_desc$log) ) + { + local onepart = cat("\"", field, "\": ", to_json(field_desc$value, only_loggable)); + rec_parts += onepart; + } + } + return cat("{", join_string_vec(rec_parts, ", "), "}"); + } + + # None of the following are supported. 
+ else if ( /^set/ in tn ) + { + local set_parts: string_vec = vector(); + local sa: set[bool] = v; + for ( sv in sa ) + { + set_parts += to_json(sv, only_loggable); + } + return cat("[", join_string_vec(set_parts, ", "), "]"); + } + else if ( /^table/ in tn ) + { + local tab_parts: vector of string = vector(); + local ta: table[bool] of any = v; + for ( ti, tv in ta ) + { + local ts = to_json(ti); + local if_quotes = (ts[0] == "\"") ? "" : "\""; + tab_parts += cat(if_quotes, ts, if_quotes, ": ", to_json(tv, only_loggable)); + } + return cat("{", join_string_vec(tab_parts, ", "), "}"); + } + else if ( /^vector/ in tn ) + { + local vec_parts: string_vec = vector(); + local va: vector of any = v; + for ( vi in va ) + { + vec_parts += to_json(va[vi], only_loggable); + } + return cat("[", join_string_vec(vec_parts, ", "), "]"); + } + + return "\"\""; + } diff --git a/scripts/base/utils/numbers.bro b/scripts/base/utils/numbers.zeek similarity index 100% rename from scripts/base/utils/numbers.bro rename to scripts/base/utils/numbers.zeek diff --git a/scripts/base/utils/paths.bro b/scripts/base/utils/paths.bro deleted file mode 100644 index 6de5b85e2e..0000000000 --- a/scripts/base/utils/paths.bro +++ /dev/null @@ -1,82 +0,0 @@ -##! Functions to parse and manipulate UNIX style paths and directories. - -const absolute_path_pat = /(\/|[A-Za-z]:[\\\/]).*/; - -## Given an arbitrary string, extracts a single, absolute path (directory -## with filename). -## -## .. todo:: Make this work on Window's style directories. -## -## input: a string that may contain an absolute path. -## -## Returns: the first absolute path found in input string, else an empty string. -function extract_path(input: string): string - { - const dir_pattern = /(\/|[A-Za-z]:[\\\/])([^\"\ ]|(\\\ ))*/; - local parts = split_string_all(input, dir_pattern); - - if ( |parts| < 3 ) - return ""; - - return parts[1]; - } - -## Compresses a given path by removing '..'s and the parent directory it -## references and also removing dual '/'s and extraneous '/./'s. -## -## dir: a path string, either relative or absolute. -## -## Returns: a compressed version of the input path. -function compress_path(dir: string): string - { - const cdup_sep = /((\/)*([^\/]|\\\/)+)?((\/)+\.\.(\/)*)/; - - local parts = split_string_n(dir, cdup_sep, T, 1); - if ( |parts| > 1 ) - { - # reaching a point with two parent dir references back-to-back means - # we don't know about anything higher in the tree to pop off - if ( parts[1] == "../.." ) - return join_string_vec(parts, ""); - if ( sub_bytes(parts[1], 0, 1) == "/" ) - parts[1] = "/"; - else - parts[1] = ""; - dir = join_string_vec(parts, ""); - return compress_path(dir); - } - - const multislash_sep = /(\/\.?){2,}/; - parts = split_string_all(dir, multislash_sep); - for ( i in parts ) - if ( i % 2 == 1 ) - parts[i] = "/"; - dir = join_string_vec(parts, ""); - - # remove trailing slashes from path - if ( |dir| > 1 && sub_bytes(dir, |dir|, 1) == "/" ) - dir = sub_bytes(dir, 0, |dir| - 1); - - return dir; - } - -## Constructs a path to a file given a directory and a file name. -## -## dir: the directory in which the file lives. -## -## file_name: the name of the file. -## -## Returns: the concatenation of the directory path and file name, or just -## the file name if it's already an absolute path. -function build_path(dir: string, file_name: string): string - { - return (file_name == absolute_path_pat) ? 
- file_name : cat(dir, "/", file_name); - } - -## Returns a compressed path to a file given a directory and file name. -## See :bro:id:`build_path` and :bro:id:`compress_path`. -function build_path_compressed(dir: string, file_name: string): string - { - return compress_path(build_path(dir, file_name)); - } diff --git a/scripts/base/utils/paths.zeek b/scripts/base/utils/paths.zeek new file mode 100644 index 0000000000..fdc9bd5d3d --- /dev/null +++ b/scripts/base/utils/paths.zeek @@ -0,0 +1,82 @@ +##! Functions to parse and manipulate UNIX style paths and directories. + +const absolute_path_pat = /(\/|[A-Za-z]:[\\\/]).*/; + +## Given an arbitrary string, extracts a single, absolute path (directory +## with filename). +## +## .. todo:: Make this work on Window's style directories. +## +## input: a string that may contain an absolute path. +## +## Returns: the first absolute path found in input string, else an empty string. +function extract_path(input: string): string + { + const dir_pattern = /(\/|[A-Za-z]:[\\\/])([^\"\ ]|(\\\ ))*/; + local parts = split_string_all(input, dir_pattern); + + if ( |parts| < 3 ) + return ""; + + return parts[1]; + } + +## Compresses a given path by removing '..'s and the parent directory it +## references and also removing dual '/'s and extraneous '/./'s. +## +## dir: a path string, either relative or absolute. +## +## Returns: a compressed version of the input path. +function compress_path(dir: string): string + { + const cdup_sep = /((\/)*([^\/]|\\\/)+)?((\/)+\.\.(\/)*)/; + + local parts = split_string_n(dir, cdup_sep, T, 1); + if ( |parts| > 1 ) + { + # reaching a point with two parent dir references back-to-back means + # we don't know about anything higher in the tree to pop off + if ( parts[1] == "../.." ) + return join_string_vec(parts, ""); + if ( sub_bytes(parts[1], 0, 1) == "/" ) + parts[1] = "/"; + else + parts[1] = ""; + dir = join_string_vec(parts, ""); + return compress_path(dir); + } + + const multislash_sep = /(\/\.?){2,}/; + parts = split_string_all(dir, multislash_sep); + for ( i in parts ) + if ( i % 2 == 1 ) + parts[i] = "/"; + dir = join_string_vec(parts, ""); + + # remove trailing slashes from path + if ( |dir| > 1 && sub_bytes(dir, |dir|, 1) == "/" ) + dir = sub_bytes(dir, 0, |dir| - 1); + + return dir; + } + +## Constructs a path to a file given a directory and a file name. +## +## dir: the directory in which the file lives. +## +## file_name: the name of the file. +## +## Returns: the concatenation of the directory path and file name, or just +## the file name if it's already an absolute path. +function build_path(dir: string, file_name: string): string + { + return (file_name == absolute_path_pat) ? + file_name : cat(dir, "/", file_name); + } + +## Returns a compressed path to a file given a directory and file name. +## See :zeek:id:`build_path` and :zeek:id:`compress_path`. +function build_path_compressed(dir: string, file_name: string): string + { + return compress_path(build_path(dir, file_name)); + } diff --git a/scripts/base/utils/patterns.bro b/scripts/base/utils/patterns.bro deleted file mode 100644 index 47b8cf4e37..0000000000 --- a/scripts/base/utils/patterns.bro +++ /dev/null @@ -1,60 +0,0 @@ -##! Functions for creating and working with patterns. - -module GLOBAL; - -## Given a pattern as a string with two tildes (~~) contained in it, it will -## return a pattern with string set's elements OR'd together where the -## double-tilde was given (this function only works at or before init time). 
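A brief sketch of the path helpers above; the inputs are arbitrary examples and the commented results are what the implementation should yield:

@load base/utils/paths

event zeek_init()
	{
	print compress_path("/var//log/./zeek/../zeek");              # "/var/log/zeek"
	print build_path_compressed("/var/log/zeek", "../conn.log");  # "/var/log/conn.log"
	print build_path("/var/log", "/etc/zeek/site.zeek");          # absolute names pass through
	}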
-## -## ss: a set of strings to OR together. -## -## pat: the pattern containing a "~~" in it. If a literal backslash is -## included, it needs to be escaped with another backslash due to Bro's -## string parsing reducing it to a single backslash upon rendering. -## -## Returns: the input pattern with "~~" replaced by OR'd elements of input set. -function set_to_regex(ss: set[string], pat: string): pattern - { - local i: count = 0; - local return_pat = ""; - for ( s in ss ) - { - local tmp_pattern = convert_for_pattern(s); - return_pat = ( i == 0 ) ? - tmp_pattern : cat(tmp_pattern, "|", return_pat); - ++i; - } - return string_to_pattern(sub(pat, /~~/, return_pat), F); - } - -type PatternMatchResult: record { - ## T if a match was found, F otherwise. - matched: bool; - ## Portion of string that first matched. - str: string; - ## 1-based offset where match starts. - off: count; -}; - -## Matches the given pattern against the given string, returning -## a :bro:type:`PatternMatchResult` record. -## For example: ``match_pattern("foobar", /o*[a-k]/)`` returns -## ``[matched=T, str=f, off=1]``, because the *first* match is for -## zero o's followed by an [a-k], but ``match_pattern("foobar", /o+[a-k]/)`` -## returns ``[matched=T, str=oob, off=2]``. -## -## s: a string to match against. -## -## p: a pattern to match. -## -## Returns: a record indicating the match status. -function match_pattern(s: string, p: pattern): PatternMatchResult - { - local a = split_string_n(s, p, T, 1); - - if ( |a| == 1 ) - # no match - return [$matched = F, $str = "", $off = 0]; - else - return [$matched = T, $str = a[1], $off = |a[0]| + 1]; - } diff --git a/scripts/base/utils/patterns.zeek b/scripts/base/utils/patterns.zeek new file mode 100644 index 0000000000..861f0b20bd --- /dev/null +++ b/scripts/base/utils/patterns.zeek @@ -0,0 +1,60 @@ +##! Functions for creating and working with patterns. + +module GLOBAL; + +## Given a pattern as a string with two tildes (~~) contained in it, it will +## return a pattern with string set's elements OR'd together where the +## double-tilde was given (this function only works at or before init time). +## +## ss: a set of strings to OR together. +## +## pat: the pattern containing a "~~" in it. If a literal backslash is +## included, it needs to be escaped with another backslash due to Zeek's +## string parsing reducing it to a single backslash upon rendering. +## +## Returns: the input pattern with "~~" replaced by OR'd elements of input set. +function set_to_regex(ss: set[string], pat: string): pattern + { + local i: count = 0; + local return_pat = ""; + for ( s in ss ) + { + local tmp_pattern = convert_for_pattern(s); + return_pat = ( i == 0 ) ? + tmp_pattern : cat(tmp_pattern, "|", return_pat); + ++i; + } + return string_to_pattern(sub(pat, /~~/, return_pat), F); + } + +type PatternMatchResult: record { + ## T if a match was found, F otherwise. + matched: bool; + ## Portion of string that first matched. + str: string; + ## 1-based offset where match starts. + off: count; +}; + +## Matches the given pattern against the given string, returning +## a :zeek:type:`PatternMatchResult` record. +## For example: ``match_pattern("foobar", /o*[a-k]/)`` returns +## ``[matched=T, str=f, off=1]``, because the *first* match is for +## zero o's followed by an [a-k], but ``match_pattern("foobar", /o+[a-k]/)`` +## returns ``[matched=T, str=oob, off=2]``. +## +## s: a string to match against. +## +## p: a pattern to match. +## +## Returns: a record indicating the match status. 
+function match_pattern(s: string, p: pattern): PatternMatchResult + { + local a = split_string_n(s, p, T, 1); + + if ( |a| == 1 ) + # no match + return [$matched = F, $str = "", $off = 0]; + else + return [$matched = T, $str = a[1], $off = |a[0]| + 1]; + } diff --git a/scripts/base/utils/queue.bro b/scripts/base/utils/queue.zeek similarity index 100% rename from scripts/base/utils/queue.bro rename to scripts/base/utils/queue.zeek diff --git a/scripts/base/utils/site.bro b/scripts/base/utils/site.bro deleted file mode 100644 index aa40e1b92b..0000000000 --- a/scripts/base/utils/site.bro +++ /dev/null @@ -1,161 +0,0 @@ -##! Definitions describing a site - which networks and DNS zones are "local" -##! and "neighbors", and servers running particular services. -@load ./patterns - -module Site; - -export { - ## Address space that is considered private and unrouted. - ## By default it has RFC defined non-routable IPv4 address space. - option private_address_space: set[subnet] = { - 10.0.0.0/8, - 192.168.0.0/16, - 172.16.0.0/12, - 100.64.0.0/10, # RFC6598 Carrier Grade NAT - 127.0.0.0/8, - [fe80::]/10, - [::1]/128, - }; - - ## Networks that are considered "local". Note that BroControl sets - ## this automatically. - option local_nets: set[subnet] = {}; - - ## This is used for retrieving the subnet when using multiple entries in - ## :bro:id:`Site::local_nets`. It's populated automatically from there. - ## A membership query can be done with an - ## :bro:type:`addr` and the table will yield the subnet it was found - ## within. - global local_nets_table: table[subnet] of subnet = {}; - - ## Networks that are considered "neighbors". - option neighbor_nets: set[subnet] = {}; - - ## If local network administrators are known and they have responsibility - ## for defined address space, then a mapping can be defined here between - ## networks for which they have responsibility and a set of email - ## addresses. - option local_admins: table[subnet] of set[string] = {}; - - ## DNS zones that are considered "local". - option local_zones: set[string] = {}; - - ## DNS zones that are considered "neighbors". - option neighbor_zones: set[string] = {}; - - ## Function that returns true if an address corresponds to one of - ## the local networks, false if not. - ## The function inspects :bro:id:`Site::local_nets`. - global is_local_addr: function(a: addr): bool; - - ## Function that returns true if an address corresponds to one of - ## the neighbor networks, false if not. - ## The function inspects :bro:id:`Site::neighbor_nets`. - global is_neighbor_addr: function(a: addr): bool; - - ## Function that returns true if an address corresponds to one of - ## the private/unrouted networks, false if not. - ## The function inspects :bro:id:`Site::private_address_space`. - global is_private_addr: function(a: addr): bool; - - ## Function that returns true if a host name is within a local - ## DNS zone. - ## The function inspects :bro:id:`Site::local_zones`. - global is_local_name: function(name: string): bool; - - ## Function that returns true if a host name is within a neighbor - ## DNS zone. - ## The function inspects :bro:id:`Site::neighbor_zones`. - global is_neighbor_name: function(name: string): bool; - - ## Function that returns a comma-separated list of email addresses - ## that are considered administrators for the IP address provided as - ## an argument. - ## The function inspects :bro:id:`Site::local_admins`. 
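A sketch of the two pattern helpers above. The set_to_regex call mirrors the one in site.zeek further below, and the match_pattern results restate the examples from its own documentation; the host names are placeholders:

@load base/utils/patterns

event zeek_init()
	{
	local zones = set("example.com", "example.org");
	local zone_pat = set_to_regex(zones, "(^\\.?|\\.)(~~)$");
	print (zone_pat in "www.example.com");   # T for names under one of the zones

	local m = match_pattern("foobar", /o+[a-k]/);
	if ( m$matched )
		print fmt("matched %s at offset %d", m$str, m$off);   # "oob" at 2
	}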
- global get_emails: function(a: addr): string; -} - -# Please ignore, this is an interally used variable. -global local_dns_suffix_regex: pattern = /MATCH_NOTHING/; -global local_dns_neighbor_suffix_regex: pattern = /MATCH_NOTHING/; - - -function is_local_addr(a: addr): bool - { - return a in local_nets; - } - -function is_neighbor_addr(a: addr): bool - { - return a in neighbor_nets; - } - -function is_private_addr(a: addr): bool - { - return a in private_address_space; - } - -function is_local_name(name: string): bool - { - return local_dns_suffix_regex in name; - } - -function is_neighbor_name(name: string): bool - { - return local_dns_neighbor_suffix_regex in name; - } - -# This is a hack for doing a for loop. -const one_to_32: vector of count = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}; - -# TODO: make this work with IPv6 -function find_all_emails(ip: addr): set[string] - { - if ( ip !in local_admins ) return set(); - - local output_values: set[string] = set(); - local tmp_subnet: subnet; - local i: count; - local emails: string; - for ( i in one_to_32 ) - { - tmp_subnet = mask_addr(ip, one_to_32[i]); - if ( tmp_subnet in local_admins ) - for ( email in local_admins[tmp_subnet] ) - { - if ( email != "" ) - add output_values[email]; - } - } - return output_values; - } - -function fmt_email_string(emails: set[string]): string - { - local output=""; - for( email in emails ) - { - if ( output == "" ) - output = email; - else - output = fmt("%s, %s", output, email); - } - return output; - } - -function get_emails(a: addr): string - { - return fmt_email_string(find_all_emails(a)); - } - -event bro_init() &priority=10 - { - # Double backslashes are needed due to string parsing. - local_dns_suffix_regex = set_to_regex(local_zones, "(^\\.?|\\.)(~~)$"); - local_dns_neighbor_suffix_regex = set_to_regex(neighbor_zones, "(^\\.?|\\.)(~~)$"); - - # Create the local_nets mapping table. - for ( cidr in Site::local_nets ) - local_nets_table[cidr] = cidr; - - } diff --git a/scripts/base/utils/site.zeek b/scripts/base/utils/site.zeek new file mode 100644 index 0000000000..94c487f7a3 --- /dev/null +++ b/scripts/base/utils/site.zeek @@ -0,0 +1,161 @@ +##! Definitions describing a site - which networks and DNS zones are "local" +##! and "neighbors", and servers running particular services. +@load ./patterns + +module Site; + +export { + ## Address space that is considered private and unrouted. + ## By default it has RFC defined non-routable IPv4 address space. + option private_address_space: set[subnet] = { + 10.0.0.0/8, + 192.168.0.0/16, + 172.16.0.0/12, + 100.64.0.0/10, # RFC6598 Carrier Grade NAT + 127.0.0.0/8, + [fe80::]/10, + [::1]/128, + }; + + ## Networks that are considered "local". Note that ZeekControl sets + ## this automatically. + option local_nets: set[subnet] = {}; + + ## This is used for retrieving the subnet when using multiple entries in + ## :zeek:id:`Site::local_nets`. It's populated automatically from there. + ## A membership query can be done with an + ## :zeek:type:`addr` and the table will yield the subnet it was found + ## within. + global local_nets_table: table[subnet] of subnet = {}; + + ## Networks that are considered "neighbors". + option neighbor_nets: set[subnet] = {}; + + ## If local network administrators are known and they have responsibility + ## for defined address space, then a mapping can be defined here between + ## networks for which they have responsibility and a set of email + ## addresses. 
+ option local_admins: table[subnet] of set[string] = {}; + + ## DNS zones that are considered "local". + option local_zones: set[string] = {}; + + ## DNS zones that are considered "neighbors". + option neighbor_zones: set[string] = {}; + + ## Function that returns true if an address corresponds to one of + ## the local networks, false if not. + ## The function inspects :zeek:id:`Site::local_nets`. + global is_local_addr: function(a: addr): bool; + + ## Function that returns true if an address corresponds to one of + ## the neighbor networks, false if not. + ## The function inspects :zeek:id:`Site::neighbor_nets`. + global is_neighbor_addr: function(a: addr): bool; + + ## Function that returns true if an address corresponds to one of + ## the private/unrouted networks, false if not. + ## The function inspects :zeek:id:`Site::private_address_space`. + global is_private_addr: function(a: addr): bool; + + ## Function that returns true if a host name is within a local + ## DNS zone. + ## The function inspects :zeek:id:`Site::local_zones`. + global is_local_name: function(name: string): bool; + + ## Function that returns true if a host name is within a neighbor + ## DNS zone. + ## The function inspects :zeek:id:`Site::neighbor_zones`. + global is_neighbor_name: function(name: string): bool; + + ## Function that returns a comma-separated list of email addresses + ## that are considered administrators for the IP address provided as + ## an argument. + ## The function inspects :zeek:id:`Site::local_admins`. + global get_emails: function(a: addr): string; +} + +# Please ignore, this is an interally used variable. +global local_dns_suffix_regex: pattern = /MATCH_NOTHING/; +global local_dns_neighbor_suffix_regex: pattern = /MATCH_NOTHING/; + + +function is_local_addr(a: addr): bool + { + return a in local_nets; + } + +function is_neighbor_addr(a: addr): bool + { + return a in neighbor_nets; + } + +function is_private_addr(a: addr): bool + { + return a in private_address_space; + } + +function is_local_name(name: string): bool + { + return local_dns_suffix_regex in name; + } + +function is_neighbor_name(name: string): bool + { + return local_dns_neighbor_suffix_regex in name; + } + +# This is a hack for doing a for loop. +const one_to_32: vector of count = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}; + +# TODO: make this work with IPv6 +function find_all_emails(ip: addr): set[string] + { + if ( ip !in local_admins ) return set(); + + local output_values: set[string] = set(); + local tmp_subnet: subnet; + local i: count; + local emails: string; + for ( i in one_to_32 ) + { + tmp_subnet = mask_addr(ip, one_to_32[i]); + if ( tmp_subnet in local_admins ) + for ( email in local_admins[tmp_subnet] ) + { + if ( email != "" ) + add output_values[email]; + } + } + return output_values; + } + +function fmt_email_string(emails: set[string]): string + { + local output=""; + for( email in emails ) + { + if ( output == "" ) + output = email; + else + output = fmt("%s, %s", output, email); + } + return output; + } + +function get_emails(a: addr): string + { + return fmt_email_string(find_all_emails(a)); + } + +event zeek_init() &priority=10 + { + # Double backslashes are needed due to string parsing. + local_dns_suffix_regex = set_to_regex(local_zones, "(^\\.?|\\.)(~~)$"); + local_dns_neighbor_suffix_regex = set_to_regex(neighbor_zones, "(^\\.?|\\.)(~~)$"); + + # Create the local_nets mapping table. 
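# A small sketch of how the Site API above is typically used; the 10.0.0.0/8
# entry and the test addresses are placeholders (ZeekControl normally fills in
# Site::local_nets on deployed clusters).
@load base/utils/site

redef Site::local_nets += { 10.0.0.0/8 };

event zeek_init()
	{
	print Site::is_local_addr(10.1.2.3);        # T
	print Site::is_private_addr(192.168.0.1);   # T

	if ( 10.1.2.3 in Site::local_nets_table )
		print Site::local_nets_table[10.1.2.3];   # 10.0.0.0/8
	}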
+ for ( cidr in Site::local_nets ) + local_nets_table[cidr] = cidr; + + } diff --git a/scripts/base/utils/strings.bro b/scripts/base/utils/strings.bro deleted file mode 100644 index 8a3f03faaa..0000000000 --- a/scripts/base/utils/strings.bro +++ /dev/null @@ -1,63 +0,0 @@ -##! Functions to assist with small string analysis and manipulation that can -##! be implemented as Bro functions and don't need to be implemented as built-in -##! functions. - -## Returns true if the given string is at least 25% composed of 8-bit -## characters. -function is_string_binary(s: string): bool - { - return |gsub(s, /[\x00-\x7f]/, "")| * 100 / |s| >= 25; - } - -## Join a set of strings together, with elements delimited by a constant string. -## -## ss: a set of strings to join. -## -## j: the string used to join set elements. -## -## Returns: a string composed of all elements of the set, delimited by the -## joining string. -function join_string_set(ss: set[string], j: string): string - { - local output=""; - local i=0; - for ( s in ss ) - { - if ( i > 0 ) - output = cat(output, j); - - output = cat(output, s); - ++i; - } - return output; - } - -## Given a string, returns an escaped version. -## -## s: a string to escape. -## -## chars: a string containing all the characters that need to be escaped. -## -## Returns: a string with all occurrences of any character in *chars* escaped -## using ``\``, and any literal ``\`` characters likewise escaped. -function string_escape(s: string, chars: string): string - { - s = subst_string(s, "\\", "\\\\"); - for ( c in chars ) - s = subst_string(s, c, cat("\\", c)); - return s; - } - -## Cut a number of characters from the end of the given string. -## -## s: a string to trim. -## -## tail_len: the number of characters to remove from the end of the string. -## -## Returns: the given string with *tail_len* characters removed from the end. -function cut_tail(s: string, tail_len: count): string - { - if ( tail_len > |s| ) - tail_len = |s|; - return sub_bytes(s, 1, int_to_count(|s| - tail_len)); - } diff --git a/scripts/base/utils/strings.zeek b/scripts/base/utils/strings.zeek new file mode 100644 index 0000000000..4fa002acd6 --- /dev/null +++ b/scripts/base/utils/strings.zeek @@ -0,0 +1,63 @@ +##! Functions to assist with small string analysis and manipulation that can +##! be implemented as Zeek functions and don't need to be implemented as built-in +##! functions. + +## Returns true if the given string is at least 25% composed of 8-bit +## characters. +function is_string_binary(s: string): bool + { + return |gsub(s, /[\x00-\x7f]/, "")| * 100 / |s| >= 25; + } + +## Join a set of strings together, with elements delimited by a constant string. +## +## ss: a set of strings to join. +## +## j: the string used to join set elements. +## +## Returns: a string composed of all elements of the set, delimited by the +## joining string. +function join_string_set(ss: set[string], j: string): string + { + local output=""; + local i=0; + for ( s in ss ) + { + if ( i > 0 ) + output = cat(output, j); + + output = cat(output, s); + ++i; + } + return output; + } + +## Given a string, returns an escaped version. +## +## s: a string to escape. +## +## chars: a string containing all the characters that need to be escaped. +## +## Returns: a string with all occurrences of any character in *chars* escaped +## using ``\``, and any literal ``\`` characters likewise escaped. 
+function string_escape(s: string, chars: string): string + { + s = subst_string(s, "\\", "\\\\"); + for ( c in chars ) + s = subst_string(s, c, cat("\\", c)); + return s; + } + +## Cut a number of characters from the end of the given string. +## +## s: a string to trim. +## +## tail_len: the number of characters to remove from the end of the string. +## +## Returns: the given string with *tail_len* characters removed from the end. +function cut_tail(s: string, tail_len: count): string + { + if ( tail_len > |s| ) + tail_len = |s|; + return sub_bytes(s, 1, int_to_count(|s| - tail_len)); + } diff --git a/scripts/base/utils/thresholds.bro b/scripts/base/utils/thresholds.bro deleted file mode 100644 index 31d1d3e84f..0000000000 --- a/scripts/base/utils/thresholds.bro +++ /dev/null @@ -1,62 +0,0 @@ -##! Functions for using multiple thresholds with a counting tracker. For -##! example, you may want to generate a notice when something happens 10 times -##! and again when it happens 100 times but nothing in between. You can use -##! the :bro:id:`check_threshold` function to define your threshold points -##! and the :bro:type:`TrackCount` variable where you are keeping track of your -##! counter. - -module GLOBAL; - -export { - type TrackCount: record { - ## The counter for the number of times something has happened. - n: count &default=0; - ## The index of the vector where the counter currently is. This - ## is used to track which threshold is currently being watched - ## for. - index: count &default=0; - }; - - ## The thresholds you would like to use as defaults with the - ## :bro:id:`default_check_threshold` function. - const default_notice_thresholds: vector of count = { - 30, 100, 1000, 10000, 100000, 1000000, 10000000, - } &redef; - - ## This will check if a :bro:type:`TrackCount` variable has crossed any - ## thresholds in a given set. - ## - ## v: a vector holding counts that represent thresholds. - ## - ## tracker: the record being used to track event counter and currently - ## monitored threshold value. - ## - ## Returns: T if a threshold has been crossed, else F. - global check_threshold: function(v: vector of count, tracker: TrackCount): bool; - - ## This will use the :bro:id:`default_notice_thresholds` variable to - ## check a :bro:type:`TrackCount` variable to see if it has crossed - ## another threshold. - global default_check_threshold: function(tracker: TrackCount): bool; -} - -function new_track_count(): TrackCount - { - local tc: TrackCount; - return tc; - } - -function check_threshold(v: vector of count, tracker: TrackCount): bool - { - if ( tracker$index <= |v| && tracker$n >= v[tracker$index] ) - { - ++tracker$index; - return T; - } - return F; - } - -function default_check_threshold(tracker: TrackCount): bool - { - return check_threshold(default_notice_thresholds, tracker); - } diff --git a/scripts/base/utils/thresholds.zeek b/scripts/base/utils/thresholds.zeek new file mode 100644 index 0000000000..d30e9f2b0a --- /dev/null +++ b/scripts/base/utils/thresholds.zeek @@ -0,0 +1,62 @@ +##! Functions for using multiple thresholds with a counting tracker. For +##! example, you may want to generate a notice when something happens 10 times +##! and again when it happens 100 times but nothing in between. You can use +##! the :zeek:id:`check_threshold` function to define your threshold points +##! and the :zeek:type:`TrackCount` variable where you are keeping track of your +##! counter. 
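# A quick sketch exercising the string helpers from base/utils/strings shown
# above; the inputs are arbitrary and the expected output appears in comments.
@load base/utils/strings

event zeek_init()
	{
	print string_escape("say \"hi\"", "\"");   # say \"hi\"
	print cut_tail("report.zeek", 5);          # report
	print is_string_binary("abc\xff\xfe");     # T (2 of 5 bytes are 8-bit)
	}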
+ +module GLOBAL; + +export { + type TrackCount: record { + ## The counter for the number of times something has happened. + n: count &default=0; + ## The index of the vector where the counter currently is. This + ## is used to track which threshold is currently being watched + ## for. + index: count &default=0; + }; + + ## The thresholds you would like to use as defaults with the + ## :zeek:id:`default_check_threshold` function. + const default_notice_thresholds: vector of count = { + 30, 100, 1000, 10000, 100000, 1000000, 10000000, + } &redef; + + ## This will check if a :zeek:type:`TrackCount` variable has crossed any + ## thresholds in a given set. + ## + ## v: a vector holding counts that represent thresholds. + ## + ## tracker: the record being used to track event counter and currently + ## monitored threshold value. + ## + ## Returns: T if a threshold has been crossed, else F. + global check_threshold: function(v: vector of count, tracker: TrackCount): bool; + + ## This will use the :zeek:id:`default_notice_thresholds` variable to + ## check a :zeek:type:`TrackCount` variable to see if it has crossed + ## another threshold. + global default_check_threshold: function(tracker: TrackCount): bool; +} + +function new_track_count(): TrackCount + { + local tc: TrackCount; + return tc; + } + +function check_threshold(v: vector of count, tracker: TrackCount): bool + { + if ( tracker$index <= |v| && tracker$n >= v[tracker$index] ) + { + ++tracker$index; + return T; + } + return F; + } + +function default_check_threshold(tracker: TrackCount): bool + { + return check_threshold(default_notice_thresholds, tracker); + } diff --git a/scripts/base/utils/time.bro b/scripts/base/utils/time.zeek similarity index 100% rename from scripts/base/utils/time.bro rename to scripts/base/utils/time.zeek diff --git a/scripts/base/utils/urls.bro b/scripts/base/utils/urls.bro deleted file mode 100644 index a34b6a02c1..0000000000 --- a/scripts/base/utils/urls.bro +++ /dev/null @@ -1,130 +0,0 @@ -##! Functions for URL handling. - -## A regular expression for matching and extracting URLs. -const url_regex = /^([a-zA-Z\-]{3,5})(:\/\/[^\/?#"'\r\n><]*)([^?#"'\r\n><]*)([^[:blank:]\r\n"'><]*|\??[^"'\r\n><]*)/ &redef; - -## A URI, as parsed by :bro:id:`decompose_uri`. -type URI: record { - ## The URL's scheme.. - scheme: string &optional; - ## The location, which could be a domain name or an IP address. Left empty if not - ## specified. - netlocation: string; - ## Port number, if included in URI. - portnum: count &optional; - ## Full including the file name. Will be '/' if there's not path given. - path: string; - ## Full file name, including extension, if there is a file name. - file_name: string &optional; - ## The base filename, without extension, if there is a file name. - file_base: string &optional; - ## The filename's extension, if there is a file name. - file_ext: string &optional; - ## A table of all query parameters, mapping their keys to values, if there's a - ## query. - params: table[string] of string &optional; -}; - -## Extracts URLs discovered in arbitrary text. -function find_all_urls(s: string): string_set - { - return find_all(s, url_regex); - } - -## Extracts URLs discovered in arbitrary text without -## the URL scheme included. 
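# A short sketch of the TrackCount/check_threshold pattern documented above in
# thresholds.zeek; the threshold values and the sample loop are made up.
@load base/utils/thresholds

event zeek_init()
	{
	local tc: TrackCount;
	local thresholds = vector(2, 5);
	local samples = vector(1, 2, 3, 4, 5);

	for ( i in samples )
		{
		++tc$n;
		if ( check_threshold(thresholds, tc) )
			print fmt("crossed a threshold at n=%d", tc$n);   # fires at n=2 and n=5
		}
	}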
-function find_all_urls_without_scheme(s: string): string_set - { - local urls = find_all_urls(s); - local return_urls: set[string] = set(); - for ( url in urls ) - { - local no_scheme = sub(url, /^([a-zA-Z\-]{3,5})(:\/\/)/, ""); - add return_urls[no_scheme]; - } - - return return_urls; - } - -function decompose_uri(uri: string): URI - { - local parts: string_vec; - local u = URI($netlocation="", $path="/"); - local s = uri; - - if ( /\?/ in s ) - { - u$params = table(); - - parts = split_string1(s, /\?/); - s = parts[0]; - local query = parts[1]; - - if ( /&/ in query ) - { - local opv = split_string(query, /&/); - - for ( each in opv ) - { - if ( /=/ in opv[each] ) - { - parts = split_string1(opv[each], /=/); - u$params[parts[0]] = parts[1]; - } - } - } - else if ( /=/ in query ) - { - parts = split_string1(query, /=/); - u$params[parts[0]] = parts[1]; - } - } - - if ( /:\/\// in s ) - { - # Parse scheme and remove from s. - parts = split_string1(s, /:\/\//); - u$scheme = parts[0]; - s = parts[1]; - } - - if ( /\// in s ) - { - # Parse path and remove from s. - parts = split_string1(s, /\//); - s = parts[0]; - u$path = fmt("/%s", parts[1]); - - if ( |u$path| > 1 && u$path[|u$path| - 1] != "/" ) - { - local last_token = find_last(u$path, /\/.+/); - local full_filename = split_string1(last_token, /\//)[1]; - - if ( /\./ in full_filename ) - { - u$file_name = full_filename; - u$file_base = split_string1(full_filename, /\./)[0]; - u$file_ext = split_string1(full_filename, /\./)[1]; - } - else - { - u$file_name = full_filename; - u$file_base = full_filename; - } - } - } - - if ( /:/ in s ) - { - # Parse location and port. - parts = split_string1(s, /:/); - u$netlocation = parts[0]; - u$portnum = to_count(parts[1]); - } - else - { - u$netlocation = s; - } - - return u; - } diff --git a/scripts/base/utils/urls.zeek b/scripts/base/utils/urls.zeek new file mode 100644 index 0000000000..c6ec41cbfc --- /dev/null +++ b/scripts/base/utils/urls.zeek @@ -0,0 +1,130 @@ +##! Functions for URL handling. + +## A regular expression for matching and extracting URLs. +const url_regex = /^([a-zA-Z\-]{3,5})(:\/\/[^\/?#"'\r\n><]*)([^?#"'\r\n><]*)([^[:blank:]\r\n"'><]*|\??[^"'\r\n><]*)/ &redef; + +## A URI, as parsed by :zeek:id:`decompose_uri`. +type URI: record { + ## The URL's scheme.. + scheme: string &optional; + ## The location, which could be a domain name or an IP address. Left empty if not + ## specified. + netlocation: string; + ## Port number, if included in URI. + portnum: count &optional; + ## Full including the file name. Will be '/' if there's not path given. + path: string; + ## Full file name, including extension, if there is a file name. + file_name: string &optional; + ## The base filename, without extension, if there is a file name. + file_base: string &optional; + ## The filename's extension, if there is a file name. + file_ext: string &optional; + ## A table of all query parameters, mapping their keys to values, if there's a + ## query. + params: table[string] of string &optional; +}; + +## Extracts URLs discovered in arbitrary text. +function find_all_urls(s: string): string_set + { + return find_all(s, url_regex); + } + +## Extracts URLs discovered in arbitrary text without +## the URL scheme included. 
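# A brief sketch of the URL helpers from base/utils/urls: decompose_uri() is
# defined just below; the URL here is a made-up example, and the field values
# in the comments follow from the parsing logic shown in this file.
@load base/utils/urls

event zeek_init()
	{
	local uri = decompose_uri("http://example.com:8080/files/report.txt?x=1");
	print uri$scheme, uri$netlocation, uri$portnum;   # http, example.com, 8080
	print uri$path, uri$params["x"];                  # /files/report.txt, 1
	}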
+function find_all_urls_without_scheme(s: string): string_set + { + local urls = find_all_urls(s); + local return_urls: set[string] = set(); + for ( url in urls ) + { + local no_scheme = sub(url, /^([a-zA-Z\-]{3,5})(:\/\/)/, ""); + add return_urls[no_scheme]; + } + + return return_urls; + } + +function decompose_uri(uri: string): URI + { + local parts: string_vec; + local u = URI($netlocation="", $path="/"); + local s = uri; + + if ( /\?/ in s ) + { + u$params = table(); + + parts = split_string1(s, /\?/); + s = parts[0]; + local query = parts[1]; + + if ( /&/ in query ) + { + local opv = split_string(query, /&/); + + for ( each in opv ) + { + if ( /=/ in opv[each] ) + { + parts = split_string1(opv[each], /=/); + u$params[parts[0]] = parts[1]; + } + } + } + else if ( /=/ in query ) + { + parts = split_string1(query, /=/); + u$params[parts[0]] = parts[1]; + } + } + + if ( /:\/\// in s ) + { + # Parse scheme and remove from s. + parts = split_string1(s, /:\/\//); + u$scheme = parts[0]; + s = parts[1]; + } + + if ( /\// in s ) + { + # Parse path and remove from s. + parts = split_string1(s, /\//); + s = parts[0]; + u$path = fmt("/%s", parts[1]); + + if ( |u$path| > 1 && u$path[|u$path| - 1] != "/" ) + { + local last_token = find_last(u$path, /\/.+/); + local full_filename = split_string1(last_token, /\//)[1]; + + if ( /\./ in full_filename ) + { + u$file_name = full_filename; + u$file_base = split_string1(full_filename, /\./)[0]; + u$file_ext = split_string1(full_filename, /\./)[1]; + } + else + { + u$file_name = full_filename; + u$file_base = full_filename; + } + } + } + + if ( /:/ in s ) + { + # Parse location and port. + parts = split_string1(s, /:/); + u$netlocation = parts[0]; + u$portnum = to_count(parts[1]); + } + else + { + u$netlocation = s; + } + + return u; + } diff --git a/scripts/broxygen/README b/scripts/broxygen/README deleted file mode 100644 index ac7f522285..0000000000 --- a/scripts/broxygen/README +++ /dev/null @@ -1,4 +0,0 @@ -This package is loaded during the process which automatically generates -reference documentation for all Bro scripts (i.e. "Broxygen"). Its only -purpose is to provide an easy way to load all known Bro scripts plus any -extra scripts needed or used by the documentation process. diff --git a/scripts/broxygen/__load__.bro b/scripts/broxygen/__load__.bro deleted file mode 100644 index 5d4ac5ea03..0000000000 --- a/scripts/broxygen/__load__.bro +++ /dev/null @@ -1,17 +0,0 @@ -@load test-all-policy.bro - -# Scripts which are commented out in test-all-policy.bro. -@load protocols/ssl/notary.bro -@load frameworks/control/controllee.bro -@load frameworks/control/controller.bro -@load frameworks/files/extract-all-files.bro -@load policy/misc/dump-events.bro -@load policy/protocols/dhcp/deprecated_events.bro -@load policy/protocols/smb/__load__.bro - -@load ./example.bro - -event bro_init() - { - terminate(); - } diff --git a/scripts/broxygen/example.bro b/scripts/broxygen/example.bro deleted file mode 100644 index 65cc5ff1c7..0000000000 --- a/scripts/broxygen/example.bro +++ /dev/null @@ -1,194 +0,0 @@ -##! This is an example script that demonstrates Broxygen-style -##! documentation. It generally will make most sense when viewing -##! the script's raw source code and comparing to the HTML-rendered -##! version. -##! -##! Comments in the from ``##!`` are meant to summarize the script's -##! purpose. They are transferred directly in to the generated -##! `reStructuredText `_ -##! (reST) document associated with the script. -##! -##! .. 
tip:: You can embed directives and roles within ``##``-stylized comments. -##! -##! There's also a custom role to reference any identifier node in -##! the Bro Sphinx domain that's good for "see alsos", e.g. -##! -##! See also: :bro:see:`BroxygenExample::a_var`, -##! :bro:see:`BroxygenExample::ONE`, :bro:see:`SSH::Info` -##! -##! And a custom directive does the equivalent references: -##! -##! .. bro:see:: BroxygenExample::a_var BroxygenExample::ONE SSH::Info - -# Comments that use a single pound sign (#) are not significant to -# a script's auto-generated documentation, but ones that use a -# double pound sign (##) do matter. In some cases, like record -# field comments, it's necessary to disambiguate the field with -# which a comment associates: e.g. "##<" can be used on the same line -# as a field to signify the comment relates to it and not the -# following field. "##<" can also be used more generally in any -# variable declarations to associate with the last-declared identifier. -# -# Generally, the auto-doc comments (##) are associated with the -# next declaration/identifier found in the script, but Broxygen -# will track/render identifiers regardless of whether they have any -# of these special comments associated with them. -# -# The first sentence contained within the "##"-stylized comments for -# a given identifier is special in that it will be used as summary -# text in a table containing all such identifiers and short summaries. -# If there are no sentences (text terminated with '.'), then everything -# in the "##"-stylized comments up until the first empty comment -# is taken as the summary text for a given identifier. - -# @load directives are self-documenting, don't use any ``##`` style -# comments with them. -@load base/frameworks/notice -@load base/protocols/http -@load frameworks/software/vulnerable - -# "module" statements are self-documenting, don't use any ``##`` style -# comments with them. -module BroxygenExample; - -# Redefinitions of "Notice::Type" are self-documenting, but -# more information can be supplied in two different ways. -redef enum Notice::Type += { - ## Any number of this type of comment - ## will document "Broxygen_One". - Broxygen_One, - Broxygen_Two, ##< Any number of this type of comment - ##< will document "BROXYGEN_TWO". - Broxygen_Three, - ## Omitting comments is fine, and so is mixing ``##`` and ``##<``, but - Broxygen_Four, ##< it's probably best to use only one style consistently. -}; - -# All redefs are automatically tracked. Comments of the "##" form can be use -# to further document it, but in some cases, like here, they wouldn't be -# ading any interesting information that's not implicit. -redef enum Log::ID += { LOG }; - -# Only identifiers declared in an export section will show up in generated docs. - -export { - - ## Documentation for the "SimpleEnum" type goes here. - ## It can span multiple lines. - type SimpleEnum: enum { - ## Documentation for particular enum values is added like this. - ## And can also span multiple lines. - ONE, - TWO, ##< Or this style is valid to document the preceding enum value. - THREE, - }; - - ## Document the "SimpleEnum" redef here with any special info regarding - ## the *redef* itself. - redef enum SimpleEnum += { - FOUR, ##< And some documentation for "FOUR". - ## Also "FIVE". - FIVE - }; - - ## General documentation for a type "SimpleRecord" goes here. - ## The way fields can be documented is similar to what's already seen - ## for enums. - type SimpleRecord: record { - ## Counts something. 
- field1: count; - field2: bool; ##< Toggles something. - }; - - ## Document the record extension *redef* itself here. - redef record SimpleRecord += { - ## Document the extending field like this. - field_ext: string &optional; ##< Or here, like this. - }; - - ## General documentation for a type "ComplexRecord" goes here. - type ComplexRecord: record { - field1: count; ##< Counts something. - field2: bool; ##< Toggles something. - field3: SimpleRecord; ##< Broxygen automatically tracks types - ##< and cross-references are automatically - ##< inserted in to generated docs. - msg: string &default="blah"; ##< Attributes are self-documenting. - } &redef; - - ## An example record to be used with a logging stream. - ## Nothing special about it. If another script redefs this type - ## to add fields, the generated documentation will show all original - ## fields plus the extensions and the scripts which contributed to it - ## (provided they are also @load'ed). - type Info: record { - ts: time &log; - uid: string &log; - status: count &log &optional; - }; - - ## Add documentation for "an_option" here. - ## The type/attribute information is all generated automatically. - const an_option: set[addr, addr, string] &redef; - - ## Default initialization will be generated automatically. - const option_with_init = 0.01 secs &redef; ##< More docs can be added here. - - ## Put some documentation for "a_var" here. Any global/non-const that - ## isn't a function/event/hook is classified as a "state variable" - ## in the generated docs. - global a_var: bool; - - ## Types are inferred, that information is self-documenting. - global var_without_explicit_type = "this works"; - - ## The first sentence for a particular identifier's summary text ends here. - ## And this second sentence doesn't show in the short description provided - ## by the table of all identifiers declared by this script. - global summary_test: string; - - ## Summarize purpose of "a_function" here. - ## Give more details about "a_function" here. - ## Separating the documentation of the params/return values with - ## empty comments is optional, but improves readability of script. - ## - ## tag: Function arguments can be described - ## like this. - ## - ## msg: Another param. - ## - ## Returns: Describe the return type here. - global a_function: function(tag: string, msg: string): string; - - ## Summarize "an_event" here. - ## Give more details about "an_event" here. - ## - ## BroxygenExample::a_function should not be confused as a parameter - ## in the generated docs, but it also doesn't generate a cross-reference - ## link. Use the see role instead: :bro:see:`BroxygenExample::a_function`. - ## - ## name: Describe the argument here. - global an_event: event(name: string); -} - -# This function isn't exported, so it won't appear anywhere in the generated -# documentation. So using ``##``-style comments is pointless here. -function function_without_proto(tag: string): string - { - return "blah"; - } - -# Same thing goes for types -- it's not exported, so it's considered -# private to this script and comments are only interesting to a person -# who is already reading the raw source for the script (so don't use -# ``##`` comments here. -type PrivateRecord: record { - field1: bool; - field2: count; -}; - -# Event handlers are also an implementation detail of a script, so they -# don't show up anywhere in the generated documentation. 
-event bro_init() - { - } diff --git a/scripts/base/files/unified2/README b/scripts/policy/files/unified2/README similarity index 100% rename from scripts/base/files/unified2/README rename to scripts/policy/files/unified2/README diff --git a/scripts/base/protocols/snmp/__load__.bro b/scripts/policy/files/unified2/__load__.zeek similarity index 100% rename from scripts/base/protocols/snmp/__load__.bro rename to scripts/policy/files/unified2/__load__.zeek diff --git a/scripts/policy/files/unified2/main.zeek b/scripts/policy/files/unified2/main.zeek new file mode 100644 index 0000000000..1a9841d5b1 --- /dev/null +++ b/scripts/policy/files/unified2/main.zeek @@ -0,0 +1,297 @@ + +@load base/utils/dir +@load base/utils/paths + +module Unified2; + +export { + redef enum Log::ID += { LOG }; + + ## File to watch for Unified2 files. + const watch_file = "" &redef; + + ## Directory to watch for Unified2 records. + const watch_dir = "" &redef; + + ## The sid-msg.map file you would like to use for your alerts. + const sid_msg = "" &redef; + + ## The gen-msg.map file you would like to use for your alerts. + const gen_msg = "" &redef; + + ## The classification.config file you would like to use for your alerts. + const classification_config = "" &redef; + + ## Reconstructed "alert" which combines related events + ## and packets. + global alert: event(f: fa_file, ev: Unified2::IDSEvent, pkt: Unified2::Packet); + + type PacketID: record { + src_ip: addr; + src_p: port; + dst_ip: addr; + dst_p: port; + } &log; + + type Info: record { + ## Timestamp attached to the alert. + ts: time &log; + ## Addresses and ports for the connection. + id: PacketID &log; + ## Sensor that originated this event. + sensor_id: count &log; + ## Sig id for this generator. + signature_id: count &log; + ## A string representation of the *signature_id* field if a sid_msg.map file was loaded. + signature: string &log &optional; + ## Which generator generated the alert? + generator_id: count &log; + ## A string representation of the *generator_id* field if a gen_msg.map file was loaded. + generator: string &log &optional; + ## Sig revision for this id. + signature_revision: count &log; + ## Event classification. + classification_id: count &log; + ## Descriptive classification string. + classification: string &log &optional; + ## Event priority. + priority_id: count &log; + ## Event ID. + event_id: count &log; + ## Some of the packet data. + packet: string &log &optional; + } &log; + + ## The event for accessing logged records. + global log_unified2: event(rec: Info); +} + +# Mappings for extended information from alerts. +global classification_map: table[count] of string; +global sid_map: table[count] of string; +global gen_map: table[count] of string; + +global num_classification_map_reads = 0; +global num_sid_map_reads = 0; +global num_gen_map_reads = 0; +global watching = F; + +# For reading in config files. 
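# A sketch of wiring this script up from local.zeek using the options exported
# above; the paths are placeholders for wherever a Snort-style sensor writes
# its unified2 spool and map files.
@load policy/files/unified2

redef Unified2::watch_dir = "/var/log/snort";
redef Unified2::sid_msg = "/etc/snort/sid-msg.map";
redef Unified2::gen_msg = "/etc/snort/gen-msg.map";
redef Unified2::classification_config = "/etc/snort/classification.config";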
+type OneLine: record { + line: string; +}; + +function mappings_initialized(): bool + { + return num_classification_map_reads > 0 && + num_sid_map_reads > 0 && + num_gen_map_reads > 0; + } + +function start_watching() + { + if ( watching ) + return; + + watching = T; + + if ( watch_dir != "" ) + { + Dir::monitor(watch_dir, function(fname: string) + { + Input::add_analysis([$source=fname, + $reader=Input::READER_BINARY, + $mode=Input::STREAM, + $name=fname]); + }, 10secs); + } + + if ( watch_file != "" ) + { + Input::add_analysis([$source=watch_file, + $reader=Input::READER_BINARY, + $mode=Input::STREAM, + $name=watch_file]); + } + } + +function create_info(ev: IDSEvent): Info + { + local info = Info($ts=ev$ts, + $id=PacketID($src_ip=ev$src_ip, $src_p=ev$src_p, + $dst_ip=ev$dst_ip, $dst_p=ev$dst_p), + $sensor_id=ev$sensor_id, + $signature_id=ev$signature_id, + $generator_id=ev$generator_id, + $signature_revision=ev$signature_revision, + $classification_id=ev$classification_id, + $priority_id=ev$priority_id, + $event_id=ev$event_id); + + if ( ev$signature_id in sid_map ) + info$signature=sid_map[ev$signature_id]; + if ( ev$generator_id in gen_map ) + info$generator=gen_map[ev$generator_id]; + if ( ev$classification_id in classification_map ) + info$classification=classification_map[ev$classification_id]; + + return info; + } + +redef record fa_file += { + ## Recently received IDS events. This is primarily used + ## for tying together Unified2 events and packets. + u2_events: table[count] of Unified2::IDSEvent + &optional &create_expire=5sec + &expire_func=function(t: table[count] of Unified2::IDSEvent, event_id: count): interval + { + Log::write(LOG, create_info(t[event_id])); + return 0secs; + }; +}; + +event Unified2::read_sid_msg_line(desc: Input::EventDescription, tpe: Input::Event, line: string) + { + local parts = split_string_n(line, / \|\| /, F, 100); + if ( |parts| >= 2 && /^[0-9]+$/ in parts[0] ) + sid_map[to_count(parts[0])] = parts[1]; + } + +event Unified2::read_gen_msg_line(desc: Input::EventDescription, tpe: Input::Event, line: string) + { + local parts = split_string_n(line, / \|\| /, F, 3); + if ( |parts| >= 2 && /^[0-9]+$/ in parts[0] ) + gen_map[to_count(parts[0])] = parts[2]; + } + +event Unified2::read_classification_line(desc: Input::EventDescription, tpe: Input::Event, line: string) + { + local parts = split_string_n(line, /: /, F, 2); + if ( |parts| == 2 ) + { + local parts2 = split_string_n(parts[1], /,/, F, 4); + if ( |parts2| > 1 ) + classification_map[|classification_map|+1] = parts2[0]; + } + } + +event Input::end_of_data(name: string, source: string) + { + if ( name == classification_config ) + ++num_classification_map_reads; + else if ( name == sid_msg ) + ++num_sid_map_reads; + else if ( name == gen_msg ) + ++num_gen_map_reads; + else + return; + + if ( watching ) + return; + + if ( mappings_initialized() ) + start_watching(); + } + +event zeek_init() &priority=5 + { + Log::create_stream(Unified2::LOG, [$columns=Info, $ev=log_unified2, $path="unified2"]); + + if ( sid_msg == "" ) + { + num_sid_map_reads = 1; + } + else + { + Input::add_event([$source=sid_msg, + $reader=Input::READER_RAW, + $mode=Input::REREAD, + $name=sid_msg, + $fields=Unified2::OneLine, + $want_record=F, + $ev=Unified2::read_sid_msg_line]); + } + + if ( gen_msg == "" ) + { + num_gen_map_reads = 1; + } + else + { + Input::add_event([$source=gen_msg, + $name=gen_msg, + $reader=Input::READER_RAW, + $mode=Input::REREAD, + $fields=Unified2::OneLine, + $want_record=F, + 
$ev=Unified2::read_gen_msg_line]); + } + + if ( classification_config == "" ) + { + num_classification_map_reads = 1; + } + else + { + Input::add_event([$source=classification_config, + $name=classification_config, + $reader=Input::READER_RAW, + $mode=Input::REREAD, + $fields=Unified2::OneLine, + $want_record=F, + $ev=Unified2::read_classification_line]); + } + + if ( mappings_initialized() ) + start_watching(); + } + +event file_new(f: fa_file) + { + local file_dir = ""; + local parts = split_string_all(f$source, /\/[^\/]*$/); + if ( |parts| == 3 ) + file_dir = parts[0]; + + if ( (watch_file != "" && f$source == watch_file) || + (watch_dir != "" && compress_path(watch_dir) == file_dir) ) + { + Files::add_analyzer(f, Files::ANALYZER_UNIFIED2); + f$u2_events = table(); + } + } + +event unified2_event(f: fa_file, ev: Unified2::IDSEvent) + { + f$u2_events[ev$event_id] = ev; + } + +event unified2_packet(f: fa_file, pkt: Unified2::Packet) + { + if ( f?$u2_events && pkt$event_id in f$u2_events) + { + local ev = f$u2_events[pkt$event_id]; + event Unified2::alert(f, ev, pkt); + delete f$u2_events[pkt$event_id]; + } + } + +event Unified2::alert(f: fa_file, ev: IDSEvent, pkt: Packet) + { + local info = create_info(ev); + info$packet=pkt$data; + Log::write(LOG, info); + } + +event file_state_remove(f: fa_file) + { + if ( f?$u2_events ) + { + # In case any events never had matching packets, flush + # the extras to the log. + for ( i, ev in f$u2_events ) + { + Log::write(LOG, create_info(ev)); + } + } + } diff --git a/scripts/policy/files/x509/log-ocsp.bro b/scripts/policy/files/x509/log-ocsp.bro deleted file mode 100644 index e416535dd4..0000000000 --- a/scripts/policy/files/x509/log-ocsp.bro +++ /dev/null @@ -1,62 +0,0 @@ -##! Enable logging of OCSP responses. -# -# This script is in policy and not loaded by default because OCSP logging -# does not provide a lot of interesting information in most environments. - -module OCSP; - -export { - redef enum Log::ID += { LOG }; - - ## The record type which contains the fields of the OCSP log. - type Info: record { - ## Time when the OCSP reply was encountered. - ts: time &log; - ## File id of the OCSP reply. - id: string &log; - ## Hash algorithm used to generate issuerNameHash and issuerKeyHash. - hashAlgorithm: string &log; - ## Hash of the issuer's distingueshed name. - issuerNameHash: string &log; - ## Hash of the issuer's public key. - issuerKeyHash: string &log; - ## Serial number of the affected certificate. - serialNumber: string &log; - ## Status of the affected certificate. - certStatus: string &log; - ## Time at which the certificate was revoked. - revoketime: time &log &optional; - ## Reason for which the certificate was revoked. - revokereason: string &log &optional; - ## The time at which the status being shows is known to have been correct. - thisUpdate: time &log; - ## The latest time at which new information about the status of the certificate will be available. - nextUpdate: time &log &optional; - }; - - ## Event that can be handled to access the OCSP record - ## as it is sent to the logging framework. 
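# A small sketch of consuming the reconstructed alerts produced by the unified2
# script above: handle Unified2::alert and act on each event/packet pair (here
# the handler only prints a couple of fields).
@load policy/files/unified2

event Unified2::alert(f: fa_file, ev: Unified2::IDSEvent, pkt: Unified2::Packet)
	{
	print fmt("unified2 alert sid=%d gid=%d from %s", ev$signature_id,
	          ev$generator_id, ev$src_ip);
	}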
- global log_ocsp: event(rec: Info); -} - -event bro_init() - { - Log::create_stream(LOG, [$columns=Info, $ev=log_ocsp, $path="ocsp"]); - Files::register_for_mime_type(Files::ANALYZER_OCSP_REPLY, "application/ocsp-response"); - } - -event ocsp_response_certificate(f: fa_file, hashAlgorithm: string, issuerNameHash: string, issuerKeyHash: string, serialNumber: string, certStatus: string, revoketime: time, revokereason: string, thisUpdate: time, nextUpdate: time) - { - local wr = OCSP::Info($ts=f$info$ts, $id=f$id, $hashAlgorithm=hashAlgorithm, $issuerNameHash=issuerNameHash, - $issuerKeyHash=issuerKeyHash, $serialNumber=serialNumber, $certStatus=certStatus, - $thisUpdate=thisUpdate); - - if ( revokereason != "" ) - wr$revokereason = revokereason; - if ( time_to_double(revoketime) != 0 ) - wr$revoketime = revoketime; - if ( time_to_double(nextUpdate) != 0 ) - wr$nextUpdate = nextUpdate; - - Log::write(LOG, wr); - } diff --git a/scripts/policy/files/x509/log-ocsp.zeek b/scripts/policy/files/x509/log-ocsp.zeek new file mode 100644 index 0000000000..8cc9d5aef3 --- /dev/null +++ b/scripts/policy/files/x509/log-ocsp.zeek @@ -0,0 +1,62 @@ +##! Enable logging of OCSP responses. +# +# This script is in policy and not loaded by default because OCSP logging +# does not provide a lot of interesting information in most environments. + +module OCSP; + +export { + redef enum Log::ID += { LOG }; + + ## The record type which contains the fields of the OCSP log. + type Info: record { + ## Time when the OCSP reply was encountered. + ts: time &log; + ## File id of the OCSP reply. + id: string &log; + ## Hash algorithm used to generate issuerNameHash and issuerKeyHash. + hashAlgorithm: string &log; + ## Hash of the issuer's distingueshed name. + issuerNameHash: string &log; + ## Hash of the issuer's public key. + issuerKeyHash: string &log; + ## Serial number of the affected certificate. + serialNumber: string &log; + ## Status of the affected certificate. + certStatus: string &log; + ## Time at which the certificate was revoked. + revoketime: time &log &optional; + ## Reason for which the certificate was revoked. + revokereason: string &log &optional; + ## The time at which the status being shows is known to have been correct. + thisUpdate: time &log; + ## The latest time at which new information about the status of the certificate will be available. + nextUpdate: time &log &optional; + }; + + ## Event that can be handled to access the OCSP record + ## as it is sent to the logging framework. 
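# A short sketch of tapping the OCSP log stream defined above: load the policy
# script and handle OCSP::log_ocsp (declared just below) to see each response.
@load policy/files/x509/log-ocsp

event OCSP::log_ocsp(rec: OCSP::Info)
	{
	print fmt("OCSP status %s for serial %s", rec$certStatus, rec$serialNumber);
	}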
+ global log_ocsp: event(rec: Info); +} + +event zeek_init() + { + Log::create_stream(LOG, [$columns=Info, $ev=log_ocsp, $path="ocsp"]); + Files::register_for_mime_type(Files::ANALYZER_OCSP_REPLY, "application/ocsp-response"); + } + +event ocsp_response_certificate(f: fa_file, hashAlgorithm: string, issuerNameHash: string, issuerKeyHash: string, serialNumber: string, certStatus: string, revoketime: time, revokereason: string, thisUpdate: time, nextUpdate: time) + { + local wr = OCSP::Info($ts=f$info$ts, $id=f$id, $hashAlgorithm=hashAlgorithm, $issuerNameHash=issuerNameHash, + $issuerKeyHash=issuerKeyHash, $serialNumber=serialNumber, $certStatus=certStatus, + $thisUpdate=thisUpdate); + + if ( revokereason != "" ) + wr$revokereason = revokereason; + if ( time_to_double(revoketime) != 0 ) + wr$revoketime = revoketime; + if ( time_to_double(nextUpdate) != 0 ) + wr$nextUpdate = nextUpdate; + + Log::write(LOG, wr); + } diff --git a/scripts/policy/frameworks/control/controllee.bro b/scripts/policy/frameworks/control/controllee.bro deleted file mode 100644 index c3f08cda2b..0000000000 --- a/scripts/policy/frameworks/control/controllee.bro +++ /dev/null @@ -1,85 +0,0 @@ -##! The controllee portion of the control framework. Load this script if remote -##! runtime control of the Bro process is desired. -##! -##! A controllee only needs to load the controllee script in addition -##! to the specific analysis scripts desired. It may also need a node -##! configured as a controller node in the communications nodes configuration:: -##! -##! bro frameworks/control/controllee - -@load base/frameworks/control -@load base/frameworks/broker - -module Control; - -event bro_init() &priority=-10 - { - Broker::subscribe(Control::topic_prefix + "/" + Broker::node_id()); - Broker::auto_publish(Control::topic_prefix + "/id_value_response", - Control::id_value_response); - Broker::auto_publish(Control::topic_prefix + "/peer_status_response", - Control::peer_status_response); - Broker::auto_publish(Control::topic_prefix + "/net_stats_response", - Control::net_stats_response); - Broker::auto_publish(Control::topic_prefix + "/configuration_update_response", - Control::configuration_update_response); - Broker::auto_publish(Control::topic_prefix + "/shutdown_response", - Control::shutdown_response); - - if ( Control::controllee_listen ) - Broker::listen(); - } - -event Control::id_value_request(id: string) - { - local val = lookup_ID(id); - event Control::id_value_response(id, fmt("%s", val)); - } - -event Control::peer_status_request() - { - local status = ""; - - # @todo: need to expose broker::endpoint::peers and broker::peer_status - local peers = Broker::peers(); - - for ( i in peers ) - { - local bpeer = peers[i]; - status += fmt("%.6f peer=%s host=%s status=%s\n", - network_time(), - bpeer$peer$id, - bpeer$peer$network$address, - bpeer$status); - } - - event Control::peer_status_response(status); - } - -event Control::net_stats_request() - { - local ns = get_net_stats(); - local reply = fmt("%.6f recvd=%d dropped=%d link=%d\n", network_time(), - ns$pkts_recvd, ns$pkts_dropped, ns$pkts_link); - event Control::net_stats_response(reply); - } - -event Control::configuration_update_request() - { - # Generate the alias event. - event Control::configuration_update(); - - # Don't need to do anything in particular here, it's just indicating that - # the configuration is going to be updated. 
This event could be handled - # by other scripts if they need to do some ancilliary processing if - # redef-able consts are modified at runtime. - event Control::configuration_update_response(); - } - -event Control::shutdown_request() - { - # Send the acknowledgement event. - event Control::shutdown_response(); - # Schedule the shutdown to let the current event queue flush itself first. - schedule 1sec { terminate_event() }; - } diff --git a/scripts/policy/frameworks/control/controllee.zeek b/scripts/policy/frameworks/control/controllee.zeek new file mode 100644 index 0000000000..f807915852 --- /dev/null +++ b/scripts/policy/frameworks/control/controllee.zeek @@ -0,0 +1,85 @@ +##! The controllee portion of the control framework. Load this script if remote +##! runtime control of the Zeek process is desired. +##! +##! A controllee only needs to load the controllee script in addition +##! to the specific analysis scripts desired. It may also need a node +##! configured as a controller node in the communications nodes configuration:: +##! +##! zeek frameworks/control/controllee + +@load base/frameworks/control +@load base/frameworks/broker + +module Control; + +event zeek_init() &priority=-10 + { + Broker::subscribe(Control::topic_prefix + "/" + Broker::node_id()); + Broker::auto_publish(Control::topic_prefix + "/id_value_response", + Control::id_value_response); + Broker::auto_publish(Control::topic_prefix + "/peer_status_response", + Control::peer_status_response); + Broker::auto_publish(Control::topic_prefix + "/net_stats_response", + Control::net_stats_response); + Broker::auto_publish(Control::topic_prefix + "/configuration_update_response", + Control::configuration_update_response); + Broker::auto_publish(Control::topic_prefix + "/shutdown_response", + Control::shutdown_response); + + if ( Control::controllee_listen ) + Broker::listen(); + } + +event Control::id_value_request(id: string) + { + local val = lookup_ID(id); + event Control::id_value_response(id, fmt("%s", val)); + } + +event Control::peer_status_request() + { + local status = ""; + + # @todo: need to expose broker::endpoint::peers and broker::peer_status + local peers = Broker::peers(); + + for ( i in peers ) + { + local bpeer = peers[i]; + status += fmt("%.6f peer=%s host=%s status=%s\n", + network_time(), + bpeer$peer$id, + bpeer$peer$network$address, + bpeer$status); + } + + event Control::peer_status_response(status); + } + +event Control::net_stats_request() + { + local ns = get_net_stats(); + local reply = fmt("%.6f recvd=%d dropped=%d link=%d\n", network_time(), + ns$pkts_recvd, ns$pkts_dropped, ns$pkts_link); + event Control::net_stats_response(reply); + } + +event Control::configuration_update_request() + { + # Generate the alias event. + event Control::configuration_update(); + + # Don't need to do anything in particular here, it's just indicating that + # the configuration is going to be updated. This event could be handled + # by other scripts if they need to do some ancilliary processing if + # redef-able consts are modified at runtime. + event Control::configuration_update_response(); + } + +event Control::shutdown_request() + { + # Send the acknowledgement event. + event Control::shutdown_response(); + # Schedule the shutdown to let the current event queue flush itself first. 
+ schedule 1sec { terminate_event() }; + } diff --git a/scripts/policy/frameworks/control/controller.bro b/scripts/policy/frameworks/control/controller.bro deleted file mode 100644 index b81ce4b2d6..0000000000 --- a/scripts/policy/frameworks/control/controller.bro +++ /dev/null @@ -1,133 +0,0 @@ -##! This is a utility script that implements the controller interface for the -##! control framework. It's intended to be run to control a remote Bro -##! and then shutdown. -##! -##! It's intended to be used from the command line like this:: -##! -##! bro frameworks/control/controller Control::host= Control::host_port= Control::cmd= [Control::arg=] - -@load base/frameworks/control -@load base/frameworks/broker - -module Control; - -# Do some sanity checking and rework the communication nodes. -event bro_init() &priority=5 - { - # We know that some command was given because this script wouldn't be - # loaded if there wasn't so we can feel free to throw an error here and - # shutdown. - if ( cmd !in commands ) - { - Reporter::error(fmt("The '%s' control command is unknown.", cmd)); - terminate(); - } - - Broker::subscribe(Control::topic_prefix); - Broker::peer(cat(host), host_port); - } - -event Control::id_value_response(id: string, val: string) &priority=-10 - { - event terminate_event(); - } - -event Control::peer_status_response(s: string) &priority=-10 - { - event terminate_event(); - } - -event Control::net_stats_response(s: string) &priority=-10 - { - event terminate_event(); - } - -event Control::configuration_update_response() &priority=-10 - { - event terminate_event(); - } - -event Control::shutdown_response() &priority=-10 - { - event terminate_event(); - } - -function configurable_ids(): id_table - { - local rval: id_table = table(); - local globals = global_ids(); - - for ( id in globals ) - { - if ( id in ignore_ids ) - next; - - local t = globals[id]; - - # Skip it if the variable isn't redefinable or not const. - # We don't want to update non-const globals because that's usually - # where state is stored and those values will frequently be declared - # with &redef so that attributes can be redefined. - # - # NOTE: functions are currently not fully supported for serialization and hence - # aren't sent. - if ( t$constant && t$redefinable && t$type_name != "func" ) - rval[id] = t; - } - - return rval; - } - -function send_control_request(topic: string) - { - switch ( cmd ) { - case "id_value": - if ( arg == "" ) - Reporter::fatal("The Control::id_value command requires that Control::arg also has some value."); - - Broker::publish(topic, Control::id_value_request, arg); - break; - - case "peer_status": - Broker::publish(topic, Control::peer_status_request); - break; - - case "net_stats": - Broker::publish(topic, Control::net_stats_request); - break; - - case "shutdown": - Broker::publish(topic, Control::shutdown_request); - break; - - case "configuration_update": - Broker::publish(topic, Control::configuration_update_request); - break; - - default: - Reporter::fatal(fmt("unhandled Control::cmd, %s", cmd)); - break; - } - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) &priority=-10 - { - local topic = Control::topic_prefix + "/" + endpoint$id; - - if ( cmd == "configuration_update" ) - { - # Send all &redef'able consts to the peer. 
- local ids = configurable_ids(); - local publish_count = 0; - - for ( id in ids ) - { - if ( Broker::publish_id(topic, id) ) - ++publish_count; - } - - Reporter::info(fmt("Control framework sent %d IDs", publish_count)); - } - - send_control_request(topic); - } diff --git a/scripts/policy/frameworks/control/controller.zeek b/scripts/policy/frameworks/control/controller.zeek new file mode 100644 index 0000000000..b68f89b345 --- /dev/null +++ b/scripts/policy/frameworks/control/controller.zeek @@ -0,0 +1,133 @@ +##! This is a utility script that implements the controller interface for the +##! control framework. It's intended to be run to control a remote Zeek +##! and then shutdown. +##! +##! It's intended to be used from the command line like this:: +##! +##! zeek frameworks/control/controller Control::host= Control::host_port= Control::cmd= [Control::arg=] + +@load base/frameworks/control +@load base/frameworks/broker + +module Control; + +# Do some sanity checking and rework the communication nodes. +event zeek_init() &priority=5 + { + # We know that some command was given because this script wouldn't be + # loaded if there wasn't so we can feel free to throw an error here and + # shutdown. + if ( cmd !in commands ) + { + Reporter::error(fmt("The '%s' control command is unknown.", cmd)); + terminate(); + } + + Broker::subscribe(Control::topic_prefix); + Broker::peer(cat(host), host_port); + } + +event Control::id_value_response(id: string, val: string) &priority=-10 + { + event terminate_event(); + } + +event Control::peer_status_response(s: string) &priority=-10 + { + event terminate_event(); + } + +event Control::net_stats_response(s: string) &priority=-10 + { + event terminate_event(); + } + +event Control::configuration_update_response() &priority=-10 + { + event terminate_event(); + } + +event Control::shutdown_response() &priority=-10 + { + event terminate_event(); + } + +function configurable_ids(): id_table + { + local rval: id_table = table(); + local globals = global_ids(); + + for ( id in globals ) + { + if ( id in ignore_ids ) + next; + + local t = globals[id]; + + # Skip it if the variable isn't redefinable or not const. + # We don't want to update non-const globals because that's usually + # where state is stored and those values will frequently be declared + # with &redef so that attributes can be redefined. + # + # NOTE: functions are currently not fully supported for serialization and hence + # aren't sent. + if ( t$constant && t$redefinable && t$type_name != "func" ) + rval[id] = t; + } + + return rval; + } + +function send_control_request(topic: string) + { + switch ( cmd ) { + case "id_value": + if ( arg == "" ) + Reporter::fatal("The Control::id_value command requires that Control::arg also has some value."); + + Broker::publish(topic, Control::id_value_request, arg); + break; + + case "peer_status": + Broker::publish(topic, Control::peer_status_request); + break; + + case "net_stats": + Broker::publish(topic, Control::net_stats_request); + break; + + case "shutdown": + Broker::publish(topic, Control::shutdown_request); + break; + + case "configuration_update": + Broker::publish(topic, Control::configuration_update_request); + break; + + default: + Reporter::fatal(fmt("unhandled Control::cmd, %s", cmd)); + break; + } + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) &priority=-10 + { + local topic = Control::topic_prefix + "/" + endpoint$id; + + if ( cmd == "configuration_update" ) + { + # Send all &redef'able consts to the peer. 
+ local ids = configurable_ids(); + local publish_count = 0; + + for ( id in ids ) + { + if ( Broker::publish_id(topic, id) ) + ++publish_count; + } + + Reporter::info(fmt("Control framework sent %d IDs", publish_count)); + } + + send_control_request(topic); + } diff --git a/scripts/policy/frameworks/dpd/detect-protocols.bro b/scripts/policy/frameworks/dpd/detect-protocols.zeek similarity index 100% rename from scripts/policy/frameworks/dpd/detect-protocols.bro rename to scripts/policy/frameworks/dpd/detect-protocols.zeek diff --git a/scripts/policy/frameworks/dpd/packet-segment-logging.bro b/scripts/policy/frameworks/dpd/packet-segment-logging.bro deleted file mode 100644 index 35a52c3870..0000000000 --- a/scripts/policy/frameworks/dpd/packet-segment-logging.bro +++ /dev/null @@ -1,29 +0,0 @@ -##! This script enables logging of packet segment data when a protocol -##! parsing violation is encountered. The amount of data from the -##! packet logged is set by the :bro:see:`DPD::packet_segment_size` variable. -##! A caveat to logging packet data is that in some cases, the packet may -##! not be the packet that actually caused the protocol violation. - -@load base/frameworks/dpd - -module DPD; - -export { - redef record Info += { - ## A chunk of the payload that most likely resulted in the - ## protocol violation. - packet_segment: string &optional &log; - }; - - ## Size of the packet segment to display in the DPD log. - option packet_segment_size: int = 255; -} - - -event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, - reason: string) &priority=4 - { - if ( ! c?$dpd ) return; - - c$dpd$packet_segment=fmt("%s", sub_bytes(get_current_packet()$data, 0, packet_segment_size)); - } diff --git a/scripts/policy/frameworks/dpd/packet-segment-logging.zeek b/scripts/policy/frameworks/dpd/packet-segment-logging.zeek new file mode 100644 index 0000000000..7dff2b07f8 --- /dev/null +++ b/scripts/policy/frameworks/dpd/packet-segment-logging.zeek @@ -0,0 +1,29 @@ +##! This script enables logging of packet segment data when a protocol +##! parsing violation is encountered. The amount of data from the +##! packet logged is set by the :zeek:see:`DPD::packet_segment_size` variable. +##! A caveat to logging packet data is that in some cases, the packet may +##! not be the packet that actually caused the protocol violation. + +@load base/frameworks/dpd + +module DPD; + +export { + redef record Info += { + ## A chunk of the payload that most likely resulted in the + ## protocol violation. + packet_segment: string &optional &log; + }; + + ## Size of the packet segment to display in the DPD log. + option packet_segment_size: int = 255; +} + + +event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, + reason: string) &priority=4 + { + if ( ! 
c?$dpd ) return; + + c$dpd$packet_segment=fmt("%s", sub_bytes(get_current_packet()$data, 0, packet_segment_size)); + } diff --git a/scripts/policy/frameworks/files/detect-MHR.bro b/scripts/policy/frameworks/files/detect-MHR.zeek similarity index 100% rename from scripts/policy/frameworks/files/detect-MHR.bro rename to scripts/policy/frameworks/files/detect-MHR.zeek diff --git a/scripts/policy/frameworks/files/entropy-test-all-files.bro b/scripts/policy/frameworks/files/entropy-test-all-files.zeek similarity index 100% rename from scripts/policy/frameworks/files/entropy-test-all-files.bro rename to scripts/policy/frameworks/files/entropy-test-all-files.zeek diff --git a/scripts/policy/frameworks/files/extract-all-files.bro b/scripts/policy/frameworks/files/extract-all-files.zeek similarity index 100% rename from scripts/policy/frameworks/files/extract-all-files.bro rename to scripts/policy/frameworks/files/extract-all-files.zeek diff --git a/scripts/policy/frameworks/files/hash-all-files.bro b/scripts/policy/frameworks/files/hash-all-files.zeek similarity index 100% rename from scripts/policy/frameworks/files/hash-all-files.bro rename to scripts/policy/frameworks/files/hash-all-files.zeek diff --git a/scripts/policy/frameworks/intel/do_expire.bro b/scripts/policy/frameworks/intel/do_expire.zeek similarity index 100% rename from scripts/policy/frameworks/intel/do_expire.bro rename to scripts/policy/frameworks/intel/do_expire.zeek diff --git a/scripts/policy/frameworks/intel/do_notice.bro b/scripts/policy/frameworks/intel/do_notice.zeek similarity index 100% rename from scripts/policy/frameworks/intel/do_notice.bro rename to scripts/policy/frameworks/intel/do_notice.zeek diff --git a/scripts/policy/frameworks/intel/removal.bro b/scripts/policy/frameworks/intel/removal.zeek similarity index 100% rename from scripts/policy/frameworks/intel/removal.bro rename to scripts/policy/frameworks/intel/removal.zeek diff --git a/scripts/policy/frameworks/intel/seen/__load__.bro b/scripts/policy/frameworks/intel/seen/__load__.zeek similarity index 100% rename from scripts/policy/frameworks/intel/seen/__load__.bro rename to scripts/policy/frameworks/intel/seen/__load__.zeek diff --git a/scripts/policy/frameworks/intel/seen/conn-established.bro b/scripts/policy/frameworks/intel/seen/conn-established.zeek similarity index 100% rename from scripts/policy/frameworks/intel/seen/conn-established.bro rename to scripts/policy/frameworks/intel/seen/conn-established.zeek diff --git a/scripts/policy/frameworks/intel/seen/dns.bro b/scripts/policy/frameworks/intel/seen/dns.zeek similarity index 100% rename from scripts/policy/frameworks/intel/seen/dns.bro rename to scripts/policy/frameworks/intel/seen/dns.zeek diff --git a/scripts/policy/frameworks/intel/seen/file-hashes.bro b/scripts/policy/frameworks/intel/seen/file-hashes.zeek similarity index 100% rename from scripts/policy/frameworks/intel/seen/file-hashes.bro rename to scripts/policy/frameworks/intel/seen/file-hashes.zeek diff --git a/scripts/policy/frameworks/intel/seen/file-names.bro b/scripts/policy/frameworks/intel/seen/file-names.zeek similarity index 100% rename from scripts/policy/frameworks/intel/seen/file-names.bro rename to scripts/policy/frameworks/intel/seen/file-names.zeek diff --git a/scripts/policy/frameworks/intel/seen/http-headers.bro b/scripts/policy/frameworks/intel/seen/http-headers.zeek similarity index 100% rename from scripts/policy/frameworks/intel/seen/http-headers.bro rename to scripts/policy/frameworks/intel/seen/http-headers.zeek diff 
--git a/scripts/policy/frameworks/intel/seen/http-url.bro b/scripts/policy/frameworks/intel/seen/http-url.zeek similarity index 100% rename from scripts/policy/frameworks/intel/seen/http-url.bro rename to scripts/policy/frameworks/intel/seen/http-url.zeek diff --git a/scripts/policy/frameworks/intel/seen/pubkey-hashes.bro b/scripts/policy/frameworks/intel/seen/pubkey-hashes.zeek similarity index 100% rename from scripts/policy/frameworks/intel/seen/pubkey-hashes.bro rename to scripts/policy/frameworks/intel/seen/pubkey-hashes.zeek diff --git a/scripts/policy/frameworks/intel/seen/smb-filenames.bro b/scripts/policy/frameworks/intel/seen/smb-filenames.zeek similarity index 100% rename from scripts/policy/frameworks/intel/seen/smb-filenames.bro rename to scripts/policy/frameworks/intel/seen/smb-filenames.zeek diff --git a/scripts/policy/frameworks/intel/seen/smtp-url-extraction.bro b/scripts/policy/frameworks/intel/seen/smtp-url-extraction.zeek similarity index 100% rename from scripts/policy/frameworks/intel/seen/smtp-url-extraction.bro rename to scripts/policy/frameworks/intel/seen/smtp-url-extraction.zeek diff --git a/scripts/policy/frameworks/intel/seen/smtp.bro b/scripts/policy/frameworks/intel/seen/smtp.zeek similarity index 100% rename from scripts/policy/frameworks/intel/seen/smtp.bro rename to scripts/policy/frameworks/intel/seen/smtp.zeek diff --git a/scripts/policy/frameworks/intel/seen/ssl.bro b/scripts/policy/frameworks/intel/seen/ssl.zeek similarity index 100% rename from scripts/policy/frameworks/intel/seen/ssl.bro rename to scripts/policy/frameworks/intel/seen/ssl.zeek diff --git a/scripts/policy/frameworks/intel/seen/where-locations.bro b/scripts/policy/frameworks/intel/seen/where-locations.zeek similarity index 100% rename from scripts/policy/frameworks/intel/seen/where-locations.bro rename to scripts/policy/frameworks/intel/seen/where-locations.zeek diff --git a/scripts/policy/frameworks/intel/seen/x509.bro b/scripts/policy/frameworks/intel/seen/x509.zeek similarity index 100% rename from scripts/policy/frameworks/intel/seen/x509.bro rename to scripts/policy/frameworks/intel/seen/x509.zeek diff --git a/scripts/policy/frameworks/intel/whitelist.bro b/scripts/policy/frameworks/intel/whitelist.zeek similarity index 100% rename from scripts/policy/frameworks/intel/whitelist.bro rename to scripts/policy/frameworks/intel/whitelist.zeek diff --git a/scripts/policy/frameworks/netcontrol/catch-and-release.zeek b/scripts/policy/frameworks/netcontrol/catch-and-release.zeek new file mode 100644 index 0000000000..b11170929f --- /dev/null +++ b/scripts/policy/frameworks/netcontrol/catch-and-release.zeek @@ -0,0 +1,534 @@ +##! Implementation of catch-and-release functionality for NetControl. + +@load base/frameworks/netcontrol +@load base/frameworks/cluster + +module NetControl; + +export { + + redef enum Log::ID += { CATCH_RELEASE }; + + ## This record is used for storing information about current blocks that are + ## part of catch and release. + type BlockInfo: record { + ## Absolute time indicating until when a block is inserted using NetControl. + block_until: time &optional; + ## Absolute time indicating until when an IP address is watched to reblock it. + watch_until: time; + ## Number of times an IP address was reblocked. + num_reblocked: count &default=0; + ## Number indicating at which catch and release interval we currently are. + current_interval: count; + ## ID of the inserted block, if any. + current_block_id: string; + ## User specified string. 
+ location: string &optional; + }; + + ## The enum that contains the different kinds of messages that are logged by + ## catch and release. + type CatchReleaseActions: enum { + ## Log lines marked with info are purely informational; no action was taken. + INFO, + ## A rule for the specified IP address already existed in NetControl (outside + ## of catch-and-release). Catch and release did not add a new rule, but is now + ## watching the IP address and will add a new rule after the current rule expires. + ADDED, + ## A drop was requested by catch and release. + DROP, + ## An address was successfully blocked by catch and release. + DROPPED, + ## An address was unblocked after the timeout expired. + UNBLOCK, + ## An address was forgotten because it did not reappear within the `watch_until` interval. + FORGOTTEN, + ## A watched IP address was seen again; catch and release will re-block it. + SEEN_AGAIN + }; + + ## The record type that is used for representing and logging catch and release actions. + type CatchReleaseInfo: record { + ## The absolute time indicating when the action for this log-line occurred. + ts: time &log; + ## The rule id that this log line refers to. + rule_id: string &log &optional; + ## The IP address that this line refers to. + ip: addr &log; + ## The action that was taken in this log-line. + action: CatchReleaseActions &log; + ## The current block_interval (for how long the address is blocked). + block_interval: interval &log &optional; + ## The current watch_interval (for how long the address will be watched and re-blocked if it reappears). + watch_interval: interval &log &optional; + ## The absolute time until which the address is blocked. + blocked_until: time &log &optional; + ## The absolute time until which the address will be monitored. + watched_until: time &log &optional; + ## Number of times that this address was blocked in the current cycle. + num_blocked: count &log &optional; + ## The user specified location string. + location: string &log &optional; + ## Additional informational string by the catch and release framework about this log-line. + message: string &log &optional; + }; + + ## Stops all packets involving an IP address from being forwarded. This function + ## uses catch-and-release functionality, where the IP address is only dropped for + ## a short amount of time that is incremented steadily when the IP is encountered + ## again. + ## + ## In cluster mode, this function works on workers as well as the manager. On workers, + ## the returned :zeek:see:`NetControl::BlockInfo` record will not contain the block ID, + ## which will be assigned on the manager. + ## + ## a: The address to be dropped. + ## + ## location: An optional string describing where the drop was triggered. + ## + ## Returns: The :zeek:see:`NetControl::BlockInfo` record containing information about + ## the inserted block. + global drop_address_catch_release: function(a: addr, location: string &default="") : BlockInfo; + + ## Removes an address from being watched with catch and release. Returns true if the + ## address was found and removed; returns false if it was unknown to catch and release. + ## + ## If the address is currently blocked, and the block was inserted by catch and release, + ## the block is removed. + ## + ## a: The address to be unblocked. + ## + ## reason: A reason for the unblock. + ## + ## Returns: True if the address was unblocked. 
+ global unblock_address_catch_release: function(a: addr, reason: string &default="") : bool; + + ## This function can be called to notify the catch and release script that activity by + ## an IP address was seen. If the respective IP address is currently monitored by catch and + ## release and not blocked, the block will be reinstated. See the documentation of watch_connections + ## for the events that the catch and release functionality usually monitors for activity. + ## + ## a: The address that was seen and should be re-dropped if it is being watched. + global catch_release_seen: function(a: addr); + + ## Get the :zeek:see:`NetControl::BlockInfo` record for an address currently blocked by catch and release. + ## If the address is unknown to catch and release, the watch_until time will be set to 0. + ## + ## In cluster mode, this function works on the manager and workers. On workers, the data will + ## lag slightly behind the manager; if you add a block, it will not be instantly available via + ## this function. + ## + ## a: The address to get information about. + ## + ## Returns: The :zeek:see:`NetControl::BlockInfo` record containing information about + ## the inserted block. + global get_catch_release_info: function(a: addr) : BlockInfo; + + ## Event that is raised when catch and release ceases management of an IP address because no + ## activity was seen within the watch_until period. + ## + ## a: The address that is no longer being managed. + ## + ## bi: The :zeek:see:`NetControl::BlockInfo` record containing information about the block. + global catch_release_forgotten: event(a: addr, bi: BlockInfo); + + ## If true, catch_release_seen is called on the connection originator in new_connection, + ## connection_established, partial_connection, connection_attempt, connection_rejected, + ## connection_reset and connection_pending events. + const watch_connections = T &redef; + + ## If true, catch and release warns if packets of an IP address are still seen after it + ## should have been blocked. + option catch_release_warn_blocked_ip_encountered = F; + + ## Time intervals for which subsequent drops of the same IP take + ## effect. + const catch_release_intervals: vector of interval = vector(10min, 1hr, 24hrs, 7days) &redef; + + ## Event that can be handled to access the :zeek:type:`NetControl::CatchReleaseInfo` + ## record as it is sent on to the logging framework. + global log_netcontrol_catch_release: event(rec: CatchReleaseInfo); + + # Cluster events for catch and release + global catch_release_block_new: event(a: addr, b: BlockInfo); + global catch_release_block_delete: event(a: addr); + global catch_release_add: event(a: addr, location: string); + global catch_release_delete: event(a: addr, reason: string); + global catch_release_encountered: event(a: addr); +} + +# Set that is used to only send seen notifications to the master every ~30 seconds. 
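# A minimal usage sketch of the catch-and-release API exported above, using only the
# functions and records this file introduces (NetControl::get_catch_release_info,
# NetControl::drop_address_catch_release, NetControl::BlockInfo); the wrapper function
# name and the location string are hypothetical.
function drop_if_unknown(a: addr)
	{
	local ci = NetControl::get_catch_release_info(a);

	# A watch_until of 0 means catch and release is not yet tracking this address.
	if ( ci$watch_until == double_to_time(0) )
		{
		local bi = NetControl::drop_address_catch_release(a, "example: manual drop");
		Reporter::info(fmt("catch and release is now watching %s until %s", a, bi$watch_until));
		}
	}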
+global catch_release_recently_notified: set[addr] &create_expire=30secs; + +event zeek_init() &priority=5 + { + Log::create_stream(NetControl::CATCH_RELEASE, [$columns=CatchReleaseInfo, $ev=log_netcontrol_catch_release, $path="netcontrol_catch_release"]); + } + +function get_watch_interval(current_interval: count): interval + { + if ( (current_interval + 1) in catch_release_intervals ) + return catch_release_intervals[current_interval+1]; + else + return catch_release_intervals[current_interval]; + } + +function populate_log_record(ip: addr, bi: BlockInfo, action: CatchReleaseActions): CatchReleaseInfo + { + local log = CatchReleaseInfo($ts=network_time(), $ip=ip, $action=action, + $block_interval=catch_release_intervals[bi$current_interval], + $watch_interval=get_watch_interval(bi$current_interval), + $watched_until=bi$watch_until, + $num_blocked=bi$num_reblocked+1 + ); + + if ( bi?$block_until ) + log$blocked_until = bi$block_until; + + if ( bi?$current_block_id && bi$current_block_id != "" ) + log$rule_id = bi$current_block_id; + + if ( bi?$location ) + log$location = bi$location; + + return log; + } + +function per_block_interval(t: table[addr] of BlockInfo, idx: addr): interval + { + local remaining_time = t[idx]$watch_until - network_time(); + if ( remaining_time < 0secs ) + remaining_time = 0secs; + +@if ( ! Cluster::is_enabled() || ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) ) + if ( remaining_time == 0secs ) + { + local log = populate_log_record(idx, t[idx], FORGOTTEN); + Log::write(CATCH_RELEASE, log); + + event NetControl::catch_release_forgotten(idx, t[idx]); + } +@endif + + return remaining_time; + } + +# This is the internally maintained table containing all the addresses that are currently being +# watched to see if they will re-surface. After the time is reached, monitoring of that specific +# IP will stop. +global blocks: table[addr] of BlockInfo = {} + &create_expire=0secs + &expire_func=per_block_interval; + + +@if ( Cluster::is_enabled() ) + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) +event zeek_init() + { + Broker::auto_publish(Cluster::worker_topic, NetControl::catch_release_block_new); + Broker::auto_publish(Cluster::worker_topic, NetControl::catch_release_block_delete); + } +@else +event zeek_init() + { + Broker::auto_publish(Cluster::manager_topic, NetControl::catch_release_add); + Broker::auto_publish(Cluster::manager_topic, NetControl::catch_release_delete); + Broker::auto_publish(Cluster::manager_topic, NetControl::catch_release_encountered); + } +@endif + +@endif + +function cr_check_rule(r: Rule): bool + { + if ( r$ty == DROP && r$entity$ty == ADDRESS ) + { + local ip = r$entity$ip; + if ( ( is_v4_subnet(ip) && subnet_width(ip) == 32 ) || ( is_v6_subnet(ip) && subnet_width(ip) == 128 ) ) + { + if ( subnet_to_addr(ip) in blocks ) + return T; + } + } + + return F; + } + +@if ( ! 
Cluster::is_enabled() || ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) ) + +event rule_added(r: Rule, p: PluginState, msg: string &default="") + { + if ( !cr_check_rule(r) ) + return; + + local ip = subnet_to_addr(r$entity$ip); + local bi = blocks[ip]; + + local log = populate_log_record(ip, bi, DROPPED); + if ( msg != "" ) + log$message = msg; + Log::write(CATCH_RELEASE, log); + } + + +event rule_timeout(r: Rule, i: FlowInfo, p: PluginState) + { + if ( !cr_check_rule(r) ) + return; + + local ip = subnet_to_addr(r$entity$ip); + local bi = blocks[ip]; + + local log = populate_log_record(ip, bi, UNBLOCK); + if ( bi?$block_until ) + { + local difference: interval = network_time() - bi$block_until; + if ( interval_to_double(difference) > 60 || interval_to_double(difference) < -60 ) + log$message = fmt("Difference between network_time and block time excessive: %f", difference); + } + + Log::write(CATCH_RELEASE, log); + } + +@endif + +@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) +event catch_release_add(a: addr, location: string) + { + drop_address_catch_release(a, location); + } + +event catch_release_delete(a: addr, reason: string) + { + unblock_address_catch_release(a, reason); + } + +event catch_release_encountered(a: addr) + { + catch_release_seen(a); + } +@endif + +@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) +event catch_release_block_new(a: addr, b: BlockInfo) + { + blocks[a] = b; + } + +event catch_release_block_delete(a: addr) + { + if ( a in blocks ) + delete blocks[a]; + } +@endif + +@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) +@endif + +function get_catch_release_info(a: addr): BlockInfo + { + if ( a in blocks ) + return blocks[a]; + + return BlockInfo($watch_until=double_to_time(0), $current_interval=0, $current_block_id=""); + } + +function drop_address_catch_release(a: addr, location: string &default=""): BlockInfo + { + local bi: BlockInfo; + local log: CatchReleaseInfo; + + if ( a in blocks ) + { + log = populate_log_record(a, blocks[a], INFO); + log$message = "Already blocked using catch-and-release - ignoring duplicate"; + Log::write(CATCH_RELEASE, log); + + return blocks[a]; + } + + local e = Entity($ty=ADDRESS, $ip=addr_to_subnet(a)); + if ( [e,DROP] in rule_entities ) + { + local r = rule_entities[e,DROP]; + + bi = BlockInfo($watch_until=network_time()+catch_release_intervals[1], $current_interval=0, $current_block_id=r$id); + if ( location != "" ) + bi$location = location; +@if ( ! Cluster::is_enabled() || ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) ) + log = populate_log_record(a, bi, ADDED); + log$message = "Address already blocked outside of catch-and-release. Catch and release will monitor and only actively block if it appears in network traffic."; + Log::write(CATCH_RELEASE, log); + blocks[a] = bi; + event NetControl::catch_release_block_new(a, bi); +@endif +@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) + event NetControl::catch_release_add(a, location); +@endif + return bi; + } + + local block_interval = catch_release_intervals[0]; + +@if ( ! 
Cluster::is_enabled() || ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) ) + local ret = drop_address(a, block_interval, location); + + if ( ret != "" ) + { + bi = BlockInfo($watch_until=network_time()+catch_release_intervals[1], $block_until=network_time()+block_interval, $current_interval=0, $current_block_id=ret); + if ( location != "" ) + bi$location = location; + blocks[a] = bi; + event NetControl::catch_release_block_new(a, bi); + blocks[a] = bi; + log = populate_log_record(a, bi, DROP); + Log::write(CATCH_RELEASE, log); + return bi; + } + Reporter::error(fmt("Catch and release could not add block for %s; failing.", a)); + return BlockInfo($watch_until=double_to_time(0), $current_interval=0, $current_block_id=""); +@endif + +@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) + bi = BlockInfo($watch_until=network_time()+catch_release_intervals[1], $block_until=network_time()+block_interval, $current_interval=0, $current_block_id=""); + event NetControl::catch_release_add(a, location); + return bi; +@endif + + } + +function unblock_address_catch_release(a: addr, reason: string &default=""): bool + { + if ( a !in blocks ) + return F; + +@if ( ! Cluster::is_enabled() || ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) ) + local bi = blocks[a]; + local log = populate_log_record(a, bi, UNBLOCK); + if ( reason != "" ) + log$message = reason; + Log::write(CATCH_RELEASE, log); + delete blocks[a]; + if ( bi?$block_until && bi$block_until > network_time() && bi$current_block_id != "" ) + remove_rule(bi$current_block_id, reason); +@endif +@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) + event NetControl::catch_release_block_delete(a); +@endif +@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) + event NetControl::catch_release_delete(a, reason); +@endif + + return T; + } + +function catch_release_seen(a: addr) + { + if ( a in blocks ) + { +@if ( ! Cluster::is_enabled() || ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) ) + local bi = blocks[a]; + local log: CatchReleaseInfo; + local e = Entity($ty=ADDRESS, $ip=addr_to_subnet(a)); + + if ( [e,DROP] in rule_entities ) + { + if ( catch_release_warn_blocked_ip_encountered == F ) + return; + + # This should be blocked - block has not been applied yet by hardware? Ignore for the moment... + log = populate_log_record(a, bi, INFO); + log$action = INFO; + log$message = "Block seen while in rule_entities. No action taken."; + Log::write(CATCH_RELEASE, log); + return; + } + + # ok, this one returned again while still in the backoff period. 
+ + local try = bi$current_interval; + if ( (try+1) in catch_release_intervals ) + ++try; + + bi$current_interval = try; + if ( (try+1) in catch_release_intervals ) + bi$watch_until = network_time() + catch_release_intervals[try+1]; + else + bi$watch_until = network_time() + catch_release_intervals[try]; + + bi$block_until = network_time() + catch_release_intervals[try]; + ++bi$num_reblocked; + + local block_interval = catch_release_intervals[try]; + local location = ""; + if ( bi?$location ) + location = bi$location; + local drop = drop_address(a, block_interval, fmt("Re-drop by catch-and-release: %s", location)); + bi$current_block_id = drop; + + blocks[a] = bi; + + log = populate_log_record(a, bi, SEEN_AGAIN); + Log::write(CATCH_RELEASE, log); +@endif +@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) + event NetControl::catch_release_block_new(a, bi); +@endif +@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) + if ( a in catch_release_recently_notified ) + return; + + event NetControl::catch_release_encountered(a); + add catch_release_recently_notified[a]; +@endif + + return; + } + + return; + } + +event new_connection(c: connection) + { + if ( watch_connections ) + catch_release_seen(c$id$orig_h); + } + +event connection_established(c: connection) + { + if ( watch_connections ) + catch_release_seen(c$id$orig_h); + } + +event partial_connection(c: connection) + { + if ( watch_connections ) + catch_release_seen(c$id$orig_h); + } + +event connection_attempt(c: connection) + { + if ( watch_connections ) + catch_release_seen(c$id$orig_h); + } + +event connection_rejected(c: connection) + { + if ( watch_connections ) + catch_release_seen(c$id$orig_h); + } + +event connection_reset(c: connection) + { + if ( watch_connections ) + catch_release_seen(c$id$orig_h); + } + +event connection_pending(c: connection) + { + if ( watch_connections ) + catch_release_seen(c$id$orig_h); + } diff --git a/scripts/policy/frameworks/notice/__load__.bro b/scripts/policy/frameworks/notice/__load__.zeek similarity index 100% rename from scripts/policy/frameworks/notice/__load__.bro rename to scripts/policy/frameworks/notice/__load__.zeek diff --git a/scripts/policy/frameworks/notice/actions/drop.zeek b/scripts/policy/frameworks/notice/actions/drop.zeek new file mode 100644 index 0000000000..03862bac08 --- /dev/null +++ b/scripts/policy/frameworks/notice/actions/drop.zeek @@ -0,0 +1,36 @@ +##! This script extends the built in notice code to implement the IP address +##! dropping functionality. + +@load base/frameworks/notice/main +@load base/frameworks/netcontrol +@load policy/frameworks/netcontrol/catch-and-release + +module Notice; + +export { + redef enum Action += { + ## Drops the address via :zeek:see:`NetControl::drop_address_catch_release`. + ACTION_DROP + }; + + redef record Info += { + ## Indicate if the $src IP address was dropped and denied + ## network access. + dropped: bool &log &default=F; + }; +} + +hook notice(n: Notice::Info) &priority=-5 + { + if ( ACTION_DROP in n$actions ) + { + local ci = NetControl::get_catch_release_info(n$src); + if ( ci$watch_until == double_to_time(0) ) + { + # we have not seen this one yet. Drop it. + local addl = n?$msg ? 
fmt("ACTION_DROP: %s", n?$msg) : "ACTION_DROP"; + local res = NetControl::drop_address_catch_release(n$src, addl); + n$dropped = res$watch_until != double_to_time(0); + } + } + } diff --git a/scripts/policy/frameworks/notice/extend-email/hostnames.bro b/scripts/policy/frameworks/notice/extend-email/hostnames.bro deleted file mode 100644 index 9ee58d3e0b..0000000000 --- a/scripts/policy/frameworks/notice/extend-email/hostnames.bro +++ /dev/null @@ -1,52 +0,0 @@ -##! Loading this script extends the :bro:enum:`Notice::ACTION_EMAIL` action -##! by appending to the email the hostnames associated with -##! :bro:type:`Notice::Info`'s *src* and *dst* fields as determined by a -##! DNS lookup. - -@load base/frameworks/notice/main - -module Notice; - -# We have to store references to the notices here because the when statement -# clones the frame which doesn't give us access to modify values outside -# of it's execution scope. (we get a clone of the notice instead of a -# reference to the original notice) -global tmp_notice_storage: table[string] of Notice::Info &create_expire=max_email_delay+10secs; - -hook notice(n: Notice::Info) &priority=10 - { - if ( ! n?$src && ! n?$dst ) - return; - - # This should only be done for notices that are being sent to email. - if ( ACTION_EMAIL !in n$actions ) - return; - - # I'm not recovering gracefully from the when statements because I want - # the notice framework to detect that something has exceeded the maximum - # allowed email delay and tell the user. - local uid = unique_id(""); - tmp_notice_storage[uid] = n; - - local output = ""; - if ( n?$src ) - { - add n$email_delay_tokens["hostnames-src"]; - when ( local src_name = lookup_addr(n$src) ) - { - output = string_cat("orig/src hostname: ", src_name, "\n"); - tmp_notice_storage[uid]$email_body_sections += output; - delete tmp_notice_storage[uid]$email_delay_tokens["hostnames-src"]; - } - } - if ( n?$dst ) - { - add n$email_delay_tokens["hostnames-dst"]; - when ( local dst_name = lookup_addr(n$dst) ) - { - output = string_cat("resp/dst hostname: ", dst_name, "\n"); - tmp_notice_storage[uid]$email_body_sections += output; - delete tmp_notice_storage[uid]$email_delay_tokens["hostnames-dst"]; - } - } - } diff --git a/scripts/policy/frameworks/notice/extend-email/hostnames.zeek b/scripts/policy/frameworks/notice/extend-email/hostnames.zeek new file mode 100644 index 0000000000..5be74c7913 --- /dev/null +++ b/scripts/policy/frameworks/notice/extend-email/hostnames.zeek @@ -0,0 +1,52 @@ +##! Loading this script extends the :zeek:enum:`Notice::ACTION_EMAIL` action +##! by appending to the email the hostnames associated with +##! :zeek:type:`Notice::Info`'s *src* and *dst* fields as determined by a +##! DNS lookup. + +@load base/frameworks/notice/main + +module Notice; + +# We have to store references to the notices here because the when statement +# clones the frame which doesn't give us access to modify values outside +# of it's execution scope. (we get a clone of the notice instead of a +# reference to the original notice) +global tmp_notice_storage: table[string] of Notice::Info &create_expire=max_email_delay+10secs; + +hook notice(n: Notice::Info) &priority=10 + { + if ( ! n?$src && ! n?$dst ) + return; + + # This should only be done for notices that are being sent to email. + if ( ACTION_EMAIL !in n$actions ) + return; + + # I'm not recovering gracefully from the when statements because I want + # the notice framework to detect that something has exceeded the maximum + # allowed email delay and tell the user. 
+ local uid = unique_id(""); + tmp_notice_storage[uid] = n; + + local output = ""; + if ( n?$src ) + { + add n$email_delay_tokens["hostnames-src"]; + when ( local src_name = lookup_addr(n$src) ) + { + output = string_cat("orig/src hostname: ", src_name, "\n"); + tmp_notice_storage[uid]$email_body_sections += output; + delete tmp_notice_storage[uid]$email_delay_tokens["hostnames-src"]; + } + } + if ( n?$dst ) + { + add n$email_delay_tokens["hostnames-dst"]; + when ( local dst_name = lookup_addr(n$dst) ) + { + output = string_cat("resp/dst hostname: ", dst_name, "\n"); + tmp_notice_storage[uid]$email_body_sections += output; + delete tmp_notice_storage[uid]$email_delay_tokens["hostnames-dst"]; + } + } + } diff --git a/scripts/policy/frameworks/packet-filter/shunt.bro b/scripts/policy/frameworks/packet-filter/shunt.bro deleted file mode 100644 index 97ae0c792d..0000000000 --- a/scripts/policy/frameworks/packet-filter/shunt.bro +++ /dev/null @@ -1,170 +0,0 @@ -@load base/frameworks/notice -@load base/frameworks/packet-filter - -module PacketFilter; - -export { - ## The maximum number of BPF based shunts that Bro is allowed to perform. - const max_bpf_shunts = 100 &redef; - - ## Call this function to use BPF to shunt a connection (to prevent the - ## data packets from reaching Bro). For TCP connections, control - ## packets are still allowed through so that Bro can continue logging - ## the connection and it can stop shunting once the connection ends. - global shunt_conn: function(id: conn_id): bool; - - ## This function will use a BPF expression to shunt traffic between - ## the two hosts given in the `conn_id` so that the traffic is never - ## exposed to Bro's traffic processing. - global shunt_host_pair: function(id: conn_id): bool; - - ## Remove shunting for a host pair given as a `conn_id`. The filter - ## is not immediately removed. It waits for the occasional filter - ## update done by the `PacketFilter` framework. - global unshunt_host_pair: function(id: conn_id): bool; - - ## Performs the same function as the :bro:id:`PacketFilter::unshunt_host_pair` - ## function, but it forces an immediate filter update. - global force_unshunt_host_pair: function(id: conn_id): bool; - - ## Retrieve the currently shunted connections. - global current_shunted_conns: function(): set[conn_id]; - - ## Retrieve the currently shunted host pairs. - global current_shunted_host_pairs: function(): set[conn_id]; - - redef enum Notice::Type += { - ## Indicative that :bro:id:`PacketFilter::max_bpf_shunts` - ## connections are already being shunted with BPF filters and - ## no more are allowed. - No_More_Conn_Shunts_Available, - - ## Limitations in BPF make shunting some connections with BPF - ## impossible. This notice encompasses those various cases. - Cannot_BPF_Shunt_Conn, - }; -} - -global shunted_conns: set[conn_id]; -global shunted_host_pairs: set[conn_id]; - -function shunt_filters() - { - # NOTE: this could wrongly match if a connection happens with the ports reversed. 
- local tcp_filter = ""; - local udp_filter = ""; - for ( id in shunted_conns ) - { - local prot = get_port_transport_proto(id$resp_p); - - local filt = fmt("host %s and port %d and host %s and port %d", id$orig_h, id$orig_p, id$resp_h, id$resp_p); - if ( prot == udp ) - udp_filter = combine_filters(udp_filter, "and", filt); - else if ( prot == tcp ) - tcp_filter = combine_filters(tcp_filter, "and", filt); - } - if ( tcp_filter != "" ) - tcp_filter = combine_filters("tcp and tcp[tcpflags] & (tcp-syn|tcp-fin|tcp-rst) == 0", "and", tcp_filter); - local conn_shunt_filter = combine_filters(tcp_filter, "and", udp_filter); - - local hp_shunt_filter = ""; - for ( id in shunted_host_pairs ) - hp_shunt_filter = combine_filters(hp_shunt_filter, "and", fmt("host %s and host %s", id$orig_h, id$resp_h)); - - local filter = combine_filters(conn_shunt_filter, "and", hp_shunt_filter); - if ( filter != "" ) - PacketFilter::exclude("shunt_filters", filter); -} - -event bro_init() &priority=5 - { - register_filter_plugin([ - $func()={ return shunt_filters(); } - ]); - } - -function current_shunted_conns(): set[conn_id] - { - return shunted_conns; - } - -function current_shunted_host_pairs(): set[conn_id] - { - return shunted_host_pairs; - } - -function reached_max_shunts(): bool - { - if ( |shunted_conns| + |shunted_host_pairs| > max_bpf_shunts ) - { - NOTICE([$note=No_More_Conn_Shunts_Available, - $msg=fmt("%d BPF shunts are in place and no more will be added until space clears.", max_bpf_shunts)]); - return T; - } - else - return F; - } - -function shunt_host_pair(id: conn_id): bool - { - PacketFilter::filter_changed = T; - - if ( reached_max_shunts() ) - return F; - - add shunted_host_pairs[id]; - install(); - return T; - } - -function unshunt_host_pair(id: conn_id): bool - { - PacketFilter::filter_changed = T; - - if ( id in shunted_host_pairs ) - { - delete shunted_host_pairs[id]; - return T; - } - else - return F; - } - -function force_unshunt_host_pair(id: conn_id): bool - { - if ( unshunt_host_pair(id) ) - { - install(); - return T; - } - else - return F; - } - -function shunt_conn(id: conn_id): bool - { - if ( is_v6_addr(id$orig_h) ) - { - NOTICE([$note=Cannot_BPF_Shunt_Conn, - $msg="IPv6 connections can't be shunted with BPF due to limitations in BPF", - $sub="ipv6_conn", - $id=id, $identifier=cat(id)]); - return F; - } - - if ( reached_max_shunts() ) - return F; - - PacketFilter::filter_changed = T; - add shunted_conns[id]; - install(); - return T; - } - -event connection_state_remove(c: connection) &priority=-5 - { - # Don't rebuild the filter right away because the packet filter framework - # will check every few minutes and update the filter if things have changed. - if ( c$id in shunted_conns ) - delete shunted_conns[c$id]; - } diff --git a/scripts/policy/frameworks/packet-filter/shunt.zeek b/scripts/policy/frameworks/packet-filter/shunt.zeek new file mode 100644 index 0000000000..8fd2596fdc --- /dev/null +++ b/scripts/policy/frameworks/packet-filter/shunt.zeek @@ -0,0 +1,170 @@ +@load base/frameworks/notice +@load base/frameworks/packet-filter + +module PacketFilter; + +export { + ## The maximum number of BPF based shunts that Zeek is allowed to perform. + const max_bpf_shunts = 100 &redef; + + ## Call this function to use BPF to shunt a connection (to prevent the + ## data packets from reaching Zeek). For TCP connections, control + ## packets are still allowed through so that Zeek can continue logging + ## the connection and it can stop shunting once the connection ends. 
+ global shunt_conn: function(id: conn_id): bool; + + ## This function will use a BPF expression to shunt traffic between + ## the two hosts given in the `conn_id` so that the traffic is never + ## exposed to Zeek's traffic processing. + global shunt_host_pair: function(id: conn_id): bool; + + ## Remove shunting for a host pair given as a `conn_id`. The filter + ## is not immediately removed. It waits for the occasional filter + ## update done by the `PacketFilter` framework. + global unshunt_host_pair: function(id: conn_id): bool; + + ## Performs the same function as the :zeek:id:`PacketFilter::unshunt_host_pair` + ## function, but it forces an immediate filter update. + global force_unshunt_host_pair: function(id: conn_id): bool; + + ## Retrieve the currently shunted connections. + global current_shunted_conns: function(): set[conn_id]; + + ## Retrieve the currently shunted host pairs. + global current_shunted_host_pairs: function(): set[conn_id]; + + redef enum Notice::Type += { + ## Indicative that :zeek:id:`PacketFilter::max_bpf_shunts` + ## connections are already being shunted with BPF filters and + ## no more are allowed. + No_More_Conn_Shunts_Available, + + ## Limitations in BPF make shunting some connections with BPF + ## impossible. This notice encompasses those various cases. + Cannot_BPF_Shunt_Conn, + }; +} + +global shunted_conns: set[conn_id]; +global shunted_host_pairs: set[conn_id]; + +function shunt_filters() + { + # NOTE: this could wrongly match if a connection happens with the ports reversed. + local tcp_filter = ""; + local udp_filter = ""; + for ( id in shunted_conns ) + { + local prot = get_port_transport_proto(id$resp_p); + + local filt = fmt("host %s and port %d and host %s and port %d", id$orig_h, id$orig_p, id$resp_h, id$resp_p); + if ( prot == udp ) + udp_filter = combine_filters(udp_filter, "and", filt); + else if ( prot == tcp ) + tcp_filter = combine_filters(tcp_filter, "and", filt); + } + if ( tcp_filter != "" ) + tcp_filter = combine_filters("tcp and tcp[tcpflags] & (tcp-syn|tcp-fin|tcp-rst) == 0", "and", tcp_filter); + local conn_shunt_filter = combine_filters(tcp_filter, "and", udp_filter); + + local hp_shunt_filter = ""; + for ( id in shunted_host_pairs ) + hp_shunt_filter = combine_filters(hp_shunt_filter, "and", fmt("host %s and host %s", id$orig_h, id$resp_h)); + + local filter = combine_filters(conn_shunt_filter, "and", hp_shunt_filter); + if ( filter != "" ) + PacketFilter::exclude("shunt_filters", filter); +} + +event zeek_init() &priority=5 + { + register_filter_plugin([ + $func()={ return shunt_filters(); } + ]); + } + +function current_shunted_conns(): set[conn_id] + { + return shunted_conns; + } + +function current_shunted_host_pairs(): set[conn_id] + { + return shunted_host_pairs; + } + +function reached_max_shunts(): bool + { + if ( |shunted_conns| + |shunted_host_pairs| > max_bpf_shunts ) + { + NOTICE([$note=No_More_Conn_Shunts_Available, + $msg=fmt("%d BPF shunts are in place and no more will be added until space clears.", max_bpf_shunts)]); + return T; + } + else + return F; + } + +function shunt_host_pair(id: conn_id): bool + { + PacketFilter::filter_changed = T; + + if ( reached_max_shunts() ) + return F; + + add shunted_host_pairs[id]; + install(); + return T; + } + +function unshunt_host_pair(id: conn_id): bool + { + PacketFilter::filter_changed = T; + + if ( id in shunted_host_pairs ) + { + delete shunted_host_pairs[id]; + return T; + } + else + return F; + } + +function force_unshunt_host_pair(id: conn_id): bool + { + if ( 
unshunt_host_pair(id) ) + { + install(); + return T; + } + else + return F; + } + +function shunt_conn(id: conn_id): bool + { + if ( is_v6_addr(id$orig_h) ) + { + NOTICE([$note=Cannot_BPF_Shunt_Conn, + $msg="IPv6 connections can't be shunted with BPF due to limitations in BPF", + $sub="ipv6_conn", + $id=id, $identifier=cat(id)]); + return F; + } + + if ( reached_max_shunts() ) + return F; + + PacketFilter::filter_changed = T; + add shunted_conns[id]; + install(); + return T; + } + +event connection_state_remove(c: connection) &priority=-5 + { + # Don't rebuild the filter right away because the packet filter framework + # will check every few minutes and update the filter if things have changed. + if ( c$id in shunted_conns ) + delete shunted_conns[c$id]; + } diff --git a/scripts/policy/frameworks/software/version-changes.bro b/scripts/policy/frameworks/software/version-changes.bro deleted file mode 100644 index 215a64d6b7..0000000000 --- a/scripts/policy/frameworks/software/version-changes.bro +++ /dev/null @@ -1,37 +0,0 @@ -##! Provides the possibility to define software names that are interesting to -##! watch for changes. A notice is generated if software versions change on a -##! host. - -@load base/frameworks/notice -@load base/frameworks/software - -module Software; - -export { - redef enum Notice::Type += { - ## For certain software, a version changing may matter. In that - ## case, this notice will be generated. Software that matters - ## if the version changes can be configured with the - ## :bro:id:`Software::interesting_version_changes` variable. - Software_Version_Change, - }; - - ## Some software is more interesting when the version changes and this - ## is a set of all software that should raise a notice when a different - ## version is seen on a host. - option interesting_version_changes: set[string] = {}; -} - -event Software::version_change(old: Software::Info, new: Software::Info) - { - if ( old$name !in interesting_version_changes ) - return; - - local msg = fmt("%.6f %s '%s' version changed from %s to %s", - network_time(), old$software_type, old$name, - software_fmt_version(old$version), - software_fmt_version(new$version)); - - NOTICE([$note=Software_Version_Change, $src=new$host, - $msg=msg, $sub=software_fmt(new)]); - } diff --git a/scripts/policy/frameworks/software/version-changes.zeek b/scripts/policy/frameworks/software/version-changes.zeek new file mode 100644 index 0000000000..865cc20447 --- /dev/null +++ b/scripts/policy/frameworks/software/version-changes.zeek @@ -0,0 +1,37 @@ +##! Provides the possibility to define software names that are interesting to +##! watch for changes. A notice is generated if software versions change on a +##! host. + +@load base/frameworks/notice +@load base/frameworks/software + +module Software; + +export { + redef enum Notice::Type += { + ## For certain software, a version changing may matter. In that + ## case, this notice will be generated. Software that matters + ## if the version changes can be configured with the + ## :zeek:id:`Software::interesting_version_changes` variable. + Software_Version_Change, + }; + + ## Some software is more interesting when the version changes and this + ## is a set of all software that should raise a notice when a different + ## version is seen on a host. 
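# A minimal sketch of how a site might populate the option declared just below; the
# software name here is only an illustrative value and must match the name as recorded
# by the software framework. Being an option, it can also be adjusted at runtime through
# the configuration framework.
redef Software::interesting_version_changes += { "OpenSSH" };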
+ option interesting_version_changes: set[string] = {}; +} + +event Software::version_change(old: Software::Info, new: Software::Info) + { + if ( old$name !in interesting_version_changes ) + return; + + local msg = fmt("%.6f %s '%s' version changed from %s to %s", + network_time(), old$software_type, old$name, + software_fmt_version(old$version), + software_fmt_version(new$version)); + + NOTICE([$note=Software_Version_Change, $src=new$host, + $msg=msg, $sub=software_fmt(new)]); + } diff --git a/scripts/policy/frameworks/software/vulnerable.bro b/scripts/policy/frameworks/software/vulnerable.bro deleted file mode 100644 index 92a6698af3..0000000000 --- a/scripts/policy/frameworks/software/vulnerable.bro +++ /dev/null @@ -1,146 +0,0 @@ -##! Provides a variable to define vulnerable versions of software and if -##! a version of that software is as old or older than the defined version a -##! notice will be generated. - -@load base/frameworks/control -@load base/frameworks/notice -@load base/frameworks/software - -module Software; - -export { - redef enum Notice::Type += { - ## Indicates that a vulnerable version of software was detected. - Vulnerable_Version, - }; - - type VulnerableVersionRange: record { - ## The minimal version of a vulnerable version range. This - ## field can be undefined if all previous versions of a piece - ## of software are vulnerable. - min: Software::Version &optional; - ## The maximum vulnerable version. This field is deliberately - ## not optional because a maximum vulnerable version must - ## always be defined. This assumption may become incorrect - ## if all future versions of some software are to be considered - ## vulnerable. :) - max: Software::Version; - }; - - ## The DNS zone where runtime vulnerable software updates will - ## be loaded from. - option vulnerable_versions_update_endpoint = ""; - - ## The interval at which vulnerable versions should grab updates - ## over DNS. - option vulnerable_versions_update_interval = 1hr; - - ## This is a table of software versions indexed by the name of the - ## software and a set of version ranges that are declared to be - ## vulnerable for that software. - const vulnerable_versions: table[string] of set[VulnerableVersionRange] = table() &redef; -} - -global internal_vulnerable_versions: table[string] of set[VulnerableVersionRange] = table(); - -function decode_vulnerable_version_range(vuln_sw: string): VulnerableVersionRange - { - # Create a max value with a dunce value only because the $max field - # is not optional. - local vvr: Software::VulnerableVersionRange = [$max=[$major=0]]; - - if ( /max=/ !in vuln_sw ) - { - Reporter::warning(fmt("The vulnerable software detection script encountered a version with no max value (which is required). %s", vuln_sw)); - return vvr; - } - - local versions = split_string1(vuln_sw, /\x09/); - - for ( i in versions ) - { - local field_and_ver = split_string1(versions[i], /=/); - if ( |field_and_ver| != 2 ) - return vvr; #failure! - - local ver = Software::parse(field_and_ver[1])$version; - if ( field_and_ver[0] == "min" ) - vvr$min = ver; - else if ( field_and_ver[0] == "max" ) - vvr$max = ver; - } - - return vvr; - } - -event grab_vulnerable_versions(i: count) - { - if ( vulnerable_versions_update_endpoint == "" ) - { - # Reschedule this event in case the user updates the setting at runtime. 
- schedule vulnerable_versions_update_interval { grab_vulnerable_versions(1) }; - return; - } - - when ( local result = lookup_hostname_txt(cat(i,".",vulnerable_versions_update_endpoint)) ) - { - local parts = split_string1(result, /\x09/); - if ( |parts| != 2 ) #failure or end of list! - { - schedule vulnerable_versions_update_interval { grab_vulnerable_versions(1) }; - return; - } - - local sw = parts[0]; - local vvr = decode_vulnerable_version_range(parts[1]); - if ( sw !in internal_vulnerable_versions ) - internal_vulnerable_versions[sw] = set(); - add internal_vulnerable_versions[sw][vvr]; - - event grab_vulnerable_versions(i+1); - } - timeout 5secs - { - # In case a lookup fails, try starting over in one minute. - schedule 1min { grab_vulnerable_versions(1) }; - } - } - -function update_vulnerable_sw() - { - internal_vulnerable_versions = table(); - - # Copy the const vulnerable versions into the global modifiable one. - for ( sw, vuln_range_set in vulnerable_versions ) - internal_vulnerable_versions[sw] = vuln_range_set; - - event grab_vulnerable_versions(1); - } - -event bro_init() &priority=3 - { - update_vulnerable_sw(); - } - -event Control::configuration_update() &priority=3 - { - update_vulnerable_sw(); - } - -event log_software(rec: Info) - { - if ( rec$name !in internal_vulnerable_versions ) - return; - - for ( version_range in internal_vulnerable_versions[rec$name] ) - { - if ( cmp_versions(rec$version, version_range$max) <= 0 && - (!version_range?$min || cmp_versions(rec$version, version_range$min) >= 0) ) - { - # The software is inside a vulnerable version range. - NOTICE([$note=Vulnerable_Version, $src=rec$host, - $msg=fmt("%s is running %s which is vulnerable.", rec$host, software_fmt(rec)), - $sub=software_fmt(rec)]); - } - } - } diff --git a/scripts/policy/frameworks/software/vulnerable.zeek b/scripts/policy/frameworks/software/vulnerable.zeek new file mode 100644 index 0000000000..b8d8c43a12 --- /dev/null +++ b/scripts/policy/frameworks/software/vulnerable.zeek @@ -0,0 +1,146 @@ +##! Provides a variable to define vulnerable versions of software and if +##! a version of that software is as old or older than the defined version a +##! notice will be generated. + +@load base/frameworks/control +@load base/frameworks/notice +@load base/frameworks/software + +module Software; + +export { + redef enum Notice::Type += { + ## Indicates that a vulnerable version of software was detected. + Vulnerable_Version, + }; + + type VulnerableVersionRange: record { + ## The minimal version of a vulnerable version range. This + ## field can be undefined if all previous versions of a piece + ## of software are vulnerable. + min: Software::Version &optional; + ## The maximum vulnerable version. This field is deliberately + ## not optional because a maximum vulnerable version must + ## always be defined. This assumption may become incorrect + ## if all future versions of some software are to be considered + ## vulnerable. :) + max: Software::Version; + }; + + ## The DNS zone where runtime vulnerable software updates will + ## be loaded from. + option vulnerable_versions_update_endpoint = ""; + + ## The interval at which vulnerable versions should grab updates + ## over DNS. + option vulnerable_versions_update_interval = 1hr; + + ## This is a table of software versions indexed by the name of the + ## software and a set of version ranges that are declared to be + ## vulnerable for that software. 
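# A minimal sketch of how the vulnerable_versions table declared just below might be
# populated via redef; the software name and version numbers are hypothetical, and only
# the $major and $minor fields of Software::Version are filled in for brevity.
redef Software::vulnerable_versions += {
	["Java"] = set(Software::VulnerableVersionRange($max=Software::Version($major=1, $minor=7)))
};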
+ const vulnerable_versions: table[string] of set[VulnerableVersionRange] = table() &redef; +} + +global internal_vulnerable_versions: table[string] of set[VulnerableVersionRange] = table(); + +function decode_vulnerable_version_range(vuln_sw: string): VulnerableVersionRange + { + # Create a max value with a dunce value only because the $max field + # is not optional. + local vvr: Software::VulnerableVersionRange = [$max=[$major=0]]; + + if ( /max=/ !in vuln_sw ) + { + Reporter::warning(fmt("The vulnerable software detection script encountered a version with no max value (which is required). %s", vuln_sw)); + return vvr; + } + + local versions = split_string1(vuln_sw, /\x09/); + + for ( i in versions ) + { + local field_and_ver = split_string1(versions[i], /=/); + if ( |field_and_ver| != 2 ) + return vvr; #failure! + + local ver = Software::parse(field_and_ver[1])$version; + if ( field_and_ver[0] == "min" ) + vvr$min = ver; + else if ( field_and_ver[0] == "max" ) + vvr$max = ver; + } + + return vvr; + } + +event grab_vulnerable_versions(i: count) + { + if ( vulnerable_versions_update_endpoint == "" ) + { + # Reschedule this event in case the user updates the setting at runtime. + schedule vulnerable_versions_update_interval { grab_vulnerable_versions(1) }; + return; + } + + when ( local result = lookup_hostname_txt(cat(i,".",vulnerable_versions_update_endpoint)) ) + { + local parts = split_string1(result, /\x09/); + if ( |parts| != 2 ) #failure or end of list! + { + schedule vulnerable_versions_update_interval { grab_vulnerable_versions(1) }; + return; + } + + local sw = parts[0]; + local vvr = decode_vulnerable_version_range(parts[1]); + if ( sw !in internal_vulnerable_versions ) + internal_vulnerable_versions[sw] = set(); + add internal_vulnerable_versions[sw][vvr]; + + event grab_vulnerable_versions(i+1); + } + timeout 5secs + { + # In case a lookup fails, try starting over in one minute. + schedule 1min { grab_vulnerable_versions(1) }; + } + } + +function update_vulnerable_sw() + { + internal_vulnerable_versions = table(); + + # Copy the const vulnerable versions into the global modifiable one. + for ( sw, vuln_range_set in vulnerable_versions ) + internal_vulnerable_versions[sw] = vuln_range_set; + + event grab_vulnerable_versions(1); + } + +event zeek_init() &priority=3 + { + update_vulnerable_sw(); + } + +event Control::configuration_update() &priority=3 + { + update_vulnerable_sw(); + } + +event log_software(rec: Info) + { + if ( rec$name !in internal_vulnerable_versions ) + return; + + for ( version_range in internal_vulnerable_versions[rec$name] ) + { + if ( cmp_versions(rec$version, version_range$max) <= 0 && + (!version_range?$min || cmp_versions(rec$version, version_range$min) >= 0) ) + { + # The software is inside a vulnerable version range. 
+ NOTICE([$note=Vulnerable_Version, $src=rec$host, + $msg=fmt("%s is running %s which is vulnerable.", rec$host, software_fmt(rec)), + $sub=software_fmt(rec)]); + } + } + } diff --git a/scripts/policy/frameworks/software/windows-version-detection.bro b/scripts/policy/frameworks/software/windows-version-detection.zeek similarity index 100% rename from scripts/policy/frameworks/software/windows-version-detection.bro rename to scripts/policy/frameworks/software/windows-version-detection.zeek diff --git a/scripts/policy/integration/barnyard2/__load__.bro b/scripts/policy/integration/barnyard2/__load__.zeek similarity index 100% rename from scripts/policy/integration/barnyard2/__load__.bro rename to scripts/policy/integration/barnyard2/__load__.zeek diff --git a/scripts/policy/integration/barnyard2/main.bro b/scripts/policy/integration/barnyard2/main.bro deleted file mode 100644 index 96c74043f7..0000000000 --- a/scripts/policy/integration/barnyard2/main.bro +++ /dev/null @@ -1,59 +0,0 @@ -##! This script lets Barnyard2 integrate with Bro. It receives alerts from -##! Barnyard2 and logs them. In the future it will do more correlation -##! and derive new notices from the alerts. - -@load ./types - -module Barnyard2; - -export { - redef enum Log::ID += { LOG }; - - type Info: record { - ## Timestamp of the alert. - ts: time &log; - ## Associated packet ID. - pid: PacketID &log; - ## Associated alert data. - alert: AlertData &log; - }; - - ## This can convert a Barnyard :bro:type:`Barnyard2::PacketID` value to - ## a :bro:type:`conn_id` value in the case that you might need to index - ## into an existing data structure elsewhere within Bro. - global pid2cid: function(p: PacketID): conn_id; -} - -event bro_init() &priority=5 - { - Log::create_stream(Barnyard2::LOG, [$columns=Info, $path="barnyard2"]); - } - - -function pid2cid(p: PacketID): conn_id - { - return [$orig_h=p$src_ip, $orig_p=p$src_p, $resp_h=p$dst_ip, $resp_p=p$dst_p]; - } - -event barnyard_alert(id: PacketID, alert: AlertData, msg: string, data: string) - { - Log::write(Barnyard2::LOG, [$ts=network_time(), $pid=id, $alert=alert]); - - #local proto_connection_string: string; - #if ( id$src_p == 0/tcp ) - # proto_connection_string = fmt("{PROTO:255} %s -> %s", id$src_ip, id$dst_ip); - #else - # proto_connection_string = fmt("{%s} %s:%d -> %s:%d", - # to_upper(fmt("%s", get_port_transport_proto(id$dst_p))), - # id$src_ip, id$src_p, id$dst_ip, id$dst_p); - # - #local snort_alike_msg = fmt("%.6f [**] [%d:%d:%d] %s [**] [Classification: %s] [Priority: %d] %s", - # sad$ts, - # sad$generator_id, - # sad$signature_id, - # sad$signature_revision, - # msg, - # sad$classification, - # sad$priority_id, - # proto_connection_string); - } diff --git a/scripts/policy/integration/barnyard2/main.zeek b/scripts/policy/integration/barnyard2/main.zeek new file mode 100644 index 0000000000..7e77e66ee6 --- /dev/null +++ b/scripts/policy/integration/barnyard2/main.zeek @@ -0,0 +1,59 @@ +##! This script lets Barnyard2 integrate with Zeek. It receives alerts from +##! Barnyard2 and logs them. In the future it will do more correlation +##! and derive new notices from the alerts. + +@load ./types + +module Barnyard2; + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + ## Timestamp of the alert. + ts: time &log; + ## Associated packet ID. + pid: PacketID &log; + ## Associated alert data. 
+ alert: AlertData &log; + }; + + ## This can convert a Barnyard :zeek:type:`Barnyard2::PacketID` value to + ## a :zeek:type:`conn_id` value in the case that you might need to index + ## into an existing data structure elsewhere within Zeek. + global pid2cid: function(p: PacketID): conn_id; +} + +event zeek_init() &priority=5 + { + Log::create_stream(Barnyard2::LOG, [$columns=Info, $path="barnyard2"]); + } + + +function pid2cid(p: PacketID): conn_id + { + return [$orig_h=p$src_ip, $orig_p=p$src_p, $resp_h=p$dst_ip, $resp_p=p$dst_p]; + } + +event barnyard_alert(id: PacketID, alert: AlertData, msg: string, data: string) + { + Log::write(Barnyard2::LOG, [$ts=network_time(), $pid=id, $alert=alert]); + + #local proto_connection_string: string; + #if ( id$src_p == 0/tcp ) + # proto_connection_string = fmt("{PROTO:255} %s -> %s", id$src_ip, id$dst_ip); + #else + # proto_connection_string = fmt("{%s} %s:%d -> %s:%d", + # to_upper(fmt("%s", get_port_transport_proto(id$dst_p))), + # id$src_ip, id$src_p, id$dst_ip, id$dst_p); + # + #local snort_alike_msg = fmt("%.6f [**] [%d:%d:%d] %s [**] [Classification: %s] [Priority: %d] %s", + # sad$ts, + # sad$generator_id, + # sad$signature_id, + # sad$signature_revision, + # msg, + # sad$classification, + # sad$priority_id, + # proto_connection_string); + } diff --git a/scripts/policy/integration/barnyard2/types.bro b/scripts/policy/integration/barnyard2/types.zeek similarity index 100% rename from scripts/policy/integration/barnyard2/types.bro rename to scripts/policy/integration/barnyard2/types.zeek diff --git a/scripts/policy/integration/collective-intel/README b/scripts/policy/integration/collective-intel/README index 17d534c8dd..3e28be5903 100644 --- a/scripts/policy/integration/collective-intel/README +++ b/scripts/policy/integration/collective-intel/README @@ -1,4 +1,4 @@ The scripts in this module are for deeper integration with the -Collective Intelligence Framework (CIF) since Bro's Intel framework +Collective Intelligence Framework (CIF) since Zeek's Intel framework doesn't natively behave the same as CIF nor does it store and maintain the same data in all cases. diff --git a/scripts/policy/integration/collective-intel/__load__.bro b/scripts/policy/integration/collective-intel/__load__.zeek similarity index 100% rename from scripts/policy/integration/collective-intel/__load__.bro rename to scripts/policy/integration/collective-intel/__load__.zeek diff --git a/scripts/policy/integration/collective-intel/main.bro b/scripts/policy/integration/collective-intel/main.bro deleted file mode 100644 index 48459c378a..0000000000 --- a/scripts/policy/integration/collective-intel/main.bro +++ /dev/null @@ -1,15 +0,0 @@ - -@load base/frameworks/intel - -module Intel; - -## These are some fields to add extended compatibility between Bro and the -## Collective Intelligence Framework. -redef record Intel::MetaData += { - ## Maps to the Impact field in the Collective Intelligence Framework. - cif_impact: string &optional; - ## Maps to the Severity field in the Collective Intelligence Framework. - cif_severity: string &optional; - ## Maps to the Confidence field in the Collective Intelligence Framework. 
- cif_confidence: double &optional; -}; diff --git a/scripts/policy/integration/collective-intel/main.zeek b/scripts/policy/integration/collective-intel/main.zeek new file mode 100644 index 0000000000..fac86dd744 --- /dev/null +++ b/scripts/policy/integration/collective-intel/main.zeek @@ -0,0 +1,15 @@ + +@load base/frameworks/intel + +module Intel; + +## These are some fields to add extended compatibility between Zeek and the +## Collective Intelligence Framework. +redef record Intel::MetaData += { + ## Maps to the Impact field in the Collective Intelligence Framework. + cif_impact: string &optional; + ## Maps to the Severity field in the Collective Intelligence Framework. + cif_severity: string &optional; + ## Maps to the Confidence field in the Collective Intelligence Framework. + cif_confidence: double &optional; +}; diff --git a/scripts/policy/misc/capture-loss.bro b/scripts/policy/misc/capture-loss.bro deleted file mode 100644 index 541f6577cc..0000000000 --- a/scripts/policy/misc/capture-loss.bro +++ /dev/null @@ -1,84 +0,0 @@ -##! This script logs evidence regarding the degree to which the packet -##! capture process suffers from measurement loss. -##! The loss could be due to overload on the host or NIC performing -##! the packet capture or it could even be beyond the host. If you are -##! capturing from a switch with a SPAN port, it's very possible that -##! the switch itself could be overloaded and dropping packets. -##! Reported loss is computed in terms of the number of "gap events" (ACKs -##! for a sequence number that's above a gap). - -@load base/frameworks/notice - -module CaptureLoss; - -export { - redef enum Log::ID += { LOG }; - - redef enum Notice::Type += { - ## Report if the detected capture loss exceeds the percentage - ## threshold. - Too_Much_Loss - }; - - type Info: record { - ## Timestamp for when the measurement occurred. - ts: time &log; - ## The time delay between this measurement and the last. - ts_delta: interval &log; - ## In the event that there are multiple Bro instances logging - ## to the same host, this distinguishes each peer with its - ## individual name. - peer: string &log; - ## Number of missed ACKs from the previous measurement interval. - gaps: count &log; - ## Total number of ACKs seen in the previous measurement interval. - acks: count &log; - ## Percentage of ACKs seen where the data being ACKed wasn't seen. - percent_lost: double &log; - }; - - ## The interval at which capture loss reports are created. - option watch_interval = 15mins; - - ## The percentage of missed data that is considered "too much" - ## when the :bro:enum:`CaptureLoss::Too_Much_Loss` notice should be - ## generated. The value is expressed as a double between 0 and 1 with 1 - ## being 100%. - option too_much_loss: double = 0.1; -} - -event CaptureLoss::take_measurement(last_ts: time, last_acks: count, last_gaps: count) - { - if ( last_ts == 0 ) - { - schedule watch_interval { CaptureLoss::take_measurement(network_time(), 0, 0) }; - return; - } - - local now = network_time(); - local g = get_gap_stats(); - local acks = g$ack_events - last_acks; - local gaps = g$gap_events - last_gaps; - local pct_lost = (acks == 0) ? 
0.0 : (100 * (1.0 * gaps) / (1.0 * acks)); - local info: Info = [$ts=now, - $ts_delta=now-last_ts, - $peer=peer_description, - $acks=acks, $gaps=gaps, - $percent_lost=pct_lost]; - - if ( pct_lost >= too_much_loss*100 ) - NOTICE([$note=Too_Much_Loss, - $msg=fmt("The capture loss script detected an estimated loss rate above %.3f%%", pct_lost)]); - - Log::write(LOG, info); - schedule watch_interval { CaptureLoss::take_measurement(now, g$ack_events, g$gap_events) }; - } - -event bro_init() &priority=5 - { - Log::create_stream(LOG, [$columns=Info, $path="capture_loss"]); - - # We only schedule the event if we are capturing packets. - if ( reading_live_traffic() || reading_traces() ) - schedule watch_interval { CaptureLoss::take_measurement(network_time(), 0, 0) }; - } diff --git a/scripts/policy/misc/capture-loss.zeek b/scripts/policy/misc/capture-loss.zeek new file mode 100644 index 0000000000..2864a81109 --- /dev/null +++ b/scripts/policy/misc/capture-loss.zeek @@ -0,0 +1,84 @@ +##! This script logs evidence regarding the degree to which the packet +##! capture process suffers from measurement loss. +##! The loss could be due to overload on the host or NIC performing +##! the packet capture or it could even be beyond the host. If you are +##! capturing from a switch with a SPAN port, it's very possible that +##! the switch itself could be overloaded and dropping packets. +##! Reported loss is computed in terms of the number of "gap events" (ACKs +##! for a sequence number that's above a gap). + +@load base/frameworks/notice + +module CaptureLoss; + +export { + redef enum Log::ID += { LOG }; + + redef enum Notice::Type += { + ## Report if the detected capture loss exceeds the percentage + ## threshold. + Too_Much_Loss + }; + + type Info: record { + ## Timestamp for when the measurement occurred. + ts: time &log; + ## The time delay between this measurement and the last. + ts_delta: interval &log; + ## In the event that there are multiple Zeek instances logging + ## to the same host, this distinguishes each peer with its + ## individual name. + peer: string &log; + ## Number of missed ACKs from the previous measurement interval. + gaps: count &log; + ## Total number of ACKs seen in the previous measurement interval. + acks: count &log; + ## Percentage of ACKs seen where the data being ACKed wasn't seen. + percent_lost: double &log; + }; + + ## The interval at which capture loss reports are created. + option watch_interval = 15mins; + + ## The percentage of missed data that is considered "too much" + ## when the :zeek:enum:`CaptureLoss::Too_Much_Loss` notice should be + ## generated. The value is expressed as a double between 0 and 1 with 1 + ## being 100%. + option too_much_loss: double = 0.1; +} + +event CaptureLoss::take_measurement(last_ts: time, last_acks: count, last_gaps: count) + { + if ( last_ts == 0 ) + { + schedule watch_interval { CaptureLoss::take_measurement(network_time(), 0, 0) }; + return; + } + + local now = network_time(); + local g = get_gap_stats(); + local acks = g$ack_events - last_acks; + local gaps = g$gap_events - last_gaps; + local pct_lost = (acks == 0) ? 
0.0 : (100 * (1.0 * gaps) / (1.0 * acks)); + local info: Info = [$ts=now, + $ts_delta=now-last_ts, + $peer=peer_description, + $acks=acks, $gaps=gaps, + $percent_lost=pct_lost]; + + if ( pct_lost >= too_much_loss*100 ) + NOTICE([$note=Too_Much_Loss, + $msg=fmt("The capture loss script detected an estimated loss rate above %.3f%%", pct_lost)]); + + Log::write(LOG, info); + schedule watch_interval { CaptureLoss::take_measurement(now, g$ack_events, g$gap_events) }; + } + +event zeek_init() &priority=5 + { + Log::create_stream(LOG, [$columns=Info, $path="capture_loss"]); + + # We only schedule the event if we are capturing packets. + if ( reading_live_traffic() || reading_traces() ) + schedule watch_interval { CaptureLoss::take_measurement(network_time(), 0, 0) }; + } diff --git a/scripts/policy/misc/detect-traceroute/__load__.bro b/scripts/policy/misc/detect-traceroute/__load__.zeek similarity index 100% rename from scripts/policy/misc/detect-traceroute/__load__.bro rename to scripts/policy/misc/detect-traceroute/__load__.zeek diff --git a/scripts/policy/misc/detect-traceroute/main.bro b/scripts/policy/misc/detect-traceroute/main.bro deleted file mode 100644 index 5cbb34e27e..0000000000 --- a/scripts/policy/misc/detect-traceroute/main.bro +++ /dev/null @@ -1,101 +0,0 @@ -##! This script detects a large number of ICMP Time Exceeded messages heading -##! toward hosts that have sent low TTL packets. It generates a notice when the -##! number of ICMP Time Exceeded messages for a source-destination pair exceeds -##! a threshold. - -@load base/frameworks/sumstats -@load base/frameworks/signatures -@load-sigs ./detect-low-ttls.sig - -redef Signatures::ignored_ids += /traceroute-detector.*/; - -module Traceroute; - -export { - redef enum Log::ID += { LOG }; - - redef enum Notice::Type += { - ## Indicates that a host was seen running traceroutes. For more - ## detail about specific traceroutes that we run, refer to the - ## traceroute.log. - Detected - }; - - ## By default this script requires that any host detected running - ## traceroutes first send low TTL packets (TTL < 10) to the traceroute - ## destination host. Changing this setting to F will relax the - ## detection a bit by solely relying on ICMP time-exceeded messages to - ## detect traceroute. - const require_low_ttl_packets = T &redef; - - ## Defines the threshold for ICMP Time Exceeded messages for a src-dst - ## pair. This threshold only comes into play after a host is found to - ## be sending low TTL packets. - const icmp_time_exceeded_threshold: double = 3 &redef; - - ## Interval at which to watch for the - ## :bro:id:`Traceroute::icmp_time_exceeded_threshold` variable to be - ## crossed. At the end of each interval the counter is reset. - const icmp_time_exceeded_interval = 3min &redef; - - ## The log record for the traceroute log. - type Info: record { - ## Timestamp - ts: time &log; - ## Address initiating the traceroute. - src: addr &log; - ## Destination address of the traceroute. - dst: addr &log; - ## Protocol used for the traceroute. 
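
The renamed CaptureLoss policy script keeps its knobs as redef-able options, so a deployment normally tunes it from a site script such as local.zeek rather than by editing the file itself. A minimal, illustrative sketch (values are examples only; the load path assumes the default ZEEKPATH, which includes the policy/ tree):

    @load misc/capture-loss

    # Measure every 5 minutes instead of the default 15.
    redef CaptureLoss::watch_interval = 5mins;

    # Raise CaptureLoss::Too_Much_Loss at an estimated 5% loss (0.05 of 1.0).
    redef CaptureLoss::too_much_loss = 0.05;
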
- proto: string &log; - }; - - global log_traceroute: event(rec: Traceroute::Info); -} - -event bro_init() &priority=5 - { - Log::create_stream(Traceroute::LOG, [$columns=Info, $ev=log_traceroute, $path="traceroute"]); - - local r1: SumStats::Reducer = [$stream="traceroute.time_exceeded", $apply=set(SumStats::UNIQUE)]; - local r2: SumStats::Reducer = [$stream="traceroute.low_ttl_packet", $apply=set(SumStats::SUM)]; - SumStats::create([$name="traceroute-detection", - $epoch=icmp_time_exceeded_interval, - $reducers=set(r1, r2), - $threshold_val(key: SumStats::Key, result: SumStats::Result) = - { - # Give a threshold value of zero depending on if the host - # sends a low ttl packet. - if ( require_low_ttl_packets && result["traceroute.low_ttl_packet"]$sum == 0 ) - return 0.0; - else - return result["traceroute.time_exceeded"]$unique+0; - }, - $threshold=icmp_time_exceeded_threshold, - $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = - { - local parts = split_string_n(key$str, /-/, F, 2); - local src = to_addr(parts[0]); - local dst = to_addr(parts[1]); - local proto = parts[2]; - Log::write(LOG, [$ts=network_time(), $src=src, $dst=dst, $proto=proto]); - NOTICE([$note=Traceroute::Detected, - $msg=fmt("%s seems to be running traceroute using %s", src, proto), - $src=src, - $identifier=cat(src,proto)]); - }]); - } - -# Low TTL packets are detected with a signature. -event signature_match(state: signature_state, msg: string, data: string) - { - if ( state$sig_id == /traceroute-detector.*/ ) - { - SumStats::observe("traceroute.low_ttl_packet", [$str=cat(state$conn$id$orig_h,"-",state$conn$id$resp_h,"-",get_port_transport_proto(state$conn$id$resp_p))], [$num=1]); - } - } - -event icmp_time_exceeded(c: connection, icmp: icmp_conn, code: count, context: icmp_context) - { - SumStats::observe("traceroute.time_exceeded", [$str=cat(context$id$orig_h,"-",context$id$resp_h,"-",get_port_transport_proto(context$id$resp_p))], [$str=cat(c$id$orig_h)]); - } diff --git a/scripts/policy/misc/detect-traceroute/main.zeek b/scripts/policy/misc/detect-traceroute/main.zeek new file mode 100644 index 0000000000..091ceceed6 --- /dev/null +++ b/scripts/policy/misc/detect-traceroute/main.zeek @@ -0,0 +1,101 @@ +##! This script detects a large number of ICMP Time Exceeded messages heading +##! toward hosts that have sent low TTL packets. It generates a notice when the +##! number of ICMP Time Exceeded messages for a source-destination pair exceeds +##! a threshold. + +@load base/frameworks/sumstats +@load base/frameworks/signatures +@load-sigs ./detect-low-ttls.sig + +redef Signatures::ignored_ids += /traceroute-detector.*/; + +module Traceroute; + +export { + redef enum Log::ID += { LOG }; + + redef enum Notice::Type += { + ## Indicates that a host was seen running traceroutes. For more + ## detail about specific traceroutes that we run, refer to the + ## traceroute.log. + Detected + }; + + ## By default this script requires that any host detected running + ## traceroutes first send low TTL packets (TTL < 10) to the traceroute + ## destination host. Changing this setting to F will relax the + ## detection a bit by solely relying on ICMP time-exceeded messages to + ## detect traceroute. + const require_low_ttl_packets = T &redef; + + ## Defines the threshold for ICMP Time Exceeded messages for a src-dst + ## pair. This threshold only comes into play after a host is found to + ## be sending low TTL packets. 
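
The Traceroute detector's sensitivity is likewise controlled through redef-able constants; an illustrative site-script fragment (example values, option names taken from the script, load path assuming the default ZEEKPATH):

    @load misc/detect-traceroute

    # Rely on ICMP time-exceeded messages alone, without requiring low-TTL packets first.
    redef Traceroute::require_low_ttl_packets = F;

    # Demand more time-exceeded messages per src-dst pair before raising a notice.
    redef Traceroute::icmp_time_exceeded_threshold = 6.0;
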
+ const icmp_time_exceeded_threshold: double = 3 &redef; + + ## Interval at which to watch for the + ## :zeek:id:`Traceroute::icmp_time_exceeded_threshold` variable to be + ## crossed. At the end of each interval the counter is reset. + const icmp_time_exceeded_interval = 3min &redef; + + ## The log record for the traceroute log. + type Info: record { + ## Timestamp + ts: time &log; + ## Address initiating the traceroute. + src: addr &log; + ## Destination address of the traceroute. + dst: addr &log; + ## Protocol used for the traceroute. + proto: string &log; + }; + + global log_traceroute: event(rec: Traceroute::Info); +} + +event zeek_init() &priority=5 + { + Log::create_stream(Traceroute::LOG, [$columns=Info, $ev=log_traceroute, $path="traceroute"]); + + local r1: SumStats::Reducer = [$stream="traceroute.time_exceeded", $apply=set(SumStats::UNIQUE)]; + local r2: SumStats::Reducer = [$stream="traceroute.low_ttl_packet", $apply=set(SumStats::SUM)]; + SumStats::create([$name="traceroute-detection", + $epoch=icmp_time_exceeded_interval, + $reducers=set(r1, r2), + $threshold_val(key: SumStats::Key, result: SumStats::Result) = + { + # Give a threshold value of zero depending on if the host + # sends a low ttl packet. + if ( require_low_ttl_packets && result["traceroute.low_ttl_packet"]$sum == 0 ) + return 0.0; + else + return result["traceroute.time_exceeded"]$unique+0; + }, + $threshold=icmp_time_exceeded_threshold, + $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = + { + local parts = split_string_n(key$str, /-/, F, 2); + local src = to_addr(parts[0]); + local dst = to_addr(parts[1]); + local proto = parts[2]; + Log::write(LOG, [$ts=network_time(), $src=src, $dst=dst, $proto=proto]); + NOTICE([$note=Traceroute::Detected, + $msg=fmt("%s seems to be running traceroute using %s", src, proto), + $src=src, + $identifier=cat(src,proto)]); + }]); + } + +# Low TTL packets are detected with a signature. +event signature_match(state: signature_state, msg: string, data: string) + { + if ( state$sig_id == /traceroute-detector.*/ ) + { + SumStats::observe("traceroute.low_ttl_packet", [$str=cat(state$conn$id$orig_h,"-",state$conn$id$resp_h,"-",get_port_transport_proto(state$conn$id$resp_p))], [$num=1]); + } + } + +event icmp_time_exceeded(c: connection, icmp: icmp_conn, code: count, context: icmp_context) + { + SumStats::observe("traceroute.time_exceeded", [$str=cat(context$id$orig_h,"-",context$id$resp_h,"-",get_port_transport_proto(context$id$resp_p))], [$str=cat(c$id$orig_h)]); + } diff --git a/scripts/policy/misc/dump-events.bro b/scripts/policy/misc/dump-events.bro deleted file mode 100644 index 7d27e91981..0000000000 --- a/scripts/policy/misc/dump-events.bro +++ /dev/null @@ -1,40 +0,0 @@ -##! This script dumps the events that Bro raises out to standard output in a -##! readable form. This is for debugging only and allows to understand events and -##! their parameters as Bro processes input. Note that it will show only events -##! for which a handler is defined. - -module DumpEvents; - -export { - ## If true, include event arguments in output. - option include_args = T; - - ## Only include events matching the given pattern into output. By default, the - ## pattern matches all events. - option include = /.*/; -} - -event new_event(name: string, args: call_argument_vector) - { - if ( include !in name ) - return; - - print fmt("%17.6f %s", network_time(), name); - - if ( ! 
include_args || |args| == 0 ) - return; - - for ( i in args ) - { - local a = args[i]; - - local proto = fmt("%s: %s", a$name, a$type_name); - - if ( a?$value ) - print fmt(" [%d] %-18s = %s", i, proto, a$value); - else - print fmt(" | %-18s = %s [default]", proto, a$value); - } - - print ""; - } diff --git a/scripts/policy/misc/dump-events.zeek b/scripts/policy/misc/dump-events.zeek new file mode 100644 index 0000000000..9b3a78ffd3 --- /dev/null +++ b/scripts/policy/misc/dump-events.zeek @@ -0,0 +1,40 @@ +##! This script dumps the events that Zeek raises out to standard output in a +##! readable form. This is for debugging only and allows to understand events and +##! their parameters as Zeek processes input. Note that it will show only events +##! for which a handler is defined. + +module DumpEvents; + +export { + ## If true, include event arguments in output. + option include_args = T; + + ## Only include events matching the given pattern into output. By default, the + ## pattern matches all events. + option include = /.*/; +} + +event new_event(name: string, args: call_argument_vector) + { + if ( include !in name ) + return; + + print fmt("%17.6f %s", network_time(), name); + + if ( ! include_args || |args| == 0 ) + return; + + for ( i in args ) + { + local a = args[i]; + + local proto = fmt("%s: %s", a$name, a$type_name); + + if ( a?$value ) + print fmt(" [%d] %-18s = %s", i, proto, a$value); + else + print fmt(" | %-18s = %s [default]", proto, a$value); + } + + print ""; + } diff --git a/scripts/policy/misc/load-balancing.bro b/scripts/policy/misc/load-balancing.bro deleted file mode 100644 index 40bbe238ca..0000000000 --- a/scripts/policy/misc/load-balancing.bro +++ /dev/null @@ -1,111 +0,0 @@ -##! This script implements the "Bro side" of several load balancing -##! approaches for Bro clusters. - -@load base/frameworks/cluster -@load base/frameworks/packet-filter - -module LoadBalancing; - -export { - - type Method: enum { - ## Apply BPF filters to each worker in a way that causes them to - ## automatically flow balance traffic between them. - AUTO_BPF, - }; - - ## Defines the method of load balancing to use. - const method = AUTO_BPF &redef; - - redef record Cluster::Node += { - ## A BPF filter for load balancing traffic sniffed on a single - ## interface across a number of processes. In normal uses, this - ## will be assigned dynamically by the manager and installed by - ## the workers. - lb_filter: string &optional; - }; -} - -@if ( Cluster::is_enabled() ) - -event bro_init() &priority=5 - { - if ( method != AUTO_BPF ) - return; - - local worker_ip_interface: table[addr, string] of count = table(); - local sorted_node_names: vector of string = vector(); - local node: Cluster::Node; - local name: string; - - # Sort nodes list so that every node iterates over it in same order. - for ( name in Cluster::nodes ) - sorted_node_names += name; - - sort(sorted_node_names, strcmp); - - for ( idx in sorted_node_names ) - { - name = sorted_node_names[idx]; - node = Cluster::nodes[name]; - - if ( node$node_type != Cluster::WORKER ) - next; - - if ( ! node?$interface ) - next; - - if ( [node$ip, node$interface] !in worker_ip_interface ) - worker_ip_interface[node$ip, node$interface] = 0; - - ++worker_ip_interface[node$ip, node$interface]; - } - - # Now that we've counted up how many processes are running per - # interface, let's create the filters for each worker. 
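
For the DumpEvents debugging script above, both options can be narrowed from a site script; a short, illustrative example (the pattern and values are examples only):

    @load misc/dump-events

    # Only dump DNS-related events, and omit their arguments.
    redef DumpEvents::include = /dns_/;
    redef DumpEvents::include_args = F;
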
- local lb_proc_track: table[addr, string] of count = table(); - - for ( idx in sorted_node_names ) - { - name = sorted_node_names[idx]; - node = Cluster::nodes[name]; - - if ( node$node_type != Cluster::WORKER ) - next; - - if ( ! node?$interface ) - next; - - if ( [node$ip, node$interface] !in worker_ip_interface ) - next; - - if ( [node$ip, node$interface] !in lb_proc_track ) - lb_proc_track[node$ip, node$interface] = 0; - - local this_lb_proc = lb_proc_track[node$ip, node$interface]; - local total_lb_procs = worker_ip_interface[node$ip, node$interface]; - ++lb_proc_track[node$ip, node$interface]; - - if ( total_lb_procs > 1 ) - node$lb_filter = PacketFilter::sampling_filter(total_lb_procs, - this_lb_proc); - } - - # Finally, install filter for the current node if it needs one. - for ( idx in sorted_node_names ) - { - name = sorted_node_names[idx]; - node = Cluster::nodes[name]; - - if ( name != Cluster::node ) - next; - - if ( ! node?$lb_filter ) - next; - - restrict_filters["lb_filter"] = node$lb_filter; - PacketFilter::install(); - } - } - -@endif diff --git a/scripts/policy/misc/load-balancing.zeek b/scripts/policy/misc/load-balancing.zeek new file mode 100644 index 0000000000..7a1be09871 --- /dev/null +++ b/scripts/policy/misc/load-balancing.zeek @@ -0,0 +1,111 @@ +##! This script implements the "Zeek side" of several load balancing +##! approaches for Zeek clusters. + +@load base/frameworks/cluster +@load base/frameworks/packet-filter + +module LoadBalancing; + +export { + + type Method: enum { + ## Apply BPF filters to each worker in a way that causes them to + ## automatically flow balance traffic between them. + AUTO_BPF, + }; + + ## Defines the method of load balancing to use. + const method = AUTO_BPF &redef; + + redef record Cluster::Node += { + ## A BPF filter for load balancing traffic sniffed on a single + ## interface across a number of processes. In normal uses, this + ## will be assigned dynamically by the manager and installed by + ## the workers. + lb_filter: string &optional; + }; +} + +@if ( Cluster::is_enabled() ) + +event zeek_init() &priority=5 + { + if ( method != AUTO_BPF ) + return; + + local worker_ip_interface: table[addr, string] of count = table(); + local sorted_node_names: vector of string = vector(); + local node: Cluster::Node; + local name: string; + + # Sort nodes list so that every node iterates over it in same order. + for ( name in Cluster::nodes ) + sorted_node_names += name; + + sort(sorted_node_names, strcmp); + + for ( idx in sorted_node_names ) + { + name = sorted_node_names[idx]; + node = Cluster::nodes[name]; + + if ( node$node_type != Cluster::WORKER ) + next; + + if ( ! node?$interface ) + next; + + if ( [node$ip, node$interface] !in worker_ip_interface ) + worker_ip_interface[node$ip, node$interface] = 0; + + ++worker_ip_interface[node$ip, node$interface]; + } + + # Now that we've counted up how many processes are running per + # interface, let's create the filters for each worker. + local lb_proc_track: table[addr, string] of count = table(); + + for ( idx in sorted_node_names ) + { + name = sorted_node_names[idx]; + node = Cluster::nodes[name]; + + if ( node$node_type != Cluster::WORKER ) + next; + + if ( ! 
node?$interface ) + next; + + if ( [node$ip, node$interface] !in worker_ip_interface ) + next; + + if ( [node$ip, node$interface] !in lb_proc_track ) + lb_proc_track[node$ip, node$interface] = 0; + + local this_lb_proc = lb_proc_track[node$ip, node$interface]; + local total_lb_procs = worker_ip_interface[node$ip, node$interface]; + ++lb_proc_track[node$ip, node$interface]; + + if ( total_lb_procs > 1 ) + node$lb_filter = PacketFilter::sampling_filter(total_lb_procs, + this_lb_proc); + } + + # Finally, install filter for the current node if it needs one. + for ( idx in sorted_node_names ) + { + name = sorted_node_names[idx]; + node = Cluster::nodes[name]; + + if ( name != Cluster::node ) + next; + + if ( ! node?$lb_filter ) + next; + + restrict_filters["lb_filter"] = node$lb_filter; + PacketFilter::install(); + } + } + +@endif diff --git a/scripts/policy/misc/loaded-scripts.bro b/scripts/policy/misc/loaded-scripts.bro deleted file mode 100644 index bfc0aad114..0000000000 --- a/scripts/policy/misc/loaded-scripts.bro +++ /dev/null @@ -1,38 +0,0 @@ -##! Log the loaded scripts. -@load base/utils/paths - -module LoadedScripts; - -export { - redef enum Log::ID += { LOG }; - - type Info: record { - ## Name of the script loaded potentially with spaces included - ## before the file name to indicate load depth. The convention - ## is two spaces per level of depth. - name: string &log; - }; -} - -# This is inefficient; however, since this script only executes once on -# startup, this shold be ok. -function get_indent(level: count): string - { - local out = ""; - while ( level > 0 ) - { - --level; - out = out + " "; - } - return out; - } - -event bro_init() &priority=5 - { - Log::create_stream(LoadedScripts::LOG, [$columns=Info, $path="loaded_scripts"]); - } - -event bro_script_loaded(path: string, level: count) - { - Log::write(LoadedScripts::LOG, [$name=cat(get_indent(level), compress_path(path))]); - } diff --git a/scripts/policy/misc/loaded-scripts.zeek b/scripts/policy/misc/loaded-scripts.zeek new file mode 100644 index 0000000000..0bd986e01a --- /dev/null +++ b/scripts/policy/misc/loaded-scripts.zeek @@ -0,0 +1,38 @@ +##! Log the loaded scripts. +@load base/utils/paths + +module LoadedScripts; + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + ## Name of the script loaded potentially with spaces included + ## before the file name to indicate load depth. The convention + ## is two spaces per level of depth. + name: string &log; + }; +} + +# This is inefficient; however, since this script only executes once on +# startup, this shold be ok. +function get_indent(level: count): string + { + local out = ""; + while ( level > 0 ) + { + --level; + out = out + " "; + } + return out; + } + +event zeek_init() &priority=5 + { + Log::create_stream(LoadedScripts::LOG, [$columns=Info, $path="loaded_scripts"]); + } + +event zeek_script_loaded(path: string, level: count) + { + Log::write(LoadedScripts::LOG, [$name=cat(get_indent(level), compress_path(path))]); + } diff --git a/scripts/policy/misc/profiling.bro b/scripts/policy/misc/profiling.bro deleted file mode 100644 index 613e78f860..0000000000 --- a/scripts/policy/misc/profiling.bro +++ /dev/null @@ -1,19 +0,0 @@ -##! Turns on profiling of Bro resource consumption. - -module Profiling; - -## Set the profiling output file. -redef profiling_file = open_log_file("prof"); - -## Set the cheap profiling interval. -redef profiling_interval = 15 secs; - -## Set the expensive profiling interval (multiple of -## :bro:id:`profiling_interval`). 
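
Because the Profiling script only redefines core Zeek globals, a site script loaded afterwards can override them again; an illustrative sketch (the interval value is an example):

    @load misc/profiling

    # Sample the cheap profiling statistics once a minute instead of every 15 seconds.
    redef profiling_interval = 1min;
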
-redef expensive_profiling_multiple = 20; - -event bro_init() - { - set_buf(profiling_file, F); - } - diff --git a/scripts/policy/misc/profiling.zeek b/scripts/policy/misc/profiling.zeek new file mode 100644 index 0000000000..197d3a3f82 --- /dev/null +++ b/scripts/policy/misc/profiling.zeek @@ -0,0 +1,19 @@ +##! Turns on profiling of Zeek resource consumption. + +module Profiling; + +## Set the profiling output file. +redef profiling_file = open_log_file("prof"); + +## Set the cheap profiling interval. +redef profiling_interval = 15 secs; + +## Set the expensive profiling interval (multiple of +## :zeek:id:`profiling_interval`). +redef expensive_profiling_multiple = 20; + +event zeek_init() + { + set_buf(profiling_file, F); + } + diff --git a/scripts/policy/misc/scan.bro b/scripts/policy/misc/scan.bro deleted file mode 100644 index d70f8f9e79..0000000000 --- a/scripts/policy/misc/scan.bro +++ /dev/null @@ -1,182 +0,0 @@ -##! TCP Scan detection. - -# ..Authors: Sheharbano Khattak -# Seth Hall -# All the authors of the old scan.bro - -@load base/frameworks/notice -@load base/frameworks/sumstats - -@load base/utils/time - -module Scan; - -export { - redef enum Notice::Type += { - ## Address scans detect that a host appears to be scanning some - ## number of destinations on a single port. This notice is - ## generated when more than :bro:id:`Scan::addr_scan_threshold` - ## unique hosts are seen over the previous - ## :bro:id:`Scan::addr_scan_interval` time range. - Address_Scan, - - ## Port scans detect that an attacking host appears to be - ## scanning a single victim host on several ports. This notice - ## is generated when an attacking host attempts to connect to - ## :bro:id:`Scan::port_scan_threshold` - ## unique ports on a single host over the previous - ## :bro:id:`Scan::port_scan_interval` time range. - Port_Scan, - }; - - ## Failed connection attempts are tracked over this time interval for - ## the address scan detection. A higher interval will detect slower - ## scanners, but may also yield more false positives. - const addr_scan_interval = 5min &redef; - - ## Failed connection attempts are tracked over this time interval for - ## the port scan detection. A higher interval will detect slower - ## scanners, but may also yield more false positives. - const port_scan_interval = 5min &redef; - - ## The threshold of the unique number of hosts a scanning host has to - ## have failed connections with on a single port. - const addr_scan_threshold = 25.0 &redef; - - ## The threshold of the number of unique ports a scanning host has to - ## have failed connections with on a single victim host. - const port_scan_threshold = 15.0 &redef; - - global Scan::addr_scan_policy: hook(scanner: addr, victim: addr, scanned_port: port); - global Scan::port_scan_policy: hook(scanner: addr, victim: addr, scanned_port: port); -} - -event bro_init() &priority=5 - { - local r1: SumStats::Reducer = [$stream="scan.addr.fail", $apply=set(SumStats::UNIQUE), $unique_max=double_to_count(addr_scan_threshold+2)]; - SumStats::create([$name="addr-scan", - $epoch=addr_scan_interval, - $reducers=set(r1), - $threshold_val(key: SumStats::Key, result: SumStats::Result) = - { - return result["scan.addr.fail"]$unique+0.0; - }, - #$threshold_func=check_addr_scan_threshold, - $threshold=addr_scan_threshold, - $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = - { - local r = result["scan.addr.fail"]; - local side = Site::is_local_addr(key$host) ? 
"local" : "remote"; - local dur = duration_to_mins_secs(r$end-r$begin); - local message=fmt("%s scanned at least %d unique hosts on port %s in %s", key$host, r$unique, key$str, dur); - NOTICE([$note=Address_Scan, - $src=key$host, - $p=to_port(key$str), - $sub=side, - $msg=message, - $identifier=cat(key$host)]); - }]); - - # Note: port scans are tracked similar to: table[src_ip, dst_ip] of set(port); - local r2: SumStats::Reducer = [$stream="scan.port.fail", $apply=set(SumStats::UNIQUE), $unique_max=double_to_count(port_scan_threshold+2)]; - SumStats::create([$name="port-scan", - $epoch=port_scan_interval, - $reducers=set(r2), - $threshold_val(key: SumStats::Key, result: SumStats::Result) = - { - return result["scan.port.fail"]$unique+0.0; - }, - $threshold=port_scan_threshold, - $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = - { - local r = result["scan.port.fail"]; - local side = Site::is_local_addr(key$host) ? "local" : "remote"; - local dur = duration_to_mins_secs(r$end-r$begin); - local message = fmt("%s scanned at least %d unique ports of host %s in %s", key$host, r$unique, key$str, dur); - NOTICE([$note=Port_Scan, - $src=key$host, - $dst=to_addr(key$str), - $sub=side, - $msg=message, - $identifier=cat(key$host)]); - }]); - } - -function add_sumstats(id: conn_id, reverse: bool) - { - local scanner = id$orig_h; - local victim = id$resp_h; - local scanned_port = id$resp_p; - - if ( reverse ) - { - scanner = id$resp_h; - victim = id$orig_h; - scanned_port = id$orig_p; - } - - if ( hook Scan::addr_scan_policy(scanner, victim, scanned_port) ) - SumStats::observe("scan.addr.fail", [$host=scanner, $str=cat(scanned_port)], [$str=cat(victim)]); - - if ( hook Scan::port_scan_policy(scanner, victim, scanned_port) ) - SumStats::observe("scan.port.fail", [$host=scanner, $str=cat(victim)], [$str=cat(scanned_port)]); - } - -function is_failed_conn(c: connection): bool - { - # Sr || ( (hR || ShR) && (data not sent in any direction) ) - if ( (c$orig$state == TCP_SYN_SENT && c$resp$state == TCP_RESET) || - (((c$orig$state == TCP_RESET && c$resp$state == TCP_SYN_ACK_SENT) || - (c$orig$state == TCP_RESET && c$resp$state == TCP_ESTABLISHED && "S" in c$history ) - ) && /[Dd]/ !in c$history ) - ) - return T; - return F; - } - -function is_reverse_failed_conn(c: connection): bool - { - # reverse scan i.e. 
conn dest is the scanner - # sR || ( (Hr || sHr) && (data not sent in any direction) ) - if ( (c$resp$state == TCP_SYN_SENT && c$orig$state == TCP_RESET) || - (((c$resp$state == TCP_RESET && c$orig$state == TCP_SYN_ACK_SENT) || - (c$resp$state == TCP_RESET && c$orig$state == TCP_ESTABLISHED && "s" in c$history ) - ) && /[Dd]/ !in c$history ) - ) - return T; - return F; - } - -event connection_attempt(c: connection) - { - local is_reverse_scan = F; - if ( "H" in c$history ) - is_reverse_scan = T; - - add_sumstats(c$id, is_reverse_scan); - } - -event connection_rejected(c: connection) - { - local is_reverse_scan = F; - if ( "s" in c$history ) - is_reverse_scan = T; - - add_sumstats(c$id, is_reverse_scan); - } - -event connection_reset(c: connection) - { - if ( is_failed_conn(c) ) - add_sumstats(c$id, F); - else if ( is_reverse_failed_conn(c) ) - add_sumstats(c$id, T); - } - -event connection_pending(c: connection) - { - if ( is_failed_conn(c) ) - add_sumstats(c$id, F); - else if ( is_reverse_failed_conn(c) ) - add_sumstats(c$id, T); - } diff --git a/scripts/policy/misc/scan.zeek b/scripts/policy/misc/scan.zeek new file mode 100644 index 0000000000..26dc54ce90 --- /dev/null +++ b/scripts/policy/misc/scan.zeek @@ -0,0 +1,182 @@ +##! TCP Scan detection. + +# ..Authors: Sheharbano Khattak +# Seth Hall +# All the authors of the old scan.bro + +@load base/frameworks/notice +@load base/frameworks/sumstats + +@load base/utils/time + +module Scan; + +export { + redef enum Notice::Type += { + ## Address scans detect that a host appears to be scanning some + ## number of destinations on a single port. This notice is + ## generated when more than :zeek:id:`Scan::addr_scan_threshold` + ## unique hosts are seen over the previous + ## :zeek:id:`Scan::addr_scan_interval` time range. + Address_Scan, + + ## Port scans detect that an attacking host appears to be + ## scanning a single victim host on several ports. This notice + ## is generated when an attacking host attempts to connect to + ## :zeek:id:`Scan::port_scan_threshold` + ## unique ports on a single host over the previous + ## :zeek:id:`Scan::port_scan_interval` time range. + Port_Scan, + }; + + ## Failed connection attempts are tracked over this time interval for + ## the address scan detection. A higher interval will detect slower + ## scanners, but may also yield more false positives. + const addr_scan_interval = 5min &redef; + + ## Failed connection attempts are tracked over this time interval for + ## the port scan detection. A higher interval will detect slower + ## scanners, but may also yield more false positives. + const port_scan_interval = 5min &redef; + + ## The threshold of the unique number of hosts a scanning host has to + ## have failed connections with on a single port. + const addr_scan_threshold = 25.0 &redef; + + ## The threshold of the number of unique ports a scanning host has to + ## have failed connections with on a single victim host. 
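
The Scan policy script is tuned the same way, and the two hooks it exports give sites a veto point before an observation is recorded. A short, illustrative sketch (the threshold, interval, and exempted port are examples only):

    @load misc/scan

    # Loosen the address-scan detector a bit.
    redef Scan::addr_scan_threshold = 50.0;
    redef Scan::addr_scan_interval = 10mins;

    # Skip tracking scans of a port we expect to be probed constantly;
    # invoking break makes the hook call return F, so the observation is dropped.
    hook Scan::addr_scan_policy(scanner: addr, victim: addr, scanned_port: port)
        {
        if ( scanned_port == 23/tcp )
            break;
        }
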
+ const port_scan_threshold = 15.0 &redef; + + global Scan::addr_scan_policy: hook(scanner: addr, victim: addr, scanned_port: port); + global Scan::port_scan_policy: hook(scanner: addr, victim: addr, scanned_port: port); +} + +event zeek_init() &priority=5 + { + local r1: SumStats::Reducer = [$stream="scan.addr.fail", $apply=set(SumStats::UNIQUE), $unique_max=double_to_count(addr_scan_threshold+2)]; + SumStats::create([$name="addr-scan", + $epoch=addr_scan_interval, + $reducers=set(r1), + $threshold_val(key: SumStats::Key, result: SumStats::Result) = + { + return result["scan.addr.fail"]$unique+0.0; + }, + #$threshold_func=check_addr_scan_threshold, + $threshold=addr_scan_threshold, + $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = + { + local r = result["scan.addr.fail"]; + local side = Site::is_local_addr(key$host) ? "local" : "remote"; + local dur = duration_to_mins_secs(r$end-r$begin); + local message=fmt("%s scanned at least %d unique hosts on port %s in %s", key$host, r$unique, key$str, dur); + NOTICE([$note=Address_Scan, + $src=key$host, + $p=to_port(key$str), + $sub=side, + $msg=message, + $identifier=cat(key$host)]); + }]); + + # Note: port scans are tracked similar to: table[src_ip, dst_ip] of set(port); + local r2: SumStats::Reducer = [$stream="scan.port.fail", $apply=set(SumStats::UNIQUE), $unique_max=double_to_count(port_scan_threshold+2)]; + SumStats::create([$name="port-scan", + $epoch=port_scan_interval, + $reducers=set(r2), + $threshold_val(key: SumStats::Key, result: SumStats::Result) = + { + return result["scan.port.fail"]$unique+0.0; + }, + $threshold=port_scan_threshold, + $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = + { + local r = result["scan.port.fail"]; + local side = Site::is_local_addr(key$host) ? "local" : "remote"; + local dur = duration_to_mins_secs(r$end-r$begin); + local message = fmt("%s scanned at least %d unique ports of host %s in %s", key$host, r$unique, key$str, dur); + NOTICE([$note=Port_Scan, + $src=key$host, + $dst=to_addr(key$str), + $sub=side, + $msg=message, + $identifier=cat(key$host)]); + }]); + } + +function add_sumstats(id: conn_id, reverse: bool) + { + local scanner = id$orig_h; + local victim = id$resp_h; + local scanned_port = id$resp_p; + + if ( reverse ) + { + scanner = id$resp_h; + victim = id$orig_h; + scanned_port = id$orig_p; + } + + if ( hook Scan::addr_scan_policy(scanner, victim, scanned_port) ) + SumStats::observe("scan.addr.fail", [$host=scanner, $str=cat(scanned_port)], [$str=cat(victim)]); + + if ( hook Scan::port_scan_policy(scanner, victim, scanned_port) ) + SumStats::observe("scan.port.fail", [$host=scanner, $str=cat(victim)], [$str=cat(scanned_port)]); + } + +function is_failed_conn(c: connection): bool + { + # Sr || ( (hR || ShR) && (data not sent in any direction) ) + if ( (c$orig$state == TCP_SYN_SENT && c$resp$state == TCP_RESET) || + (((c$orig$state == TCP_RESET && c$resp$state == TCP_SYN_ACK_SENT) || + (c$orig$state == TCP_RESET && c$resp$state == TCP_ESTABLISHED && "S" in c$history ) + ) && /[Dd]/ !in c$history ) + ) + return T; + return F; + } + +function is_reverse_failed_conn(c: connection): bool + { + # reverse scan i.e. 
conn dest is the scanner + # sR || ( (Hr || sHr) && (data not sent in any direction) ) + if ( (c$resp$state == TCP_SYN_SENT && c$orig$state == TCP_RESET) || + (((c$resp$state == TCP_RESET && c$orig$state == TCP_SYN_ACK_SENT) || + (c$resp$state == TCP_RESET && c$orig$state == TCP_ESTABLISHED && "s" in c$history ) + ) && /[Dd]/ !in c$history ) + ) + return T; + return F; + } + +event connection_attempt(c: connection) + { + local is_reverse_scan = F; + if ( "H" in c$history ) + is_reverse_scan = T; + + add_sumstats(c$id, is_reverse_scan); + } + +event connection_rejected(c: connection) + { + local is_reverse_scan = F; + if ( "s" in c$history ) + is_reverse_scan = T; + + add_sumstats(c$id, is_reverse_scan); + } + +event connection_reset(c: connection) + { + if ( is_failed_conn(c) ) + add_sumstats(c$id, F); + else if ( is_reverse_failed_conn(c) ) + add_sumstats(c$id, T); + } + +event connection_pending(c: connection) + { + if ( is_failed_conn(c) ) + add_sumstats(c$id, F); + else if ( is_reverse_failed_conn(c) ) + add_sumstats(c$id, T); + } diff --git a/scripts/policy/misc/stats.bro b/scripts/policy/misc/stats.bro deleted file mode 100644 index 0bbf5c8aac..0000000000 --- a/scripts/policy/misc/stats.bro +++ /dev/null @@ -1,155 +0,0 @@ -##! Log memory/packet/lag statistics. - -@load base/frameworks/notice - -module Stats; - -export { - redef enum Log::ID += { LOG }; - - ## How often stats are reported. - option report_interval = 5min; - - type Info: record { - ## Timestamp for the measurement. - ts: time &log; - ## Peer that generated this log. Mostly for clusters. - peer: string &log; - ## Amount of memory currently in use in MB. - mem: count &log; - ## Number of packets processed since the last stats interval. - pkts_proc: count &log; - ## Number of bytes received since the last stats interval if - ## reading live traffic. - bytes_recv: count &log; - - ## Number of packets dropped since the last stats interval if - ## reading live traffic. - pkts_dropped: count &log &optional; - ## Number of packets seen on the link since the last stats - ## interval if reading live traffic. - pkts_link: count &log &optional; - ## Lag between the wall clock and packet timestamps if reading - ## live traffic. - pkt_lag: interval &log &optional; - - ## Number of events processed since the last stats interval. - events_proc: count &log; - ## Number of events that have been queued since the last stats - ## interval. - events_queued: count &log; - - ## TCP connections currently in memory. - active_tcp_conns: count &log; - ## UDP connections currently in memory. - active_udp_conns: count &log; - ## ICMP connections currently in memory. - active_icmp_conns: count &log; - - ## TCP connections seen since last stats interval. - tcp_conns: count &log; - ## UDP connections seen since last stats interval. - udp_conns: count &log; - ## ICMP connections seen since last stats interval. - icmp_conns: count &log; - - ## Number of timers scheduled since last stats interval. - timers: count &log; - ## Current number of scheduled timers. - active_timers: count &log; - - ## Number of files seen since last stats interval. - files: count &log; - ## Current number of files actively being seen. - active_files: count &log; - - ## Number of DNS requests seen since last stats interval. - dns_requests: count &log; - ## Current number of DNS requests awaiting a reply. - active_dns_requests: count &log; - - ## Current size of TCP data in reassembly. - reassem_tcp_size: count &log; - ## Current size of File data in reassembly. 
- reassem_file_size: count &log; - ## Current size of packet fragment data in reassembly. - reassem_frag_size: count &log; - ## Current size of unknown data in reassembly (this is only PIA buffer right now). - reassem_unknown_size: count &log; - }; - - ## Event to catch stats as they are written to the logging stream. - global log_stats: event(rec: Info); -} - -event bro_init() &priority=5 - { - Log::create_stream(Stats::LOG, [$columns=Info, $ev=log_stats, $path="stats"]); - } - -event check_stats(then: time, last_ns: NetStats, last_cs: ConnStats, last_ps: ProcStats, last_es: EventStats, last_rs: ReassemblerStats, last_ts: TimerStats, last_fs: FileAnalysisStats, last_ds: DNSStats) - { - local nettime = network_time(); - local ns = get_net_stats(); - local cs = get_conn_stats(); - local ps = get_proc_stats(); - local es = get_event_stats(); - local rs = get_reassembler_stats(); - local ts = get_timer_stats(); - local fs = get_file_analysis_stats(); - local ds = get_dns_stats(); - - if ( bro_is_terminating() ) - # No more stats will be written or scheduled when Bro is - # shutting down. - return; - - local info: Info = [$ts=nettime, - $peer=peer_description, - $mem=ps$mem/1048576, - $pkts_proc=ns$pkts_recvd - last_ns$pkts_recvd, - $bytes_recv = ns$bytes_recvd - last_ns$bytes_recvd, - - $active_tcp_conns=cs$num_tcp_conns, - $tcp_conns=cs$cumulative_tcp_conns - last_cs$cumulative_tcp_conns, - $active_udp_conns=cs$num_udp_conns, - $udp_conns=cs$cumulative_udp_conns - last_cs$cumulative_udp_conns, - $active_icmp_conns=cs$num_icmp_conns, - $icmp_conns=cs$cumulative_icmp_conns - last_cs$cumulative_icmp_conns, - - $reassem_tcp_size=rs$tcp_size, - $reassem_file_size=rs$file_size, - $reassem_frag_size=rs$frag_size, - $reassem_unknown_size=rs$unknown_size, - - $events_proc=es$dispatched - last_es$dispatched, - $events_queued=es$queued - last_es$queued, - - $timers=ts$cumulative - last_ts$cumulative, - $active_timers=ts$current, - - $files=fs$cumulative - last_fs$cumulative, - $active_files=fs$current, - - $dns_requests=ds$requests - last_ds$requests, - $active_dns_requests=ds$pending - ]; - - # Someone's going to have to explain what this is and add a field to the Info record. - # info$util = 100.0*((ps$user_time + ps$system_time) - (last_ps$user_time + last_ps$system_time))/(now-then); - - if ( reading_live_traffic() ) - { - info$pkt_lag = current_time() - nettime; - info$pkts_dropped = ns$pkts_dropped - last_ns$pkts_dropped; - info$pkts_link = ns$pkts_link - last_ns$pkts_link; - } - - Log::write(Stats::LOG, info); - schedule report_interval { check_stats(nettime, ns, cs, ps, es, rs, ts, fs, ds) }; - } - -event bro_init() - { - schedule report_interval { check_stats(network_time(), get_net_stats(), get_conn_stats(), get_proc_stats(), get_event_stats(), get_reassembler_stats(), get_timer_stats(), get_file_analysis_stats(), get_dns_stats()) }; - } diff --git a/scripts/policy/misc/stats.zeek b/scripts/policy/misc/stats.zeek new file mode 100644 index 0000000000..df092ea064 --- /dev/null +++ b/scripts/policy/misc/stats.zeek @@ -0,0 +1,155 @@ +##! Log memory/packet/lag statistics. + +@load base/frameworks/notice + +module Stats; + +export { + redef enum Log::ID += { LOG }; + + ## How often stats are reported. + option report_interval = 5min; + + type Info: record { + ## Timestamp for the measurement. + ts: time &log; + ## Peer that generated this log. Mostly for clusters. + peer: string &log; + ## Amount of memory currently in use in MB. 
+ mem: count &log; + ## Number of packets processed since the last stats interval. + pkts_proc: count &log; + ## Number of bytes received since the last stats interval if + ## reading live traffic. + bytes_recv: count &log; + + ## Number of packets dropped since the last stats interval if + ## reading live traffic. + pkts_dropped: count &log &optional; + ## Number of packets seen on the link since the last stats + ## interval if reading live traffic. + pkts_link: count &log &optional; + ## Lag between the wall clock and packet timestamps if reading + ## live traffic. + pkt_lag: interval &log &optional; + + ## Number of events processed since the last stats interval. + events_proc: count &log; + ## Number of events that have been queued since the last stats + ## interval. + events_queued: count &log; + + ## TCP connections currently in memory. + active_tcp_conns: count &log; + ## UDP connections currently in memory. + active_udp_conns: count &log; + ## ICMP connections currently in memory. + active_icmp_conns: count &log; + + ## TCP connections seen since last stats interval. + tcp_conns: count &log; + ## UDP connections seen since last stats interval. + udp_conns: count &log; + ## ICMP connections seen since last stats interval. + icmp_conns: count &log; + + ## Number of timers scheduled since last stats interval. + timers: count &log; + ## Current number of scheduled timers. + active_timers: count &log; + + ## Number of files seen since last stats interval. + files: count &log; + ## Current number of files actively being seen. + active_files: count &log; + + ## Number of DNS requests seen since last stats interval. + dns_requests: count &log; + ## Current number of DNS requests awaiting a reply. + active_dns_requests: count &log; + + ## Current size of TCP data in reassembly. + reassem_tcp_size: count &log; + ## Current size of File data in reassembly. + reassem_file_size: count &log; + ## Current size of packet fragment data in reassembly. + reassem_frag_size: count &log; + ## Current size of unknown data in reassembly (this is only PIA buffer right now). + reassem_unknown_size: count &log; + }; + + ## Event to catch stats as they are written to the logging stream. + global log_stats: event(rec: Info); +} + +event zeek_init() &priority=5 + { + Log::create_stream(Stats::LOG, [$columns=Info, $ev=log_stats, $path="stats"]); + } + +event check_stats(then: time, last_ns: NetStats, last_cs: ConnStats, last_ps: ProcStats, last_es: EventStats, last_rs: ReassemblerStats, last_ts: TimerStats, last_fs: FileAnalysisStats, last_ds: DNSStats) + { + local nettime = network_time(); + local ns = get_net_stats(); + local cs = get_conn_stats(); + local ps = get_proc_stats(); + local es = get_event_stats(); + local rs = get_reassembler_stats(); + local ts = get_timer_stats(); + local fs = get_file_analysis_stats(); + local ds = get_dns_stats(); + + if ( zeek_is_terminating() ) + # No more stats will be written or scheduled when Zeek is + # shutting down. 
+ return; + + local info: Info = [$ts=nettime, + $peer=peer_description, + $mem=ps$mem/1048576, + $pkts_proc=ns$pkts_recvd - last_ns$pkts_recvd, + $bytes_recv = ns$bytes_recvd - last_ns$bytes_recvd, + + $active_tcp_conns=cs$num_tcp_conns, + $tcp_conns=cs$cumulative_tcp_conns - last_cs$cumulative_tcp_conns, + $active_udp_conns=cs$num_udp_conns, + $udp_conns=cs$cumulative_udp_conns - last_cs$cumulative_udp_conns, + $active_icmp_conns=cs$num_icmp_conns, + $icmp_conns=cs$cumulative_icmp_conns - last_cs$cumulative_icmp_conns, + + $reassem_tcp_size=rs$tcp_size, + $reassem_file_size=rs$file_size, + $reassem_frag_size=rs$frag_size, + $reassem_unknown_size=rs$unknown_size, + + $events_proc=es$dispatched - last_es$dispatched, + $events_queued=es$queued - last_es$queued, + + $timers=ts$cumulative - last_ts$cumulative, + $active_timers=ts$current, + + $files=fs$cumulative - last_fs$cumulative, + $active_files=fs$current, + + $dns_requests=ds$requests - last_ds$requests, + $active_dns_requests=ds$pending + ]; + + # Someone's going to have to explain what this is and add a field to the Info record. + # info$util = 100.0*((ps$user_time + ps$system_time) - (last_ps$user_time + last_ps$system_time))/(now-then); + + if ( reading_live_traffic() ) + { + info$pkt_lag = current_time() - nettime; + info$pkts_dropped = ns$pkts_dropped - last_ns$pkts_dropped; + info$pkts_link = ns$pkts_link - last_ns$pkts_link; + } + + Log::write(Stats::LOG, info); + schedule report_interval { check_stats(nettime, ns, cs, ps, es, rs, ts, fs, ds) }; + } + +event zeek_init() + { + schedule report_interval { check_stats(network_time(), get_net_stats(), get_conn_stats(), get_proc_stats(), get_event_stats(), get_reassembler_stats(), get_timer_stats(), get_file_analysis_stats(), get_dns_stats()) }; + } diff --git a/scripts/policy/misc/trim-trace-file.bro b/scripts/policy/misc/trim-trace-file.bro deleted file mode 100644 index 8f534ec005..0000000000 --- a/scripts/policy/misc/trim-trace-file.bro +++ /dev/null @@ -1,38 +0,0 @@ -##! Deletes the ``-w`` tracefile at regular intervals and starts a new file -##! from scratch. - -module TrimTraceFile; - -export { - ## The interval between times that the output tracefile is rotated. - const trim_interval = 10 mins &redef; - - ## This event can be generated externally to this script if on-demand - ## tracefile rotation is required with the caveat that the script - ## doesn't currently attempt to get back on schedule automatically and - ## the next trim likely won't happen on the - ## :bro:id:`TrimTraceFile::trim_interval`. - global go: event(first_trim: bool); - } - -event TrimTraceFile::go(first_trim: bool) - { - if ( bro_is_terminating() || trace_output_file == "" ) - return; - - if ( ! first_trim ) - { - local info = rotate_file_by_name(trace_output_file); - if ( info$old_name != "" ) - system(fmt("/bin/rm %s", info$new_name)); - } - - schedule trim_interval { TrimTraceFile::go(F) }; - } - -event bro_init() - { - if ( trim_interval > 0 secs ) - schedule trim_interval { TrimTraceFile::go(T) }; - } - diff --git a/scripts/policy/misc/trim-trace-file.zeek b/scripts/policy/misc/trim-trace-file.zeek new file mode 100644 index 0000000000..81f54e991f --- /dev/null +++ b/scripts/policy/misc/trim-trace-file.zeek @@ -0,0 +1,38 @@ +##! Deletes the ``-w`` tracefile at regular intervals and starts a new file +##! from scratch. + +module TrimTraceFile; + +export { + ## The interval between times that the output tracefile is rotated. 
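
TrimTraceFile's interval is a redef-able constant, and per the script's own note its go event can be generated from any handler for an on-demand trim; a minimal, illustrative override:

    @load misc/trim-trace-file

    # Rotate and delete the -w trace file hourly instead of every 10 minutes.
    redef TrimTraceFile::trim_interval = 1hr;
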
+ const trim_interval = 10 mins &redef; + + ## This event can be generated externally to this script if on-demand + ## tracefile rotation is required with the caveat that the script + ## doesn't currently attempt to get back on schedule automatically and + ## the next trim likely won't happen on the + ## :zeek:id:`TrimTraceFile::trim_interval`. + global go: event(first_trim: bool); + } + +event TrimTraceFile::go(first_trim: bool) + { + if ( zeek_is_terminating() || trace_output_file == "" ) + return; + + if ( ! first_trim ) + { + local info = rotate_file_by_name(trace_output_file); + if ( info$old_name != "" ) + system(fmt("/bin/rm %s", info$new_name)); + } + + schedule trim_interval { TrimTraceFile::go(F) }; + } + +event zeek_init() + { + if ( trim_interval > 0 secs ) + schedule trim_interval { TrimTraceFile::go(T) }; + } + diff --git a/scripts/policy/misc/weird-stats.bro b/scripts/policy/misc/weird-stats.bro deleted file mode 100644 index ac0914d531..0000000000 --- a/scripts/policy/misc/weird-stats.bro +++ /dev/null @@ -1,102 +0,0 @@ -##! Log weird statistics. - -@load base/frameworks/sumstats -@load base/frameworks/cluster - -module WeirdStats; - -export { - redef enum Log::ID += { LOG }; - - ## How often stats are reported. - const weird_stat_interval = 15min &redef; - - type Info: record { - ## Timestamp for the measurement. - ts: time &log; - ## Name of the weird. - name: string &log; - ## Number of times weird was seen since the last stats interval. - num_seen: count &log; - }; - - global log_weird_stats: event(rec: Info); -} - -global this_epoch_weirds: table[string] of double; -global last_epoch_weirds: table[string] of double; - -function weird_epoch_results(ts: time, key: SumStats::Key, result: SumStats::Result) - { - this_epoch_weirds[key$str]=result["weirds.encountered"]$sum; - } - -function weird_epoch_finished(ts: time) - { - for ( n, v in this_epoch_weirds ) - { - local last_count: double = 0.0; - - if ( n in last_epoch_weirds ) - last_count = last_epoch_weirds[n]; - - local num_seen: double = v - last_count; - - if ( num_seen > 0.0 ) - Log::write(LOG, Info($ts = ts, $name = n, - $num_seen = double_to_count(num_seen))); - } - - last_epoch_weirds = this_epoch_weirds; - this_epoch_weirds = table(); - } - -event bro_init() &priority=5 - { - Log::create_stream(WeirdStats::LOG, - [$columns = Info, $ev = log_weird_stats, - $path="weird_stats"]); - local r1 = SumStats::Reducer($stream = "weirds.encountered", - $apply = set(SumStats::SUM)); - SumStats::create([$name = "weirds.statistics", - $epoch = weird_stat_interval, $reducers = set(r1), - $epoch_result = weird_epoch_results, - $epoch_finished = weird_epoch_finished]); - } - -module SumStats; - -function observe_weird_stats() - { - local rs = get_reporter_stats(); - - for ( n, v in rs$weirds_by_type ) - SumStats::observe("weirds.encountered", SumStats::Key($str = n), - SumStats::Observation($dbl=(v + 0.0))); - } - -@if ( Cluster::is_enabled() ) - -# I'm not sure if this is a hack or not: the manager will generate this -# event at the end of its epoch so workers can handle it just in time to -# generate the necessary stats. Alternative may be workers generating the -# stats individually/proactively in their own finish_epoch, but that may be -# less synchronized? 
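
WeirdStats exposes a single reporting-interval knob; an illustrative override (example value):

    @load misc/weird-stats

    # Summarize weird counts every 5 minutes instead of every 15.
    redef WeirdStats::weird_stat_interval = 5min;
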
-event SumStats::cluster_ss_request(uid: string, ss_name: string, cleanup: bool) &priority=10 - { - if ( ss_name != "weirds.statistics" ) - return; - observe_weird_stats(); - } - -@else - -event SumStats::finish_epoch(ss: SumStat) &priority=10 - { - if ( ss$name != "weirds.statistics" ) - return; - - observe_weird_stats(); - } - -@endif diff --git a/scripts/policy/misc/weird-stats.zeek b/scripts/policy/misc/weird-stats.zeek new file mode 100644 index 0000000000..bc75e2057a --- /dev/null +++ b/scripts/policy/misc/weird-stats.zeek @@ -0,0 +1,102 @@ +##! Log weird statistics. + +@load base/frameworks/sumstats +@load base/frameworks/cluster + +module WeirdStats; + +export { + redef enum Log::ID += { LOG }; + + ## How often stats are reported. + const weird_stat_interval = 15min &redef; + + type Info: record { + ## Timestamp for the measurement. + ts: time &log; + ## Name of the weird. + name: string &log; + ## Number of times weird was seen since the last stats interval. + num_seen: count &log; + }; + + global log_weird_stats: event(rec: Info); +} + +global this_epoch_weirds: table[string] of double; +global last_epoch_weirds: table[string] of double; + +function weird_epoch_results(ts: time, key: SumStats::Key, result: SumStats::Result) + { + this_epoch_weirds[key$str]=result["weirds.encountered"]$sum; + } + +function weird_epoch_finished(ts: time) + { + for ( n, v in this_epoch_weirds ) + { + local last_count: double = 0.0; + + if ( n in last_epoch_weirds ) + last_count = last_epoch_weirds[n]; + + local num_seen: double = v - last_count; + + if ( num_seen > 0.0 ) + Log::write(LOG, Info($ts = ts, $name = n, + $num_seen = double_to_count(num_seen))); + } + + last_epoch_weirds = this_epoch_weirds; + this_epoch_weirds = table(); + } + +event zeek_init() &priority=5 + { + Log::create_stream(WeirdStats::LOG, + [$columns = Info, $ev = log_weird_stats, + $path="weird_stats"]); + local r1 = SumStats::Reducer($stream = "weirds.encountered", + $apply = set(SumStats::SUM)); + SumStats::create([$name = "weirds.statistics", + $epoch = weird_stat_interval, $reducers = set(r1), + $epoch_result = weird_epoch_results, + $epoch_finished = weird_epoch_finished]); + } + +module SumStats; + +function observe_weird_stats() + { + local rs = get_reporter_stats(); + + for ( n, v in rs$weirds_by_type ) + SumStats::observe("weirds.encountered", SumStats::Key($str = n), + SumStats::Observation($dbl=(v + 0.0))); + } + +@if ( Cluster::is_enabled() ) + +# I'm not sure if this is a hack or not: the manager will generate this +# event at the end of its epoch so workers can handle it just in time to +# generate the necessary stats. Alternative may be workers generating the +# stats individually/proactively in their own finish_epoch, but that may be +# less synchronized? +event SumStats::cluster_ss_request(uid: string, ss_name: string, cleanup: bool) &priority=10 + { + if ( ss_name != "weirds.statistics" ) + return; + observe_weird_stats(); + } + +@else + +event SumStats::finish_epoch(ss: SumStat) &priority=10 + { + if ( ss$name != "weirds.statistics" ) + return; + + observe_weird_stats(); + } + +@endif diff --git a/scripts/policy/protocols/conn/known-hosts.bro b/scripts/policy/protocols/conn/known-hosts.bro deleted file mode 100644 index ef78630c6a..0000000000 --- a/scripts/policy/protocols/conn/known-hosts.bro +++ /dev/null @@ -1,166 +0,0 @@ -##! This script logs hosts that Bro determines have performed complete TCP -##! handshakes and logs the address once per day (by default). The log that -##! 
is output provides an easy way to determine a count of the IP addresses in -##! use on a network per day. - -@load base/utils/directions-and-hosts -@load base/frameworks/cluster - -module Known; - -export { - ## The known-hosts logging stream identifier. - redef enum Log::ID += { HOSTS_LOG }; - - ## The record type which contains the column fields of the known-hosts log. - type HostsInfo: record { - ## The timestamp at which the host was detected. - ts: time &log; - ## The address that was detected originating or responding to a - ## TCP connection. - host: addr &log; - }; - - ## Toggles between different implementations of this script. - ## When true, use a Broker data store, else use a regular Bro set - ## with keys uniformly distributed over proxy nodes in cluster - ## operation. - const use_host_store = T &redef; - - ## The hosts whose existence should be logged and tracked. - ## See :bro:type:`Host` for possible choices. - option host_tracking = LOCAL_HOSTS; - - ## Holds the set of all known hosts. Keys in the store are addresses - ## and their associated value will always be the "true" boolean. - global host_store: Cluster::StoreInfo; - - ## The Broker topic name to use for :bro:see:`Known::host_store`. - const host_store_name = "bro/known/hosts" &redef; - - ## The expiry interval of new entries in :bro:see:`Known::host_store`. - ## This also changes the interval at which hosts get logged. - const host_store_expiry = 1day &redef; - - ## The timeout interval to use for operations against - ## :bro:see:`Known::host_store`. - option host_store_timeout = 15sec; - - ## The set of all known addresses to store for preventing duplicate - ## logging of addresses. It can also be used from other scripts to - ## inspect if an address has been seen in use. - ## Maintain the list of known hosts for 24 hours so that the existence - ## of each individual address is logged each day. - ## - ## In cluster operation, this set is distributed uniformly across - ## proxy nodes. - global hosts: set[addr] &create_expire=1day &redef; - - ## An event that can be handled to access the :bro:type:`Known::HostsInfo` - ## record as it is sent on to the logging framework. - global log_known_hosts: event(rec: HostsInfo); -} - -event bro_init() - { - if ( ! Known::use_host_store ) - return; - - Known::host_store = Cluster::create_store(Known::host_store_name); - } - -event Known::host_found(info: HostsInfo) - { - if ( ! Known::use_host_store ) - return; - - when ( local r = Broker::put_unique(Known::host_store$store, info$host, - T, Known::host_store_expiry) ) - { - if ( r$status == Broker::SUCCESS ) - { - if ( r$result as bool ) - Log::write(Known::HOSTS_LOG, info); - } - else - Reporter::error(fmt("%s: data store put_unique failure", - Known::host_store_name)); - } - timeout Known::host_store_timeout - { - # Can't really tell if master store ended up inserting a key. - Log::write(Known::HOSTS_LOG, info); - } - } - -event known_host_add(info: HostsInfo) - { - if ( use_host_store ) - return; - - if ( info$host in Known::hosts ) - return; - - add Known::hosts[info$host]; - - @if ( ! Cluster::is_enabled() || - Cluster::local_node_type() == Cluster::PROXY ) - Log::write(Known::HOSTS_LOG, info); - @endif - } - -event Cluster::node_up(name: string, id: string) - { - if ( use_host_store ) - return; - - if ( Cluster::local_node_type() != Cluster::WORKER ) - return; - - # Drop local suppression cache on workers to force HRW key repartitioning. 
- Known::hosts = set(); - } - -event Cluster::node_down(name: string, id: string) - { - if ( use_host_store ) - return; - - if ( Cluster::local_node_type() != Cluster::WORKER ) - return; - - # Drop local suppression cache on workers to force HRW key repartitioning. - Known::hosts = set(); - } - -event Known::host_found(info: HostsInfo) - { - if ( use_host_store ) - return; - - if ( info$host in Known::hosts ) - return; - - Cluster::publish_hrw(Cluster::proxy_pool, info$host, known_host_add, info); - event known_host_add(info); - } - -event bro_init() - { - Log::create_stream(Known::HOSTS_LOG, [$columns=HostsInfo, $ev=log_known_hosts, $path="known_hosts"]); - } - -event connection_established(c: connection) &priority=5 - { - if ( c$orig$state != TCP_ESTABLISHED ) - return; - - if ( c$resp$state != TCP_ESTABLISHED ) - return; - - local id = c$id; - - for ( host in set(id$orig_h, id$resp_h) ) - if ( addr_matches_host(host, host_tracking) ) - event Known::host_found([$ts = network_time(), $host = host]); - } diff --git a/scripts/policy/protocols/conn/known-hosts.zeek b/scripts/policy/protocols/conn/known-hosts.zeek new file mode 100644 index 0000000000..8a3383e1b2 --- /dev/null +++ b/scripts/policy/protocols/conn/known-hosts.zeek @@ -0,0 +1,166 @@ +##! This script logs hosts that Zeek determines have performed complete TCP +##! handshakes and logs the address once per day (by default). The log that +##! is output provides an easy way to determine a count of the IP addresses in +##! use on a network per day. + +@load base/utils/directions-and-hosts +@load base/frameworks/cluster + +module Known; + +export { + ## The known-hosts logging stream identifier. + redef enum Log::ID += { HOSTS_LOG }; + + ## The record type which contains the column fields of the known-hosts log. + type HostsInfo: record { + ## The timestamp at which the host was detected. + ts: time &log; + ## The address that was detected originating or responding to a + ## TCP connection. + host: addr &log; + }; + + ## Toggles between different implementations of this script. + ## When true, use a Broker data store, else use a regular Zeek set + ## with keys uniformly distributed over proxy nodes in cluster + ## operation. + const use_host_store = T &redef; + + ## The hosts whose existence should be logged and tracked. + ## See :zeek:type:`Host` for possible choices. + option host_tracking = LOCAL_HOSTS; + + ## Holds the set of all known hosts. Keys in the store are addresses + ## and their associated value will always be the "true" boolean. + global host_store: Cluster::StoreInfo; + + ## The Broker topic name to use for :zeek:see:`Known::host_store`. + const host_store_name = "zeek/known/hosts" &redef; + + ## The expiry interval of new entries in :zeek:see:`Known::host_store`. + ## This also changes the interval at which hosts get logged. + const host_store_expiry = 1day &redef; + + ## The timeout interval to use for operations against + ## :zeek:see:`Known::host_store`. + option host_store_timeout = 15sec; + + ## The set of all known addresses to store for preventing duplicate + ## logging of addresses. It can also be used from other scripts to + ## inspect if an address has been seen in use. + ## Maintain the list of known hosts for 24 hours so that the existence + ## of each individual address is logged each day. + ## + ## In cluster operation, this set is distributed uniformly across + ## proxy nodes. 
+ global hosts: set[addr] &create_expire=1day &redef; + + ## An event that can be handled to access the :zeek:type:`Known::HostsInfo` + ## record as it is sent on to the logging framework. + global log_known_hosts: event(rec: HostsInfo); +} + +event zeek_init() + { + if ( ! Known::use_host_store ) + return; + + Known::host_store = Cluster::create_store(Known::host_store_name); + } + +event Known::host_found(info: HostsInfo) + { + if ( ! Known::use_host_store ) + return; + + when ( local r = Broker::put_unique(Known::host_store$store, info$host, + T, Known::host_store_expiry) ) + { + if ( r$status == Broker::SUCCESS ) + { + if ( r$result as bool ) + Log::write(Known::HOSTS_LOG, info); + } + else + Reporter::error(fmt("%s: data store put_unique failure", + Known::host_store_name)); + } + timeout Known::host_store_timeout + { + # Can't really tell if master store ended up inserting a key. + Log::write(Known::HOSTS_LOG, info); + } + } + +event known_host_add(info: HostsInfo) + { + if ( use_host_store ) + return; + + if ( info$host in Known::hosts ) + return; + + add Known::hosts[info$host]; + + @if ( ! Cluster::is_enabled() || + Cluster::local_node_type() == Cluster::PROXY ) + Log::write(Known::HOSTS_LOG, info); + @endif + } + +event Cluster::node_up(name: string, id: string) + { + if ( use_host_store ) + return; + + if ( Cluster::local_node_type() != Cluster::WORKER ) + return; + + # Drop local suppression cache on workers to force HRW key repartitioning. + Known::hosts = set(); + } + +event Cluster::node_down(name: string, id: string) + { + if ( use_host_store ) + return; + + if ( Cluster::local_node_type() != Cluster::WORKER ) + return; + + # Drop local suppression cache on workers to force HRW key repartitioning. + Known::hosts = set(); + } + +event Known::host_found(info: HostsInfo) + { + if ( use_host_store ) + return; + + if ( info$host in Known::hosts ) + return; + + Cluster::publish_hrw(Cluster::proxy_pool, info$host, known_host_add, info); + event known_host_add(info); + } + +event zeek_init() + { + Log::create_stream(Known::HOSTS_LOG, [$columns=HostsInfo, $ev=log_known_hosts, $path="known_hosts"]); + } + +event connection_established(c: connection) &priority=5 + { + if ( c$orig$state != TCP_ESTABLISHED ) + return; + + if ( c$resp$state != TCP_ESTABLISHED ) + return; + + local id = c$id; + + for ( host in set(id$orig_h, id$resp_h) ) + if ( addr_matches_host(host, host_tracking) ) + event Known::host_found([$ts = network_time(), $host = host]); + } diff --git a/scripts/policy/protocols/conn/known-services.bro b/scripts/policy/protocols/conn/known-services.bro deleted file mode 100644 index f9e129839d..0000000000 --- a/scripts/policy/protocols/conn/known-services.bro +++ /dev/null @@ -1,225 +0,0 @@ -##! This script logs and tracks services. In the case of this script, a service -##! is defined as an IP address and port which has responded to and fully -##! completed a TCP handshake with another host. If a protocol is detected -##! during the session, the protocol will also be logged. - -@load base/utils/directions-and-hosts -@load base/frameworks/cluster - -module Known; - -export { - ## The known-services logging stream identifier. - redef enum Log::ID += { SERVICES_LOG }; - - ## The record type which contains the column fields of the known-services - ## log. - type ServicesInfo: record { - ## The time at which the service was detected. - ts: time &log; - ## The host address on which the service is running. - host: addr &log; - ## The port number on which the service is running. 
- port_num: port &log; - ## The transport-layer protocol which the service uses. - port_proto: transport_proto &log; - ## A set of protocols that match the service's connection payloads. - service: set[string] &log; - }; - - ## Toggles between different implementations of this script. - ## When true, use a Broker data store, else use a regular Bro set - ## with keys uniformly distributed over proxy nodes in cluster - ## operation. - const use_service_store = T &redef; - - ## The hosts whose services should be tracked and logged. - ## See :bro:type:`Host` for possible choices. - option service_tracking = LOCAL_HOSTS; - - type AddrPortPair: record { - host: addr; - p: port; - }; - - ## Holds the set of all known services. Keys in the store are - ## :bro:type:`Known::AddrPortPair` and their associated value is - ## always the boolean value of "true". - global service_store: Cluster::StoreInfo; - - ## The Broker topic name to use for :bro:see:`Known::service_store`. - const service_store_name = "bro/known/services" &redef; - - ## The expiry interval of new entries in :bro:see:`Known::service_store`. - ## This also changes the interval at which services get logged. - const service_store_expiry = 1day &redef; - - ## The timeout interval to use for operations against - ## :bro:see:`Known::service_store`. - option service_store_timeout = 15sec; - - ## Tracks the set of daily-detected services for preventing the logging - ## of duplicates, but can also be inspected by other scripts for - ## different purposes. - ## - ## In cluster operation, this set is uniformly distributed across - ## proxy nodes. - ## - ## This set is automatically populated and shouldn't be directly modified. - global services: set[addr, port] &create_expire=1day; - - ## Event that can be handled to access the :bro:type:`Known::ServicesInfo` - ## record as it is sent on to the logging framework. - global log_known_services: event(rec: ServicesInfo); -} - -redef record connection += { - # This field is to indicate whether or not the processing for detecting - # and logging the service for this connection is complete. - known_services_done: bool &default=F; -}; - - -event bro_init() - { - if ( ! Known::use_service_store ) - return; - - Known::service_store = Cluster::create_store(Known::service_store_name); - } - -event service_info_commit(info: ServicesInfo) - - { - if ( ! Known::use_service_store ) - return; - - local key = AddrPortPair($host = info$host, $p = info$port_num); - - when ( local r = Broker::put_unique(Known::service_store$store, key, - T, Known::service_store_expiry) ) - { - if ( r$status == Broker::SUCCESS ) - { - if ( r$result as bool ) - Log::write(Known::SERVICES_LOG, info); - } - else - Reporter::error(fmt("%s: data store put_unique failure", - Known::service_store_name)); - } - timeout Known::service_store_timeout - { - Log::write(Known::SERVICES_LOG, info); - } - } - -event known_service_add(info: ServicesInfo) - { - if ( Known::use_service_store ) - return; - - if ( [info$host, info$port_num] in Known::services ) - return; - - add Known::services[info$host, info$port_num]; - - @if ( ! Cluster::is_enabled() || - Cluster::local_node_type() == Cluster::PROXY ) - Log::write(Known::SERVICES_LOG, info); - @endif - } - -event Cluster::node_up(name: string, id: string) - { - if ( Known::use_service_store ) - return; - - if ( Cluster::local_node_type() != Cluster::WORKER ) - return; - - # Drop local suppression cache on workers to force HRW key repartitioning. 
- Known::services = set(); - } - -event Cluster::node_down(name: string, id: string) - { - if ( Known::use_service_store ) - return; - - if ( Cluster::local_node_type() != Cluster::WORKER ) - return; - - # Drop local suppression cache on workers to force HRW key repartitioning. - Known::services = set(); - } - -event service_info_commit(info: ServicesInfo) - { - if ( Known::use_service_store ) - return; - - if ( [info$host, info$port_num] in Known::services ) - return; - - local key = cat(info$host, info$port_num); - Cluster::publish_hrw(Cluster::proxy_pool, key, known_service_add, info); - event known_service_add(info); - } - -function known_services_done(c: connection) - { - local id = c$id; - c$known_services_done = T; - - if ( ! addr_matches_host(id$resp_h, service_tracking) ) - return; - - if ( |c$service| == 1 ) - { - if ( "ftp-data" in c$service ) - # Don't include ftp data sessions. - return; - - if ( "DNS" in c$service && c$resp$size == 0 ) - # For dns, require that the server talks. - return; - } - - local info = ServicesInfo($ts = network_time(), $host = id$resp_h, - $port_num = id$resp_p, - $port_proto = get_port_transport_proto(id$resp_p), - $service = c$service); - - # If no protocol was detected, wait a short time before attempting to log - # in case a protocol is detected on another connection. - if ( |c$service| == 0 ) - schedule 5min { service_info_commit(info) }; - else - event service_info_commit(info); - } - -event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=-5 - { - known_services_done(c); - } - -# Handle the connection ending in case no protocol was ever detected. -event connection_state_remove(c: connection) &priority=-5 - { - if ( c$known_services_done ) - return; - - if ( c$resp$state != TCP_ESTABLISHED ) - return; - - known_services_done(c); - } - -event bro_init() &priority=5 - { - Log::create_stream(Known::SERVICES_LOG, [$columns=ServicesInfo, - $ev=log_known_services, - $path="known_services"]); - } - diff --git a/scripts/policy/protocols/conn/known-services.zeek b/scripts/policy/protocols/conn/known-services.zeek new file mode 100644 index 0000000000..24774586dc --- /dev/null +++ b/scripts/policy/protocols/conn/known-services.zeek @@ -0,0 +1,225 @@ +##! This script logs and tracks services. In the case of this script, a service +##! is defined as an IP address and port which has responded to and fully +##! completed a TCP handshake with another host. If a protocol is detected +##! during the session, the protocol will also be logged. + +@load base/utils/directions-and-hosts +@load base/frameworks/cluster + +module Known; + +export { + ## The known-services logging stream identifier. + redef enum Log::ID += { SERVICES_LOG }; + + ## The record type which contains the column fields of the known-services + ## log. + type ServicesInfo: record { + ## The time at which the service was detected. + ts: time &log; + ## The host address on which the service is running. + host: addr &log; + ## The port number on which the service is running. + port_num: port &log; + ## The transport-layer protocol which the service uses. + port_proto: transport_proto &log; + ## A set of protocols that match the service's connection payloads. + service: set[string] &log; + }; + + ## Toggles between different implementations of this script. + ## When true, use a Broker data store, else use a regular Zeek set + ## with keys uniformly distributed over proxy nodes in cluster + ## operation. 
+ const use_service_store = T &redef; + + ## The hosts whose services should be tracked and logged. + ## See :zeek:type:`Host` for possible choices. + option service_tracking = LOCAL_HOSTS; + + type AddrPortPair: record { + host: addr; + p: port; + }; + + ## Holds the set of all known services. Keys in the store are + ## :zeek:type:`Known::AddrPortPair` and their associated value is + ## always the boolean value of "true". + global service_store: Cluster::StoreInfo; + + ## The Broker topic name to use for :zeek:see:`Known::service_store`. + const service_store_name = "zeek/known/services" &redef; + + ## The expiry interval of new entries in :zeek:see:`Known::service_store`. + ## This also changes the interval at which services get logged. + const service_store_expiry = 1day &redef; + + ## The timeout interval to use for operations against + ## :zeek:see:`Known::service_store`. + option service_store_timeout = 15sec; + + ## Tracks the set of daily-detected services for preventing the logging + ## of duplicates, but can also be inspected by other scripts for + ## different purposes. + ## + ## In cluster operation, this set is uniformly distributed across + ## proxy nodes. + ## + ## This set is automatically populated and shouldn't be directly modified. + global services: set[addr, port] &create_expire=1day; + + ## Event that can be handled to access the :zeek:type:`Known::ServicesInfo` + ## record as it is sent on to the logging framework. + global log_known_services: event(rec: ServicesInfo); +} + +redef record connection += { + # This field is to indicate whether or not the processing for detecting + # and logging the service for this connection is complete. + known_services_done: bool &default=F; +}; + + +event zeek_init() + { + if ( ! Known::use_service_store ) + return; + + Known::service_store = Cluster::create_store(Known::service_store_name); + } + +event service_info_commit(info: ServicesInfo) + + { + if ( ! Known::use_service_store ) + return; + + local key = AddrPortPair($host = info$host, $p = info$port_num); + + when ( local r = Broker::put_unique(Known::service_store$store, key, + T, Known::service_store_expiry) ) + { + if ( r$status == Broker::SUCCESS ) + { + if ( r$result as bool ) + Log::write(Known::SERVICES_LOG, info); + } + else + Reporter::error(fmt("%s: data store put_unique failure", + Known::service_store_name)); + } + timeout Known::service_store_timeout + { + Log::write(Known::SERVICES_LOG, info); + } + } + +event known_service_add(info: ServicesInfo) + { + if ( Known::use_service_store ) + return; + + if ( [info$host, info$port_num] in Known::services ) + return; + + add Known::services[info$host, info$port_num]; + + @if ( ! Cluster::is_enabled() || + Cluster::local_node_type() == Cluster::PROXY ) + Log::write(Known::SERVICES_LOG, info); + @endif + } + +event Cluster::node_up(name: string, id: string) + { + if ( Known::use_service_store ) + return; + + if ( Cluster::local_node_type() != Cluster::WORKER ) + return; + + # Drop local suppression cache on workers to force HRW key repartitioning. + Known::services = set(); + } + +event Cluster::node_down(name: string, id: string) + { + if ( Known::use_service_store ) + return; + + if ( Cluster::local_node_type() != Cluster::WORKER ) + return; + + # Drop local suppression cache on workers to force HRW key repartitioning. 
+ Known::services = set(); + } + +event service_info_commit(info: ServicesInfo) + { + if ( Known::use_service_store ) + return; + + if ( [info$host, info$port_num] in Known::services ) + return; + + local key = cat(info$host, info$port_num); + Cluster::publish_hrw(Cluster::proxy_pool, key, known_service_add, info); + event known_service_add(info); + } + +function known_services_done(c: connection) + { + local id = c$id; + c$known_services_done = T; + + if ( ! addr_matches_host(id$resp_h, service_tracking) ) + return; + + if ( |c$service| == 1 ) + { + if ( "ftp-data" in c$service ) + # Don't include ftp data sessions. + return; + + if ( "DNS" in c$service && c$resp$size == 0 ) + # For dns, require that the server talks. + return; + } + + local info = ServicesInfo($ts = network_time(), $host = id$resp_h, + $port_num = id$resp_p, + $port_proto = get_port_transport_proto(id$resp_p), + $service = c$service); + + # If no protocol was detected, wait a short time before attempting to log + # in case a protocol is detected on another connection. + if ( |c$service| == 0 ) + schedule 5min { service_info_commit(info) }; + else + event service_info_commit(info); + } + +event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=-5 + { + known_services_done(c); + } + +# Handle the connection ending in case no protocol was ever detected. +event connection_state_remove(c: connection) &priority=-5 + { + if ( c$known_services_done ) + return; + + if ( c$resp$state != TCP_ESTABLISHED ) + return; + + known_services_done(c); + } + +event zeek_init() &priority=5 + { + Log::create_stream(Known::SERVICES_LOG, [$columns=ServicesInfo, + $ev=log_known_services, + $path="known_services"]); + } + diff --git a/scripts/policy/protocols/conn/mac-logging.bro b/scripts/policy/protocols/conn/mac-logging.zeek similarity index 100% rename from scripts/policy/protocols/conn/mac-logging.bro rename to scripts/policy/protocols/conn/mac-logging.zeek diff --git a/scripts/policy/protocols/conn/vlan-logging.bro b/scripts/policy/protocols/conn/vlan-logging.zeek similarity index 100% rename from scripts/policy/protocols/conn/vlan-logging.bro rename to scripts/policy/protocols/conn/vlan-logging.zeek diff --git a/scripts/policy/protocols/conn/weirds.bro b/scripts/policy/protocols/conn/weirds.zeek similarity index 100% rename from scripts/policy/protocols/conn/weirds.bro rename to scripts/policy/protocols/conn/weirds.zeek diff --git a/scripts/policy/protocols/dhcp/deprecated_events.bro b/scripts/policy/protocols/dhcp/deprecated_events.bro deleted file mode 100644 index 941e5c72c3..0000000000 --- a/scripts/policy/protocols/dhcp/deprecated_events.bro +++ /dev/null @@ -1,272 +0,0 @@ -##! Bro 2.6 removed certain DHCP events, but scripts in the Bro -##! ecosystem are still relying on those events. As a transition, this -##! script will handle the new event, and generate the old events, -##! which are marked as deprecated. Note: This script should be -##! removed in the next Bro version after 2.6. - -@load base/protocols/dhcp - -## A DHCP message. -## -## .. note:: This type is included to support the deprecated events dhcp_ack, -## dhcp_decline, dhcp_discover, dhcp_inform, dhcp_nak, dhcp_offer, -## dhcp_release and dhcp_request and is thus similarly deprecated -## itself. Use :bro:see:`dhcp_message` instead. -## -## .. bro:see:: dhcp_message dhcp_ack dhcp_decline dhcp_discover -## dhcp_inform dhcp_nak dhcp_offer dhcp_release dhcp_request -type dhcp_msg: record { - op: count; ##< Message OP code. 
1 = BOOTREQUEST, 2 = BOOTREPLY - m_type: count; ##< The type of DHCP message. - xid: count; ##< Transaction ID of a DHCP session. - h_addr: string; ##< Hardware address of the client. - ciaddr: addr; ##< Original IP address of the client. - yiaddr: addr; ##< IP address assigned to the client. -}; - -## A list of router addresses offered by a DHCP server. -## -## .. note:: This type is included to support the deprecated events dhcp_ack -## and dhcp_offer and is thus similarly deprecated -## itself. Use :bro:see:`dhcp_message` instead. -## -## .. bro:see:: dhcp_message dhcp_ack dhcp_offer -type dhcp_router_list: table[count] of addr; - -## Generated for DHCP messages of type *DHCPDISCOVER* (client broadcast to locate -## available servers). -## -## c: The connection record describing the underlying UDP flow. -## -## msg: The parsed type-independent part of the DHCP message. -## -## req_addr: The specific address requested by the client. -## -## host_name: The value of the host name option, if specified by the client. -## -## .. bro:see:: dhcp_message dhcp_discover dhcp_offer dhcp_request -## dhcp_decline dhcp_ack dhcp_nak dhcp_release dhcp_inform -## -## .. note:: This event has been deprecated, and will be removed in the next version. -## Use dhcp_message instead. -## -## .. note:: Bro does not support broadcast packets (as used by the DHCP -## protocol). It treats broadcast addresses just like any other and -## associates packets into transport-level flows in the same way as usual. -## -global dhcp_discover: event(c: connection, msg: dhcp_msg, req_addr: addr, host_name: string) &deprecated; - -## Generated for DHCP messages of type *DHCPOFFER* (server to client in response -## to DHCPDISCOVER with offer of configuration parameters). -## -## c: The connection record describing the underlying UDP flow. -## -## msg: The parsed type-independent part of the DHCP message. -## -## mask: The subnet mask specified by the message. -## -## router: The list of routers specified by the message. -## -## lease: The least interval specified by the message. -## -## serv_addr: The server address specified by the message. -## -## host_name: Optional host name value. May differ from the host name requested -## from the client. -## -## .. bro:see:: dhcp_message dhcp_discover dhcp_request dhcp_decline -## dhcp_ack dhcp_nak dhcp_release dhcp_inform -## -## .. note:: This event has been deprecated, and will be removed in the next version. -## Use dhcp_message instead. -## -## .. note:: Bro does not support broadcast packets (as used by the DHCP -## protocol). It treats broadcast addresses just like any other and -## associates packets into transport-level flows in the same way as usual. -## -global dhcp_offer: event(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_list, lease: interval, serv_addr: addr, host_name: string) &deprecated; - -## Generated for DHCP messages of type *DHCPREQUEST* (Client message to servers either -## (a) requesting offered parameters from one server and implicitly declining offers -## from all others, (b) confirming correctness of previously allocated address after, -## e.g., system reboot, or (c) extending the lease on a particular network address.) -## -## c: The connection record describing the underlying UDP flow. -## -## msg: The parsed type-independent part of the DHCP message. -## -## req_addr: The client address specified by the message. -## -## serv_addr: The server address specified by the message. 
-## -## host_name: The value of the host name option, if specified by the client. -## -## .. bro:see:: dhcp_message dhcp_discover dhcp_offer dhcp_decline -## dhcp_ack dhcp_nak dhcp_release dhcp_inform -## -## .. note:: This event has been deprecated, and will be removed in the next version. -## Use dhcp_message instead. -## -## .. note:: Bro does not support broadcast packets (as used by the DHCP -## protocol). It treats broadcast addresses just like any other and -## associates packets into transport-level flows in the same way as usual. -## -global dhcp_request: event(c: connection, msg: dhcp_msg, req_addr: addr, serv_addr: addr, host_name: string) &deprecated; - -## Generated for DHCP messages of type *DHCPDECLINE* (Client to server indicating -## network address is already in use). -## -## c: The connection record describing the underlying UDP flow. -## -## msg: The parsed type-independent part of the DHCP message. -## -## host_name: Optional host name value. -## -## .. bro:see:: dhcp_message dhcp_discover dhcp_offer dhcp_request -## dhcp_ack dhcp_nak dhcp_release dhcp_inform -## -## .. note:: This event has been deprecated, and will be removed in the next version. -## Use dhcp_message instead. -## -## .. note:: Bro does not support broadcast packets (as used by the DHCP -## protocol). It treats broadcast addresses just like any other and -## associates packets into transport-level flows in the same way as usual. -## -global dhcp_decline: event(c: connection, msg: dhcp_msg, host_name: string) &deprecated; - -## Generated for DHCP messages of type *DHCPACK* (Server to client with configuration -## parameters, including committed network address). -## -## c: The connection record describing the underlying UDP flow. -## -## msg: The parsed type-independent part of the DHCP message. -## -## mask: The subnet mask specified by the message. -## -## router: The list of routers specified by the message. -## -## lease: The least interval specified by the message. -## -## serv_addr: The server address specified by the message. -## -## host_name: Optional host name value. May differ from the host name requested -## from the client. -## -## .. bro:see:: dhcp_message dhcp_discover dhcp_offer dhcp_request -## dhcp_decline dhcp_nak dhcp_release dhcp_inform -## -## .. note:: This event has been deprecated, and will be removed in the next version. -## Use dhcp_message instead. -## -global dhcp_ack: event(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_list, lease: interval, serv_addr: addr, host_name: string) &deprecated; - -## Generated for DHCP messages of type *DHCPNAK* (Server to client indicating client's -## notion of network address is incorrect (e.g., client has moved to new subnet) or -## client's lease has expired). -## -## c: The connection record describing the underlying UDP flow. -## -## msg: The parsed type-independent part of the DHCP message. -## -## host_name: Optional host name value. -## -## .. bro:see:: dhcp_message dhcp_discover dhcp_offer dhcp_request -## dhcp_decline dhcp_ack dhcp_release dhcp_inform -## -## .. note:: This event has been deprecated, and will be removed in the next version. -## Use dhcp_message instead. -## -## .. note:: Bro does not support broadcast packets (as used by the DHCP -## protocol). It treats broadcast addresses just like any other and -## associates packets into transport-level flows in the same way as usual. 
-## -global dhcp_nak: event(c: connection, msg: dhcp_msg, host_name: string) &deprecated; - -## Generated for DHCP messages of type *DHCPRELEASE* (Client to server relinquishing -## network address and cancelling remaining lease). -## -## c: The connection record describing the underlying UDP flow. -## -## msg: The parsed type-independent part of the DHCP message. -## -## host_name: The value of the host name option, if specified by the client. -## -## .. bro:see:: dhcp_message dhcp_discover dhcp_offer dhcp_request -## dhcp_decline dhcp_ack dhcp_nak dhcp_inform -## -## .. note:: This event has been deprecated, and will be removed in the next version. -## Use dhcp_message instead. -## -global dhcp_release: event(c: connection, msg: dhcp_msg, host_name: string) &deprecated; - -## Generated for DHCP messages of type *DHCPINFORM* (Client to server, asking only for -## local configuration parameters; client already has externally configured network -## address). -## -## c: The connection record describing the underlying UDP flow. -## -## msg: The parsed type-independent part of the DHCP message. -## -## host_name: The value of the host name option, if specified by the client. -## -## .. bro:see:: dhcp_message dhcp_discover dhcp_offer dhcp_request -## dhcp_decline dhcp_ack dhcp_nak dhcp_release -## -## .. note:: This event has been deprecated, and will be removed in the next version. -## Use dhcp_message instead. -## -## .. note:: Bro does not support broadcast packets (as used by the DHCP -## protocol). It treats broadcast addresses just like any other and -## associates packets into transport-level flows in the same way as usual. -## -global dhcp_inform: event(c: connection, msg: dhcp_msg, host_name: string) &deprecated; - -event dhcp_message(c: connection, is_orig: bool, msg: DHCP::Msg, options: DHCP::Options) - { - local old_msg: dhcp_msg = [$op=msg$op, $m_type=msg$m_type, $xid=msg$xid, - $h_addr=msg$chaddr, $ciaddr=msg$ciaddr, $yiaddr=msg$yiaddr]; - - local routers = dhcp_router_list(); - - if ( options?$routers ) - for ( i in options$routers ) - routers[|routers|] = options$routers[i]; - - # These fields are technically optional, but aren't listed as such in the event. - # We give it some defaults in order to suppress errors. - local ar = ( options?$addr_request ) ? options$addr_request : 0.0.0.0; - local hn = ( options?$host_name ) ? options$host_name : ""; - local le = ( options?$lease ) ? options$lease : 0 secs; - local sm = ( options?$subnet_mask ) ? options$subnet_mask : 255.255.255.255; - local sa = ( options?$serv_addr ) ? 
options$serv_addr : 0.0.0.0; - - switch ( DHCP::message_types[msg$m_type] ) { - case "DISCOVER": - event dhcp_discover(c, old_msg, ar, hn); - break; - case "OFFER": - event dhcp_offer(c, old_msg, sm, routers, le, sa, hn); - break; - case "REQUEST": - event dhcp_request(c, old_msg, ar, sa, hn); - break; - case "DECLINE": - event dhcp_decline(c, old_msg, hn); - break; - case "ACK": - event dhcp_ack(c, old_msg, sm, routers, le, sa, hn); - break; - case "NAK": - event dhcp_nak(c, old_msg, hn); - break; - case "RELEASE": - event dhcp_release(c, old_msg, hn); - break; - case "INFORM": - event dhcp_inform(c, old_msg, hn); - break; - default: - # This isn't a weird, it's just a DHCP message type the old scripts don't handle - break; - } - } diff --git a/scripts/policy/protocols/dhcp/msg-orig.bro b/scripts/policy/protocols/dhcp/msg-orig.zeek similarity index 100% rename from scripts/policy/protocols/dhcp/msg-orig.bro rename to scripts/policy/protocols/dhcp/msg-orig.zeek diff --git a/scripts/policy/protocols/dhcp/software.bro b/scripts/policy/protocols/dhcp/software.zeek similarity index 100% rename from scripts/policy/protocols/dhcp/software.bro rename to scripts/policy/protocols/dhcp/software.zeek diff --git a/scripts/policy/protocols/dhcp/sub-opts.bro b/scripts/policy/protocols/dhcp/sub-opts.zeek similarity index 100% rename from scripts/policy/protocols/dhcp/sub-opts.bro rename to scripts/policy/protocols/dhcp/sub-opts.zeek diff --git a/scripts/policy/protocols/dns/auth-addl.bro b/scripts/policy/protocols/dns/auth-addl.zeek similarity index 100% rename from scripts/policy/protocols/dns/auth-addl.bro rename to scripts/policy/protocols/dns/auth-addl.zeek diff --git a/scripts/policy/protocols/dns/detect-external-names.bro b/scripts/policy/protocols/dns/detect-external-names.bro deleted file mode 100644 index ea56e5676f..0000000000 --- a/scripts/policy/protocols/dns/detect-external-names.bro +++ /dev/null @@ -1,35 +0,0 @@ -##! This script detects names which are not within zones considered to be -##! local but resolving to addresses considered local. -##! The :bro:id:`Site::local_zones` variable **must** be set appropriately for -##! this detection. - -@load base/frameworks/notice -@load base/utils/site - -module DNS; - -export { - redef enum Notice::Type += { - ## Raised when a non-local name is found to be pointing at a - ## local host. The :bro:id:`Site::local_zones` variable - ## **must** be set appropriately for this detection. - External_Name, - }; -} - -event dns_A_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priority=-3 - { - if ( |Site::local_zones| == 0 ) - return; - - # Check for responses from remote hosts that point at local hosts - # but the name is not considered to be within a "local" zone. - if ( Site::is_local_addr(a) && # referring to a local host - ! Site::is_local_name(ans$query) ) # name isn't in a local zone. - { - NOTICE([$note=External_Name, - $msg=fmt("%s is pointing to a local host - %s.", ans$query, a), - $conn=c, - $identifier=cat(a,ans$query)]); - } - } diff --git a/scripts/policy/protocols/dns/detect-external-names.zeek b/scripts/policy/protocols/dns/detect-external-names.zeek new file mode 100644 index 0000000000..9533f396a2 --- /dev/null +++ b/scripts/policy/protocols/dns/detect-external-names.zeek @@ -0,0 +1,35 @@ +##! This script detects names which are not within zones considered to be +##! local but resolving to addresses considered local. +##! The :zeek:id:`Site::local_zones` variable **must** be set appropriately for +##! this detection. 
+ +@load base/frameworks/notice +@load base/utils/site + +module DNS; + +export { + redef enum Notice::Type += { + ## Raised when a non-local name is found to be pointing at a + ## local host. The :zeek:id:`Site::local_zones` variable + ## **must** be set appropriately for this detection. + External_Name, + }; +} + +event dns_A_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priority=-3 + { + if ( |Site::local_zones| == 0 ) + return; + + # Check for responses from remote hosts that point at local hosts + # but the name is not considered to be within a "local" zone. + if ( Site::is_local_addr(a) && # referring to a local host + ! Site::is_local_name(ans$query) ) # name isn't in a local zone. + { + NOTICE([$note=External_Name, + $msg=fmt("%s is pointing to a local host - %s.", ans$query, a), + $conn=c, + $identifier=cat(a,ans$query)]); + } + } diff --git a/scripts/policy/protocols/ftp/detect-bruteforcing.bro b/scripts/policy/protocols/ftp/detect-bruteforcing.bro deleted file mode 100644 index eb70688d47..0000000000 --- a/scripts/policy/protocols/ftp/detect-bruteforcing.bro +++ /dev/null @@ -1,60 +0,0 @@ -##! FTP brute-forcing detector, triggering when too many rejected usernames or -##! failed passwords have occurred from a single address. - -@load base/protocols/ftp -@load base/frameworks/sumstats - -@load base/utils/time - -module FTP; - -export { - redef enum Notice::Type += { - ## Indicates a host bruteforcing FTP logins by watching for too - ## many rejected usernames or failed passwords. - Bruteforcing - }; - - ## How many rejected usernames or passwords are required before being - ## considered to be bruteforcing. - const bruteforce_threshold: double = 20 &redef; - - ## The time period in which the threshold needs to be crossed before - ## being reset. - const bruteforce_measurement_interval = 15mins &redef; -} - - -event bro_init() - { - local r1: SumStats::Reducer = [$stream="ftp.failed_auth", $apply=set(SumStats::UNIQUE), $unique_max=double_to_count(bruteforce_threshold+2)]; - SumStats::create([$name="ftp-detect-bruteforcing", - $epoch=bruteforce_measurement_interval, - $reducers=set(r1), - $threshold_val(key: SumStats::Key, result: SumStats::Result) = - { - return result["ftp.failed_auth"]$num+0.0; - }, - $threshold=bruteforce_threshold, - $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = - { - local r = result["ftp.failed_auth"]; - local dur = duration_to_mins_secs(r$end-r$begin); - local plural = r$unique>1 ? "s" : ""; - local message = fmt("%s had %d failed logins on %d FTP server%s in %s", key$host, r$num, r$unique, plural, dur); - NOTICE([$note=FTP::Bruteforcing, - $src=key$host, - $msg=message, - $identifier=cat(key$host)]); - }]); - } - -event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool) - { - local cmd = c$ftp$cmdarg$cmd; - if ( cmd == "USER" || cmd == "PASS" ) - { - if ( FTP::parse_ftp_reply_code(code)$x == 5 ) - SumStats::observe("ftp.failed_auth", [$host=c$id$orig_h], [$str=cat(c$id$resp_h)]); - } - } diff --git a/scripts/policy/protocols/ftp/detect-bruteforcing.zeek b/scripts/policy/protocols/ftp/detect-bruteforcing.zeek new file mode 100644 index 0000000000..4ac7b61efc --- /dev/null +++ b/scripts/policy/protocols/ftp/detect-bruteforcing.zeek @@ -0,0 +1,60 @@ +##! FTP brute-forcing detector, triggering when too many rejected usernames or +##! failed passwords have occurred from a single address. 
+ +@load base/protocols/ftp +@load base/frameworks/sumstats + +@load base/utils/time + +module FTP; + +export { + redef enum Notice::Type += { + ## Indicates a host bruteforcing FTP logins by watching for too + ## many rejected usernames or failed passwords. + Bruteforcing + }; + + ## How many rejected usernames or passwords are required before being + ## considered to be bruteforcing. + const bruteforce_threshold: double = 20 &redef; + + ## The time period in which the threshold needs to be crossed before + ## being reset. + const bruteforce_measurement_interval = 15mins &redef; +} + + +event zeek_init() + { + local r1: SumStats::Reducer = [$stream="ftp.failed_auth", $apply=set(SumStats::UNIQUE), $unique_max=double_to_count(bruteforce_threshold+2)]; + SumStats::create([$name="ftp-detect-bruteforcing", + $epoch=bruteforce_measurement_interval, + $reducers=set(r1), + $threshold_val(key: SumStats::Key, result: SumStats::Result) = + { + return result["ftp.failed_auth"]$num+0.0; + }, + $threshold=bruteforce_threshold, + $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = + { + local r = result["ftp.failed_auth"]; + local dur = duration_to_mins_secs(r$end-r$begin); + local plural = r$unique>1 ? "s" : ""; + local message = fmt("%s had %d failed logins on %d FTP server%s in %s", key$host, r$num, r$unique, plural, dur); + NOTICE([$note=FTP::Bruteforcing, + $src=key$host, + $msg=message, + $identifier=cat(key$host)]); + }]); + } + +event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool) + { + local cmd = c$ftp$cmdarg$cmd; + if ( cmd == "USER" || cmd == "PASS" ) + { + if ( FTP::parse_ftp_reply_code(code)$x == 5 ) + SumStats::observe("ftp.failed_auth", [$host=c$id$orig_h], [$str=cat(c$id$resp_h)]); + } + } diff --git a/scripts/policy/protocols/ftp/detect.bro b/scripts/policy/protocols/ftp/detect.zeek similarity index 100% rename from scripts/policy/protocols/ftp/detect.bro rename to scripts/policy/protocols/ftp/detect.zeek diff --git a/scripts/policy/protocols/ftp/software.bro b/scripts/policy/protocols/ftp/software.zeek similarity index 100% rename from scripts/policy/protocols/ftp/software.bro rename to scripts/policy/protocols/ftp/software.zeek diff --git a/scripts/policy/protocols/http/detect-sqli.bro b/scripts/policy/protocols/http/detect-sqli.bro deleted file mode 100644 index 01c98ba0d7..0000000000 --- a/scripts/policy/protocols/http/detect-sqli.bro +++ /dev/null @@ -1,127 +0,0 @@ -##! SQL injection attack detection in HTTP. - -@load base/frameworks/notice -@load base/frameworks/sumstats -@load base/protocols/http - -module HTTP; - -export { - redef enum Notice::Type += { - ## Indicates that a host performing SQL injection attacks was - ## detected. - SQL_Injection_Attacker, - ## Indicates that a host was seen to have SQL injection attacks - ## against it. This is tracked by IP address as opposed to - ## hostname. - SQL_Injection_Victim, - }; - - redef enum Tags += { - ## Indicator of a URI based SQL injection attack. - URI_SQLI, - ## Indicator of client body based SQL injection attack. This is - ## typically the body content of a POST request. Not implemented - ## yet. - POST_SQLI, - ## Indicator of a cookie based SQL injection attack. Not - ## implemented yet. - COOKIE_SQLI, - }; - - ## Defines the threshold that determines if an SQL injection attack - ## is ongoing based on the number of requests that appear to be SQL - ## injection attacks. 
- const sqli_requests_threshold: double = 50.0 &redef; - - ## Interval at which to watch for the - ## :bro:id:`HTTP::sqli_requests_threshold` variable to be crossed. - ## At the end of each interval the counter is reset. - const sqli_requests_interval = 5min &redef; - - ## Collecting samples will add extra data to notice emails - ## by collecting some sample SQL injection url paths. Disable - ## sample collection by setting this value to 0. - const collect_SQLi_samples = 5 &redef; - - ## Regular expression is used to match URI based SQL injections. - const match_sql_injection_uri = - /[\?&][^[:blank:]\x00-\x37\|]+?=[\-[:alnum:]%]+([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x37]|\/\*.*?\*\/|\)?;)+.*?([hH][aA][vV][iI][nN][gG]|[uU][nN][iI][oO][nN]|[eE][xX][eE][cC]|[sS][eE][lL][eE][cC][tT]|[dD][eE][lL][eE][tT][eE]|[dD][rR][oO][pP]|[dD][eE][cC][lL][aA][rR][eE]|[cC][rR][eE][aA][tT][eE]|[iI][nN][sS][eE][rR][tT])([[:blank:]\x00-\x37]|\/\*.*?\*\/)+/ - | /[\?&][^[:blank:]\x00-\x37\|]+?=[\-0-9%]+([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x37]|\/\*.*?\*\/|\)?;)+([xX]?[oO][rR]|[nN]?[aA][nN][dD])([[:blank:]\x00-\x37]|\/\*.*?\*\/)+['"]?(([^a-zA-Z&]+)?=|[eE][xX][iI][sS][tT][sS])/ - | /[\?&][^[:blank:]\x00-\x37]+?=[\-0-9%]*([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]([[:blank:]\x00-\x37]|\/\*.*?\*\/)*(-|=|\+|\|\|)([[:blank:]\x00-\x37]|\/\*.*?\*\/)*([0-9]|\(?[cC][oO][nN][vV][eE][rR][tT]|[cC][aA][sS][tT])/ - | /[\?&][^[:blank:]\x00-\x37\|]+?=([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]([[:blank:]\x00-\x37]|\/\*.*?\*\/|;)*([xX]?[oO][rR]|[nN]?[aA][nN][dD]|[hH][aA][vV][iI][nN][gG]|[uU][nN][iI][oO][nN]|[eE][xX][eE][cC]|[sS][eE][lL][eE][cC][tT]|[dD][eE][lL][eE][tT][eE]|[dD][rR][oO][pP]|[dD][eE][cC][lL][aA][rR][eE]|[cC][rR][eE][aA][tT][eE]|[rR][eE][gG][eE][xX][pP]|[iI][nN][sS][eE][rR][tT])([[:blank:]\x00-\x37]|\/\*.*?\*\/|[\[(])+[a-zA-Z&]{2,}/ - | /[\?&][^[:blank:]\x00-\x37]+?=[^\.]*?([cC][hH][aA][rR]|[aA][sS][cC][iI][iI]|[sS][uU][bB][sS][tT][rR][iI][nN][gG]|[tT][rR][uU][nN][cC][aA][tT][eE]|[vV][eE][rR][sS][iI][oO][nN]|[lL][eE][nN][gG][tT][hH])\(/ - | /\/\*![[:digit:]]{5}.*?\*\// &redef; - - ## A hook that can be used to prevent specific requests from being counted - ## as an injection attempt. Use a 'break' statement to exit the hook - ## early and ignore the request. - global HTTP::sqli_policy: hook(c: connection, method: string, unescaped_URI: string); -} - -function format_sqli_samples(samples: vector of SumStats::Observation): string - { - local ret = "SQL Injection samples\n---------------------"; - for ( i in samples ) - ret += "\n" + samples[i]$str; - return ret; - } - -event bro_init() &priority=3 - { - # Add filters to the metrics so that the metrics framework knows how to - # determine when it looks like an actual attack and how to respond when - # thresholds are crossed. 
- local r1: SumStats::Reducer = [$stream="http.sqli.attacker", $apply=set(SumStats::SUM, SumStats::SAMPLE), $num_samples=collect_SQLi_samples]; - SumStats::create([$name="detect-sqli-attackers", - $epoch=sqli_requests_interval, - $reducers=set(r1), - $threshold_val(key: SumStats::Key, result: SumStats::Result) = - { - return result["http.sqli.attacker"]$sum; - }, - $threshold=sqli_requests_threshold, - $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = - { - local r = result["http.sqli.attacker"]; - NOTICE([$note=SQL_Injection_Attacker, - $msg="An SQL injection attacker was discovered!", - $email_body_sections=vector(format_sqli_samples(r$samples)), - $src=key$host, - $identifier=cat(key$host)]); - }]); - - local r2: SumStats::Reducer = [$stream="http.sqli.victim", $apply=set(SumStats::SUM, SumStats::SAMPLE), $num_samples=collect_SQLi_samples]; - SumStats::create([$name="detect-sqli-victims", - $epoch=sqli_requests_interval, - $reducers=set(r2), - $threshold_val(key: SumStats::Key, result: SumStats::Result) = - { - return result["http.sqli.victim"]$sum; - }, - $threshold=sqli_requests_threshold, - $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = - { - local r = result["http.sqli.victim"]; - NOTICE([$note=SQL_Injection_Victim, - $msg="An SQL injection victim was discovered!", - $email_body_sections=vector(format_sqli_samples(r$samples)), - $src=key$host, - $identifier=cat(key$host)]); - }]); - } - -event http_request(c: connection, method: string, original_URI: string, - unescaped_URI: string, version: string) &priority=3 - { - if ( ! hook HTTP::sqli_policy(c, method, unescaped_URI) ) - return; - - if ( match_sql_injection_uri in unescaped_URI ) - { - add c$http$tags[URI_SQLI]; - - SumStats::observe("http.sqli.attacker", [$host=c$id$orig_h], [$str=original_URI]); - SumStats::observe("http.sqli.victim", [$host=c$id$resp_h], [$str=original_URI]); - } - } diff --git a/scripts/policy/protocols/http/detect-sqli.zeek b/scripts/policy/protocols/http/detect-sqli.zeek new file mode 100644 index 0000000000..5baf6b89ab --- /dev/null +++ b/scripts/policy/protocols/http/detect-sqli.zeek @@ -0,0 +1,127 @@ +##! SQL injection attack detection in HTTP. + +@load base/frameworks/notice +@load base/frameworks/sumstats +@load base/protocols/http + +module HTTP; + +export { + redef enum Notice::Type += { + ## Indicates that a host performing SQL injection attacks was + ## detected. + SQL_Injection_Attacker, + ## Indicates that a host was seen to have SQL injection attacks + ## against it. This is tracked by IP address as opposed to + ## hostname. + SQL_Injection_Victim, + }; + + redef enum Tags += { + ## Indicator of a URI based SQL injection attack. + URI_SQLI, + ## Indicator of client body based SQL injection attack. This is + ## typically the body content of a POST request. Not implemented + ## yet. + POST_SQLI, + ## Indicator of a cookie based SQL injection attack. Not + ## implemented yet. + COOKIE_SQLI, + }; + + ## Defines the threshold that determines if an SQL injection attack + ## is ongoing based on the number of requests that appear to be SQL + ## injection attacks. + const sqli_requests_threshold: double = 50.0 &redef; + + ## Interval at which to watch for the + ## :zeek:id:`HTTP::sqli_requests_threshold` variable to be crossed. + ## At the end of each interval the counter is reset. + const sqli_requests_interval = 5min &redef; + + ## Collecting samples will add extra data to notice emails + ## by collecting some sample SQL injection url paths. 
Disable + ## sample collection by setting this value to 0. + const collect_SQLi_samples = 5 &redef; + + ## Regular expression is used to match URI based SQL injections. + const match_sql_injection_uri = + /[\?&][^[:blank:]\x00-\x37\|]+?=[\-[:alnum:]%]+([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x37]|\/\*.*?\*\/|\)?;)+.*?([hH][aA][vV][iI][nN][gG]|[uU][nN][iI][oO][nN]|[eE][xX][eE][cC]|[sS][eE][lL][eE][cC][tT]|[dD][eE][lL][eE][tT][eE]|[dD][rR][oO][pP]|[dD][eE][cC][lL][aA][rR][eE]|[cC][rR][eE][aA][tT][eE]|[iI][nN][sS][eE][rR][tT])([[:blank:]\x00-\x37]|\/\*.*?\*\/)+/ + | /[\?&][^[:blank:]\x00-\x37\|]+?=[\-0-9%]+([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x37]|\/\*.*?\*\/|\)?;)+([xX]?[oO][rR]|[nN]?[aA][nN][dD])([[:blank:]\x00-\x37]|\/\*.*?\*\/)+['"]?(([^a-zA-Z&]+)?=|[eE][xX][iI][sS][tT][sS])/ + | /[\?&][^[:blank:]\x00-\x37]+?=[\-0-9%]*([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]([[:blank:]\x00-\x37]|\/\*.*?\*\/)*(-|=|\+|\|\|)([[:blank:]\x00-\x37]|\/\*.*?\*\/)*([0-9]|\(?[cC][oO][nN][vV][eE][rR][tT]|[cC][aA][sS][tT])/ + | /[\?&][^[:blank:]\x00-\x37\|]+?=([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]([[:blank:]\x00-\x37]|\/\*.*?\*\/|;)*([xX]?[oO][rR]|[nN]?[aA][nN][dD]|[hH][aA][vV][iI][nN][gG]|[uU][nN][iI][oO][nN]|[eE][xX][eE][cC]|[sS][eE][lL][eE][cC][tT]|[dD][eE][lL][eE][tT][eE]|[dD][rR][oO][pP]|[dD][eE][cC][lL][aA][rR][eE]|[cC][rR][eE][aA][tT][eE]|[rR][eE][gG][eE][xX][pP]|[iI][nN][sS][eE][rR][tT])([[:blank:]\x00-\x37]|\/\*.*?\*\/|[\[(])+[a-zA-Z&]{2,}/ + | /[\?&][^[:blank:]\x00-\x37]+?=[^\.]*?([cC][hH][aA][rR]|[aA][sS][cC][iI][iI]|[sS][uU][bB][sS][tT][rR][iI][nN][gG]|[tT][rR][uU][nN][cC][aA][tT][eE]|[vV][eE][rR][sS][iI][oO][nN]|[lL][eE][nN][gG][tT][hH])\(/ + | /\/\*![[:digit:]]{5}.*?\*\// &redef; + + ## A hook that can be used to prevent specific requests from being counted + ## as an injection attempt. Use a 'break' statement to exit the hook + ## early and ignore the request. + global HTTP::sqli_policy: hook(c: connection, method: string, unescaped_URI: string); +} + +function format_sqli_samples(samples: vector of SumStats::Observation): string + { + local ret = "SQL Injection samples\n---------------------"; + for ( i in samples ) + ret += "\n" + samples[i]$str; + return ret; + } + +event zeek_init() &priority=3 + { + # Add filters to the metrics so that the metrics framework knows how to + # determine when it looks like an actual attack and how to respond when + # thresholds are crossed. 
+ local r1: SumStats::Reducer = [$stream="http.sqli.attacker", $apply=set(SumStats::SUM, SumStats::SAMPLE), $num_samples=collect_SQLi_samples]; + SumStats::create([$name="detect-sqli-attackers", + $epoch=sqli_requests_interval, + $reducers=set(r1), + $threshold_val(key: SumStats::Key, result: SumStats::Result) = + { + return result["http.sqli.attacker"]$sum; + }, + $threshold=sqli_requests_threshold, + $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = + { + local r = result["http.sqli.attacker"]; + NOTICE([$note=SQL_Injection_Attacker, + $msg="An SQL injection attacker was discovered!", + $email_body_sections=vector(format_sqli_samples(r$samples)), + $src=key$host, + $identifier=cat(key$host)]); + }]); + + local r2: SumStats::Reducer = [$stream="http.sqli.victim", $apply=set(SumStats::SUM, SumStats::SAMPLE), $num_samples=collect_SQLi_samples]; + SumStats::create([$name="detect-sqli-victims", + $epoch=sqli_requests_interval, + $reducers=set(r2), + $threshold_val(key: SumStats::Key, result: SumStats::Result) = + { + return result["http.sqli.victim"]$sum; + }, + $threshold=sqli_requests_threshold, + $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = + { + local r = result["http.sqli.victim"]; + NOTICE([$note=SQL_Injection_Victim, + $msg="An SQL injection victim was discovered!", + $email_body_sections=vector(format_sqli_samples(r$samples)), + $src=key$host, + $identifier=cat(key$host)]); + }]); + } + +event http_request(c: connection, method: string, original_URI: string, + unescaped_URI: string, version: string) &priority=3 + { + if ( ! hook HTTP::sqli_policy(c, method, unescaped_URI) ) + return; + + if ( match_sql_injection_uri in unescaped_URI ) + { + add c$http$tags[URI_SQLI]; + + SumStats::observe("http.sqli.attacker", [$host=c$id$orig_h], [$str=original_URI]); + SumStats::observe("http.sqli.victim", [$host=c$id$resp_h], [$str=original_URI]); + } + } diff --git a/scripts/policy/protocols/http/detect-webapps.bro b/scripts/policy/protocols/http/detect-webapps.zeek similarity index 100% rename from scripts/policy/protocols/http/detect-webapps.bro rename to scripts/policy/protocols/http/detect-webapps.zeek diff --git a/scripts/policy/protocols/http/header-names.bro b/scripts/policy/protocols/http/header-names.zeek similarity index 100% rename from scripts/policy/protocols/http/header-names.bro rename to scripts/policy/protocols/http/header-names.zeek diff --git a/scripts/policy/protocols/http/software-browser-plugins.bro b/scripts/policy/protocols/http/software-browser-plugins.zeek similarity index 100% rename from scripts/policy/protocols/http/software-browser-plugins.bro rename to scripts/policy/protocols/http/software-browser-plugins.zeek diff --git a/scripts/policy/protocols/http/software.bro b/scripts/policy/protocols/http/software.zeek similarity index 100% rename from scripts/policy/protocols/http/software.bro rename to scripts/policy/protocols/http/software.zeek diff --git a/scripts/policy/protocols/http/var-extraction-cookies.bro b/scripts/policy/protocols/http/var-extraction-cookies.zeek similarity index 100% rename from scripts/policy/protocols/http/var-extraction-cookies.bro rename to scripts/policy/protocols/http/var-extraction-cookies.zeek diff --git a/scripts/policy/protocols/http/var-extraction-uri.bro b/scripts/policy/protocols/http/var-extraction-uri.zeek similarity index 100% rename from scripts/policy/protocols/http/var-extraction-uri.bro rename to scripts/policy/protocols/http/var-extraction-uri.zeek diff --git 
a/scripts/policy/protocols/krb/ticket-logging.bro b/scripts/policy/protocols/krb/ticket-logging.zeek similarity index 100% rename from scripts/policy/protocols/krb/ticket-logging.bro rename to scripts/policy/protocols/krb/ticket-logging.zeek diff --git a/scripts/policy/protocols/modbus/known-masters-slaves.bro b/scripts/policy/protocols/modbus/known-masters-slaves.bro deleted file mode 100644 index a49e1f81e4..0000000000 --- a/scripts/policy/protocols/modbus/known-masters-slaves.bro +++ /dev/null @@ -1,58 +0,0 @@ -##! Script for tracking known Modbus masters and slaves. -##! -##! .. todo:: This script needs a lot of work. What might be more interesting -##! is to track master/slave relationships based on commands sent and -##! successful (non-exception) responses. - -@load base/protocols/modbus - -module Known; - -export { - redef enum Log::ID += { MODBUS_LOG }; - - type ModbusDeviceType: enum { - MODBUS_MASTER, - MODBUS_SLAVE, - }; - - type ModbusInfo: record { - ## The time the device was discovered. - ts: time &log; - ## The IP address of the host. - host: addr &log; - ## The type of device being tracked. - device_type: ModbusDeviceType &log; - }; - - ## The Modbus nodes being tracked. - global modbus_nodes: set[addr, ModbusDeviceType] &create_expire=1day &redef; - - ## Event that can be handled to access the loggable record as it is sent - ## on to the logging framework. - global log_known_modbus: event(rec: ModbusInfo); -} - -event bro_init() &priority=5 - { - Log::create_stream(Known::MODBUS_LOG, [$columns=ModbusInfo, $ev=log_known_modbus, $path="known_modbus"]); - } - -event modbus_message(c: connection, headers: ModbusHeaders, is_orig: bool) - { - local master = c$id$orig_h; - local slave = c$id$resp_h; - - if ( [master, MODBUS_MASTER] !in modbus_nodes ) - { - add modbus_nodes[master, MODBUS_MASTER]; - Log::write(MODBUS_LOG, [$ts=network_time(), $host=master, $device_type=MODBUS_MASTER]); - } - - if ( [slave, MODBUS_SLAVE] !in modbus_nodes ) - { - add modbus_nodes[slave, MODBUS_SLAVE]; - Log::write(MODBUS_LOG, [$ts=network_time(), $host=slave, $device_type=MODBUS_SLAVE]); - } - - } diff --git a/scripts/policy/protocols/modbus/known-masters-slaves.zeek b/scripts/policy/protocols/modbus/known-masters-slaves.zeek new file mode 100644 index 0000000000..4ce56570d8 --- /dev/null +++ b/scripts/policy/protocols/modbus/known-masters-slaves.zeek @@ -0,0 +1,58 @@ +##! Script for tracking known Modbus masters and slaves. +##! +##! .. todo:: This script needs a lot of work. What might be more interesting +##! is to track master/slave relationships based on commands sent and +##! successful (non-exception) responses. + +@load base/protocols/modbus + +module Known; + +export { + redef enum Log::ID += { MODBUS_LOG }; + + type ModbusDeviceType: enum { + MODBUS_MASTER, + MODBUS_SLAVE, + }; + + type ModbusInfo: record { + ## The time the device was discovered. + ts: time &log; + ## The IP address of the host. + host: addr &log; + ## The type of device being tracked. + device_type: ModbusDeviceType &log; + }; + + ## The Modbus nodes being tracked. + global modbus_nodes: set[addr, ModbusDeviceType] &create_expire=1day &redef; + + ## Event that can be handled to access the loggable record as it is sent + ## on to the logging framework. 
+ global log_known_modbus: event(rec: ModbusInfo); +} + +event zeek_init() &priority=5 + { + Log::create_stream(Known::MODBUS_LOG, [$columns=ModbusInfo, $ev=log_known_modbus, $path="known_modbus"]); + } + +event modbus_message(c: connection, headers: ModbusHeaders, is_orig: bool) + { + local master = c$id$orig_h; + local slave = c$id$resp_h; + + if ( [master, MODBUS_MASTER] !in modbus_nodes ) + { + add modbus_nodes[master, MODBUS_MASTER]; + Log::write(MODBUS_LOG, [$ts=network_time(), $host=master, $device_type=MODBUS_MASTER]); + } + + if ( [slave, MODBUS_SLAVE] !in modbus_nodes ) + { + add modbus_nodes[slave, MODBUS_SLAVE]; + Log::write(MODBUS_LOG, [$ts=network_time(), $host=slave, $device_type=MODBUS_SLAVE]); + } + + } diff --git a/scripts/policy/protocols/modbus/track-memmap.bro b/scripts/policy/protocols/modbus/track-memmap.bro deleted file mode 100644 index 9a6e49e214..0000000000 --- a/scripts/policy/protocols/modbus/track-memmap.bro +++ /dev/null @@ -1,106 +0,0 @@ -##! This script tracks the memory map of holding (read/write) registers and logs -##! changes as they are discovered. -##! -##! .. todo:: Not all register read and write functions are supported yet. - -@load base/protocols/modbus -@load base/utils/directions-and-hosts - -module Modbus; - -export { - redef enum Log::ID += { Modbus::REGISTER_CHANGE_LOG }; - - ## The hosts that should have memory mapping enabled. - option track_memmap: Host = ALL_HOSTS; - - type MemmapInfo: record { - ## Timestamp for the detected register change. - ts: time &log; - ## Unique ID for the connection. - uid: string &log; - ## Connection ID. - id: conn_id &log; - ## The device memory offset. - register: count &log; - ## The old value stored in the register. - old_val: count &log; - ## The new value stored in the register. - new_val: count &log; - ## The time delta between when the *old_val* and *new_val* were - ## seen. - delta: interval &log; - }; - - type RegisterValue: record { - last_set: time; - value: count; - }; - - ## Indexed on the device register value and yielding the register value. - type Registers: table[count] of RegisterValue; - - ## The memory map of slaves is tracked with this variable. - global device_registers: table[addr] of Registers; - - ## This event is generated every time a register is seen to be different - ## than it was previously seen to be. - global changed_register: event(c: connection, register: count, old_val: count, new_val: count, delta: interval); -} - -redef record Modbus::Info += { - track_address: count &default=0; -}; - -event bro_init() &priority=5 - { - Log::create_stream(Modbus::REGISTER_CHANGE_LOG, [$columns=MemmapInfo, $path="modbus_register_change"]); - } - -event modbus_read_holding_registers_request(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) - { - c$modbus$track_address = start_address+1; - } - -event modbus_read_holding_registers_response(c: connection, headers: ModbusHeaders, registers: ModbusRegisters) - { - local slave = c$id$resp_h; - - if ( ! 
addr_matches_host(slave, track_memmap ) ) - return; - - if ( slave !in device_registers ) - device_registers[slave] = table(); - - local slave_regs = device_registers[slave]; - for ( i in registers ) - { - if ( c$modbus$track_address in slave_regs ) - { - if ( slave_regs[c$modbus$track_address]$value != registers[i] ) - { - local delta = network_time() - slave_regs[c$modbus$track_address]$last_set; - event Modbus::changed_register(c, c$modbus$track_address, - slave_regs[c$modbus$track_address]$value, registers[i], - delta); - - slave_regs[c$modbus$track_address]$last_set = network_time(); - slave_regs[c$modbus$track_address]$value = registers[i]; - } - } - else - { - local tmp_reg: RegisterValue = [$last_set=network_time(), $value=registers[i]]; - slave_regs[c$modbus$track_address] = tmp_reg; - } - - ++c$modbus$track_address; - } - } - -event Modbus::changed_register(c: connection, register: count, old_val: count, new_val: count, delta: interval) - { - local rec: MemmapInfo = [$ts=network_time(), $uid=c$uid, $id=c$id, - $register=register, $old_val=old_val, $new_val=new_val, $delta=delta]; - Log::write(REGISTER_CHANGE_LOG, rec); - } diff --git a/scripts/policy/protocols/modbus/track-memmap.zeek b/scripts/policy/protocols/modbus/track-memmap.zeek new file mode 100644 index 0000000000..da2be29745 --- /dev/null +++ b/scripts/policy/protocols/modbus/track-memmap.zeek @@ -0,0 +1,106 @@ +##! This script tracks the memory map of holding (read/write) registers and logs +##! changes as they are discovered. +##! +##! .. todo:: Not all register read and write functions are supported yet. + +@load base/protocols/modbus +@load base/utils/directions-and-hosts + +module Modbus; + +export { + redef enum Log::ID += { Modbus::REGISTER_CHANGE_LOG }; + + ## The hosts that should have memory mapping enabled. + option track_memmap: Host = ALL_HOSTS; + + type MemmapInfo: record { + ## Timestamp for the detected register change. + ts: time &log; + ## Unique ID for the connection. + uid: string &log; + ## Connection ID. + id: conn_id &log; + ## The device memory offset. + register: count &log; + ## The old value stored in the register. + old_val: count &log; + ## The new value stored in the register. + new_val: count &log; + ## The time delta between when the *old_val* and *new_val* were + ## seen. + delta: interval &log; + }; + + type RegisterValue: record { + last_set: time; + value: count; + }; + + ## Indexed on the device register value and yielding the register value. + type Registers: table[count] of RegisterValue; + + ## The memory map of slaves is tracked with this variable. + global device_registers: table[addr] of Registers; + + ## This event is generated every time a register is seen to be different + ## than it was previously seen to be. + global changed_register: event(c: connection, register: count, old_val: count, new_val: count, delta: interval); +} + +redef record Modbus::Info += { + track_address: count &default=0; +}; + +event zeek_init() &priority=5 + { + Log::create_stream(Modbus::REGISTER_CHANGE_LOG, [$columns=MemmapInfo, $path="modbus_register_change"]); + } + +event modbus_read_holding_registers_request(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) + { + c$modbus$track_address = start_address+1; + } + +event modbus_read_holding_registers_response(c: connection, headers: ModbusHeaders, registers: ModbusRegisters) + { + local slave = c$id$resp_h; + + if ( ! 
addr_matches_host(slave, track_memmap ) ) + return; + + if ( slave !in device_registers ) + device_registers[slave] = table(); + + local slave_regs = device_registers[slave]; + for ( i in registers ) + { + if ( c$modbus$track_address in slave_regs ) + { + if ( slave_regs[c$modbus$track_address]$value != registers[i] ) + { + local delta = network_time() - slave_regs[c$modbus$track_address]$last_set; + event Modbus::changed_register(c, c$modbus$track_address, + slave_regs[c$modbus$track_address]$value, registers[i], + delta); + + slave_regs[c$modbus$track_address]$last_set = network_time(); + slave_regs[c$modbus$track_address]$value = registers[i]; + } + } + else + { + local tmp_reg: RegisterValue = [$last_set=network_time(), $value=registers[i]]; + slave_regs[c$modbus$track_address] = tmp_reg; + } + + ++c$modbus$track_address; + } + } + +event Modbus::changed_register(c: connection, register: count, old_val: count, new_val: count, delta: interval) + { + local rec: MemmapInfo = [$ts=network_time(), $uid=c$uid, $id=c$id, + $register=register, $old_val=old_val, $new_val=new_val, $delta=delta]; + Log::write(REGISTER_CHANGE_LOG, rec); + } diff --git a/scripts/policy/protocols/mysql/software.bro b/scripts/policy/protocols/mysql/software.zeek similarity index 100% rename from scripts/policy/protocols/mysql/software.bro rename to scripts/policy/protocols/mysql/software.zeek diff --git a/scripts/policy/protocols/rdp/indicate_ssl.bro b/scripts/policy/protocols/rdp/indicate_ssl.zeek similarity index 100% rename from scripts/policy/protocols/rdp/indicate_ssl.bro rename to scripts/policy/protocols/rdp/indicate_ssl.zeek diff --git a/scripts/policy/protocols/smb/__load__.bro b/scripts/policy/protocols/smb/__load__.bro deleted file mode 100644 index 9e826f7fd6..0000000000 --- a/scripts/policy/protocols/smb/__load__.bro +++ /dev/null @@ -1,3 +0,0 @@ -@deprecated "Use '@load base/protocols/smb' instead" - -@load base/protocols/smb diff --git a/scripts/policy/protocols/smb/log-cmds.bro b/scripts/policy/protocols/smb/log-cmds.bro deleted file mode 100644 index 53e309c5ea..0000000000 --- a/scripts/policy/protocols/smb/log-cmds.bro +++ /dev/null @@ -1,82 +0,0 @@ -##! Load this script to generate an SMB command log, smb_cmd.log. -##! This is primarily useful for debugging. - -@load base/protocols/smb - -module SMB; - -export { - redef enum Log::ID += { - CMD_LOG, - }; - - ## The server response statuses which are *not* logged. - option ignored_command_statuses: set[string] = { - "MORE_PROCESSING_REQUIRED", - }; -} - -## Internal use only. -## Some commands shouldn't be logged by the smb1_message event. -const deferred_logging_cmds: set[string] = { - "NEGOTIATE", - "READ_ANDX", - "SESSION_SETUP_ANDX", - "TREE_CONNECT_ANDX", -}; - -event bro_init() &priority=5 - { - Log::create_stream(SMB::CMD_LOG, [$columns=SMB::CmdInfo, $path="smb_cmd"]); - } - -event smb1_message(c: connection, hdr: SMB1::Header, is_orig: bool) &priority=-5 - { - if ( is_orig ) - return; - - if ( c$smb_state$current_cmd$status in SMB::ignored_command_statuses ) - return; - - if ( c$smb_state$current_cmd$command in SMB::deferred_logging_cmds ) - return; - - Log::write(SMB::CMD_LOG, c$smb_state$current_cmd); - } - -event smb1_error(c: connection, hdr: SMB1::Header, is_orig: bool) - { - if ( is_orig ) - return; - - # This is for deferred commands only. 
- # The more specific messages won't fire for errors - - if ( c$smb_state$current_cmd$status in SMB::ignored_command_statuses ) - return; - - if ( c$smb_state$current_cmd$command !in SMB::deferred_logging_cmds ) - return; - - Log::write(SMB::CMD_LOG, c$smb_state$current_cmd); - } - -event smb2_message(c: connection, hdr: SMB2::Header, is_orig: bool) &priority=-5 - { - if ( is_orig ) - return; - - # If the command that is being looked at right now was - # marked as PENDING, then we'll skip all of this and wait - # for a reply that isn't marked pending. - if ( c$smb_state$current_cmd$status == "PENDING" ) - return; - - if ( c$smb_state$current_cmd$status in SMB::ignored_command_statuses ) - return; - - if ( c$smb_state$current_cmd$command in SMB::deferred_logging_cmds ) - return; - - Log::write(SMB::CMD_LOG, c$smb_state$current_cmd); - } diff --git a/scripts/policy/protocols/smb/log-cmds.zeek b/scripts/policy/protocols/smb/log-cmds.zeek new file mode 100644 index 0000000000..88108276dc --- /dev/null +++ b/scripts/policy/protocols/smb/log-cmds.zeek @@ -0,0 +1,82 @@ +##! Load this script to generate an SMB command log, smb_cmd.log. +##! This is primarily useful for debugging. + +@load base/protocols/smb + +module SMB; + +export { + redef enum Log::ID += { + CMD_LOG, + }; + + ## The server response statuses which are *not* logged. + option ignored_command_statuses: set[string] = { + "MORE_PROCESSING_REQUIRED", + }; +} + +## Internal use only. +## Some commands shouldn't be logged by the smb1_message event. +const deferred_logging_cmds: set[string] = { + "NEGOTIATE", + "READ_ANDX", + "SESSION_SETUP_ANDX", + "TREE_CONNECT_ANDX", +}; + +event zeek_init() &priority=5 + { + Log::create_stream(SMB::CMD_LOG, [$columns=SMB::CmdInfo, $path="smb_cmd"]); + } + +event smb1_message(c: connection, hdr: SMB1::Header, is_orig: bool) &priority=-5 + { + if ( is_orig ) + return; + + if ( c$smb_state$current_cmd$status in SMB::ignored_command_statuses ) + return; + + if ( c$smb_state$current_cmd$command in SMB::deferred_logging_cmds ) + return; + + Log::write(SMB::CMD_LOG, c$smb_state$current_cmd); + } + +event smb1_error(c: connection, hdr: SMB1::Header, is_orig: bool) + { + if ( is_orig ) + return; + + # This is for deferred commands only. + # The more specific messages won't fire for errors + + if ( c$smb_state$current_cmd$status in SMB::ignored_command_statuses ) + return; + + if ( c$smb_state$current_cmd$command !in SMB::deferred_logging_cmds ) + return; + + Log::write(SMB::CMD_LOG, c$smb_state$current_cmd); + } + +event smb2_message(c: connection, hdr: SMB2::Header, is_orig: bool) &priority=-5 + { + if ( is_orig ) + return; + + # If the command that is being looked at right now was + # marked as PENDING, then we'll skip all of this and wait + # for a reply that isn't marked pending. 
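+	# (SMB2 servers may send an interim response with "PENDING" status for
+	# long-running requests; the definitive response for the same command
+	# arrives later and is the one that gets logged.)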
+ if ( c$smb_state$current_cmd$status == "PENDING" ) + return; + + if ( c$smb_state$current_cmd$status in SMB::ignored_command_statuses ) + return; + + if ( c$smb_state$current_cmd$command in SMB::deferred_logging_cmds ) + return; + + Log::write(SMB::CMD_LOG, c$smb_state$current_cmd); + } diff --git a/scripts/policy/protocols/smtp/blocklists.bro b/scripts/policy/protocols/smtp/blocklists.zeek similarity index 100% rename from scripts/policy/protocols/smtp/blocklists.bro rename to scripts/policy/protocols/smtp/blocklists.zeek diff --git a/scripts/policy/protocols/smtp/detect-suspicious-orig.bro b/scripts/policy/protocols/smtp/detect-suspicious-orig.bro deleted file mode 100644 index 58eecceb11..0000000000 --- a/scripts/policy/protocols/smtp/detect-suspicious-orig.bro +++ /dev/null @@ -1,52 +0,0 @@ -@load base/frameworks/notice/main -@load base/protocols/smtp/main - -module SMTP; - -export { - redef enum Notice::Type += { - Suspicious_Origination - }; - - ## Places where it's suspicious for mail to originate from represented - ## as all-capital, two character country codes (e.g., US). It requires - ## Bro to be built with GeoIP support. - option suspicious_origination_countries: set[string] = {}; - option suspicious_origination_networks: set[subnet] = {}; - -} - -event log_smtp(rec: Info) - { - local ip: addr; - local loc: geo_location; - if ( rec?$x_originating_ip ) - { - ip = rec$x_originating_ip; - loc = lookup_location(ip); - - if ( (loc?$country_code && - loc$country_code in suspicious_origination_countries) || - ip in suspicious_origination_networks ) - { - NOTICE([$note=Suspicious_Origination, - $msg=fmt("An email originated from %s (%s).", - loc?$country_code ? loc$country_code : "", ip), - $id=rec$id]); - } - } - if ( rec?$path ) - { - ip = rec$path[|rec$path|-1]; - loc = lookup_location(ip); - - if ( (loc?$country_code && - loc$country_code in suspicious_origination_countries) || - ip in suspicious_origination_networks ) - { - NOTICE([$note=Suspicious_Origination, - $msg=fmt("Based up Received headers, email originated from %s (%s).", loc?$country_code ? loc$country_code : "", ip), - $id=rec$id]); - } - } - } diff --git a/scripts/policy/protocols/smtp/detect-suspicious-orig.zeek b/scripts/policy/protocols/smtp/detect-suspicious-orig.zeek new file mode 100644 index 0000000000..12a9a0c312 --- /dev/null +++ b/scripts/policy/protocols/smtp/detect-suspicious-orig.zeek @@ -0,0 +1,52 @@ +@load base/frameworks/notice/main +@load base/protocols/smtp/main + +module SMTP; + +export { + redef enum Notice::Type += { + Suspicious_Origination + }; + + ## Places where it's suspicious for mail to originate from represented + ## as all-capital, two character country codes (e.g., US). It requires + ## Zeek to be built with GeoIP support. + option suspicious_origination_countries: set[string] = {}; + option suspicious_origination_networks: set[subnet] = {}; + +} + +event log_smtp(rec: Info) + { + local ip: addr; + local loc: geo_location; + if ( rec?$x_originating_ip ) + { + ip = rec$x_originating_ip; + loc = lookup_location(ip); + + if ( (loc?$country_code && + loc$country_code in suspicious_origination_countries) || + ip in suspicious_origination_networks ) + { + NOTICE([$note=Suspicious_Origination, + $msg=fmt("An email originated from %s (%s).", + loc?$country_code ? 
loc$country_code : "", ip), + $id=rec$id]); + } + } + if ( rec?$path ) + { + ip = rec$path[|rec$path|-1]; + loc = lookup_location(ip); + + if ( (loc?$country_code && + loc$country_code in suspicious_origination_countries) || + ip in suspicious_origination_networks ) + { + NOTICE([$note=Suspicious_Origination, + $msg=fmt("Based up Received headers, email originated from %s (%s).", loc?$country_code ? loc$country_code : "", ip), + $id=rec$id]); + } + } + } diff --git a/scripts/policy/protocols/smtp/entities-excerpt.bro b/scripts/policy/protocols/smtp/entities-excerpt.bro deleted file mode 100644 index f4ee2b07d5..0000000000 --- a/scripts/policy/protocols/smtp/entities-excerpt.bro +++ /dev/null @@ -1,34 +0,0 @@ -##! This script is for optionally adding a body excerpt to the SMTP -##! entities log. - -@load base/protocols/smtp/entities - -module SMTP; - -export { - redef record SMTP::Entity+= { - ## The entity body excerpt. - excerpt: string &log &default=""; - }; - - ## This is the default value for how much of the entity body should be - ## included for all MIME entities. The lesser of this value and - ## :bro:see:`default_file_bof_buffer_size` will be used. - option default_entity_excerpt_len = 0; -} - -event file_new(f: fa_file) &priority=5 - { - if ( ! f?$source ) return; - if ( f$source != "SMTP" ) return; - if ( ! f?$bof_buffer ) return; - if ( ! f?$conns ) return; - - for ( cid, c in f$conns ) - { - if ( ! c?$smtp ) next; - - if ( default_entity_excerpt_len > 0 ) - c$smtp$entity$excerpt = f$bof_buffer[0:default_entity_excerpt_len]; - } - } diff --git a/scripts/policy/protocols/smtp/entities-excerpt.zeek b/scripts/policy/protocols/smtp/entities-excerpt.zeek new file mode 100644 index 0000000000..4dad6d3e39 --- /dev/null +++ b/scripts/policy/protocols/smtp/entities-excerpt.zeek @@ -0,0 +1,34 @@ +##! This script is for optionally adding a body excerpt to the SMTP +##! entities log. + +@load base/protocols/smtp/entities + +module SMTP; + +export { + redef record SMTP::Entity+= { + ## The entity body excerpt. + excerpt: string &log &default=""; + }; + + ## This is the default value for how much of the entity body should be + ## included for all MIME entities. The lesser of this value and + ## :zeek:see:`default_file_bof_buffer_size` will be used. + option default_entity_excerpt_len = 0; +} + +event file_new(f: fa_file) &priority=5 + { + if ( ! f?$source ) return; + if ( f$source != "SMTP" ) return; + if ( ! f?$bof_buffer ) return; + if ( ! f?$conns ) return; + + for ( cid, c in f$conns ) + { + if ( ! c?$smtp ) next; + + if ( default_entity_excerpt_len > 0 ) + c$smtp$entity$excerpt = f$bof_buffer[0:default_entity_excerpt_len]; + } + } diff --git a/scripts/policy/protocols/smtp/software.bro b/scripts/policy/protocols/smtp/software.bro deleted file mode 100644 index e4333c10ef..0000000000 --- a/scripts/policy/protocols/smtp/software.bro +++ /dev/null @@ -1,82 +0,0 @@ -##! This script feeds software detected through email into the software -##! framework. Mail clients and webmail interfaces are the only thing -##! currently detected. -##! -##! TODO: -##! -##! * Find some heuristic to determine if email was sent through -##! a MS Exchange webmail interface as opposed to a desktop client. - -@load base/frameworks/software/main -@load base/protocols/smtp/main - -module SMTP; - -export { - redef enum Software::Type += { - MAIL_CLIENT, - MAIL_SERVER, - WEBMAIL_SERVER - }; - - redef record Info += { - ## Boolean indicator of if the message was sent through a - ## webmail interface. 
- is_webmail: bool &log &default=F; - }; - - ## Assuming that local mail servers are more trustworthy with the - ## headers they insert into message envelopes, this default makes Bro - ## not attempt to detect software in inbound message bodies. If mail - ## coming in from external addresses gives incorrect data in - ## the Received headers, it could populate your SOFTWARE logging stream - ## with incorrect data. If you would like to detect mail clients for - ## incoming messages (network traffic originating from a non-local - ## address), set this variable to EXTERNAL_HOSTS or ALL_HOSTS. - option detect_clients_in_messages_from = LOCAL_HOSTS; - - ## A regular expression to match USER-AGENT-like headers to find if a - ## message was sent with a webmail interface. - option webmail_user_agents = - /^iPlanet Messenger/ - | /^Sun Java\(tm\) System Messenger Express/ - | /\(IMP\)/ # Horde Internet Messaging Program - | /^SquirrelMail/ - | /^NeoMail/ - | /ZimbraWebClient/; -} - -event mime_one_header(c: connection, h: mime_header_rec) &priority=4 - { - if ( ! c?$smtp ) return; - if ( h$name == "USER-AGENT" && webmail_user_agents in c$smtp$user_agent ) - c$smtp$is_webmail = T; - } - -event log_smtp(rec: Info) - { - # If the MUA provided a user-agent string, kick over to the software framework. - # This is done here so that the "Received: from" path has a chance to be - # built since that's where the IP address is pulled from. - if ( rec?$user_agent ) - { - local s_type = MAIL_CLIENT; - local client_ip = rec$path[|rec$path|-1]; - if ( rec$is_webmail ) - { - s_type = WEBMAIL_SERVER; - # If the earliest received header indicates that the connection - # was via HTTP, then that likely means the actual mail software - # is installed on the second address in the path. - if ( rec?$first_received && /via HTTP/ in rec$first_received ) - client_ip = rec$path[|rec$path|-2]; - } - - if ( addr_matches_host(rec$id$orig_h, - detect_clients_in_messages_from) ) - { - Software::found(rec$id, [$unparsed_version=rec$user_agent, $host=client_ip, $software_type=s_type]); - } - } - } - diff --git a/scripts/policy/protocols/smtp/software.zeek b/scripts/policy/protocols/smtp/software.zeek new file mode 100644 index 0000000000..69cfdf4c57 --- /dev/null +++ b/scripts/policy/protocols/smtp/software.zeek @@ -0,0 +1,82 @@ +##! This script feeds software detected through email into the software +##! framework. Mail clients and webmail interfaces are the only thing +##! currently detected. +##! +##! TODO: +##! +##! * Find some heuristic to determine if email was sent through +##! a MS Exchange webmail interface as opposed to a desktop client. + +@load base/frameworks/software/main +@load base/protocols/smtp/main + +module SMTP; + +export { + redef enum Software::Type += { + MAIL_CLIENT, + MAIL_SERVER, + WEBMAIL_SERVER + }; + + redef record Info += { + ## Boolean indicator of if the message was sent through a + ## webmail interface. + is_webmail: bool &log &default=F; + }; + + ## Assuming that local mail servers are more trustworthy with the + ## headers they insert into message envelopes, this default makes Zeek + ## not attempt to detect software in inbound message bodies. If mail + ## coming in from external addresses gives incorrect data in + ## the Received headers, it could populate your SOFTWARE logging stream + ## with incorrect data. If you would like to detect mail clients for + ## incoming messages (network traffic originating from a non-local + ## address), set this variable to EXTERNAL_HOSTS or ALL_HOSTS. 
+ option detect_clients_in_messages_from = LOCAL_HOSTS; + + ## A regular expression to match USER-AGENT-like headers to find if a + ## message was sent with a webmail interface. + option webmail_user_agents = + /^iPlanet Messenger/ + | /^Sun Java\(tm\) System Messenger Express/ + | /\(IMP\)/ # Horde Internet Messaging Program + | /^SquirrelMail/ + | /^NeoMail/ + | /ZimbraWebClient/; +} + +event mime_one_header(c: connection, h: mime_header_rec) &priority=4 + { + if ( ! c?$smtp ) return; + if ( h$name == "USER-AGENT" && webmail_user_agents in c$smtp$user_agent ) + c$smtp$is_webmail = T; + } + +event log_smtp(rec: Info) + { + # If the MUA provided a user-agent string, kick over to the software framework. + # This is done here so that the "Received: from" path has a chance to be + # built since that's where the IP address is pulled from. + if ( rec?$user_agent ) + { + local s_type = MAIL_CLIENT; + local client_ip = rec$path[|rec$path|-1]; + if ( rec$is_webmail ) + { + s_type = WEBMAIL_SERVER; + # If the earliest received header indicates that the connection + # was via HTTP, then that likely means the actual mail software + # is installed on the second address in the path. + if ( rec?$first_received && /via HTTP/ in rec$first_received ) + client_ip = rec$path[|rec$path|-2]; + } + + if ( addr_matches_host(rec$id$orig_h, + detect_clients_in_messages_from) ) + { + Software::found(rec$id, [$unparsed_version=rec$user_agent, $host=client_ip, $software_type=s_type]); + } + } + } + diff --git a/scripts/policy/protocols/ssh/detect-bruteforcing.bro b/scripts/policy/protocols/ssh/detect-bruteforcing.bro deleted file mode 100644 index 55687e2afd..0000000000 --- a/scripts/policy/protocols/ssh/detect-bruteforcing.bro +++ /dev/null @@ -1,90 +0,0 @@ -##! Detect hosts which are doing password guessing attacks and/or password -##! bruteforcing over SSH. - -@load base/protocols/ssh -@load base/frameworks/sumstats -@load base/frameworks/notice -@load base/frameworks/intel - -module SSH; - -export { - redef enum Notice::Type += { - ## Indicates that a host has been identified as crossing the - ## :bro:id:`SSH::password_guesses_limit` threshold with - ## failed logins. - Password_Guessing, - ## Indicates that a host previously identified as a "password - ## guesser" has now had a successful login - ## attempt. This is not currently implemented. - Login_By_Password_Guesser, - }; - - redef enum Intel::Where += { - ## An indicator of the login for the intel framework. - SSH::SUCCESSFUL_LOGIN, - }; - - ## The number of failed SSH connections before a host is designated as - ## guessing passwords. - const password_guesses_limit: double = 30 &redef; - - ## The amount of time to remember presumed non-successful logins to - ## build a model of a password guesser. - const guessing_timeout = 30 mins &redef; - - ## This value can be used to exclude hosts or entire networks from being - ## tracked as potential "guessers". The index represents - ## client subnets and the yield value represents server subnets. 
- const ignore_guessers: table[subnet] of subnet &redef; -} - -event bro_init() - { - local r1: SumStats::Reducer = [$stream="ssh.login.failure", $apply=set(SumStats::SUM, SumStats::SAMPLE), $num_samples=5]; - SumStats::create([$name="detect-ssh-bruteforcing", - $epoch=guessing_timeout, - $reducers=set(r1), - $threshold_val(key: SumStats::Key, result: SumStats::Result) = - { - return result["ssh.login.failure"]$sum; - }, - $threshold=password_guesses_limit, - $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = - { - local r = result["ssh.login.failure"]; - local sub_msg = fmt("Sampled servers: "); - local samples = r$samples; - for ( i in samples ) - { - if ( samples[i]?$str ) - sub_msg = fmt("%s%s %s", sub_msg, i==0 ? "":",", samples[i]$str); - } - # Generate the notice. - NOTICE([$note=Password_Guessing, - $msg=fmt("%s appears to be guessing SSH passwords (seen in %d connections).", key$host, r$num), - $sub=sub_msg, - $src=key$host, - $identifier=cat(key$host)]); - }]); - } - -event ssh_auth_successful(c: connection, auth_method_none: bool) - { - local id = c$id; - - Intel::seen([$host=id$orig_h, - $conn=c, - $where=SSH::SUCCESSFUL_LOGIN]); - } - -event ssh_auth_failed(c: connection) - { - local id = c$id; - - # Add data to the FAILED_LOGIN metric unless this connection should - # be ignored. - if ( ! (id$orig_h in ignore_guessers && - id$resp_h in ignore_guessers[id$orig_h]) ) - SumStats::observe("ssh.login.failure", [$host=id$orig_h], [$str=cat(id$resp_h)]); - } diff --git a/scripts/policy/protocols/ssh/detect-bruteforcing.zeek b/scripts/policy/protocols/ssh/detect-bruteforcing.zeek new file mode 100644 index 0000000000..4368258b98 --- /dev/null +++ b/scripts/policy/protocols/ssh/detect-bruteforcing.zeek @@ -0,0 +1,90 @@ +##! Detect hosts which are doing password guessing attacks and/or password +##! bruteforcing over SSH. + +@load base/protocols/ssh +@load base/frameworks/sumstats +@load base/frameworks/notice +@load base/frameworks/intel + +module SSH; + +export { + redef enum Notice::Type += { + ## Indicates that a host has been identified as crossing the + ## :zeek:id:`SSH::password_guesses_limit` threshold with + ## failed logins. + Password_Guessing, + ## Indicates that a host previously identified as a "password + ## guesser" has now had a successful login + ## attempt. This is not currently implemented. + Login_By_Password_Guesser, + }; + + redef enum Intel::Where += { + ## An indicator of the login for the intel framework. + SSH::SUCCESSFUL_LOGIN, + }; + + ## The number of failed SSH connections before a host is designated as + ## guessing passwords. + const password_guesses_limit: double = 30 &redef; + + ## The amount of time to remember presumed non-successful logins to + ## build a model of a password guesser. + const guessing_timeout = 30 mins &redef; + + ## This value can be used to exclude hosts or entire networks from being + ## tracked as potential "guessers". The index represents + ## client subnets and the yield value represents server subnets. 
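+	## For example, ``redef SSH::ignore_guessers += { [10.0.0.0/8] = 192.168.0.0/16 };``
+	## would stop counting failed logins from clients in 10.0.0.0/8 against
+	## servers in 192.168.0.0/16 (illustrative subnets only).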
+ const ignore_guessers: table[subnet] of subnet &redef; +} + +event zeek_init() + { + local r1: SumStats::Reducer = [$stream="ssh.login.failure", $apply=set(SumStats::SUM, SumStats::SAMPLE), $num_samples=5]; + SumStats::create([$name="detect-ssh-bruteforcing", + $epoch=guessing_timeout, + $reducers=set(r1), + $threshold_val(key: SumStats::Key, result: SumStats::Result) = + { + return result["ssh.login.failure"]$sum; + }, + $threshold=password_guesses_limit, + $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = + { + local r = result["ssh.login.failure"]; + local sub_msg = fmt("Sampled servers: "); + local samples = r$samples; + for ( i in samples ) + { + if ( samples[i]?$str ) + sub_msg = fmt("%s%s %s", sub_msg, i==0 ? "":",", samples[i]$str); + } + # Generate the notice. + NOTICE([$note=Password_Guessing, + $msg=fmt("%s appears to be guessing SSH passwords (seen in %d connections).", key$host, r$num), + $sub=sub_msg, + $src=key$host, + $identifier=cat(key$host)]); + }]); + } + +event ssh_auth_successful(c: connection, auth_method_none: bool) + { + local id = c$id; + + Intel::seen([$host=id$orig_h, + $conn=c, + $where=SSH::SUCCESSFUL_LOGIN]); + } + +event ssh_auth_failed(c: connection) + { + local id = c$id; + + # Add data to the FAILED_LOGIN metric unless this connection should + # be ignored. + if ( ! (id$orig_h in ignore_guessers && + id$resp_h in ignore_guessers[id$orig_h]) ) + SumStats::observe("ssh.login.failure", [$host=id$orig_h], [$str=cat(id$resp_h)]); + } diff --git a/scripts/policy/protocols/ssh/geo-data.bro b/scripts/policy/protocols/ssh/geo-data.bro deleted file mode 100644 index af9e05f011..0000000000 --- a/scripts/policy/protocols/ssh/geo-data.bro +++ /dev/null @@ -1,58 +0,0 @@ -##! Geodata based detections for SSH analysis. - -@load base/frameworks/notice -@load base/protocols/ssh - -module SSH; - -export { - redef enum Notice::Type += { - ## If an SSH login is seen to or from a "watched" country based - ## on the :bro:id:`SSH::watched_countries` variable then this - ## notice will be generated. - Watched_Country_Login, - }; - - redef record Info += { - ## Add geographic data related to the "remote" host of the - ## connection. - remote_location: geo_location &log &optional; - }; - - ## The set of countries for which you'd like to generate notices upon - ## successful login. - option watched_countries: set[string] = {"RO"}; -} - -function get_location(c: connection): geo_location - { - local lookup_ip = (c$ssh$direction == OUTBOUND) ? c$id$resp_h : c$id$orig_h; - return lookup_location(lookup_ip); - } - -event ssh_auth_successful(c: connection, auth_method_none: bool) &priority=3 - { - if ( ! c$ssh?$direction ) - return; - - if ( ! c$ssh?$remote_location ) - return; - - if ( c$ssh$remote_location?$country_code && c$ssh$remote_location$country_code in watched_countries ) - { - NOTICE([$note=Watched_Country_Login, - $conn=c, - $msg=fmt("SSH login %s watched country: %s", - (c$ssh$direction == OUTBOUND) ? "to" : "from", - c$ssh$remote_location$country_code)]); - } - } - -event ssh_auth_attempted(c: connection, authenticated: bool) &priority=3 - { - if ( ! c$ssh?$direction ) - return; - - # Add the location data to the SSH record. - c$ssh$remote_location = get_location(c); - } diff --git a/scripts/policy/protocols/ssh/geo-data.zeek b/scripts/policy/protocols/ssh/geo-data.zeek new file mode 100644 index 0000000000..5c98f62229 --- /dev/null +++ b/scripts/policy/protocols/ssh/geo-data.zeek @@ -0,0 +1,58 @@ +##! Geodata based detections for SSH analysis. 
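+##! The "remote" side of each connection (the responder for outbound logins,
+##! the originator for inbound ones) is geolocated with GeoIP; its location is
+##! added to ssh.log, and a :zeek:enum:`SSH::Watched_Country_Login` notice is
+##! raised for successful logins involving :zeek:id:`SSH::watched_countries`.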
+ +@load base/frameworks/notice +@load base/protocols/ssh + +module SSH; + +export { + redef enum Notice::Type += { + ## If an SSH login is seen to or from a "watched" country based + ## on the :zeek:id:`SSH::watched_countries` variable then this + ## notice will be generated. + Watched_Country_Login, + }; + + redef record Info += { + ## Add geographic data related to the "remote" host of the + ## connection. + remote_location: geo_location &log &optional; + }; + + ## The set of countries for which you'd like to generate notices upon + ## successful login. + option watched_countries: set[string] = {"RO"}; +} + +function get_location(c: connection): geo_location + { + local lookup_ip = (c$ssh$direction == OUTBOUND) ? c$id$resp_h : c$id$orig_h; + return lookup_location(lookup_ip); + } + +event ssh_auth_successful(c: connection, auth_method_none: bool) &priority=3 + { + if ( ! c$ssh?$direction ) + return; + + if ( ! c$ssh?$remote_location ) + return; + + if ( c$ssh$remote_location?$country_code && c$ssh$remote_location$country_code in watched_countries ) + { + NOTICE([$note=Watched_Country_Login, + $conn=c, + $msg=fmt("SSH login %s watched country: %s", + (c$ssh$direction == OUTBOUND) ? "to" : "from", + c$ssh$remote_location$country_code)]); + } + } + +event ssh_auth_attempted(c: connection, authenticated: bool) &priority=3 + { + if ( ! c$ssh?$direction ) + return; + + # Add the location data to the SSH record. + c$ssh$remote_location = get_location(c); + } diff --git a/scripts/policy/protocols/ssh/interesting-hostnames.bro b/scripts/policy/protocols/ssh/interesting-hostnames.bro deleted file mode 100644 index 064556f9c4..0000000000 --- a/scripts/policy/protocols/ssh/interesting-hostnames.bro +++ /dev/null @@ -1,52 +0,0 @@ -##! This script will generate a notice if an apparent SSH login originates -##! or heads to a host with a reverse hostname that looks suspicious. By -##! default, the regular expression to match "interesting" hostnames includes -##! names that are typically used for infrastructure hosts like nameservers, -##! mail servers, web servers and ftp servers. - -@load base/frameworks/notice - -module SSH; - -export { - redef enum Notice::Type += { - ## Generated if a login originates or responds with a host where - ## the reverse hostname lookup resolves to a name matched by the - ## :bro:id:`SSH::interesting_hostnames` regular expression. - Interesting_Hostname_Login, - }; - - ## Strange/bad host names to see successful SSH logins from or to. - option interesting_hostnames = - /^d?ns[0-9]*\./ | - /^smtp[0-9]*\./ | - /^mail[0-9]*\./ | - /^pop[0-9]*\./ | - /^imap[0-9]*\./ | - /^www[0-9]*\./ | - /^ftp[0-9]*\./; -} - -function check_ssh_hostname(id: conn_id, uid: string, host: addr) - { - when ( local hostname = lookup_addr(host) ) - { - if ( interesting_hostnames in hostname ) - { - NOTICE([$note=Interesting_Hostname_Login, - $msg=fmt("Possible SSH login involving a %s %s with an interesting hostname.", - Site::is_local_addr(host) ? "local" : "remote", - host == id$orig_h ? 
"client" : "server"), - $sub=hostname, $id=id, $uid=uid]); - } - } - } - -event ssh_auth_successful(c: connection, auth_method_none: bool) - { - for ( host in set(c$id$orig_h, c$id$resp_h) ) - { - check_ssh_hostname(c$id, c$uid, host); - } - } - diff --git a/scripts/policy/protocols/ssh/interesting-hostnames.zeek b/scripts/policy/protocols/ssh/interesting-hostnames.zeek new file mode 100644 index 0000000000..92f7bfc1dd --- /dev/null +++ b/scripts/policy/protocols/ssh/interesting-hostnames.zeek @@ -0,0 +1,52 @@ +##! This script will generate a notice if an apparent SSH login originates +##! or heads to a host with a reverse hostname that looks suspicious. By +##! default, the regular expression to match "interesting" hostnames includes +##! names that are typically used for infrastructure hosts like nameservers, +##! mail servers, web servers and ftp servers. + +@load base/frameworks/notice + +module SSH; + +export { + redef enum Notice::Type += { + ## Generated if a login originates or responds with a host where + ## the reverse hostname lookup resolves to a name matched by the + ## :zeek:id:`SSH::interesting_hostnames` regular expression. + Interesting_Hostname_Login, + }; + + ## Strange/bad host names to see successful SSH logins from or to. + option interesting_hostnames = + /^d?ns[0-9]*\./ | + /^smtp[0-9]*\./ | + /^mail[0-9]*\./ | + /^pop[0-9]*\./ | + /^imap[0-9]*\./ | + /^www[0-9]*\./ | + /^ftp[0-9]*\./; +} + +function check_ssh_hostname(id: conn_id, uid: string, host: addr) + { + when ( local hostname = lookup_addr(host) ) + { + if ( interesting_hostnames in hostname ) + { + NOTICE([$note=Interesting_Hostname_Login, + $msg=fmt("Possible SSH login involving a %s %s with an interesting hostname.", + Site::is_local_addr(host) ? "local" : "remote", + host == id$orig_h ? "client" : "server"), + $sub=hostname, $id=id, $uid=uid]); + } + } + } + +event ssh_auth_successful(c: connection, auth_method_none: bool) + { + for ( host in set(c$id$orig_h, c$id$resp_h) ) + { + check_ssh_hostname(c$id, c$uid, host); + } + } + diff --git a/scripts/policy/protocols/ssh/software.bro b/scripts/policy/protocols/ssh/software.zeek similarity index 100% rename from scripts/policy/protocols/ssh/software.bro rename to scripts/policy/protocols/ssh/software.zeek diff --git a/scripts/policy/protocols/ssl/expiring-certs.bro b/scripts/policy/protocols/ssl/expiring-certs.bro deleted file mode 100644 index 1e806942d7..0000000000 --- a/scripts/policy/protocols/ssl/expiring-certs.bro +++ /dev/null @@ -1,69 +0,0 @@ -##! Generate notices when X.509 certificates over SSL/TLS are expired or -##! going to expire soon based on the date and time values stored within the -##! certificate. - -@load base/protocols/ssl -@load base/files/x509 -@load base/frameworks/notice -@load base/utils/directions-and-hosts - -module SSL; - -export { - redef enum Notice::Type += { - ## Indicates that a certificate's NotValidAfter date has lapsed - ## and the certificate is now invalid. - Certificate_Expired, - ## Indicates that a certificate is going to expire within - ## :bro:id:`SSL::notify_when_cert_expiring_in`. - Certificate_Expires_Soon, - ## Indicates that a certificate's NotValidBefore date is future - ## dated. - Certificate_Not_Valid_Yet, - }; - - ## The category of hosts you would like to be notified about which have - ## certificates that are going to be expiring soon. By default, these - ## notices will be suppressed by the notice framework for 1 day after - ## a particular certificate has had a notice generated. 
- ## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS - option notify_certs_expiration = LOCAL_HOSTS; - - ## The time before a certificate is going to expire that you would like - ## to start receiving :bro:enum:`SSL::Certificate_Expires_Soon` notices. - option notify_when_cert_expiring_in = 30days; -} - -event ssl_established(c: connection) &priority=3 - { - # If there are no certificates or we are not interested in the server, just return. - if ( ! c$ssl?$cert_chain || |c$ssl$cert_chain| == 0 || - ! addr_matches_host(c$id$resp_h, notify_certs_expiration) || - ! c$ssl$cert_chain[0]?$x509 || ! c$ssl$cert_chain[0]?$sha1 ) - return; - - local fuid = c$ssl$cert_chain_fuids[0]; - local cert = c$ssl$cert_chain[0]$x509$certificate; - local hash = c$ssl$cert_chain[0]$sha1; - - if ( cert$not_valid_before > network_time() ) - NOTICE([$note=Certificate_Not_Valid_Yet, - $conn=c, $suppress_for=1day, - $msg=fmt("Certificate %s isn't valid until %T", cert$subject, cert$not_valid_before), - $identifier=cat(c$id$resp_h, c$id$resp_p, hash), - $fuid=fuid]); - - else if ( cert$not_valid_after < network_time() ) - NOTICE([$note=Certificate_Expired, - $conn=c, $suppress_for=1day, - $msg=fmt("Certificate %s expired at %T", cert$subject, cert$not_valid_after), - $identifier=cat(c$id$resp_h, c$id$resp_p, hash), - $fuid=fuid]); - - else if ( cert$not_valid_after - notify_when_cert_expiring_in < network_time() ) - NOTICE([$note=Certificate_Expires_Soon, - $msg=fmt("Certificate %s is going to expire at %T", cert$subject, cert$not_valid_after), - $conn=c, $suppress_for=1day, - $identifier=cat(c$id$resp_h, c$id$resp_p, hash), - $fuid=fuid]); - } diff --git a/scripts/policy/protocols/ssl/expiring-certs.zeek b/scripts/policy/protocols/ssl/expiring-certs.zeek new file mode 100644 index 0000000000..630d23d145 --- /dev/null +++ b/scripts/policy/protocols/ssl/expiring-certs.zeek @@ -0,0 +1,69 @@ +##! Generate notices when X.509 certificates over SSL/TLS are expired or +##! going to expire soon based on the date and time values stored within the +##! certificate. + +@load base/protocols/ssl +@load base/files/x509 +@load base/frameworks/notice +@load base/utils/directions-and-hosts + +module SSL; + +export { + redef enum Notice::Type += { + ## Indicates that a certificate's NotValidAfter date has lapsed + ## and the certificate is now invalid. + Certificate_Expired, + ## Indicates that a certificate is going to expire within + ## :zeek:id:`SSL::notify_when_cert_expiring_in`. + Certificate_Expires_Soon, + ## Indicates that a certificate's NotValidBefore date is future + ## dated. + Certificate_Not_Valid_Yet, + }; + + ## The category of hosts you would like to be notified about which have + ## certificates that are going to be expiring soon. By default, these + ## notices will be suppressed by the notice framework for 1 day after + ## a particular certificate has had a notice generated. + ## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS + option notify_certs_expiration = LOCAL_HOSTS; + + ## The time before a certificate is going to expire that you would like + ## to start receiving :zeek:enum:`SSL::Certificate_Expires_Soon` notices. + option notify_when_cert_expiring_in = 30days; +} + +event ssl_established(c: connection) &priority=3 + { + # If there are no certificates or we are not interested in the server, just return. + if ( ! c$ssl?$cert_chain || |c$ssl$cert_chain| == 0 || + ! addr_matches_host(c$id$resp_h, notify_certs_expiration) || + ! c$ssl$cert_chain[0]?$x509 || ! 
c$ssl$cert_chain[0]?$sha1 ) + return; + + local fuid = c$ssl$cert_chain_fuids[0]; + local cert = c$ssl$cert_chain[0]$x509$certificate; + local hash = c$ssl$cert_chain[0]$sha1; + + if ( cert$not_valid_before > network_time() ) + NOTICE([$note=Certificate_Not_Valid_Yet, + $conn=c, $suppress_for=1day, + $msg=fmt("Certificate %s isn't valid until %T", cert$subject, cert$not_valid_before), + $identifier=cat(c$id$resp_h, c$id$resp_p, hash), + $fuid=fuid]); + + else if ( cert$not_valid_after < network_time() ) + NOTICE([$note=Certificate_Expired, + $conn=c, $suppress_for=1day, + $msg=fmt("Certificate %s expired at %T", cert$subject, cert$not_valid_after), + $identifier=cat(c$id$resp_h, c$id$resp_p, hash), + $fuid=fuid]); + + else if ( cert$not_valid_after - notify_when_cert_expiring_in < network_time() ) + NOTICE([$note=Certificate_Expires_Soon, + $msg=fmt("Certificate %s is going to expire at %T", cert$subject, cert$not_valid_after), + $conn=c, $suppress_for=1day, + $identifier=cat(c$id$resp_h, c$id$resp_p, hash), + $fuid=fuid]); + } diff --git a/scripts/policy/protocols/ssl/extract-certs-pem.bro b/scripts/policy/protocols/ssl/extract-certs-pem.zeek similarity index 100% rename from scripts/policy/protocols/ssl/extract-certs-pem.bro rename to scripts/policy/protocols/ssl/extract-certs-pem.zeek diff --git a/scripts/policy/protocols/ssl/heartbleed.bro b/scripts/policy/protocols/ssl/heartbleed.bro deleted file mode 100644 index ae4395289d..0000000000 --- a/scripts/policy/protocols/ssl/heartbleed.bro +++ /dev/null @@ -1,238 +0,0 @@ -##! Detect the TLS heartbleed attack. See http://heartbleed.com for more. - -@load base/protocols/ssl -@load base/frameworks/notice - -module Heartbleed; - -export { - redef enum Notice::Type += { - ## Indicates that a host performed a heartbleed attack or scan. - SSL_Heartbeat_Attack, - ## Indicates that a host performing a heartbleed attack was probably successful. - SSL_Heartbeat_Attack_Success, - ## Indicates we saw heartbeat requests with odd length. Probably an attack or scan. - SSL_Heartbeat_Odd_Length, - ## Indicates we saw many heartbeat requests without a reply. Might be an attack. - SSL_Heartbeat_Many_Requests - }; -} - -# Do not disable analyzers after detection - otherwhise we will not notice -# encrypted attacks. -redef SSL::disable_analyzer_after_detection=F; - -redef record SSL::Info += { - last_originator_heartbeat_request_size: count &optional; - last_responder_heartbeat_request_size: count &optional; - - originator_heartbeats: count &default=0; - responder_heartbeats: count &default=0; - - # Unencrypted connections - was an exploit attempt detected yet. - heartbleed_detected: bool &default=F; - - # Count number of appdata packages and bytes exchanged so far. - enc_appdata_packages: count &default=0; - enc_appdata_bytes: count &default=0; -}; - -type min_length: record { - cipher: pattern; - min_length: count; -}; - -global min_lengths: vector of min_length = vector(); -global min_lengths_tls11: vector of min_length = vector(); - -event bro_init() - { - # Minimum length a heartbeat packet must have for different cipher suites. - # Note - tls 1.1f and 1.0 have different lengths :( - # This should be all cipher suites usually supported by vulnerable servers. 
- min_lengths_tls11 += [$cipher=/_AES_256_GCM_SHA384$/, $min_length=43]; - min_lengths_tls11 += [$cipher=/_AES_128_GCM_SHA256$/, $min_length=43]; - min_lengths_tls11 += [$cipher=/_256_CBC_SHA384$/, $min_length=96]; - min_lengths_tls11 += [$cipher=/_256_CBC_SHA256$/, $min_length=80]; - min_lengths_tls11 += [$cipher=/_256_CBC_SHA$/, $min_length=64]; - min_lengths_tls11 += [$cipher=/_128_CBC_SHA256$/, $min_length=80]; - min_lengths_tls11 += [$cipher=/_128_CBC_SHA$/, $min_length=64]; - min_lengths_tls11 += [$cipher=/_3DES_EDE_CBC_SHA$/, $min_length=48]; - min_lengths_tls11 += [$cipher=/_SEED_CBC_SHA$/, $min_length=64]; - min_lengths_tls11 += [$cipher=/_IDEA_CBC_SHA$/, $min_length=48]; - min_lengths_tls11 += [$cipher=/_DES_CBC_SHA$/, $min_length=48]; - min_lengths_tls11 += [$cipher=/_DES40_CBC_SHA$/, $min_length=48]; - min_lengths_tls11 += [$cipher=/_RC4_128_SHA$/, $min_length=39]; - min_lengths_tls11 += [$cipher=/_RC4_128_MD5$/, $min_length=35]; - min_lengths_tls11 += [$cipher=/_RC4_40_MD5$/, $min_length=35]; - min_lengths_tls11 += [$cipher=/_RC2_CBC_40_MD5$/, $min_length=48]; - min_lengths += [$cipher=/_256_CBC_SHA$/, $min_length=48]; - min_lengths += [$cipher=/_128_CBC_SHA$/, $min_length=48]; - min_lengths += [$cipher=/_3DES_EDE_CBC_SHA$/, $min_length=40]; - min_lengths += [$cipher=/_SEED_CBC_SHA$/, $min_length=48]; - min_lengths += [$cipher=/_IDEA_CBC_SHA$/, $min_length=40]; - min_lengths += [$cipher=/_DES_CBC_SHA$/, $min_length=40]; - min_lengths += [$cipher=/_DES40_CBC_SHA$/, $min_length=40]; - min_lengths += [$cipher=/_RC4_128_SHA$/, $min_length=39]; - min_lengths += [$cipher=/_RC4_128_MD5$/, $min_length=35]; - min_lengths += [$cipher=/_RC4_40_MD5$/, $min_length=35]; - min_lengths += [$cipher=/_RC2_CBC_40_MD5$/, $min_length=40]; - } - -event ssl_heartbeat(c: connection, is_orig: bool, length: count, heartbeat_type: count, payload_length: count, payload: string) - { - if ( ! c?$ssl ) - return; - - if ( heartbeat_type == 1 ) - { - local checklength: count = (length<(3+16)) ? length : (length - 3 - 16); - - if ( payload_length > checklength ) - { - c$ssl$heartbleed_detected = T; - NOTICE([$note=Heartbleed::SSL_Heartbeat_Attack, - $msg=fmt("An TLS heartbleed attack was detected! Record length %d. Payload length %d", length, payload_length), - $conn=c, - $identifier=cat(c$uid, length, payload_length) - ]); - } - else if ( is_orig ) - { - NOTICE([$note=Heartbleed::SSL_Heartbeat_Attack, - $msg=fmt("Heartbeat request before encryption. Probable Scan without exploit attempt. Message length: %d. Payload length: %d", length, payload_length), - $conn=c, - $n=length, - $identifier=cat(c$uid, length) - ]); - } - } - - if ( heartbeat_type == 2 && c$ssl$heartbleed_detected ) - { - NOTICE([$note=Heartbleed::SSL_Heartbeat_Attack_Success, - $msg=fmt("An TLS heartbleed attack detected before was probably exploited. Message length: %d. Payload length: %d", length, payload_length), - $conn=c, - $identifier=c$uid - ]); - } - } - -event ssl_encrypted_heartbeat(c: connection, is_orig: bool, length: count) - { - if ( is_orig ) - ++c$ssl$originator_heartbeats; - else - ++c$ssl$responder_heartbeats; - - local duration = network_time() - c$start_time; - - if ( c$ssl$enc_appdata_packages == 0 ) - NOTICE([$note=SSL_Heartbeat_Attack, - $msg=fmt("Heartbeat before ciphertext. Probable attack or scan. 
Length: %d, is_orig: %d", length, is_orig), - $conn=c, - $n=length, - $identifier=fmt("%s%s", c$uid, "early") - ]); - else if ( duration < 1min ) - NOTICE([$note=SSL_Heartbeat_Attack, - $msg=fmt("Heartbeat within first minute. Possible attack or scan. Length: %d, is_orig: %d, time: %s", length, is_orig, duration), - $conn=c, - $n=length, - $identifier=fmt("%s%s", c$uid, "early") - ]); - - if ( c$ssl$originator_heartbeats > c$ssl$responder_heartbeats + 3 ) - NOTICE([$note=SSL_Heartbeat_Many_Requests, - $msg=fmt("More than 3 heartbeat requests without replies from server. Possible attack. Client count: %d, server count: %d", c$ssl$originator_heartbeats, c$ssl$responder_heartbeats), - $conn=c, - $n=(c$ssl$originator_heartbeats-c$ssl$responder_heartbeats), - $identifier=fmt("%s%d", c$uid, c$ssl$responder_heartbeats/1000) # re-throw every 1000 heartbeats - ]); - - if ( c$ssl$responder_heartbeats > c$ssl$originator_heartbeats + 3 ) - NOTICE([$note=SSL_Heartbeat_Many_Requests, - $msg=fmt("Server sending more heartbeat responses than requests seen. Possible attack. Client count: %d, server count: %d", c$ssl$originator_heartbeats, c$ssl$responder_heartbeats), - $conn=c, - $n=(c$ssl$originator_heartbeats-c$ssl$responder_heartbeats), - $identifier=fmt("%s%d", c$uid, c$ssl$responder_heartbeats/1000) # re-throw every 1000 heartbeats - ]); - - if ( is_orig && length < 19 ) - NOTICE([$note=SSL_Heartbeat_Odd_Length, - $msg=fmt("Heartbeat message smaller than minimum required length. Probable attack or scan. Message length: %d. Cipher: %s. Time: %f", length, c$ssl$cipher, duration), - $conn=c, - $n=length, - $identifier=fmt("%s-weak-%d", c$uid, length) - ]); - - # Examine request lengths based on used cipher... - local min_length_choice: vector of min_length; - if ( (c$ssl$version == "TLSv11") || (c$ssl$version == "TLSv12") ) # tls 1.1+ have different lengths for CBC - min_length_choice = min_lengths_tls11; - else - min_length_choice = min_lengths; - - for ( i in min_length_choice ) - { - if ( min_length_choice[i]$cipher in c$ssl$cipher ) - { - if ( length < min_length_choice[i]$min_length ) - { - NOTICE([$note=SSL_Heartbeat_Odd_Length, - $msg=fmt("Heartbeat message smaller than minimum required length. Probable attack. Message length: %d. Required length: %d. Cipher: %s. Cipher match: %s", length, min_length_choice[i]$min_length, c$ssl$cipher, min_length_choice[i]$cipher), - $conn=c, - $n=length, - $identifier=fmt("%s-weak-%d", c$uid, length) - ]); - } - - break; - } - - } - - if ( is_orig ) - { - if ( c$ssl?$last_responder_heartbeat_request_size ) - { - # server originated heartbeat. Ignore & continue - delete c$ssl$last_responder_heartbeat_request_size; - } - - else - c$ssl$last_originator_heartbeat_request_size = length; - } - else - { - if ( c$ssl?$last_originator_heartbeat_request_size && c$ssl$last_originator_heartbeat_request_size < length ) - { - NOTICE([$note=SSL_Heartbeat_Attack_Success, - $msg=fmt("An encrypted TLS heartbleed attack was probably detected! First packet client record length %d, first packet server record length %d. Time: %f", - c$ssl$last_originator_heartbeat_request_size, length, duration), - $conn=c, - $identifier=c$uid # only throw once per connection - ]); - } - - else if ( ! 
c$ssl?$last_originator_heartbeat_request_size ) - c$ssl$last_responder_heartbeat_request_size = length; - - if ( c$ssl?$last_originator_heartbeat_request_size ) - delete c$ssl$last_originator_heartbeat_request_size; - } - } - -event ssl_encrypted_data(c: connection, is_orig: bool, record_version: count, content_type: count, length: count) - { - if ( !c?$ssl ) - return; - - if ( content_type == SSL::HEARTBEAT ) - event ssl_encrypted_heartbeat(c, is_orig, length); - else if ( (content_type == SSL::APPLICATION_DATA) && (length > 0) ) - { - ++c$ssl$enc_appdata_packages; - c$ssl$enc_appdata_bytes += length; - } - } diff --git a/scripts/policy/protocols/ssl/heartbleed.zeek b/scripts/policy/protocols/ssl/heartbleed.zeek new file mode 100644 index 0000000000..483c1f4ce1 --- /dev/null +++ b/scripts/policy/protocols/ssl/heartbleed.zeek @@ -0,0 +1,238 @@ +##! Detect the TLS heartbleed attack. See http://heartbleed.com for more. + +@load base/protocols/ssl +@load base/frameworks/notice + +module Heartbleed; + +export { + redef enum Notice::Type += { + ## Indicates that a host performed a heartbleed attack or scan. + SSL_Heartbeat_Attack, + ## Indicates that a host performing a heartbleed attack was probably successful. + SSL_Heartbeat_Attack_Success, + ## Indicates we saw heartbeat requests with odd length. Probably an attack or scan. + SSL_Heartbeat_Odd_Length, + ## Indicates we saw many heartbeat requests without a reply. Might be an attack. + SSL_Heartbeat_Many_Requests + }; +} + +# Do not disable analyzers after detection - otherwhise we will not notice +# encrypted attacks. +redef SSL::disable_analyzer_after_detection=F; + +redef record SSL::Info += { + last_originator_heartbeat_request_size: count &optional; + last_responder_heartbeat_request_size: count &optional; + + originator_heartbeats: count &default=0; + responder_heartbeats: count &default=0; + + # Unencrypted connections - was an exploit attempt detected yet. + heartbleed_detected: bool &default=F; + + # Count number of appdata packages and bytes exchanged so far. + enc_appdata_packages: count &default=0; + enc_appdata_bytes: count &default=0; +}; + +type min_length: record { + cipher: pattern; + min_length: count; +}; + +global min_lengths: vector of min_length = vector(); +global min_lengths_tls11: vector of min_length = vector(); + +event zeek_init() + { + # Minimum length a heartbeat packet must have for different cipher suites. + # Note - tls 1.1f and 1.0 have different lengths :( + # This should be all cipher suites usually supported by vulnerable servers. 
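+	# (TLS 1.1 and later add an explicit per-record IV to CBC cipher suites,
+	# which is why the CBC minimums below are one cipher block larger than
+	# their TLS 1.0 counterparts; stream ciphers such as RC4 are unchanged.)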
+ min_lengths_tls11 += [$cipher=/_AES_256_GCM_SHA384$/, $min_length=43]; + min_lengths_tls11 += [$cipher=/_AES_128_GCM_SHA256$/, $min_length=43]; + min_lengths_tls11 += [$cipher=/_256_CBC_SHA384$/, $min_length=96]; + min_lengths_tls11 += [$cipher=/_256_CBC_SHA256$/, $min_length=80]; + min_lengths_tls11 += [$cipher=/_256_CBC_SHA$/, $min_length=64]; + min_lengths_tls11 += [$cipher=/_128_CBC_SHA256$/, $min_length=80]; + min_lengths_tls11 += [$cipher=/_128_CBC_SHA$/, $min_length=64]; + min_lengths_tls11 += [$cipher=/_3DES_EDE_CBC_SHA$/, $min_length=48]; + min_lengths_tls11 += [$cipher=/_SEED_CBC_SHA$/, $min_length=64]; + min_lengths_tls11 += [$cipher=/_IDEA_CBC_SHA$/, $min_length=48]; + min_lengths_tls11 += [$cipher=/_DES_CBC_SHA$/, $min_length=48]; + min_lengths_tls11 += [$cipher=/_DES40_CBC_SHA$/, $min_length=48]; + min_lengths_tls11 += [$cipher=/_RC4_128_SHA$/, $min_length=39]; + min_lengths_tls11 += [$cipher=/_RC4_128_MD5$/, $min_length=35]; + min_lengths_tls11 += [$cipher=/_RC4_40_MD5$/, $min_length=35]; + min_lengths_tls11 += [$cipher=/_RC2_CBC_40_MD5$/, $min_length=48]; + min_lengths += [$cipher=/_256_CBC_SHA$/, $min_length=48]; + min_lengths += [$cipher=/_128_CBC_SHA$/, $min_length=48]; + min_lengths += [$cipher=/_3DES_EDE_CBC_SHA$/, $min_length=40]; + min_lengths += [$cipher=/_SEED_CBC_SHA$/, $min_length=48]; + min_lengths += [$cipher=/_IDEA_CBC_SHA$/, $min_length=40]; + min_lengths += [$cipher=/_DES_CBC_SHA$/, $min_length=40]; + min_lengths += [$cipher=/_DES40_CBC_SHA$/, $min_length=40]; + min_lengths += [$cipher=/_RC4_128_SHA$/, $min_length=39]; + min_lengths += [$cipher=/_RC4_128_MD5$/, $min_length=35]; + min_lengths += [$cipher=/_RC4_40_MD5$/, $min_length=35]; + min_lengths += [$cipher=/_RC2_CBC_40_MD5$/, $min_length=40]; + } + +event ssl_heartbeat(c: connection, is_orig: bool, length: count, heartbeat_type: count, payload_length: count, payload: string) + { + if ( ! c?$ssl ) + return; + + if ( heartbeat_type == 1 ) + { + local checklength: count = (length<(3+16)) ? length : (length - 3 - 16); + + if ( payload_length > checklength ) + { + c$ssl$heartbleed_detected = T; + NOTICE([$note=Heartbleed::SSL_Heartbeat_Attack, + $msg=fmt("An TLS heartbleed attack was detected! Record length %d. Payload length %d", length, payload_length), + $conn=c, + $identifier=cat(c$uid, length, payload_length) + ]); + } + else if ( is_orig ) + { + NOTICE([$note=Heartbleed::SSL_Heartbeat_Attack, + $msg=fmt("Heartbeat request before encryption. Probable Scan without exploit attempt. Message length: %d. Payload length: %d", length, payload_length), + $conn=c, + $n=length, + $identifier=cat(c$uid, length) + ]); + } + } + + if ( heartbeat_type == 2 && c$ssl$heartbleed_detected ) + { + NOTICE([$note=Heartbleed::SSL_Heartbeat_Attack_Success, + $msg=fmt("An TLS heartbleed attack detected before was probably exploited. Message length: %d. Payload length: %d", length, payload_length), + $conn=c, + $identifier=c$uid + ]); + } + } + +event ssl_encrypted_heartbeat(c: connection, is_orig: bool, length: count) + { + if ( is_orig ) + ++c$ssl$originator_heartbeats; + else + ++c$ssl$responder_heartbeats; + + local duration = network_time() - c$start_time; + + if ( c$ssl$enc_appdata_packages == 0 ) + NOTICE([$note=SSL_Heartbeat_Attack, + $msg=fmt("Heartbeat before ciphertext. Probable attack or scan. 
Length: %d, is_orig: %d", length, is_orig), + $conn=c, + $n=length, + $identifier=fmt("%s%s", c$uid, "early") + ]); + else if ( duration < 1min ) + NOTICE([$note=SSL_Heartbeat_Attack, + $msg=fmt("Heartbeat within first minute. Possible attack or scan. Length: %d, is_orig: %d, time: %s", length, is_orig, duration), + $conn=c, + $n=length, + $identifier=fmt("%s%s", c$uid, "early") + ]); + + if ( c$ssl$originator_heartbeats > c$ssl$responder_heartbeats + 3 ) + NOTICE([$note=SSL_Heartbeat_Many_Requests, + $msg=fmt("More than 3 heartbeat requests without replies from server. Possible attack. Client count: %d, server count: %d", c$ssl$originator_heartbeats, c$ssl$responder_heartbeats), + $conn=c, + $n=(c$ssl$originator_heartbeats-c$ssl$responder_heartbeats), + $identifier=fmt("%s%d", c$uid, c$ssl$responder_heartbeats/1000) # re-throw every 1000 heartbeats + ]); + + if ( c$ssl$responder_heartbeats > c$ssl$originator_heartbeats + 3 ) + NOTICE([$note=SSL_Heartbeat_Many_Requests, + $msg=fmt("Server sending more heartbeat responses than requests seen. Possible attack. Client count: %d, server count: %d", c$ssl$originator_heartbeats, c$ssl$responder_heartbeats), + $conn=c, + $n=(c$ssl$originator_heartbeats-c$ssl$responder_heartbeats), + $identifier=fmt("%s%d", c$uid, c$ssl$responder_heartbeats/1000) # re-throw every 1000 heartbeats + ]); + + if ( is_orig && length < 19 ) + NOTICE([$note=SSL_Heartbeat_Odd_Length, + $msg=fmt("Heartbeat message smaller than minimum required length. Probable attack or scan. Message length: %d. Cipher: %s. Time: %f", length, c$ssl$cipher, duration), + $conn=c, + $n=length, + $identifier=fmt("%s-weak-%d", c$uid, length) + ]); + + # Examine request lengths based on used cipher... + local min_length_choice: vector of min_length; + if ( (c$ssl$version == "TLSv11") || (c$ssl$version == "TLSv12") ) # tls 1.1+ have different lengths for CBC + min_length_choice = min_lengths_tls11; + else + min_length_choice = min_lengths; + + for ( i in min_length_choice ) + { + if ( min_length_choice[i]$cipher in c$ssl$cipher ) + { + if ( length < min_length_choice[i]$min_length ) + { + NOTICE([$note=SSL_Heartbeat_Odd_Length, + $msg=fmt("Heartbeat message smaller than minimum required length. Probable attack. Message length: %d. Required length: %d. Cipher: %s. Cipher match: %s", length, min_length_choice[i]$min_length, c$ssl$cipher, min_length_choice[i]$cipher), + $conn=c, + $n=length, + $identifier=fmt("%s-weak-%d", c$uid, length) + ]); + } + + break; + } + + } + + if ( is_orig ) + { + if ( c$ssl?$last_responder_heartbeat_request_size ) + { + # server originated heartbeat. Ignore & continue + delete c$ssl$last_responder_heartbeat_request_size; + } + + else + c$ssl$last_originator_heartbeat_request_size = length; + } + else + { + if ( c$ssl?$last_originator_heartbeat_request_size && c$ssl$last_originator_heartbeat_request_size < length ) + { + NOTICE([$note=SSL_Heartbeat_Attack_Success, + $msg=fmt("An encrypted TLS heartbleed attack was probably detected! First packet client record length %d, first packet server record length %d. Time: %f", + c$ssl$last_originator_heartbeat_request_size, length, duration), + $conn=c, + $identifier=c$uid # only throw once per connection + ]); + } + + else if ( ! 
c$ssl?$last_originator_heartbeat_request_size ) + c$ssl$last_responder_heartbeat_request_size = length; + + if ( c$ssl?$last_originator_heartbeat_request_size ) + delete c$ssl$last_originator_heartbeat_request_size; + } + } + +event ssl_encrypted_data(c: connection, is_orig: bool, record_version: count, content_type: count, length: count) + { + if ( !c?$ssl ) + return; + + if ( content_type == SSL::HEARTBEAT ) + event ssl_encrypted_heartbeat(c, is_orig, length); + else if ( (content_type == SSL::APPLICATION_DATA) && (length > 0) ) + { + ++c$ssl$enc_appdata_packages; + c$ssl$enc_appdata_bytes += length; + } + } diff --git a/scripts/policy/protocols/ssl/known-certs.bro b/scripts/policy/protocols/ssl/known-certs.bro deleted file mode 100644 index 63a371b3e1..0000000000 --- a/scripts/policy/protocols/ssl/known-certs.bro +++ /dev/null @@ -1,199 +0,0 @@ -##! Log information about certificates while attempting to avoid duplicate -##! logging. - -@load base/utils/directions-and-hosts -@load base/protocols/ssl -@load base/files/x509 -@load base/frameworks/cluster - -module Known; - -export { - redef enum Log::ID += { CERTS_LOG }; - - type CertsInfo: record { - ## The timestamp when the certificate was detected. - ts: time &log; - ## The address that offered the certificate. - host: addr &log; - ## If the certificate was handed out by a server, this is the - ## port that the server was listening on. - port_num: port &log &optional; - ## Certificate subject. - subject: string &log &optional; - ## Certificate issuer subject. - issuer_subject: string &log &optional; - ## Serial number for the certificate. - serial: string &log &optional; - }; - - ## The certificates whose existence should be logged and tracked. - ## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS. - option cert_tracking = LOCAL_HOSTS; - - ## Toggles between different implementations of this script. - ## When true, use a Broker data store, else use a regular Bro set - ## with keys uniformly distributed over proxy nodes in cluster - ## operation. - const use_cert_store = T &redef; - - type AddrCertHashPair: record { - host: addr; - hash: string; - }; - - ## Holds the set of all known certificates. Keys in the store are of - ## type :bro:type:`Known::AddrCertHashPair` and their associated value is - ## always the boolean value of "true". - global cert_store: Cluster::StoreInfo; - - ## The Broker topic name to use for :bro:see:`Known::cert_store`. - const cert_store_name = "bro/known/certs" &redef; - - ## The expiry interval of new entries in :bro:see:`Known::cert_store`. - ## This also changes the interval at which certs get logged. - option cert_store_expiry = 1day; - - ## The timeout interval to use for operations against - ## :bro:see:`Known::cert_store`. - option cert_store_timeout = 15sec; - - ## The set of all known certificates to store for preventing duplicate - ## logging. It can also be used from other scripts to - ## inspect if a certificate has been seen in use. The string value - ## in the set is for storing the DER formatted certificate' SHA1 hash. - ## - ## In cluster operation, this set is uniformly distributed across - ## proxy nodes. - global certs: set[addr, string] &create_expire=1day &redef; - - ## Event that can be handled to access the loggable record as it is sent - ## on to the logging framework. - global log_known_certs: event(rec: CertsInfo); -} - -event bro_init() - { - if ( ! 
Known::use_cert_store ) - return; - - Known::cert_store = Cluster::create_store(Known::cert_store_name); - } - -event Known::cert_found(info: CertsInfo, hash: string) - { - if ( ! Known::use_cert_store ) - return; - - local key = AddrCertHashPair($host = info$host, $hash = hash); - - when ( local r = Broker::put_unique(Known::cert_store$store, key, - T, Known::cert_store_expiry) ) - { - if ( r$status == Broker::SUCCESS ) - { - if ( r$result as bool ) - Log::write(Known::CERTS_LOG, info); - } - else - Reporter::error(fmt("%s: data store put_unique failure", - Known::cert_store_name)); - } - timeout Known::cert_store_timeout - { - # Can't really tell if master store ended up inserting a key. - Log::write(Known::CERTS_LOG, info); - } - } - -event known_cert_add(info: CertsInfo, hash: string) - { - if ( Known::use_cert_store ) - return; - - if ( [info$host, hash] in Known::certs ) - return; - - add Known::certs[info$host, hash]; - - @if ( ! Cluster::is_enabled() || - Cluster::local_node_type() == Cluster::PROXY ) - Log::write(Known::CERTS_LOG, info); - @endif - } - -event Known::cert_found(info: CertsInfo, hash: string) - { - if ( Known::use_cert_store ) - return; - - if ( [info$host, hash] in Known::certs ) - return; - - local key = cat(info$host, hash); - Cluster::publish_hrw(Cluster::proxy_pool, key, known_cert_add, info, hash); - event known_cert_add(info, hash); - } - -event Cluster::node_up(name: string, id: string) - { - if ( Known::use_cert_store ) - return; - - if ( Cluster::local_node_type() != Cluster::WORKER ) - return; - - # Drop local suppression cache on workers to force HRW key repartitioning. - Known::certs = table(); - } - -event Cluster::node_down(name: string, id: string) - { - if ( Known::use_cert_store ) - return; - - if ( Cluster::local_node_type() != Cluster::WORKER ) - return; - - # Drop local suppression cache on workers to force HRW key repartitioning. - Known::certs = table(); - } - -event ssl_established(c: connection) &priority=3 - { - if ( ! c$ssl?$cert_chain ) - return; - - if ( |c$ssl$cert_chain| < 1 ) - return; - - if ( ! c$ssl$cert_chain[0]?$x509 ) - return; - - local fuid = c$ssl$cert_chain_fuids[0]; - - if ( ! c$ssl$cert_chain[0]?$sha1 ) - { - Reporter::error(fmt("Certificate with fuid %s did not contain sha1 hash when checking for known certs. Aborting", - fuid)); - return; - } - - local host = c$id$resp_h; - - if ( ! addr_matches_host(host, cert_tracking) ) - return; - - local hash = c$ssl$cert_chain[0]$sha1; - local cert = c$ssl$cert_chain[0]$x509$certificate; - local info = CertsInfo($ts = network_time(), $host = host, - $port_num = c$id$resp_p, $subject = cert$subject, - $issuer_subject = cert$issuer, - $serial = cert$serial); - event Known::cert_found(info, hash); - } - -event bro_init() &priority=5 - { - Log::create_stream(Known::CERTS_LOG, [$columns=CertsInfo, $ev=log_known_certs, $path="known_certs"]); - } diff --git a/scripts/policy/protocols/ssl/known-certs.zeek b/scripts/policy/protocols/ssl/known-certs.zeek new file mode 100644 index 0000000000..f6aec6267d --- /dev/null +++ b/scripts/policy/protocols/ssl/known-certs.zeek @@ -0,0 +1,199 @@ +##! Log information about certificates while attempting to avoid duplicate +##! logging. + +@load base/utils/directions-and-hosts +@load base/protocols/ssl +@load base/files/x509 +@load base/frameworks/cluster + +module Known; + +export { + redef enum Log::ID += { CERTS_LOG }; + + type CertsInfo: record { + ## The timestamp when the certificate was detected. 
+ ts: time &log; + ## The address that offered the certificate. + host: addr &log; + ## If the certificate was handed out by a server, this is the + ## port that the server was listening on. + port_num: port &log &optional; + ## Certificate subject. + subject: string &log &optional; + ## Certificate issuer subject. + issuer_subject: string &log &optional; + ## Serial number for the certificate. + serial: string &log &optional; + }; + + ## The certificates whose existence should be logged and tracked. + ## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS. + option cert_tracking = LOCAL_HOSTS; + + ## Toggles between different implementations of this script. + ## When true, use a Broker data store, else use a regular Zeek set + ## with keys uniformly distributed over proxy nodes in cluster + ## operation. + const use_cert_store = T &redef; + + type AddrCertHashPair: record { + host: addr; + hash: string; + }; + + ## Holds the set of all known certificates. Keys in the store are of + ## type :zeek:type:`Known::AddrCertHashPair` and their associated value is + ## always the boolean value of "true". + global cert_store: Cluster::StoreInfo; + + ## The Broker topic name to use for :zeek:see:`Known::cert_store`. + const cert_store_name = "zeek/known/certs" &redef; + + ## The expiry interval of new entries in :zeek:see:`Known::cert_store`. + ## This also changes the interval at which certs get logged. + option cert_store_expiry = 1day; + + ## The timeout interval to use for operations against + ## :zeek:see:`Known::cert_store`. + option cert_store_timeout = 15sec; + + ## The set of all known certificates to store for preventing duplicate + ## logging. It can also be used from other scripts to + ## inspect if a certificate has been seen in use. The string value + ## in the set is for storing the DER formatted certificate' SHA1 hash. + ## + ## In cluster operation, this set is uniformly distributed across + ## proxy nodes. + global certs: set[addr, string] &create_expire=1day &redef; + + ## Event that can be handled to access the loggable record as it is sent + ## on to the logging framework. + global log_known_certs: event(rec: CertsInfo); +} + +event zeek_init() + { + if ( ! Known::use_cert_store ) + return; + + Known::cert_store = Cluster::create_store(Known::cert_store_name); + } + +event Known::cert_found(info: CertsInfo, hash: string) + { + if ( ! Known::use_cert_store ) + return; + + local key = AddrCertHashPair($host = info$host, $hash = hash); + + when ( local r = Broker::put_unique(Known::cert_store$store, key, + T, Known::cert_store_expiry) ) + { + if ( r$status == Broker::SUCCESS ) + { + if ( r$result as bool ) + Log::write(Known::CERTS_LOG, info); + } + else + Reporter::error(fmt("%s: data store put_unique failure", + Known::cert_store_name)); + } + timeout Known::cert_store_timeout + { + # Can't really tell if master store ended up inserting a key. + Log::write(Known::CERTS_LOG, info); + } + } + +event known_cert_add(info: CertsInfo, hash: string) + { + if ( Known::use_cert_store ) + return; + + if ( [info$host, hash] in Known::certs ) + return; + + add Known::certs[info$host, hash]; + + @if ( ! 
Cluster::is_enabled() || + Cluster::local_node_type() == Cluster::PROXY ) + Log::write(Known::CERTS_LOG, info); + @endif + } + +event Known::cert_found(info: CertsInfo, hash: string) + { + if ( Known::use_cert_store ) + return; + + if ( [info$host, hash] in Known::certs ) + return; + + local key = cat(info$host, hash); + Cluster::publish_hrw(Cluster::proxy_pool, key, known_cert_add, info, hash); + event known_cert_add(info, hash); + } + +event Cluster::node_up(name: string, id: string) + { + if ( Known::use_cert_store ) + return; + + if ( Cluster::local_node_type() != Cluster::WORKER ) + return; + + # Drop local suppression cache on workers to force HRW key repartitioning. + Known::certs = table(); + } + +event Cluster::node_down(name: string, id: string) + { + if ( Known::use_cert_store ) + return; + + if ( Cluster::local_node_type() != Cluster::WORKER ) + return; + + # Drop local suppression cache on workers to force HRW key repartitioning. + Known::certs = table(); + } + +event ssl_established(c: connection) &priority=3 + { + if ( ! c$ssl?$cert_chain ) + return; + + if ( |c$ssl$cert_chain| < 1 ) + return; + + if ( ! c$ssl$cert_chain[0]?$x509 ) + return; + + local fuid = c$ssl$cert_chain_fuids[0]; + + if ( ! c$ssl$cert_chain[0]?$sha1 ) + { + Reporter::error(fmt("Certificate with fuid %s did not contain sha1 hash when checking for known certs. Aborting", + fuid)); + return; + } + + local host = c$id$resp_h; + + if ( ! addr_matches_host(host, cert_tracking) ) + return; + + local hash = c$ssl$cert_chain[0]$sha1; + local cert = c$ssl$cert_chain[0]$x509$certificate; + local info = CertsInfo($ts = network_time(), $host = host, + $port_num = c$id$resp_p, $subject = cert$subject, + $issuer_subject = cert$issuer, + $serial = cert$serial); + event Known::cert_found(info, hash); + } + +event zeek_init() &priority=5 + { + Log::create_stream(Known::CERTS_LOG, [$columns=CertsInfo, $ev=log_known_certs, $path="known_certs"]); + } diff --git a/scripts/policy/protocols/ssl/log-hostcerts-only.bro b/scripts/policy/protocols/ssl/log-hostcerts-only.bro deleted file mode 100644 index 258820664f..0000000000 --- a/scripts/policy/protocols/ssl/log-hostcerts-only.bro +++ /dev/null @@ -1,83 +0,0 @@ -##! When this script is loaded, only the host certificates (client and server) -##! will be logged to x509.log. Logging of all other certificates will be suppressed. - -@load base/protocols/ssl -@load base/files/x509 - -module X509; - -export { - redef record Info += { - ## Logging of certificate is suppressed if set to F - logcert: bool &default=T; - }; -} - -# We need both the Info and the fa_file record modified. -# The only instant when we have both, the connection and the -# file available without having to loop is in the file_over_new_connection -# event. -# When that event is raised, the x509 record in f$info (which is the only -# record the logging framework gets) is not yet available. So - we -# have to do this two times, sorry. -# Alternatively, we could place it info Files::Info first - but we would -# still have to copy it. 
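
In relation to the known-certs policy shown above, a minimal usage sketch of how a deployment might tune it from its own site script. This is illustrative only: it touches just the identifiers the script itself defines (Known::use_cert_store, Known::cert_tracking, Known::log_known_certs, Known::CertsInfo), and the print statement stands in for whatever a site would actually do with the record. Note that cert_tracking is declared as an option, so it can also be adjusted at runtime through the config framework instead of the parse-time redef used here.

@load protocols/ssl/known-certs

# Prefer the proxy-distributed set over the Broker data store.
redef Known::use_cert_store = F;

# Track certificates offered by all hosts, not only local ones.
redef Known::cert_tracking = ALL_HOSTS;

# React to each record as it is written to known_certs.log.
event Known::log_known_certs(rec: Known::CertsInfo)
	{
	if ( rec?$subject )
		print fmt("new certificate for %s: %s", rec$host, rec$subject);
	}
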
-redef record fa_file += { - logcert: bool &default=T; -}; - -function host_certs_only(rec: X509::Info): bool - { - return rec$logcert; - } - -event bro_init() &priority=2 - { - local f = Log::get_filter(X509::LOG, "default"); - Log::remove_filter(X509::LOG, "default"); # disable default logging - f$pred=host_certs_only; # and add our predicate - Log::add_filter(X509::LOG, f); - } - -event file_sniff(f: fa_file, meta: fa_metadata) &priority=4 - { - if ( |f$conns| != 1 ) - return; - - if ( ! f?$info || ! f$info?$mime_type ) - return; - - if ( ! ( f$info$mime_type == "application/x-x509-ca-cert" || f$info$mime_type == "application/x-x509-user-cert" - || f$info$mime_type == "application/pkix-cert" ) ) - return; - - local c: connection; - - for ( cid, c in f$conns ) - { - if ( ! c?$ssl ) - return; - } - - local chain: vector of string; - - if ( f$is_orig ) - chain = c$ssl$client_cert_chain_fuids; - else - chain = c$ssl$cert_chain_fuids; - - if ( |chain| == 0 ) - { - Reporter::warning(fmt("Certificate not in chain? (fuid %s)", f$id)); - return; - } - - # Check if this is the host certificate - if ( f$id != chain[0] ) - f$logcert=F; -} - -event x509_certificate(f: fa_file, cert_ref: opaque of x509, cert: X509::Certificate) &priority=2 - { - f$info$x509$logcert = f$logcert; # info record available, copy information. - } diff --git a/scripts/policy/protocols/ssl/log-hostcerts-only.zeek b/scripts/policy/protocols/ssl/log-hostcerts-only.zeek new file mode 100644 index 0000000000..3aefac088a --- /dev/null +++ b/scripts/policy/protocols/ssl/log-hostcerts-only.zeek @@ -0,0 +1,83 @@ +##! When this script is loaded, only the host certificates (client and server) +##! will be logged to x509.log. Logging of all other certificates will be suppressed. + +@load base/protocols/ssl +@load base/files/x509 + +module X509; + +export { + redef record Info += { + ## Logging of certificate is suppressed if set to F + logcert: bool &default=T; + }; +} + +# We need both the Info and the fa_file record modified. +# The only instant when we have both, the connection and the +# file available without having to loop is in the file_over_new_connection +# event. +# When that event is raised, the x509 record in f$info (which is the only +# record the logging framework gets) is not yet available. So - we +# have to do this two times, sorry. +# Alternatively, we could place it info Files::Info first - but we would +# still have to copy it. +redef record fa_file += { + logcert: bool &default=T; +}; + +function host_certs_only(rec: X509::Info): bool + { + return rec$logcert; + } + +event zeek_init() &priority=2 + { + local f = Log::get_filter(X509::LOG, "default"); + Log::remove_filter(X509::LOG, "default"); # disable default logging + f$pred=host_certs_only; # and add our predicate + Log::add_filter(X509::LOG, f); + } + +event file_sniff(f: fa_file, meta: fa_metadata) &priority=4 + { + if ( |f$conns| != 1 ) + return; + + if ( ! f?$info || ! f$info?$mime_type ) + return; + + if ( ! ( f$info$mime_type == "application/x-x509-ca-cert" || f$info$mime_type == "application/x-x509-user-cert" + || f$info$mime_type == "application/pkix-cert" ) ) + return; + + local c: connection; + + for ( cid, c in f$conns ) + { + if ( ! c?$ssl ) + return; + } + + local chain: vector of string; + + if ( f$is_orig ) + chain = c$ssl$client_cert_chain_fuids; + else + chain = c$ssl$cert_chain_fuids; + + if ( |chain| == 0 ) + { + Reporter::warning(fmt("Certificate not in chain? 
(fuid %s)", f$id)); + return; + } + + # Check if this is the host certificate + if ( f$id != chain[0] ) + f$logcert=F; +} + +event x509_certificate(f: fa_file, cert_ref: opaque of x509, cert: X509::Certificate) &priority=2 + { + f$info$x509$logcert = f$logcert; # info record available, copy information. + } diff --git a/scripts/policy/protocols/ssl/notary.bro b/scripts/policy/protocols/ssl/notary.zeek similarity index 100% rename from scripts/policy/protocols/ssl/notary.bro rename to scripts/policy/protocols/ssl/notary.zeek diff --git a/scripts/policy/protocols/ssl/validate-certs.bro b/scripts/policy/protocols/ssl/validate-certs.bro deleted file mode 100644 index bd76daeceb..0000000000 --- a/scripts/policy/protocols/ssl/validate-certs.bro +++ /dev/null @@ -1,197 +0,0 @@ -##! Perform full certificate chain validation for SSL certificates. -# -# Also caches all intermediate certificates encountered so far and use them -# for future validations. - -@load base/frameworks/cluster -@load base/frameworks/notice -@load base/protocols/ssl - -module SSL; - -export { - redef enum Notice::Type += { - ## This notice indicates that the result of validating the - ## certificate along with its full certificate chain was - ## invalid. - Invalid_Server_Cert - }; - - redef record Info += { - ## Result of certificate validation for this connection. - validation_status: string &log &optional; - ## Result of certificate validation for this connection, given - ## as OpenSSL validation code. - validation_code: int &optional; - ## Ordered chain of validated certificate, if validation succeeded. - valid_chain: vector of opaque of x509 &optional; - }; - - ## Result values for recently validated chains along with the - ## validation status are kept in this table to avoid constant - ## validation every time the same certificate chain is seen. - global recently_validated_certs: table[string] of X509::Result = table() - &read_expire=5mins &redef; - - ## Use intermediate CA certificate caching when trying to validate - ## certificates. When this is enabled, Bro keeps track of all valid - ## intermediate CA certificates that it has seen in the past. When - ## encountering a host certificate that cannot be validated because - ## of missing intermediate CA certificate, the cached list is used - ## to try to validate the cert. This is similar to how Firefox is - ## doing certificate validation. - ## - ## Disabling this will usually greatly increase the number of validation warnings - ## that you encounter. Only disable if you want to find misconfigured servers. - global ssl_cache_intermediate_ca: bool = T &redef; - - ## Store the valid chain in c$ssl$valid_chain if validation succeeds. - ## This has a potentially high memory impact, depending on the local environment - ## and is thus disabled by default. - global ssl_store_valid_chain: bool = F &redef; - - ## Event from a manager to workers when encountering a new, valid - ## intermediate. - global intermediate_add: event(key: string, value: vector of opaque of x509); - - ## Event from workers to the manager when a new intermediate chain - ## is to be added. 
- global new_intermediate: event(key: string, value: vector of opaque of x509); -} - -global intermediate_cache: table[string] of vector of opaque of x509; - -@if ( Cluster::is_enabled() ) -event bro_init() - { - Broker::auto_publish(Cluster::worker_topic, SSL::intermediate_add); - Broker::auto_publish(Cluster::manager_topic, SSL::new_intermediate); - } -@endif - -function add_to_cache(key: string, value: vector of opaque of x509) - { - intermediate_cache[key] = value; -@if ( Cluster::is_enabled() ) - event SSL::new_intermediate(key, value); -@endif - } - -@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) -event SSL::intermediate_add(key: string, value: vector of opaque of x509) - { - intermediate_cache[key] = value; - } -@endif - -@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) -event SSL::new_intermediate(key: string, value: vector of opaque of x509) - { - if ( key in intermediate_cache ) - return; - - intermediate_cache[key] = value; - event SSL::intermediate_add(key, value); - } -@endif - -function cache_validate(chain: vector of opaque of x509): X509::Result - { - local chain_hash: vector of string = vector(); - - for ( i in chain ) - chain_hash[i] = sha1_hash(x509_get_certificate_string(chain[i])); - - local chain_id = join_string_vec(chain_hash, "."); - - # If we tried this certificate recently, just return the cached result. - if ( chain_id in recently_validated_certs ) - return recently_validated_certs[chain_id]; - - local result = x509_verify(chain, root_certs); - if ( ! ssl_store_valid_chain && result?$chain_certs ) - recently_validated_certs[chain_id] = X509::Result($result=result$result, $result_string=result$result_string); - else - recently_validated_certs[chain_id] = result; - - # if we have a working chain where we did not store the intermediate certs - # in our cache yet - do so - if ( ssl_cache_intermediate_ca && - result$result_string == "ok" && - result?$chain_certs && - |result$chain_certs| > 2 ) - { - local result_chain = result$chain_certs; - local isnh = x509_subject_name_hash(result_chain[1], 4); # SHA256 - if ( isnh !in intermediate_cache ) - { - local cachechain: vector of opaque of x509; - for ( i in result_chain ) - { - if ( i >=1 && i<=|result_chain|-2 ) - cachechain[i-1] = result_chain[i]; - } - add_to_cache(isnh, cachechain); - } - } - - return result; - } - -hook ssl_finishing(c: connection) &priority=20 - { - # If there aren't any certs we can't very well do certificate validation. - if ( ! c$ssl?$cert_chain || |c$ssl$cert_chain| == 0 || - ! c$ssl$cert_chain[0]?$x509 ) - return; - - local intermediate_chain: vector of opaque of x509 = vector(); - local issuer_name_hash = x509_issuer_name_hash(c$ssl$cert_chain[0]$x509$handle, 4); # SHA256 - local hash = c$ssl$cert_chain[0]$sha1; - local result: X509::Result; - - # Look if we already have a working chain for the issuer of this cert. - # If yes, try this chain first instead of using the chain supplied from - # the server. 
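
For the certificate-validation policy above (the removed .bro file and the renamed .zeek version carry identical logic), a hedged sketch of a follow-on site script that consumes the validation results. It only reads fields this script fills in (validation_status, valid_chain) and orders itself after the &priority=20 ssl_finishing handler; the print is a placeholder for site-specific handling.

@load protocols/ssl/validate-certs

# Ask validate-certs to keep the ordered, validated chain in c$ssl$valid_chain
# (it is off by default because of the memory cost noted above).
redef SSL::ssl_store_valid_chain = T;

# Run after validate-certs' &priority=20 handler has filled in the results.
hook SSL::ssl_finishing(c: connection) &priority=10
	{
	if ( ! c?$ssl || ! c$ssl?$validation_status )
		return;

	if ( c$ssl$validation_status == "ok" && c$ssl?$valid_chain )
		print fmt("%s presented a chain that validated with %d certificates",
		          c$id$resp_h, |c$ssl$valid_chain|);
	}
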
- if ( ssl_cache_intermediate_ca && issuer_name_hash in intermediate_cache ) - { - intermediate_chain[0] = c$ssl$cert_chain[0]$x509$handle; - for ( i in intermediate_cache[issuer_name_hash] ) - intermediate_chain[i+1] = intermediate_cache[issuer_name_hash][i]; - - result = cache_validate(intermediate_chain); - if ( result$result_string == "ok" ) - { - c$ssl$validation_status = result$result_string; - c$ssl$validation_code = result$result; - if ( result?$chain_certs ) - c$ssl$valid_chain = result$chain_certs; - return; - } - } - - # Validation with known chains failed or there was no fitting intermediate - # in our store. - # Fall back to validating the certificate with the server-supplied chain. - local chain: vector of opaque of x509 = vector(); - for ( i in c$ssl$cert_chain ) - { - if ( c$ssl$cert_chain[i]?$x509 ) - chain[i] = c$ssl$cert_chain[i]$x509$handle; - } - - result = cache_validate(chain); - c$ssl$validation_status = result$result_string; - c$ssl$validation_code = result$result; - if ( result?$chain_certs ) - c$ssl$valid_chain = result$chain_certs; - - if ( result$result_string != "ok" ) - { - local message = fmt("SSL certificate validation failed with (%s)", c$ssl$validation_status); - NOTICE([$note=Invalid_Server_Cert, $msg=message, - $sub=c$ssl$cert_chain[0]$x509$certificate$subject, $conn=c, - $fuid=c$ssl$cert_chain[0]$fuid, - $identifier=cat(c$id$resp_h,c$id$resp_p,hash,c$ssl$validation_code)]); - } - } diff --git a/scripts/policy/protocols/ssl/validate-certs.zeek b/scripts/policy/protocols/ssl/validate-certs.zeek new file mode 100644 index 0000000000..4d23c8e02d --- /dev/null +++ b/scripts/policy/protocols/ssl/validate-certs.zeek @@ -0,0 +1,197 @@ +##! Perform full certificate chain validation for SSL certificates. +# +# Also caches all intermediate certificates encountered so far and use them +# for future validations. + +@load base/frameworks/cluster +@load base/frameworks/notice +@load base/protocols/ssl + +module SSL; + +export { + redef enum Notice::Type += { + ## This notice indicates that the result of validating the + ## certificate along with its full certificate chain was + ## invalid. + Invalid_Server_Cert + }; + + redef record Info += { + ## Result of certificate validation for this connection. + validation_status: string &log &optional; + ## Result of certificate validation for this connection, given + ## as OpenSSL validation code. + validation_code: int &optional; + ## Ordered chain of validated certificate, if validation succeeded. + valid_chain: vector of opaque of x509 &optional; + }; + + ## Result values for recently validated chains along with the + ## validation status are kept in this table to avoid constant + ## validation every time the same certificate chain is seen. + global recently_validated_certs: table[string] of X509::Result = table() + &read_expire=5mins &redef; + + ## Use intermediate CA certificate caching when trying to validate + ## certificates. When this is enabled, Zeek keeps track of all valid + ## intermediate CA certificates that it has seen in the past. When + ## encountering a host certificate that cannot be validated because + ## of missing intermediate CA certificate, the cached list is used + ## to try to validate the cert. This is similar to how Firefox is + ## doing certificate validation. + ## + ## Disabling this will usually greatly increase the number of validation warnings + ## that you encounter. Only disable if you want to find misconfigured servers. 
+ global ssl_cache_intermediate_ca: bool = T &redef; + + ## Store the valid chain in c$ssl$valid_chain if validation succeeds. + ## This has a potentially high memory impact, depending on the local environment + ## and is thus disabled by default. + global ssl_store_valid_chain: bool = F &redef; + + ## Event from a manager to workers when encountering a new, valid + ## intermediate. + global intermediate_add: event(key: string, value: vector of opaque of x509); + + ## Event from workers to the manager when a new intermediate chain + ## is to be added. + global new_intermediate: event(key: string, value: vector of opaque of x509); +} + +global intermediate_cache: table[string] of vector of opaque of x509; + +@if ( Cluster::is_enabled() ) +event zeek_init() + { + Broker::auto_publish(Cluster::worker_topic, SSL::intermediate_add); + Broker::auto_publish(Cluster::manager_topic, SSL::new_intermediate); + } +@endif + +function add_to_cache(key: string, value: vector of opaque of x509) + { + intermediate_cache[key] = value; +@if ( Cluster::is_enabled() ) + event SSL::new_intermediate(key, value); +@endif + } + +@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) +event SSL::intermediate_add(key: string, value: vector of opaque of x509) + { + intermediate_cache[key] = value; + } +@endif + +@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) +event SSL::new_intermediate(key: string, value: vector of opaque of x509) + { + if ( key in intermediate_cache ) + return; + + intermediate_cache[key] = value; + event SSL::intermediate_add(key, value); + } +@endif + +function cache_validate(chain: vector of opaque of x509): X509::Result + { + local chain_hash: vector of string = vector(); + + for ( i in chain ) + chain_hash[i] = sha1_hash(x509_get_certificate_string(chain[i])); + + local chain_id = join_string_vec(chain_hash, "."); + + # If we tried this certificate recently, just return the cached result. + if ( chain_id in recently_validated_certs ) + return recently_validated_certs[chain_id]; + + local result = x509_verify(chain, root_certs); + if ( ! ssl_store_valid_chain && result?$chain_certs ) + recently_validated_certs[chain_id] = X509::Result($result=result$result, $result_string=result$result_string); + else + recently_validated_certs[chain_id] = result; + + # if we have a working chain where we did not store the intermediate certs + # in our cache yet - do so + if ( ssl_cache_intermediate_ca && + result$result_string == "ok" && + result?$chain_certs && + |result$chain_certs| > 2 ) + { + local result_chain = result$chain_certs; + local isnh = x509_subject_name_hash(result_chain[1], 4); # SHA256 + if ( isnh !in intermediate_cache ) + { + local cachechain: vector of opaque of x509; + for ( i in result_chain ) + { + if ( i >=1 && i<=|result_chain|-2 ) + cachechain[i-1] = result_chain[i]; + } + add_to_cache(isnh, cachechain); + } + } + + return result; + } + +hook ssl_finishing(c: connection) &priority=20 + { + # If there aren't any certs we can't very well do certificate validation. + if ( ! c$ssl?$cert_chain || |c$ssl$cert_chain| == 0 || + ! c$ssl$cert_chain[0]?$x509 ) + return; + + local intermediate_chain: vector of opaque of x509 = vector(); + local issuer_name_hash = x509_issuer_name_hash(c$ssl$cert_chain[0]$x509$handle, 4); # SHA256 + local hash = c$ssl$cert_chain[0]$sha1; + local result: X509::Result; + + # Look if we already have a working chain for the issuer of this cert. 
+ # If yes, try this chain first instead of using the chain supplied from + # the server. + if ( ssl_cache_intermediate_ca && issuer_name_hash in intermediate_cache ) + { + intermediate_chain[0] = c$ssl$cert_chain[0]$x509$handle; + for ( i in intermediate_cache[issuer_name_hash] ) + intermediate_chain[i+1] = intermediate_cache[issuer_name_hash][i]; + + result = cache_validate(intermediate_chain); + if ( result$result_string == "ok" ) + { + c$ssl$validation_status = result$result_string; + c$ssl$validation_code = result$result; + if ( result?$chain_certs ) + c$ssl$valid_chain = result$chain_certs; + return; + } + } + + # Validation with known chains failed or there was no fitting intermediate + # in our store. + # Fall back to validating the certificate with the server-supplied chain. + local chain: vector of opaque of x509 = vector(); + for ( i in c$ssl$cert_chain ) + { + if ( c$ssl$cert_chain[i]?$x509 ) + chain[i] = c$ssl$cert_chain[i]$x509$handle; + } + + result = cache_validate(chain); + c$ssl$validation_status = result$result_string; + c$ssl$validation_code = result$result; + if ( result?$chain_certs ) + c$ssl$valid_chain = result$chain_certs; + + if ( result$result_string != "ok" ) + { + local message = fmt("SSL certificate validation failed with (%s)", c$ssl$validation_status); + NOTICE([$note=Invalid_Server_Cert, $msg=message, + $sub=c$ssl$cert_chain[0]$x509$certificate$subject, $conn=c, + $fuid=c$ssl$cert_chain[0]$fuid, + $identifier=cat(c$id$resp_h,c$id$resp_p,hash,c$ssl$validation_code)]); + } + } diff --git a/scripts/policy/protocols/ssl/validate-ocsp.bro b/scripts/policy/protocols/ssl/validate-ocsp.zeek similarity index 100% rename from scripts/policy/protocols/ssl/validate-ocsp.bro rename to scripts/policy/protocols/ssl/validate-ocsp.zeek diff --git a/scripts/policy/protocols/ssl/validate-sct.bro b/scripts/policy/protocols/ssl/validate-sct.bro deleted file mode 100644 index 4d79bfd7ad..0000000000 --- a/scripts/policy/protocols/ssl/validate-sct.bro +++ /dev/null @@ -1,212 +0,0 @@ -##! Perform validation of Signed Certificate Timestamps, as used -##! for Certificate Transparency. See RFC6962 for more details. - -@load base/protocols/ssl -@load protocols/ssl/validate-certs - -# We need to know issuer certificates to be able to determine the IssuerKeyHash, -# which is required for validating certificate extensions. -redef SSL::ssl_store_valid_chain = T; - -module SSL; - -export { - - ## List of the different sources for Signed Certificate Timestamp - type SctSource: enum { - ## Signed Certificate Timestamp was encountered in the extension of - ## an X.509 certificate. - SCT_X509_EXT, - ## Signed Certificate Timestamp was encountered in an TLS session - ## extension. - SCT_TLS_EXT, - ## Signed Certificate Timestamp was encountered in the extension of - ## an stapled OCSP reply. - SCT_OCSP_EXT - }; - - ## This record is used to store information about the SCTs that are - ## encountered in a SSL connection. - type SctInfo: record { - ## The version of the encountered SCT (should always be 0 for v1). - version: count; - ## The ID of the log issuing this SCT. - logid: string; - ## The timestamp at which this SCT was issued measured since the - ## epoch (January 1, 1970, 00:00), ignoring leap seconds, in - ## milliseconds. Not converted to a Bro timestamp because we need - ## the exact value for validation. - timestamp: count; - ## The signature algorithm used for this sct. - sig_alg: count; - ## The hash algorithm used for this sct. - hash_alg: count; - ## The signature of this SCT. 
- signature: string; - ## Source of this SCT. - source: SctSource; - ## Validation result of this SCT. - valid: bool &optional; - }; - - redef record Info += { - ## Number of valid SCTs that were encountered in the connection. - valid_scts: count &optional; - ## Number of SCTs that could not be validated that were encountered in the connection. - invalid_scts: count &optional; - ## Number of different Logs for which valid SCTs were encountered in the connection. - valid_ct_logs: count &log &optional; - ## Number of different Log operators of which valid SCTs were encountered in the connection. - valid_ct_operators: count &log &optional; - ## List of operators for which valid SCTs were encountered in the connection. - valid_ct_operators_list: set[string] &optional; - ## Information about all SCTs that were encountered in the connection. - ct_proofs: vector of SctInfo &default=vector(); - }; -} - -# Used to cache validations for 5 minutes to lessen computational load. -global recently_validated_scts: table[string] of bool = table() - &read_expire=5mins &redef; - -event bro_init() - { - Files::register_for_mime_type(Files::ANALYZER_OCSP_REPLY, "application/ocsp-response"); - } - -event ssl_extension_signed_certificate_timestamp(c: connection, is_orig: bool, version: count, logid: string, timestamp: count, signature_and_hashalgorithm: SSL::SignatureAndHashAlgorithm, signature: string) &priority=5 - { - c$ssl$ct_proofs += SctInfo($version=version, $logid=logid, $timestamp=timestamp, $sig_alg=signature_and_hashalgorithm$SignatureAlgorithm, $hash_alg=signature_and_hashalgorithm$HashAlgorithm, $signature=signature, $source=SCT_TLS_EXT); - } - -event x509_ocsp_ext_signed_certificate_timestamp(f: fa_file, version: count, logid: string, timestamp: count, hash_algorithm: count, signature_algorithm: count, signature: string) &priority=5 - { - local src: SctSource; - if ( ! f?$info ) - return; - - if ( f$source == "SSL" && f$info$mime_type == "application/ocsp-response" ) - src = SCT_OCSP_EXT; - else if ( f$source == "SSL" && f$info$mime_type == "application/x-x509-user-cert" ) - src = SCT_X509_EXT; - else - return; - - if ( |f$conns| != 1 ) - return; - - local c: connection; - - for ( cid, c in f$conns ) - { - if ( ! c?$ssl ) - return; - } - - c$ssl$ct_proofs += SctInfo($version=version, $logid=logid, $timestamp=timestamp, $sig_alg=signature_algorithm, $hash_alg=hash_algorithm, $signature=signature, $source=src); - } - -# Priority = 19 will be handled after validation is done -hook ssl_finishing(c: connection) &priority=19 - { - if ( ! c$ssl?$cert_chain || |c$ssl$cert_chain| == 0 || ! c$ssl$cert_chain[0]?$x509 ) - return; - - local cert = c$ssl$cert_chain[0]$x509$handle; - local certhash = c$ssl$cert_chain[0]$sha1; - local issuer_name_hash = x509_issuer_name_hash(cert, 4); - local valid_proofs = 0; - local invalid_proofs = 0; - c$ssl$valid_ct_operators_list = string_set(); - local valid_logs = string_set(); - local issuer_key_hash = ""; - - for ( i in c$ssl$ct_proofs ) - { - local proof = c$ssl$ct_proofs[i]; - if ( proof$logid !in SSL::ct_logs ) - { - # Well, if we don't know the log, there is nothing to do here... 
- proof$valid = F; - next; - } - local log = SSL::ct_logs[proof$logid]; - - local valid = F; - local found_cache = F; - - local validatestring = cat(certhash,proof$logid,proof$timestamp,proof$hash_alg,proof$signature,proof$source); - if ( proof$source == SCT_X509_EXT && c$ssl?$validation_code ) - validatestring = cat(validatestring, c$ssl$validation_code); - local validate_hash = sha1_hash(validatestring); - if ( validate_hash in recently_validated_scts ) - { - valid = recently_validated_scts[validate_hash]; - found_cache = T; - } - - if ( found_cache == F && ( proof$source == SCT_TLS_EXT || proof$source == SCT_OCSP_EXT ) ) - { - valid = sct_verify(cert, proof$logid, log$key, proof$signature, proof$timestamp, proof$hash_alg); - } - else if ( found_cache == F ) - { - # X.509 proof. Here things get awkward because we need information about - # the issuer cert... and we need to try a few times, because we have to see if we got - # the right issuer cert. - # - # First - Let's try if a previous round already established the correct issuer key hash. - if ( issuer_key_hash != "" ) - { - valid = sct_verify(cert, proof$logid, log$key, proof$signature, proof$timestamp, proof$hash_alg, issuer_key_hash); - } - - # Second - let's see if we might already know the issuer cert through verification. - if ( ! valid && issuer_name_hash in intermediate_cache ) - { - issuer_key_hash = x509_spki_hash(intermediate_cache[issuer_name_hash][0], 4); - valid = sct_verify(cert, proof$logid, log$key, proof$signature, proof$timestamp, proof$hash_alg, issuer_key_hash); - } - if ( ! valid && c$ssl?$valid_chain && |c$ssl$valid_chain| >= 2 ) - { - issuer_key_hash = x509_spki_hash(c$ssl$valid_chain[1], 4); - valid = sct_verify(cert, proof$logid, log$key, proof$signature, proof$timestamp, proof$hash_alg, issuer_key_hash); - } - - # ok, if it still did not work - let's just try with all the certs that were sent - # in the connection. Perhaps it will work with one of them. - if ( !valid ) - for ( i in c$ssl$cert_chain ) - { - if ( i == 0 ) # end-host-cert - next; - if ( ! c$ssl$cert_chain[i]?$x509 || ! c$ssl$cert_chain[i]$x509?$handle ) - next; - - issuer_key_hash = x509_spki_hash(c$ssl$cert_chain[i]$x509$handle, 4); - valid = sct_verify(cert, proof$logid, log$key, proof$signature, proof$timestamp, proof$hash_alg, issuer_key_hash); - if ( valid ) - break; - } - } - - if ( ! found_cache ) - recently_validated_scts[validate_hash] = valid; - - proof$valid = valid; - - if ( valid ) - { - ++valid_proofs; - add c$ssl$valid_ct_operators_list[log$operator]; - add valid_logs[proof$logid]; - } - else - ++invalid_proofs; - } - - c$ssl$valid_scts = valid_proofs; - c$ssl$invalid_scts = invalid_proofs; - c$ssl$valid_ct_operators = |c$ssl$valid_ct_operators_list|; - c$ssl$valid_ct_logs = |valid_logs|; - } diff --git a/scripts/policy/protocols/ssl/validate-sct.zeek b/scripts/policy/protocols/ssl/validate-sct.zeek new file mode 100644 index 0000000000..98c2f0c647 --- /dev/null +++ b/scripts/policy/protocols/ssl/validate-sct.zeek @@ -0,0 +1,212 @@ +##! Perform validation of Signed Certificate Timestamps, as used +##! for Certificate Transparency. See RFC6962 for more details. + +@load base/protocols/ssl +@load protocols/ssl/validate-certs + +# We need to know issuer certificates to be able to determine the IssuerKeyHash, +# which is required for validating certificate extensions. 
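
As a companion to the SCT validation script introduced here, a hedged sketch of a policy that consumes the counters it produces: raise a notice when a connection presented SCTs but none of them validated. The module name LocalCT and the notice type are hypothetical; the handler deliberately uses a hook priority below the 19 used by validate-sct so it runs after valid_scts and invalid_scts have been filled in.

@load protocols/ssl/validate-sct
@load base/frameworks/notice

module LocalCT;  # hypothetical module name, for illustration only

export {
	redef enum Notice::Type += {
		## All Signed Certificate Timestamps on a connection failed validation.
		No_Valid_SCTs
	};
}

hook SSL::ssl_finishing(c: connection) &priority=10
	{
	if ( ! c?$ssl || ! c$ssl?$valid_scts || ! c$ssl?$invalid_scts )
		return;

	if ( c$ssl$valid_scts == 0 && c$ssl$invalid_scts > 0 )
		NOTICE([$note=No_Valid_SCTs,
		        $msg=fmt("%d SCTs seen, none validated", c$ssl$invalid_scts),
		        $conn=c,
		        $identifier=cat(c$id$resp_h, c$id$resp_p)]);
	}
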
+redef SSL::ssl_store_valid_chain = T; + +module SSL; + +export { + + ## List of the different sources for Signed Certificate Timestamp + type SctSource: enum { + ## Signed Certificate Timestamp was encountered in the extension of + ## an X.509 certificate. + SCT_X509_EXT, + ## Signed Certificate Timestamp was encountered in an TLS session + ## extension. + SCT_TLS_EXT, + ## Signed Certificate Timestamp was encountered in the extension of + ## an stapled OCSP reply. + SCT_OCSP_EXT + }; + + ## This record is used to store information about the SCTs that are + ## encountered in a SSL connection. + type SctInfo: record { + ## The version of the encountered SCT (should always be 0 for v1). + version: count; + ## The ID of the log issuing this SCT. + logid: string; + ## The timestamp at which this SCT was issued measured since the + ## epoch (January 1, 1970, 00:00), ignoring leap seconds, in + ## milliseconds. Not converted to a Zeek timestamp because we need + ## the exact value for validation. + timestamp: count; + ## The signature algorithm used for this sct. + sig_alg: count; + ## The hash algorithm used for this sct. + hash_alg: count; + ## The signature of this SCT. + signature: string; + ## Source of this SCT. + source: SctSource; + ## Validation result of this SCT. + valid: bool &optional; + }; + + redef record Info += { + ## Number of valid SCTs that were encountered in the connection. + valid_scts: count &optional; + ## Number of SCTs that could not be validated that were encountered in the connection. + invalid_scts: count &optional; + ## Number of different Logs for which valid SCTs were encountered in the connection. + valid_ct_logs: count &log &optional; + ## Number of different Log operators of which valid SCTs were encountered in the connection. + valid_ct_operators: count &log &optional; + ## List of operators for which valid SCTs were encountered in the connection. + valid_ct_operators_list: set[string] &optional; + ## Information about all SCTs that were encountered in the connection. + ct_proofs: vector of SctInfo &default=vector(); + }; +} + +# Used to cache validations for 5 minutes to lessen computational load. +global recently_validated_scts: table[string] of bool = table() + &read_expire=5mins &redef; + +event zeek_init() + { + Files::register_for_mime_type(Files::ANALYZER_OCSP_REPLY, "application/ocsp-response"); + } + +event ssl_extension_signed_certificate_timestamp(c: connection, is_orig: bool, version: count, logid: string, timestamp: count, signature_and_hashalgorithm: SSL::SignatureAndHashAlgorithm, signature: string) &priority=5 + { + c$ssl$ct_proofs += SctInfo($version=version, $logid=logid, $timestamp=timestamp, $sig_alg=signature_and_hashalgorithm$SignatureAlgorithm, $hash_alg=signature_and_hashalgorithm$HashAlgorithm, $signature=signature, $source=SCT_TLS_EXT); + } + +event x509_ocsp_ext_signed_certificate_timestamp(f: fa_file, version: count, logid: string, timestamp: count, hash_algorithm: count, signature_algorithm: count, signature: string) &priority=5 + { + local src: SctSource; + if ( ! f?$info ) + return; + + if ( f$source == "SSL" && f$info$mime_type == "application/ocsp-response" ) + src = SCT_OCSP_EXT; + else if ( f$source == "SSL" && f$info$mime_type == "application/x-x509-user-cert" ) + src = SCT_X509_EXT; + else + return; + + if ( |f$conns| != 1 ) + return; + + local c: connection; + + for ( cid, c in f$conns ) + { + if ( ! 
c?$ssl ) + return; + } + + c$ssl$ct_proofs += SctInfo($version=version, $logid=logid, $timestamp=timestamp, $sig_alg=signature_algorithm, $hash_alg=hash_algorithm, $signature=signature, $source=src); + } + +# Priority = 19 will be handled after validation is done +hook ssl_finishing(c: connection) &priority=19 + { + if ( ! c$ssl?$cert_chain || |c$ssl$cert_chain| == 0 || ! c$ssl$cert_chain[0]?$x509 ) + return; + + local cert = c$ssl$cert_chain[0]$x509$handle; + local certhash = c$ssl$cert_chain[0]$sha1; + local issuer_name_hash = x509_issuer_name_hash(cert, 4); + local valid_proofs = 0; + local invalid_proofs = 0; + c$ssl$valid_ct_operators_list = string_set(); + local valid_logs = string_set(); + local issuer_key_hash = ""; + + for ( i in c$ssl$ct_proofs ) + { + local proof = c$ssl$ct_proofs[i]; + if ( proof$logid !in SSL::ct_logs ) + { + # Well, if we don't know the log, there is nothing to do here... + proof$valid = F; + next; + } + local log = SSL::ct_logs[proof$logid]; + + local valid = F; + local found_cache = F; + + local validatestring = cat(certhash,proof$logid,proof$timestamp,proof$hash_alg,proof$signature,proof$source); + if ( proof$source == SCT_X509_EXT && c$ssl?$validation_code ) + validatestring = cat(validatestring, c$ssl$validation_code); + local validate_hash = sha1_hash(validatestring); + if ( validate_hash in recently_validated_scts ) + { + valid = recently_validated_scts[validate_hash]; + found_cache = T; + } + + if ( found_cache == F && ( proof$source == SCT_TLS_EXT || proof$source == SCT_OCSP_EXT ) ) + { + valid = sct_verify(cert, proof$logid, log$key, proof$signature, proof$timestamp, proof$hash_alg); + } + else if ( found_cache == F ) + { + # X.509 proof. Here things get awkward because we need information about + # the issuer cert... and we need to try a few times, because we have to see if we got + # the right issuer cert. + # + # First - Let's try if a previous round already established the correct issuer key hash. + if ( issuer_key_hash != "" ) + { + valid = sct_verify(cert, proof$logid, log$key, proof$signature, proof$timestamp, proof$hash_alg, issuer_key_hash); + } + + # Second - let's see if we might already know the issuer cert through verification. + if ( ! valid && issuer_name_hash in intermediate_cache ) + { + issuer_key_hash = x509_spki_hash(intermediate_cache[issuer_name_hash][0], 4); + valid = sct_verify(cert, proof$logid, log$key, proof$signature, proof$timestamp, proof$hash_alg, issuer_key_hash); + } + if ( ! valid && c$ssl?$valid_chain && |c$ssl$valid_chain| >= 2 ) + { + issuer_key_hash = x509_spki_hash(c$ssl$valid_chain[1], 4); + valid = sct_verify(cert, proof$logid, log$key, proof$signature, proof$timestamp, proof$hash_alg, issuer_key_hash); + } + + # ok, if it still did not work - let's just try with all the certs that were sent + # in the connection. Perhaps it will work with one of them. + if ( !valid ) + for ( i in c$ssl$cert_chain ) + { + if ( i == 0 ) # end-host-cert + next; + if ( ! c$ssl$cert_chain[i]?$x509 || ! c$ssl$cert_chain[i]$x509?$handle ) + next; + + issuer_key_hash = x509_spki_hash(c$ssl$cert_chain[i]$x509$handle, 4); + valid = sct_verify(cert, proof$logid, log$key, proof$signature, proof$timestamp, proof$hash_alg, issuer_key_hash); + if ( valid ) + break; + } + } + + if ( ! 
found_cache ) + recently_validated_scts[validate_hash] = valid; + + proof$valid = valid; + + if ( valid ) + { + ++valid_proofs; + add c$ssl$valid_ct_operators_list[log$operator]; + add valid_logs[proof$logid]; + } + else + ++invalid_proofs; + } + + c$ssl$valid_scts = valid_proofs; + c$ssl$invalid_scts = invalid_proofs; + c$ssl$valid_ct_operators = |c$ssl$valid_ct_operators_list|; + c$ssl$valid_ct_logs = |valid_logs|; + } diff --git a/scripts/policy/protocols/ssl/weak-keys.bro b/scripts/policy/protocols/ssl/weak-keys.zeek similarity index 100% rename from scripts/policy/protocols/ssl/weak-keys.bro rename to scripts/policy/protocols/ssl/weak-keys.zeek diff --git a/scripts/policy/tuning/__load__.bro b/scripts/policy/tuning/__load__.zeek similarity index 100% rename from scripts/policy/tuning/__load__.bro rename to scripts/policy/tuning/__load__.zeek diff --git a/scripts/policy/tuning/defaults/__load__.bro b/scripts/policy/tuning/defaults/__load__.bro deleted file mode 100644 index fd52f92401..0000000000 --- a/scripts/policy/tuning/defaults/__load__.bro +++ /dev/null @@ -1,3 +0,0 @@ -@load ./packet-fragments -@load ./warnings -@load ./extracted_file_limits.bro diff --git a/scripts/policy/tuning/defaults/__load__.zeek b/scripts/policy/tuning/defaults/__load__.zeek new file mode 100644 index 0000000000..2b574a6845 --- /dev/null +++ b/scripts/policy/tuning/defaults/__load__.zeek @@ -0,0 +1,3 @@ +@load ./packet-fragments +@load ./warnings +@load ./extracted_file_limits diff --git a/scripts/policy/tuning/defaults/extracted_file_limits.bro b/scripts/policy/tuning/defaults/extracted_file_limits.zeek similarity index 100% rename from scripts/policy/tuning/defaults/extracted_file_limits.bro rename to scripts/policy/tuning/defaults/extracted_file_limits.zeek diff --git a/scripts/policy/tuning/defaults/packet-fragments.bro b/scripts/policy/tuning/defaults/packet-fragments.zeek similarity index 100% rename from scripts/policy/tuning/defaults/packet-fragments.bro rename to scripts/policy/tuning/defaults/packet-fragments.zeek diff --git a/scripts/policy/tuning/defaults/warnings.bro b/scripts/policy/tuning/defaults/warnings.bro deleted file mode 100644 index cedc3d62ad..0000000000 --- a/scripts/policy/tuning/defaults/warnings.bro +++ /dev/null @@ -1,11 +0,0 @@ -##! This file is meant to print messages on stdout for settings that would be -##! good to set in most cases or other things that could be done to achieve -##! better detection. - -@load base/utils/site - -event bro_init() &priority=-10 - { - if ( |Site::local_nets| == 0 ) - print "WARNING: No Site::local_nets have been defined. It's usually a good idea to define your local networks."; - } diff --git a/scripts/policy/tuning/defaults/warnings.zeek b/scripts/policy/tuning/defaults/warnings.zeek new file mode 100644 index 0000000000..6c31e82d4e --- /dev/null +++ b/scripts/policy/tuning/defaults/warnings.zeek @@ -0,0 +1,11 @@ +##! This file is meant to print messages on stdout for settings that would be +##! good to set in most cases or other things that could be done to achieve +##! better detection. + +@load base/utils/site + +event zeek_init() &priority=-10 + { + if ( |Site::local_nets| == 0 ) + print "WARNING: No Site::local_nets have been defined. 
It's usually a good idea to define your local networks."; + } diff --git a/scripts/policy/tuning/json-logs.bro b/scripts/policy/tuning/json-logs.zeek similarity index 100% rename from scripts/policy/tuning/json-logs.bro rename to scripts/policy/tuning/json-logs.zeek diff --git a/scripts/policy/tuning/track-all-assets.bro b/scripts/policy/tuning/track-all-assets.zeek similarity index 100% rename from scripts/policy/tuning/track-all-assets.bro rename to scripts/policy/tuning/track-all-assets.zeek diff --git a/scripts/site/local.bro b/scripts/site/local.zeek similarity index 100% rename from scripts/site/local.bro rename to scripts/site/local.zeek diff --git a/scripts/test-all-policy.bro b/scripts/test-all-policy.bro deleted file mode 100644 index be2efbbc19..0000000000 --- a/scripts/test-all-policy.bro +++ /dev/null @@ -1,113 +0,0 @@ -# This file loads ALL policy scripts that are part of the Bro distribution. -# -# This is rarely makes sense, and is for testing only. -# -# Note that we have a unit test that makes sure that all policy files shipped are -# actually loaded here. If we have files that are part of the distribution yet -# can't be loaded here, these must still be listed here with their load command -# commented out. - -# The base/ scripts are all loaded by default and not included here. - -# @load frameworks/control/controllee.bro -# @load frameworks/control/controller.bro -@load frameworks/dpd/detect-protocols.bro -@load frameworks/dpd/packet-segment-logging.bro -@load frameworks/intel/do_notice.bro -@load frameworks/intel/do_expire.bro -@load frameworks/intel/whitelist.bro -@load frameworks/intel/removal.bro -@load frameworks/intel/seen/__load__.bro -@load frameworks/intel/seen/conn-established.bro -@load frameworks/intel/seen/dns.bro -@load frameworks/intel/seen/file-hashes.bro -@load frameworks/intel/seen/file-names.bro -@load frameworks/intel/seen/http-headers.bro -@load frameworks/intel/seen/http-url.bro -@load frameworks/intel/seen/pubkey-hashes.bro -@load frameworks/intel/seen/smb-filenames.bro -@load frameworks/intel/seen/smtp-url-extraction.bro -@load frameworks/intel/seen/smtp.bro -@load frameworks/intel/seen/ssl.bro -@load frameworks/intel/seen/where-locations.bro -@load frameworks/intel/seen/x509.bro -@load frameworks/files/detect-MHR.bro -@load frameworks/files/entropy-test-all-files.bro -#@load frameworks/files/extract-all-files.bro -@load frameworks/files/hash-all-files.bro -@load frameworks/notice/__load__.bro -@load frameworks/notice/extend-email/hostnames.bro -@load files/x509/log-ocsp.bro -@load frameworks/packet-filter/shunt.bro -@load frameworks/software/version-changes.bro -@load frameworks/software/vulnerable.bro -@load frameworks/software/windows-version-detection.bro -@load integration/barnyard2/__load__.bro -@load integration/barnyard2/main.bro -@load integration/barnyard2/types.bro -@load integration/collective-intel/__load__.bro -@load integration/collective-intel/main.bro -@load misc/capture-loss.bro -@load misc/detect-traceroute/__load__.bro -@load misc/detect-traceroute/main.bro -# @load misc/dump-events.bro -@load misc/load-balancing.bro -@load misc/loaded-scripts.bro -@load misc/profiling.bro -@load misc/scan.bro -@load misc/stats.bro -@load misc/weird-stats.bro -@load misc/trim-trace-file.bro -@load protocols/conn/known-hosts.bro -@load protocols/conn/known-services.bro -@load protocols/conn/mac-logging.bro -@load protocols/conn/vlan-logging.bro -@load protocols/conn/weirds.bro -#@load protocols/dhcp/deprecated_events.bro -@load 
protocols/dhcp/msg-orig.bro -@load protocols/dhcp/software.bro -@load protocols/dhcp/sub-opts.bro -@load protocols/dns/auth-addl.bro -@load protocols/dns/detect-external-names.bro -@load protocols/ftp/detect-bruteforcing.bro -@load protocols/ftp/detect.bro -@load protocols/ftp/software.bro -@load protocols/http/detect-sqli.bro -@load protocols/http/detect-webapps.bro -@load protocols/http/header-names.bro -@load protocols/http/software-browser-plugins.bro -@load protocols/http/software.bro -@load protocols/http/var-extraction-cookies.bro -@load protocols/http/var-extraction-uri.bro -@load protocols/krb/ticket-logging.bro -@load protocols/modbus/known-masters-slaves.bro -@load protocols/modbus/track-memmap.bro -@load protocols/mysql/software.bro -@load protocols/rdp/indicate_ssl.bro -#@load protocols/smb/__load__.bro -@load protocols/smb/log-cmds.bro -@load protocols/smtp/blocklists.bro -@load protocols/smtp/detect-suspicious-orig.bro -@load protocols/smtp/entities-excerpt.bro -@load protocols/smtp/software.bro -@load protocols/ssh/detect-bruteforcing.bro -@load protocols/ssh/geo-data.bro -@load protocols/ssh/interesting-hostnames.bro -@load protocols/ssh/software.bro -@load protocols/ssl/expiring-certs.bro -@load protocols/ssl/extract-certs-pem.bro -@load protocols/ssl/heartbleed.bro -@load protocols/ssl/known-certs.bro -@load protocols/ssl/log-hostcerts-only.bro -#@load protocols/ssl/notary.bro -@load protocols/ssl/validate-certs.bro -@load protocols/ssl/validate-ocsp.bro -@load protocols/ssl/validate-sct.bro -@load protocols/ssl/weak-keys.bro -@load tuning/__load__.bro -@load tuning/defaults/__load__.bro -@load tuning/defaults/extracted_file_limits.bro -@load tuning/defaults/packet-fragments.bro -@load tuning/defaults/warnings.bro -@load tuning/json-logs.bro -@load tuning/track-all-assets.bro diff --git a/scripts/test-all-policy.zeek b/scripts/test-all-policy.zeek new file mode 100644 index 0000000000..1741d42a18 --- /dev/null +++ b/scripts/test-all-policy.zeek @@ -0,0 +1,115 @@ +# This file loads ALL policy scripts that are part of the Zeek distribution. +# +# This is rarely makes sense, and is for testing only. +# +# Note that we have a unit test that makes sure that all policy files shipped are +# actually loaded here. If we have files that are part of the distribution yet +# can't be loaded here, these must still be listed here with their load command +# commented out. + +# The base/ scripts are all loaded by default and not included here. 
+ +# @load frameworks/control/controllee.zeek +# @load frameworks/control/controller.zeek +@load frameworks/dpd/detect-protocols.zeek +@load frameworks/dpd/packet-segment-logging.zeek +@load frameworks/intel/do_notice.zeek +@load frameworks/intel/do_expire.zeek +@load frameworks/intel/whitelist.zeek +@load frameworks/intel/removal.zeek +@load frameworks/intel/seen/__load__.zeek +@load frameworks/intel/seen/conn-established.zeek +@load frameworks/intel/seen/dns.zeek +@load frameworks/intel/seen/file-hashes.zeek +@load frameworks/intel/seen/file-names.zeek +@load frameworks/intel/seen/http-headers.zeek +@load frameworks/intel/seen/http-url.zeek +@load frameworks/intel/seen/pubkey-hashes.zeek +@load frameworks/intel/seen/smb-filenames.zeek +@load frameworks/intel/seen/smtp-url-extraction.zeek +@load frameworks/intel/seen/smtp.zeek +@load frameworks/intel/seen/ssl.zeek +@load frameworks/intel/seen/where-locations.zeek +@load frameworks/intel/seen/x509.zeek +@load frameworks/netcontrol/catch-and-release.zeek +@load frameworks/files/detect-MHR.zeek +@load frameworks/files/entropy-test-all-files.zeek +#@load frameworks/files/extract-all-files.zeek +@load frameworks/files/hash-all-files.zeek +@load frameworks/notice/__load__.zeek +@load frameworks/notice/actions/drop.zeek +@load frameworks/notice/extend-email/hostnames.zeek +@load files/unified2/__load__.zeek +@load files/unified2/main.zeek +@load files/x509/log-ocsp.zeek +@load frameworks/packet-filter/shunt.zeek +@load frameworks/software/version-changes.zeek +@load frameworks/software/vulnerable.zeek +@load frameworks/software/windows-version-detection.zeek +@load integration/barnyard2/__load__.zeek +@load integration/barnyard2/main.zeek +@load integration/barnyard2/types.zeek +@load integration/collective-intel/__load__.zeek +@load integration/collective-intel/main.zeek +@load misc/capture-loss.zeek +@load misc/detect-traceroute/__load__.zeek +@load misc/detect-traceroute/main.zeek +# @load misc/dump-events.zeek +@load misc/load-balancing.zeek +@load misc/loaded-scripts.zeek +@load misc/profiling.zeek +@load misc/scan.zeek +@load misc/stats.zeek +@load misc/weird-stats.zeek +@load misc/trim-trace-file.zeek +@load protocols/conn/known-hosts.zeek +@load protocols/conn/known-services.zeek +@load protocols/conn/mac-logging.zeek +@load protocols/conn/vlan-logging.zeek +@load protocols/conn/weirds.zeek +@load protocols/dhcp/msg-orig.zeek +@load protocols/dhcp/software.zeek +@load protocols/dhcp/sub-opts.zeek +@load protocols/dns/auth-addl.zeek +@load protocols/dns/detect-external-names.zeek +@load protocols/ftp/detect-bruteforcing.zeek +@load protocols/ftp/detect.zeek +@load protocols/ftp/software.zeek +@load protocols/http/detect-sqli.zeek +@load protocols/http/detect-webapps.zeek +@load protocols/http/header-names.zeek +@load protocols/http/software-browser-plugins.zeek +@load protocols/http/software.zeek +@load protocols/http/var-extraction-cookies.zeek +@load protocols/http/var-extraction-uri.zeek +@load protocols/krb/ticket-logging.zeek +@load protocols/modbus/known-masters-slaves.zeek +@load protocols/modbus/track-memmap.zeek +@load protocols/mysql/software.zeek +@load protocols/rdp/indicate_ssl.zeek +@load protocols/smb/log-cmds.zeek +@load protocols/smtp/blocklists.zeek +@load protocols/smtp/detect-suspicious-orig.zeek +@load protocols/smtp/entities-excerpt.zeek +@load protocols/smtp/software.zeek +@load protocols/ssh/detect-bruteforcing.zeek +@load protocols/ssh/geo-data.zeek +@load protocols/ssh/interesting-hostnames.zeek +@load 
protocols/ssh/software.zeek +@load protocols/ssl/expiring-certs.zeek +@load protocols/ssl/extract-certs-pem.zeek +@load protocols/ssl/heartbleed.zeek +@load protocols/ssl/known-certs.zeek +@load protocols/ssl/log-hostcerts-only.zeek +#@load protocols/ssl/notary.zeek +@load protocols/ssl/validate-certs.zeek +@load protocols/ssl/validate-ocsp.zeek +@load protocols/ssl/validate-sct.zeek +@load protocols/ssl/weak-keys.zeek +@load tuning/__load__.zeek +@load tuning/defaults/__load__.zeek +@load tuning/defaults/extracted_file_limits.zeek +@load tuning/defaults/packet-fragments.zeek +@load tuning/defaults/warnings.zeek +@load tuning/json-logs.zeek +@load tuning/track-all-assets.zeek diff --git a/scripts/zeekygen/README b/scripts/zeekygen/README new file mode 100644 index 0000000000..94982b0730 --- /dev/null +++ b/scripts/zeekygen/README @@ -0,0 +1,4 @@ +This package is loaded during the process which automatically generates +reference documentation for all Zeek scripts (i.e. "Zeekygen"). Its only +purpose is to provide an easy way to load all known Zeek scripts plus any +extra scripts needed or used by the documentation process. diff --git a/scripts/zeekygen/__load__.zeek b/scripts/zeekygen/__load__.zeek new file mode 100644 index 0000000000..00555c57bd --- /dev/null +++ b/scripts/zeekygen/__load__.zeek @@ -0,0 +1,15 @@ +@load test-all-policy.zeek + +# Scripts which are commented out in test-all-policy.zeek. +@load protocols/ssl/notary.zeek +@load frameworks/control/controllee.zeek +@load frameworks/control/controller.zeek +@load frameworks/files/extract-all-files.zeek +@load policy/misc/dump-events.zeek + +@load ./example.zeek + +event zeek_init() + { + terminate(); + } diff --git a/scripts/zeekygen/example.zeek b/scripts/zeekygen/example.zeek new file mode 100644 index 0000000000..1fcdd8390b --- /dev/null +++ b/scripts/zeekygen/example.zeek @@ -0,0 +1,194 @@ +##! This is an example script that demonstrates Zeekygen-style +##! documentation. It generally will make most sense when viewing +##! the script's raw source code and comparing to the HTML-rendered +##! version. +##! +##! Comments of the form ``##!`` are meant to summarize the script's +##! purpose. They are transferred directly into the generated +##! `reStructuredText `_ +##! (reST) document associated with the script. +##! +##! .. tip:: You can embed directives and roles within ``##``-stylized comments. +##! +##! There's also a custom role to reference any identifier node in +##! the Zeek Sphinx domain that's good for "see alsos", e.g. +##! +##! See also: :zeek:see:`ZeekygenExample::a_var`, +##! :zeek:see:`ZeekygenExample::ONE`, :zeek:see:`SSH::Info` +##! +##! And a custom directive does the equivalent references: +##! +##! .. zeek:see:: ZeekygenExample::a_var ZeekygenExample::ONE SSH::Info + +# Comments that use a single pound sign (#) are not significant to +# a script's auto-generated documentation, but ones that use a +# double pound sign (##) do matter. In some cases, like record +# field comments, it's necessary to disambiguate the field with +# which a comment associates: e.g. "##<" can be used on the same line +# as a field to signify the comment relates to it and not the +# following field. "##<" can also be used more generally in any +# variable declarations to associate with the last-declared identifier.
+# +# Generally, the auto-doc comments (##) are associated with the +# next declaration/identifier found in the script, but Zeekygen +# will track/render identifiers regardless of whether they have any +# of these special comments associated with them. +# +# The first sentence contained within the "##"-stylized comments for +# a given identifier is special in that it will be used as summary +# text in a table containing all such identifiers and short summaries. +# If there are no sentences (text terminated with '.'), then everything +# in the "##"-stylized comments up until the first empty comment +# is taken as the summary text for a given identifier. + +# @load directives are self-documenting, don't use any ``##`` style +# comments with them. +@load base/frameworks/notice +@load base/protocols/http +@load frameworks/software/vulnerable + +# "module" statements are self-documenting, don't use any ``##`` style +# comments with them. +module ZeekygenExample; + +# Redefinitions of "Notice::Type" are self-documenting, but +# more information can be supplied in two different ways. +redef enum Notice::Type += { + ## Any number of this type of comment + ## will document "Zeekygen_One". + Zeekygen_One, + Zeekygen_Two, ##< Any number of this type of comment + ##< will document "ZEEKYGEN_TWO". + Zeekygen_Three, + ## Omitting comments is fine, and so is mixing ``##`` and ``##<``, but + Zeekygen_Four, ##< it's probably best to use only one style consistently. +}; + +# All redefs are automatically tracked. Comments of the "##" form can be used +# to further document it, but in some cases, like here, they wouldn't be +# adding any interesting information that's not implicit. +redef enum Log::ID += { LOG }; + +# Only identifiers declared in an export section will show up in generated docs. + +export { + + ## Documentation for the "SimpleEnum" type goes here. + ## It can span multiple lines. + type SimpleEnum: enum { + ## Documentation for particular enum values is added like this. + ## And can also span multiple lines. + ONE, + TWO, ##< Or this style is valid to document the preceding enum value. + THREE, + }; + + ## Document the "SimpleEnum" redef here with any special info regarding + ## the *redef* itself. + redef enum SimpleEnum += { + FOUR, ##< And some documentation for "FOUR". + ## Also "FIVE". + FIVE + }; + + ## General documentation for a type "SimpleRecord" goes here. + ## The way fields can be documented is similar to what's already seen + ## for enums. + type SimpleRecord: record { + ## Counts something. + field1: count; + field2: bool; ##< Toggles something. + }; + + ## Document the record extension *redef* itself here. + redef record SimpleRecord += { + ## Document the extending field like this. + field_ext: string &optional; ##< Or here, like this. + }; + + ## General documentation for a type "ComplexRecord" goes here. + type ComplexRecord: record { + field1: count; ##< Counts something. + field2: bool; ##< Toggles something. + field3: SimpleRecord; ##< Zeekygen automatically tracks types + ##< and cross-references are automatically + ##< inserted into generated docs. + msg: string &default="blah"; ##< Attributes are self-documenting. + } &redef; + + ## An example record to be used with a logging stream. + ## Nothing special about it. If another script redefs this type + ## to add fields, the generated documentation will show all original + ## fields plus the extensions and the scripts which contributed to it + ## (provided they are also @load'ed).
+ type Info: record { + ts: time &log; + uid: string &log; + status: count &log &optional; + }; + + ## Add documentation for "an_option" here. + ## The type/attribute information is all generated automatically. + const an_option: set[addr, addr, string] &redef; + + ## Default initialization will be generated automatically. + const option_with_init = 0.01 secs &redef; ##< More docs can be added here. + + ## Put some documentation for "a_var" here. Any global/non-const that + ## isn't a function/event/hook is classified as a "state variable" + ## in the generated docs. + global a_var: bool; + + ## Types are inferred, that information is self-documenting. + global var_without_explicit_type = "this works"; + + ## The first sentence for a particular identifier's summary text ends here. + ## And this second sentence doesn't show in the short description provided + ## by the table of all identifiers declared by this script. + global summary_test: string; + + ## Summarize purpose of "a_function" here. + ## Give more details about "a_function" here. + ## Separating the documentation of the params/return values with + ## empty comments is optional, but improves readability of script. + ## + ## tag: Function arguments can be described + ## like this. + ## + ## msg: Another param. + ## + ## Returns: Describe the return type here. + global a_function: function(tag: string, msg: string): string; + + ## Summarize "an_event" here. + ## Give more details about "an_event" here. + ## + ## ZeekygenExample::a_function should not be confused as a parameter + ## in the generated docs, but it also doesn't generate a cross-reference + ## link. Use the see role instead: :zeek:see:`ZeekygenExample::a_function`. + ## + ## name: Describe the argument here. + global an_event: event(name: string); +} + +# This function isn't exported, so it won't appear anywhere in the generated +# documentation. So using ``##``-style comments is pointless here. +function function_without_proto(tag: string): string + { + return "blah"; + } + +# Same thing goes for types -- it's not exported, so it's considered +# private to this script and comments are only interesting to a person +# who is already reading the raw source for the script (so don't use +# ``##`` comments here. +type PrivateRecord: record { + field1: bool; + field2: count; +}; + +# Event handlers are also an implementation detail of a script, so they +# don't show up anywhere in the generated documentation. +event zeek_init() + { + } diff --git a/src/Anon.cc b/src/Anon.cc index a2afc489ca..983c7fbec8 100644 --- a/src/Anon.cc +++ b/src/Anon.cc @@ -415,10 +415,10 @@ void log_anonymization_mapping(ipaddr32_t input, ipaddr32_t output) { if ( anonymization_mapping ) { - val_list* vl = new val_list; - vl->append(new AddrVal(input)); - vl->append(new AddrVal(output)); - mgr.QueueEvent(anonymization_mapping, vl); + mgr.QueueEventFast(anonymization_mapping, { + new AddrVal(input), + new AddrVal(output) + }); } } diff --git a/src/Attr.cc b/src/Attr.cc index 47ea7d4f06..079c125f9e 100644 --- a/src/Attr.cc +++ b/src/Attr.cc @@ -1,22 +1,18 @@ // See the file "COPYING" in the main distribution directory for copyright. 
-#include "bro-config.h" +#include "zeek-config.h" #include "Attr.h" #include "Expr.h" -#include "Serializer.h" #include "threading/SerialTypes.h" const char* attr_name(attr_tag t) { static const char* attr_names[int(NUM_ATTRS)] = { "&optional", "&default", "&redef", - "&rotate_interval", "&rotate_size", "&add_func", "&delete_func", "&expire_func", "&read_expire", "&write_expire", "&create_expire", - "&persistent", "&synchronized", - "&encrypt", - "&raw_output", "&mergeable", "&priority", + "&raw_output", "&priority", "&group", "&log", "&error_handler", "&type_column", "(&tracked)", "&deprecated", }; @@ -51,7 +47,7 @@ void Attr::Describe(ODesc* d) const void Attr::DescribeReST(ODesc* d) const { - d->Add(":bro:attr:`"); + d->Add(":zeek:attr:`"); AddTag(d); d->Add("`"); @@ -64,14 +60,14 @@ void Attr::DescribeReST(ODesc* d) const if ( expr->Tag() == EXPR_NAME ) { - d->Add(":bro:see:`"); + d->Add(":zeek:see:`"); expr->Describe(d); d->Add("`"); } else if ( expr->Type()->Tag() == TYPE_FUNC ) { - d->Add(":bro:type:`"); + d->Add(":zeek:type:`"); d->Add(expr->Type()->AsFuncType()->FlavorString()); d->Add("`"); } @@ -141,7 +137,7 @@ Attributes::~Attributes() void Attributes::AddAttr(Attr* attr) { if ( ! attrs ) - attrs = new attr_list; + attrs = new attr_list(1); if ( ! attr->RedundantAttrOkay() ) // We overwrite old attributes by deleting them first. @@ -358,21 +354,6 @@ void Attributes::CheckAttr(Attr* a) } break; - case ATTR_ROTATE_INTERVAL: - if ( type->Tag() != TYPE_FILE ) - Error("&rotate_interval only applicable to files"); - break; - - case ATTR_ROTATE_SIZE: - if ( type->Tag() != TYPE_FILE ) - Error("&rotate_size only applicable to files"); - break; - - case ATTR_ENCRYPT: - if ( type->Tag() != TYPE_FILE ) - Error("&encrypt only applicable to files"); - break; - case ATTR_EXPIRE_READ: case ATTR_EXPIRE_WRITE: case ATTR_EXPIRE_CREATE: @@ -438,8 +419,6 @@ void Attributes::CheckAttr(Attr* a) } break; - case ATTR_PERSISTENT: - case ATTR_SYNCHRONIZED: case ATTR_TRACKED: // FIXME: Check here for global ID? break; @@ -449,11 +428,6 @@ void Attributes::CheckAttr(Attr* a) Error("&raw_output only applicable to files"); break; - case ATTR_MERGEABLE: - if ( type->Tag() != TYPE_TABLE ) - Error("&mergeable only applicable to tables/sets"); - break; - case ATTR_PRIORITY: Error("&priority only applicable to event bodies"); break; @@ -534,71 +508,3 @@ bool Attributes::operator==(const Attributes& other) const return true; } -bool Attributes::Serialize(SerialInfo* info) const - { - return SerialObj::Serialize(info); - } - -Attributes* Attributes::Unserialize(UnserialInfo* info) - { - return (Attributes*) SerialObj::Unserialize(info, SER_ATTRIBUTES); - } - -IMPLEMENT_SERIAL(Attributes, SER_ATTRIBUTES); - -bool Attributes::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_ATTRIBUTES, BroObj); - - info->s->WriteOpenTag("Attributes"); - assert(type); - if ( ! (type->Serialize(info) && SERIALIZE(attrs->length())) ) - return false; - - loop_over_list((*attrs), i) - { - Attr* a = (*attrs)[i]; - - // Broccoli doesn't support expressions. - Expr* e = (! info->broccoli_peer) ? a->AttrExpr() : 0; - SERIALIZE_OPTIONAL(e); - - if ( ! SERIALIZE(char(a->Tag())) ) - return false; - } - - info->s->WriteCloseTag("Attributes"); - return true; - } - -bool Attributes::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BroObj); - - type = BroType::Unserialize(info); - if ( ! type ) - return false; - - int len; - if ( ! 
UNSERIALIZE(&len) ) - return false; - - attrs = new attr_list(len); - while ( len-- ) - { - Expr* e; - UNSERIALIZE_OPTIONAL(e, Expr::Unserialize(info)) - - char tag; - if ( ! UNSERIALIZE(&tag) ) - { - delete e; - return false; - } - - attrs->append(new Attr((attr_tag)tag, e)); - } - - return true; - } - diff --git a/src/Attr.h b/src/Attr.h index bfb7c4803c..f32c11e50f 100644 --- a/src/Attr.h +++ b/src/Attr.h @@ -15,19 +15,13 @@ typedef enum { ATTR_OPTIONAL, ATTR_DEFAULT, ATTR_REDEF, - ATTR_ROTATE_INTERVAL, - ATTR_ROTATE_SIZE, ATTR_ADD_FUNC, ATTR_DEL_FUNC, ATTR_EXPIRE_FUNC, ATTR_EXPIRE_READ, ATTR_EXPIRE_WRITE, ATTR_EXPIRE_CREATE, - ATTR_PERSISTENT, - ATTR_SYNCHRONIZED, - ATTR_ENCRYPT, ATTR_RAW_OUTPUT, - ATTR_MERGEABLE, ATTR_PRIORITY, ATTR_GROUP, ATTR_LOG, @@ -98,17 +92,12 @@ public: attr_list* Attrs() { return attrs; } - bool Serialize(SerialInfo* info) const; - static Attributes* Unserialize(UnserialInfo* info); - bool operator==(const Attributes& other) const; protected: Attributes() : type(), attrs(), in_record() { } void CheckAttr(Attr* attr); - DECLARE_SERIAL(Attributes); - BroType* type; attr_list* attrs; bool in_record; diff --git a/src/Base64.cc b/src/Base64.cc index 3644740c7e..f7915d8678 100644 --- a/src/Base64.cc +++ b/src/Base64.cc @@ -1,4 +1,4 @@ -#include "bro-config.h" +#include "zeek-config.h" #include "Base64.h" #include diff --git a/src/BroList.h b/src/BroList.h index 6168bf7bda..0aa94d55ec 100644 --- a/src/BroList.h +++ b/src/BroList.h @@ -13,10 +13,6 @@ class ID; declare(PList,ID); typedef PList(ID) id_list; -class HashKey; -declare(PList,HashKey); -typedef PList(HashKey) hash_key_list; - class Val; declare(PList,Val); typedef PList(Val) val_list; @@ -29,28 +25,12 @@ class BroType; declare(PList,BroType); typedef PList(BroType) type_list; -class TypeDecl; -declare(PList,TypeDecl); -typedef PList(TypeDecl) type_decl_list; - -class Case; -declare(PList,Case); -typedef PList(Case) case_list; - class Attr; declare(PList,Attr); typedef PList(Attr) attr_list; -class Scope; -declare(PList,Scope); -typedef PList(Scope) scope_list; - class Timer; declare(PList,Timer); typedef PList(Timer) timer_list; -class DNS_Mgr_Request; -declare(PList,DNS_Mgr_Request); -typedef PList(DNS_Mgr_Request) DNS_mgr_request_list; - #endif diff --git a/src/BroString.cc b/src/BroString.cc index 3dca28439c..bb741724a5 100644 --- a/src/BroString.cc +++ b/src/BroString.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include #include @@ -288,7 +288,7 @@ void BroString::ToUpper() BroString* BroString::GetSubstring(int start, int len) const { - // This code used to live in bro.bif's sub_bytes() routine. + // This code used to live in zeek.bif's sub_bytes() routine. if ( start < 0 || start > n ) return 0; diff --git a/src/Brofiler.cc b/src/Brofiler.cc index a31ec469f0..1f0bc0268a 100644 --- a/src/Brofiler.cc +++ b/src/Brofiler.cc @@ -17,7 +17,8 @@ Brofiler::~Brofiler() bool Brofiler::ReadStats() { - char* bf = getenv("BRO_PROFILER_FILE"); + char* bf = zeekenv("ZEEK_PROFILER_FILE"); + if ( ! bf ) return false; @@ -47,14 +48,16 @@ bool Brofiler::ReadStats() bool Brofiler::WriteStats() { - char* bf = getenv("BRO_PROFILER_FILE"); - if ( ! bf ) return false; + char* bf = zeekenv("ZEEK_PROFILER_FILE"); + + if ( ! bf ) + return false; SafeDirname dirname{bf}; if ( ! 
ensure_intermediate_dirs(dirname.result.data()) ) { - reporter->Error("Failed to open BRO_PROFILER_FILE destination '%s' for writing", bf); + reporter->Error("Failed to open ZEEK_PROFILER_FILE destination '%s' for writing", bf); return false; } @@ -69,7 +72,7 @@ bool Brofiler::WriteStats() if ( fd == -1 ) { - reporter->Error("Failed to generate unique file name from BRO_PROFILER_FILE: %s", bf); + reporter->Error("Failed to generate unique file name from ZEEK_PROFILER_FILE: %s", bf); return false; } f = fdopen(fd, "w"); @@ -81,7 +84,7 @@ bool Brofiler::WriteStats() if ( ! f ) { - reporter->Error("Failed to open BRO_PROFILER_FILE destination '%s' for writing", bf); + reporter->Error("Failed to open ZEEK_PROFILER_FILE destination '%s' for writing", bf); return false; } diff --git a/src/Brofiler.h b/src/Brofiler.h index 88ce434070..55d14d6c79 100644 --- a/src/Brofiler.h +++ b/src/Brofiler.h @@ -17,7 +17,7 @@ public: /** * Imports Bro script Stmt usage information from file pointed to by - * environment variable BRO_PROFILER_FILE. + * environment variable ZEEK_PROFILER_FILE. * * @return: true if usage info was read, otherwise false. */ @@ -26,7 +26,7 @@ public: /** * Combines usage stats from current run with any read from ReadStats(), * then writes information to file pointed to by environment variable - * BRO_PROFILER_FILE. If the value of that env. variable ends with + * ZEEK_PROFILER_FILE. If the value of that env. variable ends with * ".XXXXXX" (exactly 6 X's), then it is first passed through mkstemp * to get a unique file. * diff --git a/src/CCL.cc b/src/CCL.cc index a725257c75..86ca2a03da 100644 --- a/src/CCL.cc +++ b/src/CCL.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include "CCL.h" #include "RE.h" diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 7aa750ac80..ce3dd897f3 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -10,8 +10,8 @@ set(bro_ALL_GENERATED_OUTPUTS CACHE INTERNAL "automatically generated files" FO set(bro_AUTO_BIFS CACHE INTERNAL "BIFs for automatic inclusion" FORCE) set(bro_REGISTER_BIFS CACHE INTERNAL "BIFs for automatic registering" FORCE) -set(bro_BASE_BIF_SCRIPTS CACHE INTERNAL "Bro script stubs for BIFs in base distribution of Bro" FORCE) -set(bro_PLUGIN_BIF_SCRIPTS CACHE INTERNAL "Bro script stubs for BIFs in Bro plugins" FORCE) +set(bro_BASE_BIF_SCRIPTS CACHE INTERNAL "Zeek script stubs for BIFs in base distribution of Zeek" FORCE) +set(bro_PLUGIN_BIF_SCRIPTS CACHE INTERNAL "Zeek script stubs for BIFs in Zeek plugins" FORCE) # If TRUE, use CMake's object libraries for sub-directories instead of # static libraries. This requires CMake >= 2.8.8. 
@@ -103,7 +103,7 @@ set_property(SOURCE scan.cc APPEND_STRING PROPERTY COMPILE_FLAGS "-Wno-sign-comp include(BifCl) set(BIF_SRCS - bro.bif + zeek.bif stats.bif event.bif const.bif @@ -143,7 +143,7 @@ set(bro_PLUGIN_LIBS CACHE INTERNAL "plugin libraries" FORCE) add_subdirectory(analyzer) add_subdirectory(broker) -add_subdirectory(broxygen) +add_subdirectory(zeekygen) add_subdirectory(file_analysis) add_subdirectory(input) add_subdirectory(iosource) @@ -247,7 +247,6 @@ set(bro_SRCS Brofiler.cc BroString.cc CCL.cc - ChunkedIO.cc CompHash.cc Conn.cc ConvertUTF.c @@ -285,9 +284,7 @@ set(bro_SRCS NetVar.cc Obj.cc OpaqueVal.cc - OSFinger.cc PacketFilter.cc - PersistenceSerializer.cc Pipe.cc PolicyFile.cc PrefixTable.cc @@ -296,7 +293,6 @@ set(bro_SRCS RandTest.cc RE.cc Reassem.cc - RemoteSerializer.cc Rule.cc RuleAction.cc RuleCondition.cc @@ -304,10 +300,8 @@ set(bro_SRCS SmithWaterman.cc Scope.cc SerializationFormat.cc - SerialObj.cc - Serializer.cc Sessions.cc - StateAccess.cc + Notifier.cc Stats.cc Stmt.cc Tag.cc @@ -353,24 +347,28 @@ set(bro_SRCS collect_headers(bro_HEADERS ${bro_SRCS}) if ( bro_HAVE_OBJECT_LIBRARIES ) - add_executable(bro ${bro_SRCS} ${bro_HEADERS} ${bro_SUBDIRS}) - target_link_libraries(bro ${brodeps} ${CMAKE_THREAD_LIBS_INIT} ${CMAKE_DL_LIBS}) + add_executable(zeek ${bro_SRCS} ${bro_HEADERS} ${bro_SUBDIRS}) + target_link_libraries(zeek ${zeekdeps} ${CMAKE_THREAD_LIBS_INIT} ${CMAKE_DL_LIBS}) else () - add_executable(bro ${bro_SRCS} ${bro_HEADERS}) - target_link_libraries(bro ${bro_SUBDIRS} ${brodeps} ${CMAKE_THREAD_LIBS_INIT} ${CMAKE_DL_LIBS}) + add_executable(zeek ${bro_SRCS} ${bro_HEADERS}) + target_link_libraries(zeek ${bro_SUBDIRS} ${zeekdeps} ${CMAKE_THREAD_LIBS_INIT} ${CMAKE_DL_LIBS}) endif () if ( NOT "${bro_LINKER_FLAGS}" STREQUAL "" ) - set_target_properties(bro PROPERTIES LINK_FLAGS "${bro_LINKER_FLAGS}") + set_target_properties(zeek PROPERTIES LINK_FLAGS "${bro_LINKER_FLAGS}") endif () -install(TARGETS bro DESTINATION bin) +install(TARGETS zeek DESTINATION bin) -set(BRO_EXE bro - CACHE STRING "Bro executable binary" FORCE) +# Install wrapper script for Bro-to-Zeek renaming. +include(InstallSymlink) +InstallSymlink("${CMAKE_INSTALL_PREFIX}/bin/zeek-wrapper" "${CMAKE_INSTALL_PREFIX}/bin/bro") -set(BRO_EXE_PATH ${CMAKE_CURRENT_BINARY_DIR}/bro - CACHE STRING "Path to Bro executable binary" FORCE) +set(BRO_EXE zeek + CACHE STRING "Zeek executable binary" FORCE) + +set(BRO_EXE_PATH ${CMAKE_CURRENT_BINARY_DIR}/zeek + CACHE STRING "Path to Zeek executable binary" FORCE) # Target to create all the autogenerated files. add_custom_target(generate_outputs_stage1) @@ -386,18 +384,18 @@ add_dependencies(generate_outputs_stage2b generate_outputs_stage1) add_custom_target(generate_outputs) add_dependencies(generate_outputs generate_outputs_stage2a generate_outputs_stage2b) -# Build __load__.bro files for standard *.bif.bro. +# Build __load__.zeek files for standard *.bif.zeek. bro_bif_create_loader(bif_loader "${bro_BASE_BIF_SCRIPTS}") add_dependencies(bif_loader ${bro_SUBDIRS}) -add_dependencies(bro bif_loader) +add_dependencies(zeek bif_loader) -# Build __load__.bro files for plugins/*.bif.bro. +# Build __load__.zeek files for plugins/*.bif.zeek. bro_bif_create_loader(bif_loader_plugins "${bro_PLUGIN_BIF_SCRIPTS}") add_dependencies(bif_loader_plugins ${bro_SUBDIRS}) -add_dependencies(bro bif_loader_plugins) +add_dependencies(zeek bif_loader_plugins) -# Install *.bif.bro. 
-install(DIRECTORY ${CMAKE_BINARY_DIR}/scripts/base/bif DESTINATION ${BRO_SCRIPT_INSTALL_PATH}/base) +# Install *.bif.zeek. +install(DIRECTORY ${CMAKE_BINARY_DIR}/scripts/base/bif DESTINATION ${ZEEK_SCRIPT_INSTALL_PATH}/base) # Create plugin directory at install time. install(DIRECTORY DESTINATION ${BRO_PLUGIN_INSTALL_PATH}) @@ -409,14 +407,14 @@ set_directory_properties(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES ${CMAKE_BINARY_D # place, yet make confuse us now. This makes upgrading easier. install(CODE " file(REMOVE_RECURSE - ${BRO_SCRIPT_INSTALL_PATH}/base/frameworks/logging/writers/dataseries.bro - ${BRO_SCRIPT_INSTALL_PATH}/base/frameworks/logging/writers/elasticsearch.bro - ${BRO_SCRIPT_INSTALL_PATH}/policy/tuning/logs-to-elasticsearch.bro + ${ZEEK_SCRIPT_INSTALL_PATH}/base/frameworks/logging/writers/dataseries.bro + ${ZEEK_SCRIPT_INSTALL_PATH}/base/frameworks/logging/writers/elasticsearch.bro + ${ZEEK_SCRIPT_INSTALL_PATH}/policy/tuning/logs-to-elasticsearch.bro ) ") install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/ - DESTINATION include/bro + DESTINATION include/zeek FILES_MATCHING PATTERN "*.h" PATTERN "*.pac" @@ -424,7 +422,7 @@ install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/ ) install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/ - DESTINATION include/bro + DESTINATION include/zeek FILES_MATCHING PATTERN "*.bif.func_h" PATTERN "*.bif.netvar_h" @@ -433,5 +431,5 @@ install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/ ) install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/sqlite3.h - DESTINATION include/bro/3rdparty + DESTINATION include/zeek/3rdparty ) diff --git a/src/ChunkedIO.cc b/src/ChunkedIO.cc deleted file mode 100644 index d2cdbc6425..0000000000 --- a/src/ChunkedIO.cc +++ /dev/null @@ -1,1358 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -#include "bro-config.h" -#include "ChunkedIO.h" -#include "NetVar.h" -#include "RemoteSerializer.h" - -ChunkedIO::ChunkedIO() : stats(), tag(), pure() - { - } - -void ChunkedIO::Stats(char* buffer, int length) - { - safe_snprintf(buffer, length, - "bytes=%luK/%luK chunks=%lu/%lu io=%lu/%lu bytes/io=%.2fK/%.2fK", - stats.bytes_read / 1024, stats.bytes_written / 1024, - stats.chunks_read, stats.chunks_written, - stats.reads, stats.writes, - stats.bytes_read / (1024.0 * stats.reads), - stats.bytes_written / (1024.0 * stats.writes)); - } - -#ifdef DEBUG_COMMUNICATION - -void ChunkedIO::AddToBuffer(uint32 len, char* data, bool is_read) - { - Chunk* copy = new Chunk; - copy->len = len; - copy->data = new char[len]; - memcpy(copy->data, data, len); - - std::list* l = is_read ? &data_read : &data_written; - l->push_back(copy); - - if ( l->size() > DEBUG_COMMUNICATION ) - { - Chunk* old = l->front(); - l->pop_front(); - delete [] old->data; - delete old; - } - } - -void ChunkedIO::AddToBuffer(Chunk* chunk, bool is_read) - { - AddToBuffer(chunk->len, chunk->data, is_read); - } - -void ChunkedIO::DumpDebugData(const char* basefnname, bool want_reads) - { - std::list* l = want_reads ? &data_read : &data_written; - - int count = 0; - - for ( std::list::iterator i = l->begin(); i != l->end(); ++i ) - { - static char buffer[128]; - snprintf(buffer, sizeof(buffer), "%s.%s.%d", basefnname, - want_reads ? 
"read" : "write", ++count); - buffer[sizeof(buffer) - 1] = '\0'; - - int fd = open(buffer, O_WRONLY | O_CREAT | O_TRUNC, 0600); - if ( fd < 0 ) - continue; - - ChunkedIOFd io(fd, "dump-file"); - io.Write(*i); - io.Flush(); - safe_close(fd); - } - - l->clear(); - } - -#endif - -ChunkedIOFd::ChunkedIOFd(int arg_fd, const char* arg_tag, pid_t arg_pid) - { - int flags; - - tag = arg_tag; - fd = arg_fd; - eof = 0; - last_flush = current_time(); - failed_reads = 0; - - if ( (flags = fcntl(fd, F_GETFL, 0)) < 0) - { - Log(fmt("can't obtain socket flags: %s", strerror(errno))); - exit(1); - } - - if ( fcntl(fd, F_SETFL, flags|O_NONBLOCK) < 0 ) - { - Log(fmt("can't set fd to non-blocking: %s (%d)", - strerror(errno), getpid())); - exit(1); - } - - read_buffer = new char[BUFFER_SIZE]; - read_len = 0; - read_pos = 0; - partial = 0; - write_buffer = new char[BUFFER_SIZE]; - write_len = 0; - write_pos = 0; - - pending_head = 0; - pending_tail = 0; - - pid = arg_pid; - } - -ChunkedIOFd::~ChunkedIOFd() - { - Clear(); - - delete [] read_buffer; - delete [] write_buffer; - safe_close(fd); - delete partial; - } - -bool ChunkedIOFd::Write(Chunk* chunk) - { -#ifdef DEBUG - DBG_LOG(DBG_CHUNKEDIO, "write of size %d [%s]", - chunk->len, fmt_bytes(chunk->data, min((uint32)20, chunk->len))); -#endif - -#ifdef DEBUG_COMMUNICATION - AddToBuffer(chunk, false); -#endif - - if ( chunk->len <= BUFFER_SIZE - sizeof(uint32) ) - return WriteChunk(chunk, false); - - // We have to split it up. - char* p = chunk->data; - uint32 left = chunk->len; - - while ( left ) - { - uint32 sz = min(BUFFER_SIZE - sizeof(uint32), left); - Chunk* part = new Chunk(new char[sz], sz); - - memcpy(part->data, p, part->len); - left -= part->len; - p += part->len; - - if ( ! WriteChunk(part, left != 0) ) - return false; - } - - delete chunk; - return true; - } - -bool ChunkedIOFd::WriteChunk(Chunk* chunk, bool partial) - { - assert(chunk->len <= BUFFER_SIZE - sizeof(uint32) ); - - if ( chunk->len == 0 ) - InternalError("attempt to write 0 bytes chunk"); - - if ( partial ) - chunk->len |= FLAG_PARTIAL; - - ++stats.chunks_written; - - // If it fits into the buffer, we're done (but keep care not - // to reorder chunks). - if ( ! pending_head && PutIntoWriteBuffer(chunk) ) - return true; - - // Otherwise queue it. - ++stats.pending; - ChunkQueue* q = new ChunkQueue; - q->chunk = chunk; - q->next = 0; - - if ( pending_tail ) - { - pending_tail->next = q; - pending_tail = q; - } - else - pending_head = pending_tail = q; - - write_flare.Fire(); - return Flush(); - } - - -bool ChunkedIOFd::PutIntoWriteBuffer(Chunk* chunk) - { - uint32 len = chunk->len & ~FLAG_PARTIAL; - - if ( write_len + len + (IsPure() ? 0 : sizeof(len)) > BUFFER_SIZE ) - return false; - - if ( ! IsPure() ) - { - uint32 nlen = htonl(chunk->len); - memcpy(write_buffer + write_len, &nlen, sizeof(nlen)); - write_len += sizeof(nlen); - } - - memcpy(write_buffer + write_len, chunk->data, len); - write_len += len; - - delete chunk; - write_flare.Fire(); - - if ( network_time - last_flush > 0.005 ) - FlushWriteBuffer(); - - return true; - } - -bool ChunkedIOFd::FlushWriteBuffer() - { - last_flush = network_time; - - while ( write_pos != write_len ) - { - uint32 len = write_len - write_pos; - - int written = write(fd, write_buffer + write_pos, len); - - if ( written < 0 ) - { - if ( errno == EPIPE ) - eof = true; - - if ( errno != EINTR ) - // These errnos are equal on POSIX. 
- return errno == EWOULDBLOCK || errno == EAGAIN; - - else - written = 0; - } - - stats.bytes_written += written; - if ( written > 0 ) - ++stats.writes; - - if ( unsigned(written) == len ) - { - write_pos = write_len = 0; - - if ( ! pending_head ) - write_flare.Extinguish(); - - return true; - } - - if ( written == 0 ) - InternalError("written==0"); - - // Short write. - write_pos += written; - } - - return true; - } - -bool ChunkedIOFd::OptionalFlush() - { - // This threshhold is quite arbitrary. -// if ( current_time() - last_flush > 0.01 ) - return Flush(); - } - -bool ChunkedIOFd::Flush() - { - // Try to write data out. - while ( pending_head ) - { - if ( ! FlushWriteBuffer() ) - return false; - - // If we couldn't write the whole buffer, we stop here - // and try again next time. - if ( write_len > 0 ) - return true; - - // Put as many pending chunks into the buffer as possible. - while ( pending_head ) - { - if ( ! PutIntoWriteBuffer(pending_head->chunk) ) - break; - - ChunkQueue* q = pending_head; - pending_head = pending_head->next; - if ( ! pending_head ) - pending_tail = 0; - - --stats.pending; - delete q; - } - } - - bool rval = FlushWriteBuffer(); - - if ( ! pending_head && write_len == 0 ) - write_flare.Extinguish(); - - return rval; - } - -uint32 ChunkedIOFd::ChunkAvailable() - { - int bytes_left = read_len - read_pos; - - if ( bytes_left < int(sizeof(uint32)) ) - return 0; - - bytes_left -= sizeof(uint32); - - // We have to copy the value here as it may not be - // aligned correctly in the data. - uint32 len; - memcpy(&len, read_buffer + read_pos, sizeof(len)); - len = ntohl(len); - - if ( uint32(bytes_left) < (len & ~FLAG_PARTIAL) ) - return 0; - - assert(len & ~FLAG_PARTIAL); - - return len; - } - -ChunkedIO::Chunk* ChunkedIOFd::ExtractChunk() - { - uint32 len = ChunkAvailable(); - uint32 real_len = len & ~FLAG_PARTIAL; - if ( ! real_len ) - return 0; - - read_pos += sizeof(uint32); - - Chunk* chunk = new Chunk(new char[real_len], len); - memcpy(chunk->data, read_buffer + read_pos, real_len); - read_pos += real_len; - - ++stats.chunks_read; - - return chunk; - } - -ChunkedIO::Chunk* ChunkedIOFd::ConcatChunks(Chunk* c1, Chunk* c2) - { - uint32 sz = c1->len + c2->len; - Chunk* c = new Chunk(new char[sz], sz); - - memcpy(c->data, c1->data, c1->len); - memcpy(c->data + c1->len, c2->data, c2->len); - - delete c1; - delete c2; - - return c; - } - -void ChunkedIO::Log(const char* str) - { - RemoteSerializer::Log(RemoteSerializer::LogError, str); - } - -bool ChunkedIOFd::Read(Chunk** chunk, bool may_block) - { - *chunk = 0; - - // We will be called regularly. So take the opportunity - // to flush the write buffer once in a while. - OptionalFlush(); - - if ( ! ReadChunk(chunk, may_block) ) - { -#ifdef DEBUG_COMMUNICATION - AddToBuffer("", true); -#endif - if ( ! ChunkAvailable() ) - read_flare.Extinguish(); - - return false; - } - - if ( ! *chunk ) - { -#ifdef DEBUG_COMMUNICATION - AddToBuffer("", true); -#endif - read_flare.Extinguish(); - return true; - } - - if ( ChunkAvailable() ) - read_flare.Fire(); - else - read_flare.Extinguish(); - -#ifdef DEBUG - if ( *chunk ) - DBG_LOG(DBG_CHUNKEDIO, "read of size %d %s[%s]", - (*chunk)->len & ~FLAG_PARTIAL, - (*chunk)->len & FLAG_PARTIAL ? "(P) " : "", - fmt_bytes((*chunk)->data, - min((uint32)20, (*chunk)->len))); -#endif - - if ( ! ((*chunk)->len & FLAG_PARTIAL) ) - { - if ( ! partial ) - { -#ifdef DEBUG_COMMUNICATION - AddToBuffer(*chunk, true); -#endif - return true; - } - else - { - // This is the last chunk of an oversized one. 
- *chunk = ConcatChunks(partial, *chunk); - partial = 0; - -#ifdef DEBUG - if ( *chunk ) - DBG_LOG(DBG_CHUNKEDIO, - "built virtual chunk of size %d [%s]", - (*chunk)->len, - fmt_bytes((*chunk)->data, 20)); -#endif - -#ifdef DEBUG_COMMUNICATION - AddToBuffer(*chunk, true); -#endif - return true; - } - } - - // This chunk is the non-last part of an oversized. - (*chunk)->len &= ~FLAG_PARTIAL; - - if ( ! partial ) - // First part of oversized chunk. - partial = *chunk; - else - partial = ConcatChunks(partial, *chunk); - -#ifdef DEBUG_COMMUNICATION - AddToBuffer("", true); -#endif - - *chunk = 0; - return true; // Read following part next time. - } - -bool ChunkedIOFd::ReadChunk(Chunk** chunk, bool may_block) - { - // We will be called regularly. So take the opportunity - // to flush the write buffer once in a while. - OptionalFlush(); - - *chunk = ExtractChunk(); - if ( *chunk ) - return true; - - int bytes_left = read_len - read_pos; - - // If we have a partial chunk left, move this to the head of - // the buffer. - if ( bytes_left ) - memmove(read_buffer, read_buffer + read_pos, bytes_left); - - read_pos = 0; - read_len = bytes_left; - - if ( ! ChunkAvailable() ) - read_flare.Extinguish(); - - // If allowed, wait a bit for something to read. - if ( may_block ) - { - fd_set fd_read, fd_write, fd_except; - - FD_ZERO(&fd_read); - FD_ZERO(&fd_write); - FD_ZERO(&fd_except); - FD_SET(fd, &fd_read); - - struct timeval small_timeout; - small_timeout.tv_sec = 0; - small_timeout.tv_usec = 50; - - select(fd + 1, &fd_read, &fd_write, &fd_except, &small_timeout); - } - - // Make sure the process is still runnning - // (only checking for EPIPE after a read doesn't - // seem to be sufficient). - if ( pid && kill(pid, 0) < 0 && errno != EPERM ) - { - eof = true; - errno = EPIPE; - return false; - } - - // Try to fill the buffer. - while ( true ) - { - int len = BUFFER_SIZE - read_len; - int read = ::read(fd, read_buffer + read_len, len); - - if ( read < 0 ) - { - if ( errno != EINTR ) - { - // These errnos are equal on POSIX. - if ( errno == EWOULDBLOCK || errno == EAGAIN ) - { - // Let's see if we have a chunk now -- - // even if we time out, we may have read - // just enough in previous iterations! - *chunk = ExtractChunk(); - ++failed_reads; - return true; - } - - if ( errno == EPIPE ) - eof = true; - - return false; - } - - else - read = 0; - } - - failed_reads = 0; - - if ( read == 0 && len != 0 ) - { - *chunk = ExtractChunk(); - if ( *chunk ) - return true; - - eof = true; - return false; - } - - read_len += read; - - ++stats.reads; - stats.bytes_read += read; - - if ( read == len ) - break; - } - - // Let's see if we have a chunk now. - *chunk = ExtractChunk(); - - return true; - } - -bool ChunkedIOFd::CanRead() - { - // We will be called regularly. So take the opportunity - // to flush the write buffer once in a while. 
- OptionalFlush(); - - if ( ChunkAvailable() ) - return true; - - fd_set fd_read; - FD_ZERO(&fd_read); - FD_SET(fd, &fd_read); - - struct timeval no_timeout; - no_timeout.tv_sec = 0; - no_timeout.tv_usec = 0; - - return select(fd + 1, &fd_read, 0, 0, &no_timeout) > 0; - } - -bool ChunkedIOFd::CanWrite() - { - return pending_head != 0; - } - -bool ChunkedIOFd::IsIdle() - { - if ( pending_head || ChunkAvailable() ) - return false; - - if ( failed_reads > 0 ) - return true; - - return false; - } - -bool ChunkedIOFd::IsFillingUp() - { - return stats.pending > chunked_io_buffer_soft_cap; - } - -iosource::FD_Set ChunkedIOFd::ExtraReadFDs() const - { - iosource::FD_Set rval; - rval.Insert(write_flare.FD()); - rval.Insert(read_flare.FD()); - return rval; - } - -void ChunkedIOFd::Clear() - { - while ( pending_head ) - { - ChunkQueue* next = pending_head->next; - delete pending_head->chunk; - delete pending_head; - pending_head = next; - } - - pending_head = pending_tail = 0; - - if ( write_len == 0 ) - write_flare.Extinguish(); - } - -const char* ChunkedIOFd::Error() - { - static char buffer[1024]; - safe_snprintf(buffer, sizeof(buffer), "%s [%d]", strerror(errno), errno); - - return buffer; - } - -void ChunkedIOFd::Stats(char* buffer, int length) - { - int i = safe_snprintf(buffer, length, "pending=%d ", stats.pending); - ChunkedIO::Stats(buffer + i, length - i); - } - -SSL_CTX* ChunkedIOSSL::ctx; - -ChunkedIOSSL::ChunkedIOSSL(int arg_socket, bool arg_server) - { - socket = arg_socket; - last_ret = 0; - eof = false; - setup = false; - server = arg_server; - ssl = 0; - - write_state = LEN; - write_head = 0; - write_tail = 0; - - read_state = LEN; - read_chunk = 0; - read_ptr = 0; - } - -ChunkedIOSSL::~ChunkedIOSSL() - { - if ( setup ) - { - SSL_shutdown(ssl); - - // We don't care if the other side closes properly. - setup = false; - } - - if ( ssl ) - { - SSL_free(ssl); - ssl = 0; - } - - safe_close(socket); - } - - -static int pem_passwd_cb(char* buf, int size, int rwflag, void* passphrase) - { - safe_strncpy(buf, (char*) passphrase, size); - buf[size - 1] = '\0'; - return strlen(buf); - } - -bool ChunkedIOSSL::Init() - { - // If the handshake doesn't succeed immediately we will - // be called multiple times. - if ( ! ctx ) - { - SSL_load_error_strings(); - - ctx = SSL_CTX_new(SSLv23_method()); - if ( ! ctx ) - { - Log("can't create SSL context"); - return false; - } - - // We access global variables here. But as they are - // declared const and we don't modify them this should - // be fine. - const char* key = ssl_private_key->AsString()->CheckString(); - - if ( ! (key && *key && - SSL_CTX_use_certificate_chain_file(ctx, key)) ) - { - Log(fmt("can't read certificate from file %s", key)); - return false; - } - - const char* passphrase = - ssl_passphrase->AsString()->CheckString(); - - if ( passphrase && ! streq(passphrase, "") ) - { - SSL_CTX_set_default_passwd_cb(ctx, pem_passwd_cb); - SSL_CTX_set_default_passwd_cb_userdata(ctx, - (void*) passphrase); - } - - if ( ! (key && *key && - SSL_CTX_use_PrivateKey_file(ctx, key, SSL_FILETYPE_PEM)) ) - { - Log(fmt("can't read private key from file %s", key)); - return false; - } - - const char* ca = ssl_ca_certificate->AsString()->CheckString(); - if ( ! (ca && *ca && SSL_CTX_load_verify_locations(ctx, ca, 0)) ) - { - Log(fmt("can't read CA certificate from file %s", ca)); - return false; - } - - // Only use real ciphers. - if ( ! 
SSL_CTX_set_cipher_list(ctx, "HIGH") ) - { - Log("can't set cipher list"); - return false; - } - - // Require client certificate. - SSL_CTX_set_verify(ctx, - SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, 0); - } - - int flags; - - if ( (flags = fcntl(socket, F_GETFL, 0)) < 0) - { - Log(fmt("can't obtain socket flags: %s", strerror(errno))); - return false; - } - - if ( fcntl(socket, F_SETFL, flags|O_NONBLOCK) < 0 ) - { - Log(fmt("can't set socket to non-blocking: %s", - strerror(errno))); - return false; - } - - if ( ! ssl ) - { - ssl = SSL_new(ctx); - if ( ! ssl ) - { - Log("can't create SSL object"); - return false; - } - - BIO* bio = BIO_new_socket(socket, BIO_NOCLOSE); - BIO_set_nbio(bio, 1); - SSL_set_bio(ssl, bio, bio); - } - - int success; - if ( server ) - success = last_ret = SSL_accept(ssl); - else - success = last_ret = SSL_connect(ssl); - - if ( success > 0 ) - { // handshake done - setup = true; - return true; - } - - int error = SSL_get_error(ssl, success); - - if ( success <= 0 && - (error == SSL_ERROR_WANT_WRITE || error == SSL_ERROR_WANT_READ) ) - // Handshake not finished yet, but that's ok for now. - return true; - - // Some error. - eof = true; - return false; - } - -bool ChunkedIOSSL::Write(Chunk* chunk) - { -#ifdef DEBUG - DBG_LOG(DBG_CHUNKEDIO, "ssl write of size %d [%s]", - chunk->len, fmt_bytes(chunk->data, 20)); -#endif - - // Queue it. - ++stats.pending; - Queue* q = new Queue; - q->chunk = chunk; - q->next = 0; - - // Temporarily convert len into network byte order. - chunk->len = htonl(chunk->len); - - if ( write_tail ) - { - write_tail->next = q; - write_tail = q; - } - else - write_head = write_tail = q; - - write_flare.Fire(); - Flush(); - return true; - } - -bool ChunkedIOSSL::WriteData(char* p, uint32 len, bool* error) - { - *error = false; - - double t = current_time(); - - int written = last_ret = SSL_write(ssl, p, len); - - switch ( SSL_get_error(ssl, written) ) { - case SSL_ERROR_NONE: - // SSL guarantees us that all bytes have been written. - // That's nice. :-) - return true; - - case SSL_ERROR_WANT_READ: - case SSL_ERROR_WANT_WRITE: - // Would block. - DBG_LOG(DBG_CHUNKEDIO, - "SSL_write: SSL_ERROR_WANT_READ [%d,%d]", - written, SSL_get_error(ssl, written)); - *error = false; - return false; - - case SSL_ERROR_ZERO_RETURN: - // Regular remote connection shutdown. - DBG_LOG(DBG_CHUNKEDIO, - "SSL_write: SSL_ZERO_RETURN [%d,%d]", - written, SSL_get_error(ssl, written)); - *error = eof = true; - return false; - - case SSL_ERROR_SYSCALL: - DBG_LOG(DBG_CHUNKEDIO, - "SSL_write: SSL_SYS_CALL [%d,%d]", - written, SSL_get_error(ssl, written)); - - if ( written == 0 ) - { - // Socket connection closed. - *error = eof = true; - return false; - } - - // Fall through. - - default: - DBG_LOG(DBG_CHUNKEDIO, - "SSL_write: fatal error [%d,%d]", - written, SSL_get_error(ssl, written)); - // Fatal SSL error. - *error = true; - return false; - } - - InternalError("can't be reached"); - return false; - } - -bool ChunkedIOSSL::Flush() - { - if ( ! setup ) - { - // We may need to finish the handshake. - if ( ! Init() ) - return false; - if ( ! setup ) - return true; - } - - while ( write_head ) - { - bool error; - - Chunk* c = write_head->chunk; - - if ( write_state == LEN ) - { - if ( ! WriteData((char*)&c->len, sizeof(c->len), &error) ) - return ! error; - write_state = DATA; - - // Convert back from network byte order. - c->len = ntohl(c->len); - } - - if ( ! WriteData(c->data, c->len, &error) ) - return ! error; - - // Chunk written, throw away. 
- Queue* q = write_head; - write_head = write_head->next; - if ( ! write_head ) - write_tail = 0; - --stats.pending; - delete q; - - delete c; - - write_state = LEN; - } - - write_flare.Extinguish(); - return true; - } - -bool ChunkedIOSSL::ReadData(char* p, uint32 len, bool* error) - { - if ( ! read_ptr ) - read_ptr = p; - - while ( true ) - { - double t = current_time(); - - int read = last_ret = - SSL_read(ssl, read_ptr, len - (read_ptr - p)); - - switch ( SSL_get_error(ssl, read) ) { - case SSL_ERROR_NONE: - // We're fine. - read_ptr += read; - - if ( unsigned(read_ptr - p) == len ) - { - // We have read as much as requested.. - read_ptr = 0; - *error = false; - return true; - } - - break; - - case SSL_ERROR_WANT_READ: - case SSL_ERROR_WANT_WRITE: - // Would block. - DBG_LOG(DBG_CHUNKEDIO, - "SSL_read: SSL_ERROR_WANT_READ [%d,%d]", - read, SSL_get_error(ssl, read)); - *error = false; - return false; - - case SSL_ERROR_ZERO_RETURN: - // Regular remote connection shutdown. - DBG_LOG(DBG_CHUNKEDIO, - "SSL_read: SSL_ZERO_RETURN [%d,%d]", - read, SSL_get_error(ssl, read)); - *error = eof = true; - return false; - - case SSL_ERROR_SYSCALL: - DBG_LOG(DBG_CHUNKEDIO, "SSL_read: SSL_SYS_CALL [%d,%d]", - read, SSL_get_error(ssl, read)); - - if ( read == 0 ) - { - // Socket connection closed. - *error = eof = true; - return false; - } - - // Fall through. - - default: - DBG_LOG(DBG_CHUNKEDIO, - "SSL_read: fatal error [%d,%d]", - read, SSL_get_error(ssl, read)); - - // Fatal SSL error. - *error = true; - return false; - } - } - - // Can't be reached. - InternalError("can't be reached"); - return false; - } - -bool ChunkedIOSSL::Read(Chunk** chunk, bool mayblock) - { - *chunk = 0; - - if ( ! setup ) - { - // We may need to finish the handshake. - if ( ! Init() ) - return false; - if ( ! setup ) - return true; - } - - bool error; - - Flush(); - - if ( read_state == LEN ) - { - if ( ! read_chunk ) - { - read_chunk = new Chunk; - read_chunk->data = 0; - } - - if ( ! ReadData((char*)&read_chunk->len, - sizeof(read_chunk->len), - &error) ) - return ! error; - - read_state = DATA; - read_chunk->len = ntohl(read_chunk->len); - } - - if ( ! read_chunk->data ) - { - read_chunk->data = new char[read_chunk->len]; - read_chunk->free_func = Chunk::free_func_delete; - } - - if ( ! ReadData(read_chunk->data, read_chunk->len, &error) ) - return ! error; - - // Chunk fully read. Pass it on. - *chunk = read_chunk; - read_chunk = 0; - read_state = LEN; - -#ifdef DEBUG - DBG_LOG(DBG_CHUNKEDIO, "ssl read of size %d [%s]", - (*chunk)->len, fmt_bytes((*chunk)->data, 20)); -#endif - - return true; - } - -bool ChunkedIOSSL::CanRead() - { - // We will be called regularly. So take the opportunity - // to flush the write buffer. - Flush(); - - if ( SSL_pending(ssl) ) - return true; - - fd_set fd_read; - FD_ZERO(&fd_read); - FD_SET(socket, &fd_read); - - struct timeval notimeout; - notimeout.tv_sec = 0; - notimeout.tv_usec = 0; - - return select(socket + 1, &fd_read, NULL, NULL, ¬imeout) > 0; - } - -bool ChunkedIOSSL::CanWrite() - { - return write_head != 0; - } - -bool ChunkedIOSSL::IsIdle() - { - return ! (CanRead() || CanWrite()); - } - -bool ChunkedIOSSL::IsFillingUp() - { - // We don't really need this at the moment (since SSL is only used for - // peer-to-peer communication). Thus, we always return false for now. 
- return false; - } - -iosource::FD_Set ChunkedIOSSL::ExtraReadFDs() const - { - iosource::FD_Set rval; - rval.Insert(write_flare.FD()); - return rval; - } - -void ChunkedIOSSL::Clear() - { - while ( write_head ) - { - Queue* next = write_head->next; - delete write_head->chunk; - delete write_head; - write_head = next; - } - write_head = write_tail = 0; - write_flare.Extinguish(); - } - -const char* ChunkedIOSSL::Error() - { - const int BUFLEN = 512; - static char buffer[BUFLEN]; - - int sslcode = SSL_get_error(ssl, last_ret); - int errcode = ERR_get_error(); - - int count = safe_snprintf(buffer, BUFLEN, "[%d,%d,%d] SSL error: ", - errcode, sslcode, last_ret); - - if ( errcode ) - ERR_error_string_n(errcode, buffer + count, BUFLEN - count); - - else if ( sslcode == SSL_ERROR_SYSCALL ) - { - if ( last_ret ) - // Look at errno. - safe_snprintf(buffer + count, BUFLEN - count, - "syscall: %s", strerror(errno)); - else - // Errno is not valid in this case. - safe_strncpy(buffer + count, - "syscall: unexpected end-of-file", - BUFLEN - count); - } - else - safe_strncpy(buffer + count, "unknown error", BUFLEN - count); - - return buffer; - } - -void ChunkedIOSSL::Stats(char* buffer, int length) - { - int i = safe_snprintf(buffer, length, "pending=%ld ", stats.pending); - ChunkedIO::Stats(buffer + i, length - i); - } - -bool CompressedChunkedIO::Init() - { - zin.zalloc = 0; - zin.zfree = 0; - zin.opaque = 0; - - zout.zalloc = 0; - zout.zfree = 0; - zout.opaque = 0; - - compress = uncompress = false; - error = 0; - uncompressed_bytes_read = 0; - uncompressed_bytes_written = 0; - - return true; - } - -bool CompressedChunkedIO::Read(Chunk** chunk, bool may_block) - { - if ( ! io->Read(chunk, may_block) ) - return false; - - if ( ! uncompress ) - return true; - - if ( ! *chunk ) - return true; - - uint32 uncompressed_len = - *(uint32*)((*chunk)->data + (*chunk)->len - sizeof(uint32)); - - if ( uncompressed_len == 0 ) - { - // Not compressed. - DBG_LOG(DBG_CHUNKEDIO, "zlib read pass-through: size=%d", - (*chunk)->len); - return true; - } - - char* uncompressed = new char[uncompressed_len]; - - DBG_LOG(DBG_CHUNKEDIO, "zlib read: size=%d uncompressed=%d", - (*chunk)->len, uncompressed_len); - - zin.next_in = (Bytef*) (*chunk)->data; - zin.avail_in = (*chunk)->len - sizeof(uint32); - zin.next_out = (Bytef*) uncompressed; - zin.avail_out = uncompressed_len; - - if ( inflate(&zin, Z_SYNC_FLUSH) != Z_OK ) - { - error = zin.msg; - return false; - } - - if ( zin.avail_in > 0 ) - { - error = "compressed data longer than expected"; - return false; - } - - (*chunk)->free_func((*chunk)->data); - - uncompressed_bytes_read += uncompressed_len; - - (*chunk)->len = uncompressed_len; - (*chunk)->data = uncompressed; - (*chunk)->free_func = Chunk::free_func_delete; - - return true; - } - -bool CompressedChunkedIO::Write(Chunk* chunk) - { - if ( (! compress) || IsPure() ) - // No compression. - return io->Write(chunk); - - // We compress block-wise (rather than stream-wise) because: - // - // (1) it's significantly easier to implement due to our block-oriented - // communication model (with a stream compression, we'd need to chop - // the stream into blocks during decompression which would require - // additional buffering and copying). - // - // (2) it ensures that we do not introduce any additional latencies (a - // stream compression may decide to wait for the next chunk of data - // before writing anything out). - // - // The block-wise compression comes at the cost of a smaller compression - // factor. 
- // - // A compressed chunk's data looks like this: - // char[] compressed data - // uint32 uncompressed_length - // - // By including uncompressed_length, we again trade easier - // decompression for a smaller reduction factor. If uncompressed_length - // is zero, the data is *not* compressed. - - uncompressed_bytes_written += chunk->len; - uint32 original_size = chunk->len; - - char* compressed = new char[chunk->len + sizeof(uint32)]; - - if ( chunk->len < MIN_COMPRESS_SIZE ) - { - // Too small; not worth any compression. - memcpy(compressed, chunk->data, chunk->len); - *(uint32*) (compressed + chunk->len) = 0; // uncompressed_length - - chunk->free_func(chunk->data); - chunk->data = compressed; - chunk->free_func = Chunk::free_func_delete; - chunk->len += 4; - - DBG_LOG(DBG_CHUNKEDIO, "zlib write pass-through: size=%d", chunk->len); - } - else - { - zout.next_in = (Bytef*) chunk->data; - zout.avail_in = chunk->len; - zout.next_out = (Bytef*) compressed; - zout.avail_out = chunk->len; - - if ( deflate(&zout, Z_SYNC_FLUSH) != Z_OK ) - { - error = zout.msg; - return false; - } - - while ( zout.avail_out == 0 ) - { - // D'oh! Not enough space, i.e., it hasn't got smaller. - char* old = compressed; - int old_size = (char*) zout.next_out - compressed; - int new_size = old_size * 2 + sizeof(uint32); - - compressed = new char[new_size]; - memcpy(compressed, old, old_size); - delete [] old; - - zout.next_out = (Bytef*) (compressed + old_size); - zout.avail_out = old_size; // Sic! We doubled. - - if ( deflate(&zout, Z_SYNC_FLUSH) != Z_OK ) - { - error = zout.msg; - return false; - } - } - - *(uint32*) zout.next_out = original_size; // uncompressed_length - - chunk->free_func(chunk->data); - chunk->data = compressed; - chunk->free_func = Chunk::free_func_delete; - chunk->len = - ((char*) zout.next_out - compressed) + sizeof(uint32); - - DBG_LOG(DBG_CHUNKEDIO, "zlib write: size=%d compressed=%d", - original_size, chunk->len); - } - - return io->Write(chunk); - } - -void CompressedChunkedIO::Stats(char* buffer, int length) - { - const Statistics* stats = io->Stats(); - - int i = snprintf(buffer, length, "compression=%.2f/%.2f ", - uncompressed_bytes_read ? double(stats->bytes_read) / uncompressed_bytes_read : -1, - uncompressed_bytes_written ? double(stats->bytes_written) / uncompressed_bytes_written : -1 ); - - io->Stats(buffer + i, length - i); - buffer[length-1] = '\0'; - } diff --git a/src/ChunkedIO.h b/src/ChunkedIO.h deleted file mode 100644 index e9b41476df..0000000000 --- a/src/ChunkedIO.h +++ /dev/null @@ -1,362 +0,0 @@ -// Implements non-blocking chunk-wise I/O. - -#ifndef CHUNKEDIO_H -#define CHUNKEDIO_H - -#include "bro-config.h" -#include "List.h" -#include "util.h" -#include "Flare.h" -#include "iosource/FD_Set.h" -#include - -#ifdef NEED_KRB5_H -# include -#endif - -class CompressedChunkedIO; - -// #define DEBUG_COMMUNICATION 10 - -// Abstract base class. -class ChunkedIO { -public: - ChunkedIO(); - virtual ~ChunkedIO() { } - - struct Chunk { - typedef void (*FreeFunc)(char*); - static void free_func_free(char* data) { free(data); } - static void free_func_delete(char* data) { delete [] data; } - - Chunk() - : data(), len(), free_func(free_func_delete) - { } - - // Takes ownership of data. - Chunk(char* arg_data, uint32 arg_len, - FreeFunc arg_ff = free_func_delete) - : data(arg_data), len(arg_len), free_func(arg_ff) - { } - - ~Chunk() - { free_func(data); } - - char* data; - uint32 len; - FreeFunc free_func; - }; - - // Initialization before any I/O operation is performed. 
Returns false - // on any form of error. - virtual bool Init() { return true; } - - // Tries to read the next chunk of data. If it can be read completely, - // a pointer to it is returned in 'chunk' (ownership of chunk is - // passed). If not, 'chunk' is set to nil. Returns false if any - // I/O error occurred (use Eof() to see if it's an end-of-file). - // If 'may_block' is true, we explicitly allow blocking. - virtual bool Read(Chunk** chunk, bool may_block = false) = 0; - - // Puts the chunk into the write queue and writes as much data - // as possible (takes ownership of chunk). - // Returns false on any I/O error. - virtual bool Write(Chunk* chunk) = 0; - - // Tries to write as much as currently possible. - // Returns false on any I/O error. - virtual bool Flush() = 0; - - // If an I/O error has been encountered, returns a string describing it. - virtual const char* Error() = 0; - - // Return true if there is currently at least one chunk available - // for reading. - virtual bool CanRead() = 0; - - // Return true if there is currently at least one chunk waiting to be - // written. - virtual bool CanWrite() = 0; - - // Returns true if source believes that there won't be much data soon. - virtual bool IsIdle() = 0; - - // Returns true if internal write buffers are about to fill up. - virtual bool IsFillingUp() = 0; - - // Throws away buffered data. - virtual void Clear() = 0; - - // Returns true,if end-of-file has been reached. - virtual bool Eof() = 0; - - // Returns underlying fd if available, -1 otherwise. - virtual int Fd() { return -1; } - - // Returns supplementary file descriptors that become read-ready in order - // to signal that there is some work that can be performed. - virtual iosource::FD_Set ExtraReadFDs() const - { return iosource::FD_Set(); } - - // Makes sure that no additional protocol data is written into - // the output stream. If this is activated, the output cannot - // be read again by any of these classes! - void MakePure() { pure = true; } - bool IsPure() { return pure; } - - // Writes a log message to the error_fd. - void Log(const char* str); - - struct Statistics { - Statistics() - { - bytes_read = 0; - bytes_written = 0; - chunks_read = 0; - chunks_written = 0; - reads = 0; - writes = 0; - pending = 0; - } - - unsigned long bytes_read; - unsigned long bytes_written; - unsigned long chunks_read; - unsigned long chunks_written; - unsigned long reads; // # calls which transferred > 0 bytes - unsigned long writes; - unsigned long pending; - }; - - // Returns raw statistics. - const Statistics* Stats() const { return &stats; } - - // Puts a formatted string containing statistics into buffer. - virtual void Stats(char* buffer, int length); - -#ifdef DEBUG_COMMUNICATION - void DumpDebugData(const char* basefnname, bool want_reads); -#endif - -protected: - void InternalError(const char* msg) - // We can't use the reporter here as we might be running in a - // sub-process. - { fprintf(stderr, "%s", msg); abort(); } - - Statistics stats; - const char* tag; - -#ifdef DEBUG_COMMUNICATION - void AddToBuffer(char* data, bool is_read) - { AddToBuffer(strlen(data), data, is_read); } - void AddToBuffer(uint32 len, char* data, bool is_read); - void AddToBuffer(Chunk* chunk, bool is_read); - std::list data_read; - std::list data_written; -#endif - -private: - bool pure; -}; - -// Chunked I/O using a file descriptor. 
-class ChunkedIOFd : public ChunkedIO { -public: - // fd is an open bidirectional file descriptor, tag is used in error - // messages, and pid gives a pid to monitor (if the process dies, we - // return EOF). - ChunkedIOFd(int fd, const char* tag, pid_t pid = 0); - ~ChunkedIOFd() override; - - bool Read(Chunk** chunk, bool may_block = false) override; - bool Write(Chunk* chunk) override; - bool Flush() override; - const char* Error() override; - bool CanRead() override; - bool CanWrite() override; - bool IsIdle() override; - bool IsFillingUp() override; - void Clear() override; - bool Eof() override { return eof; } - int Fd() override { return fd; } - iosource::FD_Set ExtraReadFDs() const override; - void Stats(char* buffer, int length) override; - -private: - - bool PutIntoWriteBuffer(Chunk* chunk); - bool FlushWriteBuffer(); - Chunk* ExtractChunk(); - - // Returns size of next chunk in buffer or 0 if none. - uint32 ChunkAvailable(); - - // Flushes if it thinks it is time to. - bool OptionalFlush(); - - // Concatenates the the data of the two chunks forming a new one. - // The old chunkds are deleted. - Chunk* ConcatChunks(Chunk* c1, Chunk* c2); - - // Reads/writes on chunk of upto BUFFER_SIZE bytes. - bool WriteChunk(Chunk* chunk, bool partial); - bool ReadChunk(Chunk** chunk, bool may_block); - - int fd; - bool eof; - double last_flush; - int failed_reads; - - // Optimally, this should match the file descriptor's - // buffer size (for sockets, it may be helpful to - // increase the send/receive buffers). - static const unsigned int BUFFER_SIZE = 1024 * 1024 * 1; - - // We 'or' this to the length of a data chunk to mark - // that it's part of a larger one. This has to be larger - // than BUFFER_SIZE. - static const uint32 FLAG_PARTIAL = 0x80000000; - - char* read_buffer; - uint32 read_len; - uint32 read_pos; - Chunk* partial; // when we read an oversized chunk, we store it here - - char* write_buffer; - uint32 write_len; - uint32 write_pos; - - struct ChunkQueue { - Chunk* chunk; - ChunkQueue* next; - }; - - // Chunks that don't fit into our write buffer. - ChunkQueue* pending_head; - ChunkQueue* pending_tail; - - pid_t pid; - bro::Flare write_flare; - bro::Flare read_flare; -}; - -// From OpenSSL. We forward-declare these here to avoid introducing a -// dependency on OpenSSL headers just for this header file. -typedef struct ssl_ctx_st SSL_CTX; -typedef struct ssl_st SSL; - -// Chunked I/O using an SSL connection. -class ChunkedIOSSL : public ChunkedIO { -public: - // Argument is an open socket and a flag indicating whether we are the - // server side of the connection. - ChunkedIOSSL(int socket, bool server); - ~ChunkedIOSSL() override; - - bool Init() override; - bool Read(Chunk** chunk, bool mayblock = false) override; - bool Write(Chunk* chunk) override; - bool Flush() override; - const char* Error() override; - bool CanRead() override; - bool CanWrite() override; - bool IsIdle() override; - bool IsFillingUp() override; - void Clear() override; - bool Eof() override { return eof; } - int Fd() override { return socket; } - iosource::FD_Set ExtraReadFDs() const override; - void Stats(char* buffer, int length) override; - -private: - - // Only returns true if all data has been read. If not, call - // it again with the same parameters as long as error is not - // set to true. - bool ReadData(char* p, uint32 len, bool* error); - // Same for writing. 
- bool WriteData(char* p, uint32 len, bool* error); - - int socket; - int last_ret; // last error code - bool eof; - - bool server; // are we the server? - bool setup; // has the connection been setup successfully? - - SSL* ssl; - - // Write queue. - struct Queue { - Chunk* chunk; - Queue* next; - }; - - // The chunk part we are reading/writing - enum State { LEN, DATA }; - - State write_state; - Queue* write_head; - Queue* write_tail; - - State read_state; - Chunk* read_chunk; - char* read_ptr; - - // One SSL for all connections. - static SSL_CTX* ctx; - - bro::Flare write_flare; -}; - -#include - -// Wrapper class around a another ChunkedIO which the (un-)compresses data. -class CompressedChunkedIO : public ChunkedIO { -public: - explicit CompressedChunkedIO(ChunkedIO* arg_io) // takes ownership - : io(arg_io), zin(), zout(), error(), compress(), uncompress(), - uncompressed_bytes_read(), uncompressed_bytes_written() {} - ~CompressedChunkedIO() override { delete io; } - - bool Init() override; // does *not* call arg_io->Init() - bool Read(Chunk** chunk, bool may_block = false) override; - bool Write(Chunk* chunk) override; - bool Flush() override { return io->Flush(); } - const char* Error() override { return error ? error : io->Error(); } - bool CanRead() override { return io->CanRead(); } - bool CanWrite() override { return io->CanWrite(); } - bool IsIdle() override { return io->IsIdle(); } - bool IsFillingUp() override { return io->IsFillingUp(); } - void Clear() override { return io->Clear(); } - bool Eof() override { return io->Eof(); } - - int Fd() override { return io->Fd(); } - iosource::FD_Set ExtraReadFDs() const override - { return io->ExtraReadFDs(); } - void Stats(char* buffer, int length) override; - - void EnableCompression(int level) - { deflateInit(&zout, level); compress = true; } - void EnableDecompression() - { inflateInit(&zin); uncompress = true; } - -protected: - // Only compress block with size >= this. - static const unsigned int MIN_COMPRESS_SIZE = 30; - - ChunkedIO* io; - z_stream zin; - z_stream zout; - const char* error; - - bool compress; - bool uncompress; - - // Keep some statistics. - unsigned long uncompressed_bytes_read; - unsigned long uncompressed_bytes_written; -}; - -#endif diff --git a/src/CompHash.cc b/src/CompHash.cc index cc3ad8cb72..4e5366edde 100644 --- a/src/CompHash.cc +++ b/src/CompHash.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include "CompHash.h" #include "Val.h" @@ -677,7 +677,7 @@ ListVal* CompositeHash::RecoverVals(const HashKey* k) const loop_over_list(*tl, i) { - Val* v; + Val* v = nullptr; kp = RecoverOneVal(k, kp, k_end, (*tl)[i], v, false); ASSERT(v); l->Append(v); diff --git a/src/Conn.cc b/src/Conn.cc index 03ecf32703..31c1b1a191 100644 --- a/src/Conn.cc +++ b/src/Conn.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include @@ -50,70 +50,10 @@ void ConnectionTimer::Dispatch(double t, int is_expire) reporter->InternalError("reference count inconsistency in ConnectionTimer::Dispatch"); } -IMPLEMENT_SERIAL(ConnectionTimer, SER_CONNECTION_TIMER); - -bool ConnectionTimer::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_CONNECTION_TIMER, Timer); - - // We enumerate all the possible timer functions here ... This - // has to match the list is DoUnserialize()! 
- char type = 0; - - if ( timer == timer_func(&Connection::DeleteTimer) ) - type = 1; - else if ( timer == timer_func(&Connection::InactivityTimer) ) - type = 2; - else if ( timer == timer_func(&Connection::StatusUpdateTimer) ) - type = 3; - else if ( timer == timer_func(&Connection::RemoveConnectionTimer) ) - type = 4; - else - reporter->InternalError("unknown function in ConnectionTimer::DoSerialize()"); - - return conn->Serialize(info) && SERIALIZE(type) && SERIALIZE(do_expire); - } - -bool ConnectionTimer::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Timer); - - conn = Connection::Unserialize(info); - if ( ! conn ) - return false; - - char type; - - if ( ! UNSERIALIZE(&type) || ! UNSERIALIZE(&do_expire) ) - return false; - - switch ( type ) { - case 1: - timer = timer_func(&Connection::DeleteTimer); - break; - case 2: - timer = timer_func(&Connection::InactivityTimer); - break; - case 3: - timer = timer_func(&Connection::StatusUpdateTimer); - break; - case 4: - timer = timer_func(&Connection::RemoveConnectionTimer); - break; - default: - info->s->Error("unknown connection timer function"); - return false; - } - - return true; - } - uint64 Connection::total_connections = 0; uint64 Connection::current_connections = 0; uint64 Connection::external_connections = 0; -IMPLEMENT_SERIAL(Connection, SER_CONNECTION); - Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, uint32 flow, const Packet* pkt, const EncapsulationStack* arg_encap) @@ -151,7 +91,6 @@ Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id, is_active = 1; skip = 0; weird = 0; - persistent = 0; suppress_event = 0; @@ -325,12 +264,11 @@ void Connection::HistoryThresholdEvent(EventHandlerPtr e, bool is_orig, // and at this stage it's not a *multiple* instance. 
return; - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(is_orig)); - vl->append(val_mgr->GetCount(threshold)); - - ConnectionEvent(e, 0, vl); + ConnectionEventFast(e, 0, { + BuildConnVal(), + val_mgr->GetBool(is_orig), + val_mgr->GetCount(threshold) + }); } void Connection::DeleteTimer(double /* t */) @@ -390,9 +328,7 @@ void Connection::EnableStatusUpdateTimer() void Connection::StatusUpdateTimer(double t) { - val_list* vl = new val_list(1); - vl->append(BuildConnVal()); - ConnectionEvent(connection_status_update, 0, vl); + ConnectionEventFast(connection_status_update, 0, { BuildConnVal() }); ADD_TIMER(&Connection::StatusUpdateTimer, network_time + connection_status_update_interval, 0, TIMER_CONN_STATUS_UPDATE); @@ -630,23 +566,23 @@ int Connection::VersionFoundEvent(const IPAddr& addr, const char* s, int len, { if ( software_parse_error ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(new AddrVal(addr)); - vl->append(new StringVal(len, s)); - ConnectionEvent(software_parse_error, analyzer, vl); + ConnectionEventFast(software_parse_error, analyzer, { + BuildConnVal(), + new AddrVal(addr), + new StringVal(len, s), + }); } return 0; } if ( software_version_found ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(new AddrVal(addr)); - vl->append(val); - vl->append(new StringVal(len, s)); - ConnectionEvent(software_version_found, 0, vl); + ConnectionEventFast(software_version_found, 0, { + BuildConnVal(), + new AddrVal(addr), + val, + new StringVal(len, s), + }); } else Unref(val); @@ -669,11 +605,11 @@ int Connection::UnparsedVersionFoundEvent(const IPAddr& addr, if ( software_unparsed_version_found ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(new AddrVal(addr)); - vl->append(new StringVal(len, full)); - ConnectionEvent(software_unparsed_version_found, analyzer, vl); + ConnectionEventFast(software_unparsed_version_found, analyzer, { + BuildConnVal(), + new AddrVal(addr), + new StringVal(len, full), + }); } return 1; @@ -684,12 +620,11 @@ void Connection::Event(EventHandlerPtr f, analyzer::Analyzer* analyzer, const ch if ( ! f ) return; - val_list* vl = new val_list(2); if ( name ) - vl->append(new StringVal(name)); - vl->append(BuildConnVal()); + ConnectionEventFast(f, analyzer, {new StringVal(name), BuildConnVal()}); + else + ConnectionEventFast(f, analyzer, {BuildConnVal()}); - ConnectionEvent(f, analyzer, vl); } void Connection::Event(EventHandlerPtr f, analyzer::Analyzer* analyzer, Val* v1, Val* v2) @@ -701,33 +636,42 @@ void Connection::Event(EventHandlerPtr f, analyzer::Analyzer* analyzer, Val* v1, return; } - val_list* vl = new val_list(3); - vl->append(BuildConnVal()); - vl->append(v1); - if ( v2 ) - vl->append(v2); - - ConnectionEvent(f, analyzer, vl); + ConnectionEventFast(f, analyzer, {BuildConnVal(), v1, v2}); + else + ConnectionEventFast(f, analyzer, {BuildConnVal(), v1}); } -void Connection::ConnectionEvent(EventHandlerPtr f, analyzer::Analyzer* a, val_list* vl) +void Connection::ConnectionEvent(EventHandlerPtr f, analyzer::Analyzer* a, val_list vl) { if ( ! f ) { // This may actually happen if there is no local handler // and a previously existing remote handler went away. - loop_over_list(*vl, i) - Unref((*vl)[i]); - delete vl; + loop_over_list(vl, i) + Unref(vl[i]); + return; } // "this" is passed as a cookie for the event - mgr.QueueEvent(f, vl, SOURCE_LOCAL, + mgr.QueueEvent(f, std::move(vl), SOURCE_LOCAL, a ? 
a->GetID() : 0, GetTimerMgr(), this); } +void Connection::ConnectionEventFast(EventHandlerPtr f, analyzer::Analyzer* a, val_list vl) + { + // "this" is passed as a cookie for the event + mgr.QueueEventFast(f, std::move(vl), SOURCE_LOCAL, + a ? a->GetID() : 0, GetTimerMgr(), this); + } + +void Connection::ConnectionEvent(EventHandlerPtr f, analyzer::Analyzer* a, val_list* vl) + { + ConnectionEvent(f, a, std::move(*vl)); + delete vl; + } + void Connection::Weird(const char* name, const char* addl) { weird = 1; @@ -896,144 +840,6 @@ void Connection::IDString(ODesc* d) const d->Add(ntohs(resp_port)); } -bool Connection::Serialize(SerialInfo* info) const - { - return SerialObj::Serialize(info); - } - -Connection* Connection::Unserialize(UnserialInfo* info) - { - return (Connection*) SerialObj::Unserialize(info, SER_CONNECTION); - } - -bool Connection::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_CONNECTION, BroObj); - - // First we write the members which are needed to - // create the HashKey. - if ( ! SERIALIZE(orig_addr) || ! SERIALIZE(resp_addr) ) - return false; - - if ( ! SERIALIZE(orig_port) || ! SERIALIZE(resp_port) ) - return false; - - if ( ! SERIALIZE(timers.length()) ) - return false; - - loop_over_list(timers, i) - if ( ! timers[i]->Serialize(info) ) - return false; - - SERIALIZE_OPTIONAL(conn_val); - - // FIXME: RuleEndpointState not yet serializable. - // FIXME: Analyzers not yet serializable. - - return - SERIALIZE(int(proto)) && - SERIALIZE(history) && - SERIALIZE(hist_seen) && - SERIALIZE(start_time) && - SERIALIZE(last_time) && - SERIALIZE(inactivity_timeout) && - SERIALIZE(suppress_event) && - SERIALIZE(login_conn != 0) && - SERIALIZE_BIT(installed_status_timer) && - SERIALIZE_BIT(timers_canceled) && - SERIALIZE_BIT(is_active) && - SERIALIZE_BIT(skip) && - SERIALIZE_BIT(weird) && - SERIALIZE_BIT(finished) && - SERIALIZE_BIT(record_packets) && - SERIALIZE_BIT(record_contents) && - SERIALIZE_BIT(persistent); - } - -bool Connection::DoUnserialize(UnserialInfo* info) - { - // Make sure this is initialized for the condition in Unserialize(). - persistent = 0; - - DO_UNSERIALIZE(BroObj); - - // Build the hash key first. Some of the recursive *::Unserialize() - // functions may need it. - ConnID id; - - if ( ! UNSERIALIZE(&orig_addr) || ! UNSERIALIZE(&resp_addr) ) - goto error; - - if ( ! UNSERIALIZE(&orig_port) || ! UNSERIALIZE(&resp_port) ) - goto error; - - id.src_addr = orig_addr; - id.dst_addr = resp_addr; - // This doesn't work for ICMP. But I guess this is not really important. - id.src_port = orig_port; - id.dst_port = resp_port; - id.is_one_way = 0; // ### incorrect for ICMP - key = BuildConnIDHashKey(id); - - int len; - if ( ! UNSERIALIZE(&len) ) - goto error; - - while ( len-- ) - { - Timer* t = Timer::Unserialize(info); - if ( ! t ) - goto error; - timers.append(t); - } - - UNSERIALIZE_OPTIONAL(conn_val, - (RecordVal*) Val::Unserialize(info, connection_type)); - - int iproto; - - if ( ! (UNSERIALIZE(&iproto) && - UNSERIALIZE(&history) && - UNSERIALIZE(&hist_seen) && - UNSERIALIZE(&start_time) && - UNSERIALIZE(&last_time) && - UNSERIALIZE(&inactivity_timeout) && - UNSERIALIZE(&suppress_event)) ) - goto error; - - proto = static_cast(iproto); - - bool has_login_conn; - if ( ! UNSERIALIZE(&has_login_conn) ) - goto error; - - login_conn = has_login_conn ? 
(LoginConn*) this : 0; - - UNSERIALIZE_BIT(installed_status_timer); - UNSERIALIZE_BIT(timers_canceled); - UNSERIALIZE_BIT(is_active); - UNSERIALIZE_BIT(skip); - UNSERIALIZE_BIT(weird); - UNSERIALIZE_BIT(finished); - UNSERIALIZE_BIT(record_packets); - UNSERIALIZE_BIT(record_contents); - UNSERIALIZE_BIT(persistent); - - // Hmm... Why does each connection store a sessions ptr? - sessions = ::sessions; - - root_analyzer = 0; - primary_PIA = 0; - conn_timer_mgr = 0; - - return true; - -error: - abort(); - CancelTimers(); - return false; - } - void Connection::SetRootAnalyzer(analyzer::TransportLayerAnalyzer* analyzer, analyzer::pia::PIA* pia) { root_analyzer = analyzer; @@ -1055,12 +861,12 @@ void Connection::CheckFlowLabel(bool is_orig, uint32 flow_label) if ( connection_flow_label_changed && (is_orig ? saw_first_orig_packet : saw_first_resp_packet) ) { - val_list* vl = new val_list(4); - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(is_orig)); - vl->append(val_mgr->GetCount(my_flow_label)); - vl->append(val_mgr->GetCount(flow_label)); - ConnectionEvent(connection_flow_label_changed, 0, vl); + ConnectionEventFast(connection_flow_label_changed, 0, { + BuildConnVal(), + val_mgr->GetBool(is_orig), + val_mgr->GetCount(my_flow_label), + val_mgr->GetCount(flow_label), + }); } my_flow_label = flow_label; diff --git a/src/Conn.h b/src/Conn.h index e49314968a..bd5ddaae92 100644 --- a/src/Conn.h +++ b/src/Conn.h @@ -11,8 +11,6 @@ #include "Dict.h" #include "Val.h" #include "Timer.h" -#include "Serializer.h" -#include "PersistenceSerializer.h" #include "RuleMatcher.h" #include "IPAddr.h" #include "TunnelEncapsulation.h" @@ -174,11 +172,42 @@ public: int UnparsedVersionFoundEvent(const IPAddr& addr, const char* full_descr, int len, analyzer::Analyzer* analyzer); + // If a handler exists for 'f', an event will be generated. If 'name' is + // given, that event's first argument will be it, and its second will be + // the connection value. If 'name' is null, then the event's first + // argument is the connection value. void Event(EventHandlerPtr f, analyzer::Analyzer* analyzer, const char* name = 0); + + // If a handler exists for 'f', an event will be generated. In any case, + // 'v1' and 'v2' reference counts get decremented. The event's first + // argument is the connection value, second argument is 'v1', and if 'v2' + // is given that will be its third argument. void Event(EventHandlerPtr f, analyzer::Analyzer* analyzer, Val* v1, Val* v2 = 0); + + // If a handler exists for 'f', an event will be generated. In any case, + // reference counts for each element in the 'vl' list are decremented. The + // arguments used for the event are whatever is provided in 'vl'. void ConnectionEvent(EventHandlerPtr f, analyzer::Analyzer* analyzer, + val_list vl); + + // Same as ConnectionEvent, except taking the event's argument list via a + // pointer instead of by value. This function takes ownership of the + // memory pointed to by 'vl' and is also responsible for decrementing the reference count + // of each of its elements. void ConnectionEvent(EventHandlerPtr f, analyzer::Analyzer* analyzer, val_list* vl); + // Queues an event without first checking if there's any available event + // handlers (or remote consumers). If it turns out there's actually nothing + // that will consume the event, then this may leak memory due to failing to + // decrement the reference count of each element in 'vl'. i.e.
use this + // function instead of ConnectionEvent() if you've already guarded against + // the case where there's no handlers (one usually also does that because + // it would be a waste of effort to construct all the event arguments when + // there's no handlers to consume them). + void ConnectionEventFast(EventHandlerPtr f, analyzer::Analyzer* analyzer, + val_list vl); + void Weird(const char* name, const char* addl = ""); bool DidWeird() const { return weird != 0; } @@ -197,14 +226,6 @@ public: return 1; } - void MakePersistent() - { - persistent = 1; - persistence_serializer->Register(this); - } - - bool IsPersistent() { return persistent; } - void Describe(ODesc* d) const override; void IDString(ODesc* d) const; @@ -213,11 +234,6 @@ public: // Returns true if connection has been received externally. bool IsExternal() const { return conn_timer_mgr != 0; } - bool Serialize(SerialInfo* info) const; - static Connection* Unserialize(UnserialInfo* info); - - DECLARE_SERIAL(Connection); - // Statistics. // Just a lower bound. @@ -284,7 +300,7 @@ public: protected: - Connection() { persistent = 0; } + Connection() { } // Add the given timer to expire at time t. If do_expire // is true, then the timer is also evaluated when Bro terminates, @@ -330,7 +346,6 @@ protected: unsigned int weird:1; unsigned int finished:1; unsigned int record_packets:1, record_contents:1; - unsigned int persistent:1; unsigned int record_current_packet:1, record_current_content:1; unsigned int saw_first_orig_packet:1, saw_first_resp_packet:1; @@ -364,8 +379,6 @@ protected: void Init(Connection* conn, timer_func timer, int do_expire); - DECLARE_SERIAL(ConnectionTimer); - Connection* conn; timer_func timer; int do_expire; diff --git a/src/DFA.cc b/src/DFA.cc index 00f56ef16e..448307e3fe 100644 --- a/src/DFA.cc +++ b/src/DFA.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include "EquivClass.h" #include "DFA.h" diff --git a/src/DFA.h b/src/DFA.h index 2f06f4e98f..1b58774da0 100644 --- a/src/DFA.h +++ b/src/DFA.h @@ -111,9 +111,6 @@ private: PDict(CacheEntry) states; }; -declare(PList,DFA_State); -typedef PList(DFA_State) DFA_state_list; - class DFA_Machine : public BroObj { public: DFA_Machine(NFA_Machine* n, EquivClass* ec); diff --git a/src/DNS_Mgr.cc b/src/DNS_Mgr.cc index 2fff6903b0..c02be19d82 100644 --- a/src/DNS_Mgr.cc +++ b/src/DNS_Mgr.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include #include @@ -289,10 +289,13 @@ ListVal* DNS_Mapping::Addrs() TableVal* DNS_Mapping::AddrsSet() { ListVal* l = Addrs(); - if ( l ) - return l->ConvertToSet(); - else + + if ( ! l ) return empty_addr_set(); + + auto rval = l->ConvertToSet(); + Unref(l); + return rval; } StringVal* DNS_Mapping::Host() @@ -388,6 +391,8 @@ DNS_Mgr::DNS_Mgr(DNS_MgrMode arg_mode) num_requests = 0; successful = 0; failed = 0; + nb_dns = nullptr; + next_timestamp = -1.0; } DNS_Mgr::~DNS_Mgr() @@ -399,16 +404,21 @@ DNS_Mgr::~DNS_Mgr() delete [] dir; } -void DNS_Mgr::InitPostScript() +void DNS_Mgr::Init() { if ( did_init ) return; - auto dns_resolver_id = global_scope()->Lookup("dns_resolver"); - auto dns_resolver_addr = dns_resolver_id->ID_Val()->AsAddr(); + // Note that Init() may be called by way of LookupHost() during the act of + // parsing a hostname literal (e.g. 
google.com), so we can't use a + // script-layer option to configure the DNS resolver as it may not be + // configured to the user's desired address at the time when we need to do + // the lookup. + auto dns_resolver = zeekenv("ZEEK_DNS_RESOLVER"); + auto dns_resolver_addr = dns_resolver ? IPAddr(dns_resolver) : IPAddr(); char err[NB_DNS_ERRSIZE]; - if ( dns_resolver_addr == IPAddr("::") ) + if ( dns_resolver_addr == IPAddr() ) nb_dns = nb_dns_init(err); else { @@ -433,19 +443,11 @@ void DNS_Mgr::InitPostScript() if ( ! nb_dns ) reporter->Warning("problem initializing NB-DNS: %s", err); - const char* cache_dir = dir ? dir : "."; - - if ( mode == DNS_PRIME && ! ensure_dir(cache_dir) ) - { - did_init = 0; - return; - } - - cache_name = new char[strlen(cache_dir) + 64]; - sprintf(cache_name, "%s/%s", cache_dir, ".bro-dns-cache"); - - LoadCache(fopen(cache_name, "r")); + did_init = true; + } +void DNS_Mgr::InitPostScript() + { dns_mapping_valid = internal_handler("dns_mapping_valid"); dns_mapping_unverified = internal_handler("dns_mapping_unverified"); dns_mapping_new_name = internal_handler("dns_mapping_new_name"); @@ -455,14 +457,18 @@ void DNS_Mgr::InitPostScript() dm_rec = internal_type("dns_mapping")->AsRecordType(); - did_init = 1; - + // Registering will call Init() iosource_mgr->Register(this, true); // We never set idle to false, having the main loop only calling us from // time to time. If we're issuing more DNS requests than we can handle // in this way, we are having problems anyway ... SetIdle(true); + + const char* cache_dir = dir ? dir : "."; + cache_name = new char[strlen(cache_dir) + 64]; + sprintf(cache_name, "%s/%s", cache_dir, ".bro-dns-cache"); + LoadCache(fopen(cache_name, "r")); } static TableVal* fake_name_lookup_result(const char* name) @@ -497,12 +503,11 @@ TableVal* DNS_Mgr::LookupHost(const char* name) if ( mode == DNS_FAKE ) return fake_name_lookup_result(name); + Init(); + if ( ! nb_dns ) return empty_addr_set(); - if ( ! did_init ) - Init(); - if ( mode != DNS_PRIME ) { HostMap::iterator it = host_mappings.find(name); @@ -553,8 +558,7 @@ TableVal* DNS_Mgr::LookupHost(const char* name) Val* DNS_Mgr::LookupAddr(const IPAddr& addr) { - if ( ! did_init ) - Init(); + Init(); if ( mode != DNS_PRIME ) { @@ -699,25 +703,27 @@ int DNS_Mgr::Save() return 1; } +void DNS_Mgr::Event(EventHandlerPtr e, DNS_Mapping* dm) + { + if ( ! e ) + return; + + mgr.QueueEventFast(e, {BuildMappingVal(dm)}); + } + void DNS_Mgr::Event(EventHandlerPtr e, DNS_Mapping* dm, ListVal* l1, ListVal* l2) { if ( ! e ) return; - val_list* vl = new val_list; - vl->append(BuildMappingVal(dm)); + Unref(l1); + Unref(l2); - if ( l1 ) - { - vl->append(l1->ConvertToSet()); - if ( l2 ) - vl->append(l2->ConvertToSet()); - - Unref(l1); - Unref(l2); - } - - mgr.QueueEvent(e, vl); + mgr.QueueEventFast(e, { + BuildMappingVal(dm), + l1->ConvertToSet(), + l2->ConvertToSet(), + }); } void DNS_Mgr::Event(EventHandlerPtr e, DNS_Mapping* old_dm, DNS_Mapping* new_dm) @@ -725,10 +731,10 @@ void DNS_Mgr::Event(EventHandlerPtr e, DNS_Mapping* old_dm, DNS_Mapping* new_dm) if ( ! e ) return; - val_list* vl = new val_list; - vl->append(BuildMappingVal(old_dm)); - vl->append(BuildMappingVal(new_dm)); - mgr.QueueEvent(e, vl); + mgr.QueueEventFast(e, { + BuildMappingVal(old_dm), + BuildMappingVal(new_dm), + }); } Val* DNS_Mgr::BuildMappingVal(DNS_Mapping* dm) @@ -1072,8 +1078,7 @@ static void resolve_lookup_cb(DNS_Mgr::LookupCallback* callback, void DNS_Mgr::AsyncLookupAddr(const IPAddr& host, LookupCallback* callback) { - if ( !
did_init ) - Init(); + Init(); if ( mode == DNS_FAKE ) { @@ -1111,8 +1116,7 @@ void DNS_Mgr::AsyncLookupAddr(const IPAddr& host, LookupCallback* callback) void DNS_Mgr::AsyncLookupName(const string& name, LookupCallback* callback) { - if ( ! did_init ) - Init(); + Init(); if ( mode == DNS_FAKE ) { @@ -1150,8 +1154,7 @@ void DNS_Mgr::AsyncLookupName(const string& name, LookupCallback* callback) void DNS_Mgr::AsyncLookupNameText(const string& name, LookupCallback* callback) { - if ( ! did_init ) - Init(); + Init(); if ( mode == DNS_FAKE ) { @@ -1250,8 +1253,17 @@ void DNS_Mgr::GetFds(iosource::FD_Set* read, iosource::FD_Set* write, double DNS_Mgr::NextTimestamp(double* network_time) { - // This is kind of cheating ... - return asyncs_timeouts.size() ? timer_mgr->Time() : -1.0; + if ( asyncs_timeouts.empty() ) + // No pending requests. + return -1.0; + + if ( next_timestamp < 0 ) + // Store the timestamp to help prevent starvation by some other + // IOSource always trying to use the same timestamp + // (assuming network_time does actually increase). + next_timestamp = timer_mgr->Time(); + + return next_timestamp; } void DNS_Mgr::CheckAsyncAddrRequest(const IPAddr& addr, bool timeout) @@ -1357,7 +1369,7 @@ void DNS_Mgr::CheckAsyncHostRequest(const char* host, bool timeout) void DNS_Mgr::Flush() { - DoProcess(false); + DoProcess(); HostMap::iterator it; for ( it = host_mappings.begin(); it != host_mappings.end(); ++it ) @@ -1379,10 +1391,11 @@ void DNS_Mgr::Flush() void DNS_Mgr::Process() { - DoProcess(false); + DoProcess(); + next_timestamp = -1.0; } -void DNS_Mgr::DoProcess(bool flush) +void DNS_Mgr::DoProcess() { if ( ! nb_dns ) return; @@ -1391,23 +1404,23 @@ void DNS_Mgr::DoProcess(bool flush) { AsyncRequest* req = asyncs_timeouts.top(); - if ( req->time + DNS_TIMEOUT > current_time() || flush ) + if ( req->time + DNS_TIMEOUT > current_time() ) break; - if ( req->IsAddrReq() ) - CheckAsyncAddrRequest(req->host, true); - else if ( req->is_txt ) - CheckAsyncTextRequest(req->name.c_str(), true); - else - CheckAsyncHostRequest(req->name.c_str(), true); + if ( ! req->processed ) + { + if ( req->IsAddrReq() ) + CheckAsyncAddrRequest(req->host, true); + else if ( req->is_txt ) + CheckAsyncTextRequest(req->name.c_str(), true); + else + CheckAsyncHostRequest(req->name.c_str(), true); + } asyncs_timeouts.pop(); delete req; } - if ( asyncs_addrs.size() == 0 && asyncs_names.size() == 0 && asyncs_texts.size() == 0 ) - return; - if ( AnswerAvailable(0) <= 0 ) return; diff --git a/src/DNS_Mgr.h b/src/DNS_Mgr.h index 0358ceba18..39f728c812 100644 --- a/src/DNS_Mgr.h +++ b/src/DNS_Mgr.h @@ -9,7 +9,7 @@ #include #include "util.h" -#include "BroList.h" +#include "List.h" #include "Dict.h" #include "EventHandler.h" #include "iosource/IOSource.h" @@ -23,6 +23,9 @@ class EventHandler; class RecordType; class DNS_Mgr_Request; +declare(PList,DNS_Mgr_Request); +typedef PList(DNS_Mgr_Request) DNS_mgr_request_list; + struct nb_dns_info; struct nb_dns_result; @@ -96,8 +99,8 @@ protected: friend class LookupCallback; friend class DNS_Mgr_Request; - void Event(EventHandlerPtr e, DNS_Mapping* dm, - ListVal* l1 = 0, ListVal* l2 = 0); + void Event(EventHandlerPtr e, DNS_Mapping* dm); + void Event(EventHandlerPtr e, DNS_Mapping* dm, ListVal* l1, ListVal* l2); void Event(EventHandlerPtr e, DNS_Mapping* old_dm, DNS_Mapping* new_dm); Val* BuildMappingVal(DNS_Mapping* dm); @@ -129,13 +132,14 @@ protected: void CheckAsyncTextRequest(const char* host, bool timeout); // Process outstanding requests. 
- void DoProcess(bool flush); + void DoProcess(); // IOSource interface. void GetFds(iosource::FD_Set* read, iosource::FD_Set* write, iosource::FD_Set* except) override; double NextTimestamp(double* network_time) override; void Process() override; + void Init() override; const char* Tag() override { return "DNS_Mgr"; } DNS_MgrMode mode; @@ -168,12 +172,13 @@ protected: struct AsyncRequest { double time; + bool is_txt; + bool processed; IPAddr host; string name; - bool is_txt; CallbackList callbacks; - AsyncRequest() : time(0.0), is_txt(false) { } + AsyncRequest() : time(0.0), is_txt(false), processed(false) { } bool IsAddrReq() const { return name.length() == 0; } @@ -186,6 +191,7 @@ protected: delete *i; } callbacks.clear(); + processed = true; } void Resolved(TableVal* addrs) @@ -197,6 +203,7 @@ protected: delete *i; } callbacks.clear(); + processed = true; } void Timeout() @@ -208,6 +215,7 @@ protected: delete *i; } callbacks.clear(); + processed = true; } }; @@ -224,7 +232,14 @@ protected: typedef list QueuedList; QueuedList asyncs_queued; - typedef priority_queue TimeoutQueue; + struct AsyncRequestCompare { + bool operator()(const AsyncRequest* a, const AsyncRequest* b) + { + return a->time > b->time; + } + }; + + typedef priority_queue, AsyncRequestCompare> TimeoutQueue; TimeoutQueue asyncs_timeouts; int asyncs_pending; @@ -232,6 +247,7 @@ protected: unsigned long num_requests; unsigned long successful; unsigned long failed; + double next_timestamp; }; extern DNS_Mgr* dns_mgr; diff --git a/src/DbgBreakpoint.cc b/src/DbgBreakpoint.cc index c573a8d3b8..b1223486d3 100644 --- a/src/DbgBreakpoint.cc +++ b/src/DbgBreakpoint.cc @@ -1,6 +1,6 @@ // Implementation of breakpoints. -#include "bro-config.h" +#include "zeek-config.h" #include diff --git a/src/DbgHelp.cc b/src/DbgHelp.cc index 6bbf9c6ecb..d7d11de3f0 100644 --- a/src/DbgHelp.cc +++ b/src/DbgHelp.cc @@ -1,5 +1,5 @@ // Bro Debugger Help -#include "bro-config.h" +#include "zeek-config.h" #include "Debug.h" diff --git a/src/DbgWatch.cc b/src/DbgWatch.cc index c34144dc1f..8ea7d96fa1 100644 --- a/src/DbgWatch.cc +++ b/src/DbgWatch.cc @@ -1,6 +1,6 @@ // Implementation of watches -#include "bro-config.h" +#include "zeek-config.h" #include "Debug.h" #include "DbgWatch.h" diff --git a/src/Debug.cc b/src/Debug.cc index 54a40c58d1..bf23ff105b 100644 --- a/src/Debug.cc +++ b/src/Debug.cc @@ -1,6 +1,6 @@ // Debugging support for Bro policy files. -#include "bro-config.h" +#include "zeek-config.h" #include #include @@ -348,7 +348,7 @@ vector parse_location_string(const string& s) if ( ! 
sscanf(line_string.c_str(), "%d", &plr.line) ) plr.type = plrUnknown; - string path(find_file(filename, bro_path(), "bro")); + string path(find_script_file(filename, bro_path())); if ( path.empty() ) { @@ -721,7 +721,7 @@ static char* get_prompt(bool reset_counter = false) if ( reset_counter ) counter = 0; - safe_snprintf(prompt, sizeof(prompt), "(Bro [%d]) ", counter++); + safe_snprintf(prompt, sizeof(prompt), "(Zeek [%d]) ", counter++); return prompt; } diff --git a/src/DebugCmdInfoConstants.cc b/src/DebugCmdInfoConstants.cc index 13b83049ab..c947b3a1fd 100644 --- a/src/DebugCmdInfoConstants.cc +++ b/src/DebugCmdInfoConstants.cc @@ -35,7 +35,7 @@ void init_global_dbg_constants () { "quit" }; - info = new DebugCmdInfo (dcQuit, names, 1, false, "Exit Bro", + info = new DebugCmdInfo (dcQuit, names, 1, false, "Exit Zeek", false); g_DebugCmdInfos.push_back(info); } diff --git a/src/DebugCmdInfoConstants.in b/src/DebugCmdInfoConstants.in index 339733c645..ad90d7ed83 100644 --- a/src/DebugCmdInfoConstants.in +++ b/src/DebugCmdInfoConstants.in @@ -13,7 +13,7 @@ help: Get help with debugger commands cmd: dcQuit names: quit resume: false -help: Exit Bro +help: Exit Zeek cmd: dcNext names: next diff --git a/src/DebugCmds.cc b/src/DebugCmds.cc index 4e856b00f5..d11efb0390 100644 --- a/src/DebugCmds.cc +++ b/src/DebugCmds.cc @@ -1,7 +1,7 @@ // Support routines to help deal with Bro debugging commands and // implementation of most commands. -#include "bro-config.h" +#include "zeek-config.h" #include diff --git a/src/DebugLogger.cc b/src/DebugLogger.cc index 07590590df..6af7e26e38 100644 --- a/src/DebugLogger.cc +++ b/src/DebugLogger.cc @@ -11,14 +11,13 @@ DebugLogger debug_logger; // Same order here as in DebugStream. DebugLogger::Stream DebugLogger::streams[NUM_DBGS] = { - { "serial", 0, false }, { "rules", 0, false }, { "comm", 0, false }, - { "state", 0, false }, { "chunkedio", 0, false }, - { "compressor", 0, false }, {"string", 0, false }, + { "serial", 0, false }, { "rules", 0, false }, + { "string", 0, false }, { "notifiers", 0, false }, { "main-loop", 0, false }, { "dpd", 0, false }, { "tm", 0, false }, { "logging", 0, false }, {"input", 0, false }, { "threading", 0, false }, { "file_analysis", 0, false }, - { "plugins", 0, false }, { "broxygen", 0, false }, + { "plugins", 0, false }, { "zeekygen", 0, false }, { "pktio", 0, false }, { "broker", 0, false }, { "scripts", 0, false} }; @@ -72,7 +71,7 @@ void DebugLogger::ShowStreamsHelp() fprintf(stderr," %s\n", streams[i].prefix); fprintf(stderr, "\n"); - fprintf(stderr, " plugin- (replace '::' in name with '-'; e.g., '-B plugin-Bro-Netmap')\n"); + fprintf(stderr, " plugin- (replace '::' in name with '-'; e.g., '-B plugin-Zeek-Netmap')\n"); fprintf(stderr, "\n"); fprintf(stderr, "Pseudo streams\n"); fprintf(stderr, " verbose Increase verbosity.\n"); diff --git a/src/DebugLogger.h b/src/DebugLogger.h index 1eb8e30417..0e2862dc23 100644 --- a/src/DebugLogger.h +++ b/src/DebugLogger.h @@ -14,14 +14,10 @@ // an entry to DebugLogger::streams in DebugLogger.cc. 
enum DebugStream { - DBG_SERIAL, // Serialization + DBG_SERIAL, // Serialization DBG_RULES, // Signature matching - DBG_COMM, // Remote communication - DBG_STATE, // StateAccess logging - DBG_CHUNKEDIO, // ChunkedIO logging - DBG_COMPRESSOR, // Connection compressor DBG_STRING, // String code - DBG_NOTIFIERS, // Notifiers (see StateAccess.h) + DBG_NOTIFIERS, // Notifiers DBG_MAINLOOP, // Main IOSource loop DBG_ANALYZER, // Analyzer framework DBG_TM, // Time-machine packet input via Brocolli @@ -30,7 +26,7 @@ enum DebugStream { DBG_THREADING, // Threading system DBG_FILE_ANALYSIS, // File analysis DBG_PLUGINS, // Plugin system - DBG_BROXYGEN, // Broxygen + DBG_ZEEKYGEN, // Zeekygen DBG_PKTIO, // Packet sources and dumpers. DBG_BROKER, // Broker communication DBG_SCRIPTS, // Script initialization diff --git a/src/Desc.cc b/src/Desc.cc index b64bcec8d8..f10f61fa77 100644 --- a/src/Desc.cc +++ b/src/Desc.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include #include diff --git a/src/Dict.cc b/src/Dict.cc index d639b0c912..02886c6d5d 100644 --- a/src/Dict.cc +++ b/src/Dict.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #ifdef HAVE_MEMORY_H #include diff --git a/src/Discard.cc b/src/Discard.cc index 2a20c897aa..f84e901143 100644 --- a/src/Discard.cc +++ b/src/Discard.cc @@ -2,7 +2,7 @@ #include -#include "bro-config.h" +#include "zeek-config.h" #include "Net.h" #include "Var.h" @@ -33,12 +33,11 @@ int Discarder::NextPacket(const IP_Hdr* ip, int len, int caplen) if ( check_ip ) { - val_list* args = new val_list; - args->append(ip->BuildPktHdrVal()); + val_list args{ip->BuildPktHdrVal()}; try { - discard_packet = check_ip->Call(args)->AsBool(); + discard_packet = check_ip->Call(&args)->AsBool(); } catch ( InterpreterException& e ) @@ -46,8 +45,6 @@ int Discarder::NextPacket(const IP_Hdr* ip, int len, int caplen) discard_packet = false; } - delete args; - if ( discard_packet ) return discard_packet; } @@ -88,21 +85,20 @@ int Discarder::NextPacket(const IP_Hdr* ip, int len, int caplen) const struct tcphdr* tp = (const struct tcphdr*) data; int th_len = tp->th_off * 4; - val_list* args = new val_list; - args->append(ip->BuildPktHdrVal()); - args->append(BuildData(data, th_len, len, caplen)); + val_list args{ + ip->BuildPktHdrVal(), + BuildData(data, th_len, len, caplen), + }; try { - discard_packet = check_tcp->Call(args)->AsBool(); + discard_packet = check_tcp->Call(&args)->AsBool(); } catch ( InterpreterException& e ) { discard_packet = false; } - - delete args; } } @@ -113,21 +109,20 @@ int Discarder::NextPacket(const IP_Hdr* ip, int len, int caplen) const struct udphdr* up = (const struct udphdr*) data; int uh_len = sizeof (struct udphdr); - val_list* args = new val_list; - args->append(ip->BuildPktHdrVal()); - args->append(BuildData(data, uh_len, len, caplen)); + val_list args{ + ip->BuildPktHdrVal(), + BuildData(data, uh_len, len, caplen), + }; try { - discard_packet = check_udp->Call(args)->AsBool(); + discard_packet = check_udp->Call(&args)->AsBool(); } catch ( InterpreterException& e ) { discard_packet = false; } - - delete args; } } @@ -137,20 +132,17 @@ int Discarder::NextPacket(const IP_Hdr* ip, int len, int caplen) { const struct icmp* ih = (const struct icmp*) data; - val_list* args = new val_list; - args->append(ip->BuildPktHdrVal()); + val_list args{ip->BuildPktHdrVal()}; try { - 
discard_packet = check_icmp->Call(args)->AsBool(); + discard_packet = check_icmp->Call(&args)->AsBool(); } catch ( InterpreterException& e ) { discard_packet = false; } - - delete args; } } diff --git a/src/EquivClass.cc b/src/EquivClass.cc index 7f54f07060..6b2a7aa593 100644 --- a/src/EquivClass.cc +++ b/src/EquivClass.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include "EquivClass.h" diff --git a/src/Event.cc b/src/Event.cc index 36ba2dfc3c..2389c618d7 100644 --- a/src/Event.cc +++ b/src/Event.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include "Event.h" #include "Func.h" @@ -13,28 +13,27 @@ EventMgr mgr; uint64 num_events_queued = 0; uint64 num_events_dispatched = 0; +Event::Event(EventHandlerPtr arg_handler, val_list arg_args, + SourceID arg_src, analyzer::ID arg_aid, TimerMgr* arg_mgr, + BroObj* arg_obj) + : handler(arg_handler), + args(std::move(arg_args)), + src(arg_src), + aid(arg_aid), + mgr(arg_mgr ? arg_mgr : timer_mgr), + obj(arg_obj), + next_event(nullptr) + { + if ( obj ) + Ref(obj); + } + Event::Event(EventHandlerPtr arg_handler, val_list* arg_args, SourceID arg_src, analyzer::ID arg_aid, TimerMgr* arg_mgr, BroObj* arg_obj) + : Event(arg_handler, std::move(*arg_args), arg_src, arg_aid, arg_mgr, arg_obj) { - handler = arg_handler; - args = arg_args; - src = arg_src; - mgr = arg_mgr ? arg_mgr : timer_mgr; // default is global - aid = arg_aid; - obj = arg_obj; - - if ( obj ) - Ref(obj); - - next_event = 0; - } - -Event::~Event() - { - // We don't Unref() the individual arguments by using delete_vals() - // here, because Func::Call already did that. - delete args; + delete arg_args; } void Event::Describe(ODesc* d) const @@ -49,7 +48,7 @@ void Event::Describe(ODesc* d) const if ( ! d->IsBinary() ) d->Add("("); - describe_vals(args, d); + describe_vals(&args, d); if ( ! d->IsBinary() ) d->Add("("); } @@ -59,18 +58,12 @@ void Event::Dispatch(bool no_remote) if ( src == SOURCE_BROKER ) no_remote = true; - if ( event_serializer ) - { - SerialInfo info(event_serializer); - event_serializer->Serialize(&info, handler->Name(), args); - } - if ( handler->ErrorHandler() ) reporter->BeginErrorHandler(); try { - handler->Call(args, no_remote); + handler->Call(&args, no_remote); } catch ( InterpreterException& e ) @@ -129,7 +122,7 @@ void EventMgr::QueueEvent(Event* event) void EventMgr::Drain() { if ( event_queue_flush_point ) - QueueEvent(event_queue_flush_point, new val_list()); + QueueEventFast(event_queue_flush_point, val_list{}); SegmentProfiler(segment_logger, "draining-events"); @@ -190,21 +183,3 @@ void EventMgr::Describe(ODesc* d) const d->NL(); } } - -RecordVal* EventMgr::GetLocalPeerVal() - { - if ( ! src_val ) - { - src_val = new RecordVal(peer); - src_val->Assign(0, val_mgr->GetCount(0)); - src_val->Assign(1, new AddrVal("127.0.0.1")); - src_val->Assign(2, val_mgr->GetPort(0)); - src_val->Assign(3, val_mgr->GetTrue()); - - Ref(peer_description); - src_val->Assign(4, peer_description); - src_val->Assign(5, 0); // class (optional). 
- } - - return src_val; - } diff --git a/src/Event.h b/src/Event.h index 69860daf50..475dc5f577 100644 --- a/src/Event.h +++ b/src/Event.h @@ -4,19 +4,23 @@ #define event_h #include "EventRegistry.h" -#include "Serializer.h" #include "analyzer/Tag.h" #include "analyzer/Analyzer.h" class EventMgr; +// We don't Unref() the individual arguments by using delete_vals() +// in a dtor because Func::Call already does that. class Event : public BroObj { public: + Event(EventHandlerPtr handler, val_list args, + SourceID src = SOURCE_LOCAL, analyzer::ID aid = 0, + TimerMgr* mgr = 0, BroObj* obj = 0); + Event(EventHandlerPtr handler, val_list* args, SourceID src = SOURCE_LOCAL, analyzer::ID aid = 0, TimerMgr* mgr = 0, BroObj* obj = 0); - ~Event() override; void SetNext(Event* n) { next_event = n; } Event* NextEvent() const { return next_event; } @@ -25,7 +29,7 @@ public: analyzer::ID Analyzer() const { return aid; } TimerMgr* Mgr() const { return mgr; } EventHandlerPtr Handler() const { return handler; } - val_list* Args() const { return args; } + const val_list* Args() const { return &args; } void Describe(ODesc* d) const override; @@ -37,7 +41,7 @@ protected: void Dispatch(bool no_remote = false); EventHandlerPtr handler; - val_list* args; + val_list args; SourceID src; analyzer::ID aid; TimerMgr* mgr; @@ -53,14 +57,50 @@ public: EventMgr(); ~EventMgr() override; - void QueueEvent(const EventHandlerPtr &h, val_list* vl, + // Queues an event without first checking if there's any available event + // handlers (or remote consumers). If it turns out there's actually + // nothing that will consume the event, then this may leak memory due to + // failing to decrement the reference count of each element in 'vl'. i.e. + // use this function instead of QueueEvent() if you've already guarded + // against the case where there's no handlers (one usually also does that + // because it would be a waste of effort to construct all the event + // arguments when there's no handlers to consume them). + void QueueEventFast(const EventHandlerPtr &h, val_list vl, + SourceID src = SOURCE_LOCAL, analyzer::ID aid = 0, + TimerMgr* mgr = 0, BroObj* obj = 0) + { + QueueEvent(new Event(h, std::move(vl), src, aid, mgr, obj)); + } + + // Queues an event if there's an event handler (or remote consumer). This + // function always takes ownership of decrementing the reference count of + // each element of 'vl', even if there's no event handler. If you've + // checked for event handler existence, you may wish to call + // QueueEventFast() instead of this function to prevent the redundant + // existence check. + void QueueEvent(const EventHandlerPtr &h, val_list vl, SourceID src = SOURCE_LOCAL, analyzer::ID aid = 0, TimerMgr* mgr = 0, BroObj* obj = 0) { if ( h ) - QueueEvent(new Event(h, vl, src, aid, mgr, obj)); + QueueEvent(new Event(h, std::move(vl), src, aid, mgr, obj)); else - delete_vals(vl); + { + loop_over_list(vl, i) + Unref(vl[i]); + } + } + + // Same as QueueEvent, except taking the event's argument list via a + // pointer instead of by value. This function takes ownership of the + // memory pointed to by 'vl' as well as decrementing the reference count of + // each of its elements. 
+ void QueueEvent(const EventHandlerPtr &h, val_list* vl, + SourceID src = SOURCE_LOCAL, analyzer::ID aid = 0, + TimerMgr* mgr = 0, BroObj* obj = 0) + { + QueueEvent(h, std::move(*vl), src, aid, mgr, obj); + delete vl; } void Dispatch(Event* event, bool no_remote = false) @@ -88,9 +128,6 @@ public: int Size() const { return num_events_queued - num_events_dispatched; } - // Returns a peer record describing the local Bro. - RecordVal* GetLocalPeerVal(); - void Describe(ODesc* d) const override; protected: diff --git a/src/EventHandler.cc b/src/EventHandler.cc index 00b19f7832..0f06ad50ca 100644 --- a/src/EventHandler.cc +++ b/src/EventHandler.cc @@ -2,7 +2,6 @@ #include "EventHandler.h" #include "Func.h" #include "Scope.h" -#include "RemoteSerializer.h" #include "NetVar.h" #include "broker/Manager.h" @@ -28,7 +27,6 @@ EventHandler::~EventHandler() EventHandler::operator bool() const { return enabled && ((local && local->HasBodies()) - || receivers.length() || generate_always || ! auto_publish.empty()); } @@ -73,12 +71,6 @@ void EventHandler::Call(val_list* vl, bool no_remote) if ( ! no_remote ) { - loop_over_list(receivers, i) - { - SerialInfo info(remote_serializer); - remote_serializer->SendCall(&info, receivers[i], name, vl); - } - if ( ! auto_publish.empty() ) { // Send event in form [name, xs...] where xs represent the arguments. @@ -172,41 +164,10 @@ void EventHandler::NewEvent(val_list* vl) vargs->Assign(i, rec); } - val_list* mvl = new val_list(2); - mvl->append(new StringVal(name)); - mvl->append(vargs); - - Event* ev = new Event(new_event, mvl); + Event* ev = new Event(new_event, { + new StringVal(name), + vargs, + }); mgr.Dispatch(ev); } -void EventHandler::AddRemoteHandler(SourceID peer) - { - receivers.append(peer); - } - -void EventHandler::RemoveRemoteHandler(SourceID peer) - { - receivers.remove(peer); - } - -bool EventHandler::Serialize(SerialInfo* info) const - { - return SERIALIZE(name); - } - -EventHandler* EventHandler::Unserialize(UnserialInfo* info) - { - char* name; - if ( ! UNSERIALIZE_STR(&name, 0) ) - return 0; - - EventHandler* h = event_registry->Lookup(name); - if ( ! h ) - { - h = new EventHandler(name); - event_registry->Register(h); - } - - return h; - } diff --git a/src/EventHandler.h b/src/EventHandler.h index bad3d278fa..1f3e902b8f 100644 --- a/src/EventHandler.h +++ b/src/EventHandler.h @@ -11,9 +11,6 @@ class Func; class FuncType; -class Serializer; -class SerialInfo; -class UnserialInfo; class EventHandler { public: @@ -26,9 +23,6 @@ public: void SetLocalHandler(Func* f); - void AddRemoteHandler(SourceID peer); - void RemoveRemoteHandler(SourceID peer); - void AutoPublish(std::string topic) { auto_publish.insert(std::move(topic)); @@ -59,11 +53,6 @@ public: void SetGenerateAlways() { generate_always = true; } bool GenerateAlways() { return generate_always; } - // We don't serialize the handler(s) itself here, but - // just the reference to it. - bool Serialize(SerialInfo* info) const; - static EventHandler* Unserialize(UnserialInfo* info); - private: void NewEvent(val_list* vl); // Raise new_event() meta event. @@ -75,10 +64,6 @@ private: bool error_handler; // this handler reports error messages. 
bool generate_always; - declare(List, SourceID); - typedef List(SourceID) receiver_list; - receiver_list receivers; - std::unordered_set auto_publish; }; diff --git a/src/EventRegistry.cc b/src/EventRegistry.cc index 875d6d6b26..be3cf13799 100644 --- a/src/EventRegistry.cc +++ b/src/EventRegistry.cc @@ -1,6 +1,6 @@ #include "EventRegistry.h" #include "RE.h" -#include "RemoteSerializer.h" +#include "Reporter.h" void EventRegistry::Register(EventHandlerPtr handler) { @@ -73,7 +73,7 @@ EventRegistry::string_list* EventRegistry::UsedHandlers() EventRegistry::string_list* EventRegistry::AllHandlers() { - string_list* names = new string_list; + string_list* names = new string_list(handlers.Length()); IterCookie* c = handlers.InitForIteration(); diff --git a/src/Expr.cc b/src/Expr.cc index 737a9455ca..450ac73437 100644 --- a/src/Expr.cc +++ b/src/Expr.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include "Expr.h" #include "Event.h" @@ -10,7 +10,6 @@ #include "Scope.h" #include "Stmt.h" #include "EventRegistry.h" -#include "RemoteSerializer.h" #include "Net.h" #include "Traverse.h" #include "Trigger.h" @@ -33,7 +32,7 @@ const char* expr_name(BroExprTag t) "$=", "in", "<<>>", "()", "event", "schedule", "coerce", "record_coerce", "table_coerce", - "sizeof", "flatten", "cast", "is" + "sizeof", "flatten", "cast", "is", "[:]=" }; if ( int(t) >= NUM_EXPRS ) @@ -98,7 +97,7 @@ void Expr::EvalIntoAggregate(const BroType* /* t */, Val* /* aggr */, Internal("Expr::EvalIntoAggregate called"); } -void Expr::Assign(Frame* /* f */, Val* /* v */, Opcode /* op */) +void Expr::Assign(Frame* /* f */, Val* /* v */) { Internal("Expr::Assign called"); } @@ -202,56 +201,6 @@ void Expr::RuntimeErrorWithCallStack(const std::string& msg) const } } -bool Expr::Serialize(SerialInfo* info) const - { - return SerialObj::Serialize(info); - } - -Expr* Expr::Unserialize(UnserialInfo* info, BroExprTag want) - { - Expr* e = (Expr*) SerialObj::Unserialize(info, SER_EXPR); - - if ( ! e ) - return 0; - - if ( want != EXPR_ANY && e->tag != want ) - { - info->s->Error("wrong expression type"); - Unref(e); - return 0; - } - - return e; - } - -bool Expr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_EXPR, BroObj); - - if ( ! (SERIALIZE(char(tag)) && SERIALIZE(paren)) ) - return false; - - SERIALIZE_OPTIONAL(type); - return true; - } - -bool Expr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BroObj); - - char c; - if ( ! (UNSERIALIZE(&c) && UNSERIALIZE(&paren)) ) - return 0; - - tag = BroExprTag(c); - - BroType* t = 0; - UNSERIALIZE_OPTIONAL(t, BroType::Unserialize(info)); - SetType(t); - return true; - } - - NameExpr::NameExpr(ID* arg_id, bool const_init) : Expr(EXPR_NAME) { id = arg_id; @@ -312,10 +261,10 @@ Expr* NameExpr::MakeLvalue() return new RefExpr(this); } -void NameExpr::Assign(Frame* f, Val* v, Opcode op) +void NameExpr::Assign(Frame* f, Val* v) { if ( id->IsGlobal() ) - id->SetVal(v, op); + id->SetVal(v); else f->SetElement(id->Offset(), v); } @@ -351,55 +300,6 @@ void NameExpr::ExprDescribe(ODesc* d) const } } -IMPLEMENT_SERIAL(NameExpr, SER_NAME_EXPR); - -bool NameExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_NAME_EXPR, Expr); - - // Write out just the name of the function if requested. 
- if ( info->globals_as_names && id->IsGlobal() ) - return SERIALIZE('n') && SERIALIZE(id->Name()) && - SERIALIZE(in_const_init); - else - return SERIALIZE('f') && id->Serialize(info) && - SERIALIZE(in_const_init); - } - -bool NameExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Expr); - - char type; - if ( ! UNSERIALIZE(&type) ) - return false; - - if ( type == 'n' ) - { - const char* name; - if ( ! UNSERIALIZE_STR(&name, 0) ) - return false; - - id = global_scope()->Lookup(name); - if ( id ) - ::Ref(id); - else - reporter->Warning("configuration changed: unserialized unknown global name from persistent state"); - - delete [] name; - } - else - id = ID::Unserialize(info); - - if ( ! id ) - return false; - - if ( ! UNSERIALIZE(&in_const_init) ) - return false; - - return true; - } - ConstExpr::ConstExpr(Val* arg_val) : Expr(EXPR_CONST) { val = arg_val; @@ -438,22 +338,6 @@ TraversalCode ConstExpr::Traverse(TraversalCallback* cb) const HANDLE_TC_EXPR_POST(tc); } -IMPLEMENT_SERIAL(ConstExpr, SER_CONST_EXPR); - -bool ConstExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_CONST_EXPR, Expr); - return val->Serialize(info); - } - -bool ConstExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Expr); - val = Val::Unserialize(info); - return val != 0; - } - - UnaryExpr::UnaryExpr(BroExprTag arg_tag, Expr* arg_op) : Expr(arg_tag) { op = arg_op; @@ -552,21 +436,6 @@ void UnaryExpr::ExprDescribe(ODesc* d) const } } -IMPLEMENT_SERIAL(UnaryExpr, SER_UNARY_EXPR); - -bool UnaryExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_UNARY_EXPR, Expr); - return op->Serialize(info); - } - -bool UnaryExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Expr); - op = Expr::Unserialize(info); - return op != 0; - } - BinaryExpr::~BinaryExpr() { Unref(op1); @@ -1040,26 +909,6 @@ void BinaryExpr::PromoteType(TypeTag t, bool is_vector) SetType(is_vector ? new VectorType(base_type(t)) : base_type(t)); } -IMPLEMENT_SERIAL(BinaryExpr, SER_BINARY_EXPR); - -bool BinaryExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_BINARY_EXPR, Expr); - return op1->Serialize(info) && op2->Serialize(info); - } - -bool BinaryExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Expr); - - op1 = Expr::Unserialize(info); - if ( ! 
op1 ) - return false; - - op2 = Expr::Unserialize(info); - return op2 != 0; - } - CloneExpr::CloneExpr(Expr* arg_op) : UnaryExpr(EXPR_CLONE, arg_op) { if ( IsError() ) @@ -1090,20 +939,6 @@ Val* CloneExpr::Fold(Val* v) const return v->Clone(); } -IMPLEMENT_SERIAL(CloneExpr, SER_CLONE_EXPR); - -bool CloneExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_CLONE_EXPR, UnaryExpr); - return true; - } - -bool CloneExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - return true; - } - IncrExpr::IncrExpr(BroExprTag arg_tag, Expr* arg_op) : UnaryExpr(arg_tag, arg_op->MakeLvalue()) { @@ -1172,18 +1007,18 @@ Val* IncrExpr::Eval(Frame* f) const if ( elt ) { Val* new_elt = DoSingleEval(f, elt); - v_vec->Assign(i, new_elt, OP_INCR); + v_vec->Assign(i, new_elt); } else - v_vec->Assign(i, 0, OP_INCR); + v_vec->Assign(i, 0); } - op->Assign(f, v_vec, OP_INCR); + op->Assign(f, v_vec); } else { Val* old_v = v; - op->Assign(f, v = DoSingleEval(f, old_v), OP_INCR); + op->Assign(f, v = DoSingleEval(f, old_v)); Unref(old_v); } @@ -1195,20 +1030,6 @@ int IncrExpr::IsPure() const return 0; } -IMPLEMENT_SERIAL(IncrExpr, SER_INCR_EXPR); - -bool IncrExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_INCR_EXPR, UnaryExpr); - return true; - } - -bool IncrExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - return true; - } - ComplementExpr::ComplementExpr(Expr* arg_op) : UnaryExpr(EXPR_COMPLEMENT, arg_op) { if ( IsError() ) @@ -1228,20 +1049,6 @@ Val* ComplementExpr::Fold(Val* v) const return val_mgr->GetCount(~ v->InternalUnsigned()); } -IMPLEMENT_SERIAL(ComplementExpr, SER_COMPLEMENT_EXPR); - -bool ComplementExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_COMPLEMENT_EXPR, UnaryExpr); - return true; - } - -bool ComplementExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - return true; - } - NotExpr::NotExpr(Expr* arg_op) : UnaryExpr(EXPR_NOT, arg_op) { if ( IsError() ) @@ -1261,20 +1068,6 @@ Val* NotExpr::Fold(Val* v) const return val_mgr->GetBool(! 
v->InternalInt()); } -IMPLEMENT_SERIAL(NotExpr, SER_NOT_EXPR); - -bool NotExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_NOT_EXPR, UnaryExpr); - return true; - } - -bool NotExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - return true; - } - PosExpr::PosExpr(Expr* arg_op) : UnaryExpr(EXPR_POSITIVE, arg_op) { if ( IsError() ) @@ -1311,20 +1104,6 @@ Val* PosExpr::Fold(Val* v) const return val_mgr->GetInt(v->CoerceToInt()); } -IMPLEMENT_SERIAL(PosExpr, SER_POS_EXPR); - -bool PosExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_POS_EXPR, UnaryExpr); - return true; - } - -bool PosExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - return true; - } - NegExpr::NegExpr(Expr* arg_op) : UnaryExpr(EXPR_NEGATE, arg_op) { if ( IsError() ) @@ -1361,21 +1140,6 @@ Val* NegExpr::Fold(Val* v) const return val_mgr->GetInt(- v->CoerceToInt()); } - -IMPLEMENT_SERIAL(NegExpr, SER_NEG_EXPR); - -bool NegExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_NEG_EXPR, UnaryExpr); - return true; - } - -bool NegExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - return true; - } - SizeExpr::SizeExpr(Expr* arg_op) : UnaryExpr(EXPR_SIZE, arg_op) { if ( IsError() ) @@ -1403,21 +1167,6 @@ Val* SizeExpr::Fold(Val* v) const return v->SizeVal(); } -IMPLEMENT_SERIAL(SizeExpr, SER_SIZE_EXPR); - -bool SizeExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_SIZE_EXPR, UnaryExpr); - return true; - } - -bool SizeExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - return true; - } - - AddExpr::AddExpr(Expr* arg_op1, Expr* arg_op2) : BinaryExpr(EXPR_ADD, arg_op1, arg_op2) { @@ -1465,20 +1214,6 @@ void AddExpr::Canonicize() SwapOps(); } -IMPLEMENT_SERIAL(AddExpr, SER_ADD_EXPR); - -bool AddExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_ADD_EXPR, BinaryExpr); - return true; - } - -bool AddExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BinaryExpr); - return true; - } - AddToExpr::AddToExpr(Expr* arg_op1, Expr* arg_op2) : BinaryExpr(EXPR_ADD_TO, is_vector(arg_op1) ? 
arg_op1 : arg_op1->MakeLvalue(), arg_op2) @@ -1560,20 +1295,6 @@ Val* AddToExpr::Eval(Frame* f) const return 0; } -IMPLEMENT_SERIAL(AddToExpr, SER_ADD_TO_EXPR); - -bool AddToExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_ADD_TO_EXPR, BinaryExpr); - return true; - } - -bool AddToExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BinaryExpr); - return true; - } - SubExpr::SubExpr(Expr* arg_op1, Expr* arg_op2) : BinaryExpr(EXPR_SUB, arg_op1, arg_op2) { @@ -1625,20 +1346,6 @@ SubExpr::SubExpr(Expr* arg_op1, Expr* arg_op2) } } -IMPLEMENT_SERIAL(SubExpr, SER_SUB_EXPR); - -bool SubExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_SUB_EXPR, BinaryExpr); - return true; - } - -bool SubExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BinaryExpr); - return true; - } - RemoveFromExpr::RemoveFromExpr(Expr* arg_op1, Expr* arg_op2) : BinaryExpr(EXPR_REMOVE_FROM, arg_op1->MakeLvalue(), arg_op2) { @@ -1683,20 +1390,6 @@ Val* RemoveFromExpr::Eval(Frame* f) const return 0; } -IMPLEMENT_SERIAL(RemoveFromExpr, SER_REMOVE_FROM_EXPR); - -bool RemoveFromExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_REMOVE_FROM_EXPR, BinaryExpr); - return true; - } - -bool RemoveFromExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BinaryExpr); - return true; - } - TimesExpr::TimesExpr(Expr* arg_op1, Expr* arg_op2) : BinaryExpr(EXPR_TIMES, arg_op1, arg_op2) { @@ -1733,20 +1426,6 @@ void TimesExpr::Canonicize() SwapOps(); } -IMPLEMENT_SERIAL(TimesExpr, SER_TIMES_EXPR); - -bool TimesExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_TIMES_EXPR, BinaryExpr); - return true; - } - -bool TimesExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BinaryExpr); - return true; - } - DivideExpr::DivideExpr(Expr* arg_op1, Expr* arg_op2) : BinaryExpr(EXPR_DIVIDE, arg_op1, arg_op2) { @@ -1811,20 +1490,6 @@ Val* DivideExpr::AddrFold(Val* v1, Val* v2) const return new SubNetVal(a, mask); } -IMPLEMENT_SERIAL(DivideExpr, SER_DIVIDE_EXPR); - -bool DivideExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_DIVIDE_EXPR, BinaryExpr); - return true; - } - -bool DivideExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BinaryExpr); - return true; - } - ModExpr::ModExpr(Expr* arg_op1, Expr* arg_op2) : BinaryExpr(EXPR_MOD, arg_op1, arg_op2) { @@ -1845,20 +1510,6 @@ ModExpr::ModExpr(Expr* arg_op1, Expr* arg_op2) ExprError("requires integral operands"); } -IMPLEMENT_SERIAL(ModExpr, SER_MOD_EXPR); - -bool ModExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_MOD_EXPR, BinaryExpr); - return true; - } - -bool ModExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BinaryExpr); - return true; - } - BoolExpr::BoolExpr(BroExprTag arg_tag, Expr* arg_op1, Expr* arg_op2) : BinaryExpr(arg_tag, arg_op1, arg_op2) { @@ -1884,13 +1535,6 @@ BoolExpr::BoolExpr(BroExprTag arg_tag, Expr* arg_op1, Expr* arg_op2) else SetType(base_type(TYPE_BOOL)); } - - else if ( bt1 == TYPE_PATTERN && bt2 == bt1 ) - { - reporter->Warning("&& and || operators deprecated for pattern operands"); - SetType(base_type(TYPE_PATTERN)); - } - else ExprError("requires boolean operands"); } @@ -2022,20 +1666,6 @@ Val* BoolExpr::Eval(Frame* f) const return result; } -IMPLEMENT_SERIAL(BoolExpr, SER_BOOL_EXPR); - -bool BoolExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_BOOL_EXPR, BinaryExpr); - return true; - } - -bool BoolExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BinaryExpr); - return true; - } - BitExpr::BitExpr(BroExprTag 
arg_tag, Expr* arg_op1, Expr* arg_op2) : BinaryExpr(arg_tag, arg_op1, arg_op2) { @@ -2086,20 +1716,6 @@ BitExpr::BitExpr(BroExprTag arg_tag, Expr* arg_op1, Expr* arg_op2) ExprError("requires \"count\" or compatible \"set\" operands"); } -IMPLEMENT_SERIAL(BitExpr, SER_BIT_EXPR); - -bool BitExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_BIT_EXPR, BinaryExpr); - return true; - } - -bool BitExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BinaryExpr); - return true; - } - EqExpr::EqExpr(BroExprTag arg_tag, Expr* arg_op1, Expr* arg_op2) : BinaryExpr(arg_tag, arg_op1, arg_op2) { @@ -2201,20 +1817,6 @@ Val* EqExpr::Fold(Val* v1, Val* v2) const return BinaryExpr::Fold(v1, v2); } -IMPLEMENT_SERIAL(EqExpr, SER_EQ_EXPR); - -bool EqExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_EQ_EXPR, BinaryExpr); - return true; - } - -bool EqExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BinaryExpr); - return true; - } - RelExpr::RelExpr(BroExprTag arg_tag, Expr* arg_op1, Expr* arg_op2) : BinaryExpr(arg_tag, arg_op1, arg_op2) { @@ -2272,20 +1874,6 @@ void RelExpr::Canonicize() } } -IMPLEMENT_SERIAL(RelExpr, SER_REL_EXPR); - -bool RelExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_REL_EXPR, BinaryExpr); - return true; - } - -bool RelExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BinaryExpr); - return true; - } - CondExpr::CondExpr(Expr* arg_op1, Expr* arg_op2, Expr* arg_op3) : Expr(EXPR_COND) { @@ -2437,32 +2025,6 @@ void CondExpr::ExprDescribe(ODesc* d) const op3->Describe(d); } -IMPLEMENT_SERIAL(CondExpr, SER_COND_EXPR); - -bool CondExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_COND_EXPR, Expr); - return op1->Serialize(info) && op2->Serialize(info) - && op3->Serialize(info); - } - -bool CondExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Expr); - - op1 = Expr::Unserialize(info); - if ( ! op1 ) - return false; - - op2 = Expr::Unserialize(info); - if ( ! op2 ) - return false; - - op3 = Expr::Unserialize(info); - - return op3 != 0; - } - RefExpr::RefExpr(Expr* arg_op) : UnaryExpr(EXPR_REF, arg_op) { if ( IsError() ) @@ -2479,23 +2041,9 @@ Expr* RefExpr::MakeLvalue() return this; } -void RefExpr::Assign(Frame* f, Val* v, Opcode opcode) +void RefExpr::Assign(Frame* f, Val* v) { - op->Assign(f, v, opcode); - } - -IMPLEMENT_SERIAL(RefExpr, SER_REF_EXPR); - -bool RefExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_REF_EXPR, UnaryExpr); - return true; - } - -bool RefExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - return true; + op->Assign(f, v); } AssignExpr::AssignExpr(Expr* arg_op1, Expr* arg_op2, int arg_is_init, @@ -2562,19 +2110,30 @@ bool AssignExpr::TypeCheck(attr_list* attrs) if ( bt1 == TYPE_TABLE && op2->Tag() == EXPR_LIST ) { attr_list* attr_copy = 0; - if ( attrs ) { - attr_copy = new attr_list; + attr_copy = new attr_list(attrs->length()); loop_over_list(*attrs, i) attr_copy->append((*attrs)[i]); } + bool empty_list_assignment = (op2->AsListExpr()->Exprs().length() == 0); + if ( op1->Type()->IsSet() ) op2 = new SetConstructorExpr(op2->AsListExpr(), attr_copy); else op2 = new TableConstructorExpr(op2->AsListExpr(), attr_copy); + if ( ! empty_list_assignment && ! 
same_type(op1->Type(), op2->Type()) ) + { + if ( op1->Type()->IsSet() ) + ExprError("set type mismatch in assignment"); + else + ExprError("table type mismatch in assignment"); + + return false; + } + return true; } @@ -2588,7 +2147,7 @@ bool AssignExpr::TypeCheck(attr_list* attrs) if ( op2->Tag() == EXPR_LIST ) { - op2 = new VectorConstructorExpr(op2->AsListExpr()); + op2 = new VectorConstructorExpr(op2->AsListExpr(), op1->Type()); return true; } } @@ -2634,7 +2193,7 @@ bool AssignExpr::TypeCheck(attr_list* attrs) if ( sce->Attrs() ) { attr_list* a = sce->Attrs()->Attrs(); - attrs = new attr_list; + attrs = new attr_list(a->length()); loop_over_list(*a, i) attrs->append((*a)[i]); } @@ -2886,32 +2445,40 @@ int AssignExpr::IsPure() const return 0; } -IMPLEMENT_SERIAL(AssignExpr, SER_ASSIGN_EXPR); - -bool AssignExpr::DoSerialize(SerialInfo* info) const +IndexSliceAssignExpr::IndexSliceAssignExpr(Expr* op1, Expr* op2, int is_init) + : AssignExpr(op1, op2, is_init) { - DO_SERIALIZE(SER_ASSIGN_EXPR, BinaryExpr); - SERIALIZE_OPTIONAL(val); - return SERIALIZE(is_init); } -bool AssignExpr::DoUnserialize(UnserialInfo* info) +Val* IndexSliceAssignExpr::Eval(Frame* f) const { - DO_UNSERIALIZE(BinaryExpr); - UNSERIALIZE_OPTIONAL(val, Val::Unserialize(info)); - return UNSERIALIZE(&is_init); + if ( is_init ) + { + RuntimeError("illegal assignment in initialization"); + return 0; + } + + Val* v = op2->Eval(f); + + if ( v ) + { + op1->Assign(f, v); + Unref(v); + } + + return 0; } -IndexExpr::IndexExpr(Expr* arg_op1, ListExpr* arg_op2, bool is_slice) -: BinaryExpr(EXPR_INDEX, arg_op1, arg_op2) +IndexExpr::IndexExpr(Expr* arg_op1, ListExpr* arg_op2, bool arg_is_slice) +: BinaryExpr(EXPR_INDEX, arg_op1, arg_op2), is_slice(arg_is_slice) { if ( IsError() ) return; if ( is_slice ) { - if ( ! IsString(op1->Type()->Tag()) ) - ExprError("slice notation indexing only supported for strings currently"); + if ( ! IsString(op1->Type()->Tag()) && ! IsVector(op1->Type()->Tag()) ) + ExprError("slice notation indexing only supported for strings and vectors currently"); } else if ( IsString(op1->Type()->Tag()) ) @@ -2925,12 +2492,16 @@ IndexExpr::IndexExpr(Expr* arg_op1, ListExpr* arg_op2, bool is_slice) int match_type = op1->Type()->MatchesIndex(arg_op2); if ( match_type == DOES_NOT_MATCH_INDEX ) - SetError("not an index type"); + { + std::string error_msg = + fmt("expression with type '%s' is not a type that can be indexed", + type_name(op1->Type()->Tag())); + SetError(error_msg.data()); + } else if ( ! op1->Type()->YieldType() ) { - if ( IsString(op1->Type()->Tag()) && - match_type == MATCHES_INDEX_SCALAR ) + if ( IsString(op1->Type()->Tag()) && match_type == MATCHES_INDEX_SCALAR ) SetType(base_type(TYPE_STRING)); else // It's a set - so indexing it yields void. 
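The constructor change above admits slice notation (v[a:b]) for vectors as well as strings; the slicing code later in this hunk normalizes both endpoints with get_slice_index(). A minimal sketch of the assumed behaviour, for orientation only and not part of this patch (the real helper lives elsewhere in the tree; bro_int_t is the tree's signed 64-bit typedef):

static bro_int_t get_slice_index_sketch(bro_int_t idx, bro_int_t len)
	{
	if ( idx < 0 )
		idx += len;	// a negative endpoint counts back from the end

	if ( idx < 0 )	// clamp anything still out of range into [0, len]
		idx = 0;
	else if ( idx > len )
		idx = len;

	return idx;
	}

Under these assumed rules an endpoint of -2 on a five-element vector maps to index 3, and a reversed or out-of-range pair simply yields an empty slice, since the Fold() code below only copies elements when last - first >= 0.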
We don't @@ -3096,7 +2667,32 @@ Val* IndexExpr::Fold(Val* v1, Val* v2) const switch ( v1->Type()->Tag() ) { case TYPE_VECTOR: - v = v1->AsVectorVal()->Lookup(v2); + { + VectorVal* vect = v1->AsVectorVal(); + const ListVal* lv = v2->AsListVal(); + + if ( lv->Length() == 1 ) + v = vect->Lookup(v2); + else + { + int len = vect->Size(); + VectorVal* result = new VectorVal(vect->Type()->AsVectorType()); + + bro_int_t first = get_slice_index(lv->Index(0)->CoerceToInt(), len); + bro_int_t last = get_slice_index(lv->Index(1)->CoerceToInt(), len); + int sub_length = last - first; + + if ( sub_length >= 0 ) + { + result->Resize(sub_length); + + for ( int idx = first; idx < last; idx++ ) + result->Assign(idx - first, vect->Lookup(idx)->Ref()); + } + + return result; + } + } break; case TYPE_TABLE: @@ -3147,7 +2743,7 @@ Val* IndexExpr::Fold(Val* v1, Val* v2) const return 0; } -void IndexExpr::Assign(Frame* f, Val* v, Opcode op) +void IndexExpr::Assign(Frame* f, Val* v) { if ( IsError() ) return; @@ -3167,7 +2763,27 @@ void IndexExpr::Assign(Frame* f, Val* v, Opcode op) switch ( v1->Type()->Tag() ) { case TYPE_VECTOR: - if ( ! v1->AsVectorVal()->Assign(v2, v, op) ) + { + const ListVal* lv = v2->AsListVal(); + VectorVal* v1_vect = v1->AsVectorVal(); + + if ( lv->Length() > 1 ) + { + auto len = v1_vect->Size(); + bro_int_t first = get_slice_index(lv->Index(0)->CoerceToInt(), len); + bro_int_t last = get_slice_index(lv->Index(1)->CoerceToInt(), len); + + // Remove the elements from the vector within the slice + for ( auto idx = first; idx < last; idx++ ) + v1_vect->Remove(first); + + // Insert the new elements starting at the first position + VectorVal* v_vect = v->AsVectorVal(); + + for ( auto idx = 0u; idx < v_vect->Size(); idx++, first++ ) + v1_vect->Insert(first, v_vect->Lookup(idx)->Ref()); + } + else if ( ! v1_vect->Assign(v2, v) ) { if ( v ) { @@ -3184,9 +2800,10 @@ void IndexExpr::Assign(Frame* f, Val* v, Opcode op) RuntimeErrorWithCallStack("assignment failed with null value"); } break; + } case TYPE_TABLE: - if ( ! v1->AsTableVal()->Assign(v2, v, op) ) + if ( ! 
v1->AsTableVal()->Assign(v2, v) ) { if ( v ) { @@ -3243,21 +2860,6 @@ TraversalCode IndexExpr::Traverse(TraversalCallback* cb) const HANDLE_TC_EXPR_POST(tc); } - -IMPLEMENT_SERIAL(IndexExpr, SER_INDEX_EXPR); - -bool IndexExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_INDEX_EXPR, BinaryExpr); - return true; - } - -bool IndexExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BinaryExpr); - return true; - } - FieldExpr::FieldExpr(Expr* arg_op, const char* arg_field_name) : UnaryExpr(EXPR_FIELD, arg_op) { @@ -3282,9 +2884,8 @@ FieldExpr::FieldExpr(Expr* arg_op, const char* arg_field_name) SetType(rt->FieldType(field)->Ref()); td = rt->FieldDecl(field); - if ( td->FindAttr(ATTR_DEPRECATED) ) - reporter->Warning("deprecated (%s$%s)", rt->GetName().c_str(), - field_name); + if ( rt->IsFieldDeprecated(field) ) + reporter->Warning("%s", rt->GetFieldDeprecationWarning(field, false).c_str()); } } } @@ -3304,7 +2905,7 @@ int FieldExpr::CanDel() const return td->FindAttr(ATTR_DEFAULT) || td->FindAttr(ATTR_OPTIONAL); } -void FieldExpr::Assign(Frame* f, Val* v, Opcode opcode) +void FieldExpr::Assign(Frame* f, Val* v) { if ( IsError() ) return; @@ -3313,14 +2914,14 @@ void FieldExpr::Assign(Frame* f, Val* v, Opcode opcode) if ( op_v ) { RecordVal* r = op_v->AsRecordVal(); - r->Assign(field, v, opcode); + r->Assign(field, v); Unref(r); } } void FieldExpr::Delete(Frame* f) { - Assign(f, 0, OP_ASSIGN_IDX); + Assign(f, 0); } Val* FieldExpr::Fold(Val* v) const @@ -3355,29 +2956,6 @@ void FieldExpr::ExprDescribe(ODesc* d) const d->Add(field); } -IMPLEMENT_SERIAL(FieldExpr, SER_FIELD_EXPR); - -bool FieldExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_FIELD_EXPR, UnaryExpr); - - if ( ! (SERIALIZE(field_name) && SERIALIZE(field) ) ) - return false; - - return td->Serialize(info); - } - -bool FieldExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - - if ( ! (UNSERIALIZE_STR(&field_name, 0) && UNSERIALIZE(&field) ) ) - return false; - - td = TypeDecl::Unserialize(info); - return td != 0; - } - HasFieldExpr::HasFieldExpr(Expr* arg_op, const char* arg_field_name) : UnaryExpr(EXPR_HAS_FIELD, arg_op) { @@ -3396,9 +2974,8 @@ HasFieldExpr::HasFieldExpr(Expr* arg_op, const char* arg_field_name) if ( field < 0 ) ExprError("no such field in record"); - else if ( rt->FieldDecl(field)->FindAttr(ATTR_DEPRECATED) ) - reporter->Warning("deprecated (%s?$%s)", rt->GetName().c_str(), - field_name); + else if ( rt->IsFieldDeprecated(field) ) + reporter->Warning("%s", rt->GetFieldDeprecationWarning(field, true).c_str()); SetType(base_type(TYPE_BOOL)); } @@ -3440,24 +3017,6 @@ void HasFieldExpr::ExprDescribe(ODesc* d) const d->Add(field); } -IMPLEMENT_SERIAL(HasFieldExpr, SER_HAS_FIELD_EXPR); - -bool HasFieldExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_HAS_FIELD_EXPR, UnaryExpr); - - // Serialize former "bool is_attr" member first for backwards compatibility. - return SERIALIZE(false) && SERIALIZE(field_name) && SERIALIZE(field); - } - -bool HasFieldExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - // Unserialize former "bool is_attr" member for backwards compatibility. 
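FieldExpr and HasFieldExpr above now ask the record type itself for deprecation details instead of formatting the warning inline. A sketch of the resulting call pattern, not part of the patch (the wrapper is hypothetical; IsFieldDeprecated() and GetFieldDeprecationWarning() are the accessors used at the two call sites, with the boolean apparently selecting the "$" versus "?$" form of the message):

static void warn_if_deprecated(RecordType* rt, int field, bool has_field_op)
	{
	if ( rt->IsFieldDeprecated(field) )
		reporter->Warning("%s",
			rt->GetFieldDeprecationWarning(field, has_field_op).c_str());
	}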
- bool not_used; - return UNSERIALIZE(&not_used) && UNSERIALIZE_STR(&field_name, 0) && UNSERIALIZE(&field); - } - RecordConstructorExpr::RecordConstructorExpr(ListExpr* constructor_list) : UnaryExpr(EXPR_RECORD_CONSTRUCTOR, constructor_list) { @@ -3467,9 +3026,9 @@ RecordConstructorExpr::RecordConstructorExpr(ListExpr* constructor_list) // Spin through the list, which should be comprised only of // record-field-assign expressions, and build up a // record type to associate with this constructor. - type_decl_list* record_types = new type_decl_list; - const expr_list& exprs = constructor_list->Exprs(); + type_decl_list* record_types = new type_decl_list(exprs.length()); + loop_over_list(exprs, i) { Expr* e = exprs[i]; @@ -3537,20 +3096,6 @@ void RecordConstructorExpr::ExprDescribe(ODesc* d) const d->Add("]"); } -IMPLEMENT_SERIAL(RecordConstructorExpr, SER_RECORD_CONSTRUCTOR_EXPR); - -bool RecordConstructorExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_RECORD_CONSTRUCTOR_EXPR, UnaryExpr); - return true; - } - -bool RecordConstructorExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - return true; - } - TableConstructorExpr::TableConstructorExpr(ListExpr* constructor_list, attr_list* arg_attrs, BroType* arg_type) : UnaryExpr(EXPR_TABLE_CONSTRUCTOR, constructor_list) @@ -3661,22 +3206,6 @@ void TableConstructorExpr::ExprDescribe(ODesc* d) const d->Add(")"); } -IMPLEMENT_SERIAL(TableConstructorExpr, SER_TABLE_CONSTRUCTOR_EXPR); - -bool TableConstructorExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_TABLE_CONSTRUCTOR_EXPR, UnaryExpr); - SERIALIZE_OPTIONAL(attrs); - return true; - } - -bool TableConstructorExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - UNSERIALIZE_OPTIONAL(attrs, Attributes::Unserialize(info)); - return true; - } - SetConstructorExpr::SetConstructorExpr(ListExpr* constructor_list, attr_list* arg_attrs, BroType* arg_type) : UnaryExpr(EXPR_SET_CONSTRUCTOR, constructor_list) @@ -3797,22 +3326,6 @@ void SetConstructorExpr::ExprDescribe(ODesc* d) const d->Add(")"); } -IMPLEMENT_SERIAL(SetConstructorExpr, SER_SET_CONSTRUCTOR_EXPR); - -bool SetConstructorExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_SET_CONSTRUCTOR_EXPR, UnaryExpr); - SERIALIZE_OPTIONAL(attrs); - return true; - } - -bool SetConstructorExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - UNSERIALIZE_OPTIONAL(attrs, Attributes::Unserialize(info)); - return true; - } - VectorConstructorExpr::VectorConstructorExpr(ListExpr* constructor_list, BroType* arg_type) : UnaryExpr(EXPR_VECTOR_CONSTRUCTOR, constructor_list) @@ -3916,20 +3429,6 @@ void VectorConstructorExpr::ExprDescribe(ODesc* d) const d->Add(")"); } -IMPLEMENT_SERIAL(VectorConstructorExpr, SER_VECTOR_CONSTRUCTOR_EXPR); - -bool VectorConstructorExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_VECTOR_CONSTRUCTOR_EXPR, UnaryExpr); - return true; - } - -bool VectorConstructorExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - return true; - } - FieldAssignExpr::FieldAssignExpr(const char* arg_field_name, Expr* value) : UnaryExpr(EXPR_FIELD_ASSIGN, value), field_name(arg_field_name) { @@ -3978,20 +3477,6 @@ void FieldAssignExpr::ExprDescribe(ODesc* d) const op->Describe(d); } -IMPLEMENT_SERIAL(FieldAssignExpr, SER_FIELD_ASSIGN_EXPR); - -bool FieldAssignExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_FIELD_ASSIGN_EXPR, UnaryExpr); - return true; - } - -bool 
FieldAssignExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - return true; - } - ArithCoerceExpr::ArithCoerceExpr(Expr* arg_op, TypeTag t) : UnaryExpr(EXPR_ARITH_COERCE, arg_op) { @@ -4069,21 +3554,6 @@ Val* ArithCoerceExpr::Fold(Val* v) const return result; } -IMPLEMENT_SERIAL(ArithCoerceExpr, SER_ARITH_COERCE_EXPR); - -bool ArithCoerceExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_ARITH_COERCE_EXPR, UnaryExpr); - return true; - } - -bool ArithCoerceExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - return true; - } - - RecordCoerceExpr::RecordCoerceExpr(Expr* op, RecordType* r) : UnaryExpr(EXPR_RECORD_COERCE, op) { @@ -4128,15 +3598,41 @@ RecordCoerceExpr::RecordCoerceExpr(Expr* op, RecordType* r) if ( ! same_type(sup_t_i, sub_t_i) ) { - if ( sup_t_i->Tag() != TYPE_RECORD || - sub_t_i->Tag() != TYPE_RECORD || - ! record_promotion_compatible(sup_t_i->AsRecordType(), - sub_t_i->AsRecordType()) ) + auto is_arithmetic_promotable = [](BroType* sup, BroType* sub) -> bool { - char buf[512]; - safe_snprintf(buf, sizeof(buf), + auto sup_tag = sup->Tag(); + auto sub_tag = sub->Tag(); + + if ( ! BothArithmetic(sup_tag, sub_tag) ) + return false; + + if ( sub_tag == TYPE_DOUBLE && IsIntegral(sup_tag) ) + return false; + + if ( sub_tag == TYPE_INT && sup_tag == TYPE_COUNT ) + return false; + + return true; + }; + + auto is_record_promotable = [](BroType* sup, BroType* sub) -> bool + { + if ( sup->Tag() != TYPE_RECORD ) + return false; + + if ( sub->Tag() != TYPE_RECORD ) + return false; + + return record_promotion_compatible(sup->AsRecordType(), + sub->AsRecordType()); + }; + + if ( ! is_arithmetic_promotable(sup_t_i, sub_t_i) && + ! is_record_promotable(sup_t_i, sub_t_i) ) + { + string error_msg = fmt( "type clash for field \"%s\"", sub_r->FieldName(i)); - Error(buf, sub_t_i); + Error(error_msg.c_str(), sub_t_i); SetError(); break; } @@ -4154,22 +3650,15 @@ RecordCoerceExpr::RecordCoerceExpr(Expr* op, RecordType* r) { if ( ! t_r->FieldDecl(i)->FindAttr(ATTR_OPTIONAL) ) { - char buf[512]; - safe_snprintf(buf, sizeof(buf), - "non-optional field \"%s\" missing", - t_r->FieldName(i)); - Error(buf); + string error_msg = fmt( + "non-optional field \"%s\" missing", t_r->FieldName(i)); + Error(error_msg.c_str()); SetError(); break; } } - else - { - if ( t_r->FieldDecl(i)->FindAttr(ATTR_DEPRECATED) ) - reporter->Warning("deprecated (%s$%s)", - t_r->GetName().c_str(), - t_r->FieldName(i)); - } + else if ( t_r->IsFieldDeprecated(i) ) + reporter->Warning("%s", t_r->GetFieldDeprecationWarning(i, false).c_str()); } } } @@ -4246,6 +3735,20 @@ Val* RecordCoerceExpr::Fold(Val* v) const rhs = new_val; } } + else if ( BothArithmetic(rhs_type->Tag(), field_type->Tag()) && + ! same_type(rhs_type, field_type) ) + { + if ( Val* new_val = check_and_promote(rhs, field_type, false, op->GetLocationInfo()) ) + { + // Don't call unref here on rhs because check_and_promote already called it. + rhs = new_val; + } + else + { + Unref(val); + RuntimeError("Failed type conversion"); + } + } val->Assign(i, rhs); } @@ -4284,39 +3787,6 @@ Val* RecordCoerceExpr::Fold(Val* v) const return val; } -IMPLEMENT_SERIAL(RecordCoerceExpr, SER_RECORD_COERCE_EXPR); - -bool RecordCoerceExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_RECORD_COERCE_EXPR, UnaryExpr); - - if ( ! SERIALIZE(map_size) ) - return false; - - for ( int i = 0; i < map_size; ++i ) - if ( ! 
SERIALIZE(map[i]) ) - return false; - - return true; - } - -bool RecordCoerceExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - - if ( ! UNSERIALIZE(&map_size) ) - return false; - - map = new int[map_size]; - - for ( int i = 0; i < map_size; ++i ) - if ( ! UNSERIALIZE(&map[i]) ) - return false; - - return true; - } - - TableCoerceExpr::TableCoerceExpr(Expr* op, TableType* r) : UnaryExpr(EXPR_TABLE_COERCE, op) { @@ -4347,20 +3817,6 @@ Val* TableCoerceExpr::Fold(Val* v) const return new TableVal(Type()->AsTableType(), tv->Attrs()); } -IMPLEMENT_SERIAL(TableCoerceExpr, SER_TABLE_COERCE_EXPR); - -bool TableCoerceExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_TABLE_COERCE_EXPR, UnaryExpr); - return true; - } - -bool TableCoerceExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - return true; - } - VectorCoerceExpr::VectorCoerceExpr(Expr* op, VectorType* v) : UnaryExpr(EXPR_VECTOR_COERCE, op) { @@ -4391,20 +3847,6 @@ Val* VectorCoerceExpr::Fold(Val* v) const return new VectorVal(Type()->Ref()->AsVectorType()); } -IMPLEMENT_SERIAL(VectorCoerceExpr, SER_VECTOR_COERCE_EXPR); - -bool VectorCoerceExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_VECTOR_COERCE_EXPR, UnaryExpr); - return true; - } - -bool VectorCoerceExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - return true; - } - FlattenExpr::FlattenExpr(Expr* arg_op) : UnaryExpr(EXPR_FLATTEN, arg_op) { @@ -4453,27 +3895,14 @@ Val* FlattenExpr::Fold(Val* v) const return l; } -IMPLEMENT_SERIAL(FlattenExpr, SER_FLATTEN_EXPR); - -bool FlattenExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_FLATTEN_EXPR, UnaryExpr); - return SERIALIZE(num_fields); - } - -bool FlattenExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - return UNSERIALIZE(&num_fields); - } - ScheduleTimer::ScheduleTimer(EventHandlerPtr arg_event, val_list* arg_args, double t, TimerMgr* arg_tmgr) -: Timer(t, TIMER_SCHEDULE) + : Timer(t, TIMER_SCHEDULE), + event(arg_event), + args(std::move(*arg_args)), + tmgr(arg_tmgr) { - event = arg_event; - args = arg_args; - tmgr = arg_tmgr; + delete arg_args; } ScheduleTimer::~ScheduleTimer() @@ -4482,7 +3911,7 @@ ScheduleTimer::~ScheduleTimer() void ScheduleTimer::Dispatch(double /* t */, int /* is_expire */) { - mgr.QueueEvent(event, args, SOURCE_LOCAL, 0, tmgr); + mgr.QueueEvent(event, std::move(args), SOURCE_LOCAL, 0, tmgr); } ScheduleExpr::ScheduleExpr(Expr* arg_when, EventExpr* arg_event) @@ -4577,26 +4006,6 @@ void ScheduleExpr::ExprDescribe(ODesc* d) const event->Describe(d); } -IMPLEMENT_SERIAL(ScheduleExpr, SER_SCHEDULE_EXPR); - -bool ScheduleExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_SCHEDULE_EXPR, Expr); - return when->Serialize(info) && event->Serialize(info); - } - -bool ScheduleExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Expr); - - when = Expr::Unserialize(info); - if ( ! 
when ) - return false; - - event = (EventExpr*) Expr::Unserialize(info, EXPR_EVENT); - return event != 0; - } - InExpr::InExpr(Expr* arg_op1, Expr* arg_op2) : BinaryExpr(EXPR_IN, arg_op1, arg_op2) { @@ -4713,20 +4122,6 @@ Val* InExpr::Fold(Val* v1, Val* v2) const return val_mgr->GetBool(0); } -IMPLEMENT_SERIAL(InExpr, SER_IN_EXPR); - -bool InExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_IN_EXPR, BinaryExpr); - return true; - } - -bool InExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BinaryExpr); - return true; - } - CallExpr::CallExpr(Expr* arg_func, ListExpr* arg_args, bool in_hook) : Expr(EXPR_CALL) { @@ -4924,26 +4319,6 @@ void CallExpr::ExprDescribe(ODesc* d) const args->Describe(d); } -IMPLEMENT_SERIAL(CallExpr, SER_CALL_EXPR); - -bool CallExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_CALL_EXPR, Expr); - return func->Serialize(info) && args->Serialize(info); - } - -bool CallExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Expr); - - func = Expr::Unserialize(info); - if ( ! func ) - return false; - - args = (ListExpr*) Expr::Unserialize(info, EXPR_LIST); - return args != 0; - } - EventExpr::EventExpr(const char* arg_name, ListExpr* arg_args) : Expr(EXPR_EVENT) { @@ -4998,7 +4373,8 @@ Val* EventExpr::Eval(Frame* f) const return 0; val_list* v = eval_list(f, args); - mgr.QueueEvent(handler, v); + mgr.QueueEvent(handler, std::move(*v)); + delete v; return 0; } @@ -5028,35 +4404,6 @@ void EventExpr::ExprDescribe(ODesc* d) const args->Describe(d); } -IMPLEMENT_SERIAL(EventExpr, SER_EVENT_EXPR); - -bool EventExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_EVENT_EXPR, Expr); - - if ( ! handler->Serialize(info) ) - return false; - - return SERIALIZE(name) && args->Serialize(info); - } - -bool EventExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Expr); - - EventHandler* h = EventHandler::Unserialize(info); - if ( ! h ) - return false; - - handler = h; - - if ( ! UNSERIALIZE(&name) ) - return false; - - args = (ListExpr*) Expr::Unserialize(info, EXPR_LIST); - return args; - } - ListExpr::ListExpr() : Expr(EXPR_LIST) { SetType(new TypeList()); @@ -5128,7 +4475,7 @@ BroType* ListExpr::InitType() const if ( exprs[0]->IsRecordElement(0) ) { - type_decl_list* types = new type_decl_list; + type_decl_list* types = new type_decl_list(exprs.length()); loop_over_list(exprs, i) { TypeDecl* td = new TypeDecl(0, 0); @@ -5399,7 +4746,7 @@ Expr* ListExpr::MakeLvalue() return new RefExpr(this); } -void ListExpr::Assign(Frame* f, Val* v, Opcode op) +void ListExpr::Assign(Frame* f, Val* v) { ListVal* lv = v->AsListVal(); @@ -5407,7 +4754,7 @@ void ListExpr::Assign(Frame* f, Val* v, Opcode op) RuntimeError("mismatch in list lengths"); loop_over_list(exprs, i) - exprs[i]->Assign(f, (*lv->Vals())[i]->Ref(), op); + exprs[i]->Assign(f, (*lv->Vals())[i]->Ref()); Unref(lv); } @@ -5427,42 +4774,6 @@ TraversalCode ListExpr::Traverse(TraversalCallback* cb) const HANDLE_TC_EXPR_POST(tc); } -IMPLEMENT_SERIAL(ListExpr, SER_LIST_EXPR); - -bool ListExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_LIST_EXPR, Expr); - - if ( ! SERIALIZE(exprs.length()) ) - return false; - - loop_over_list(exprs, i) - if ( ! exprs[i]->Serialize(info) ) - return false; - - return true; - } - -bool ListExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Expr); - - int len; - if ( ! UNSERIALIZE(&len) ) - return false; - - while ( len-- ) - { - Expr* e = Expr::Unserialize(info); - if ( ! 
e ) - return false; - - exprs.append(e); - } - - return true; - } - RecordAssignExpr::RecordAssignExpr(Expr* record, Expr* init_list, int is_init) { const expr_list& inits = init_list->AsListExpr()->Exprs(); @@ -5524,20 +4835,6 @@ RecordAssignExpr::RecordAssignExpr(Expr* record, Expr* init_list, int is_init) } } -IMPLEMENT_SERIAL(RecordAssignExpr, SER_RECORD_ASSIGN_EXPR); - -bool RecordAssignExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_RECORD_ASSIGN_EXPR, ListExpr); - return true; - } - -bool RecordAssignExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(ListExpr); - return true; - } - CastExpr::CastExpr(Expr* arg_op, BroType* t) : UnaryExpr(EXPR_CAST, arg_op) { auto stype = Op()->Type(); @@ -5590,20 +4887,6 @@ void CastExpr::ExprDescribe(ODesc* d) const Type()->Describe(d); } -IMPLEMENT_SERIAL(CastExpr, SER_CAST_EXPR); - -bool CastExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_CAST_EXPR, UnaryExpr); - return true; - } - -bool CastExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - return true; - } - IsExpr::IsExpr(Expr* arg_op, BroType* arg_t) : UnaryExpr(EXPR_IS, arg_op) { t = arg_t; @@ -5635,25 +4918,13 @@ void IsExpr::ExprDescribe(ODesc* d) const t->Describe(d); } -IMPLEMENT_SERIAL(IsExpr, SER_IS_EXPR_ /* sic */); - -bool IsExpr::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_IS_EXPR_, UnaryExpr); - return true; - } - -bool IsExpr::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(UnaryExpr); - return true; - } - Expr* get_assign_expr(Expr* op1, Expr* op2, int is_init) { if ( op1->Type()->Tag() == TYPE_RECORD && op2->Type()->Tag() == TYPE_LIST ) return new RecordAssignExpr(op1, op2, is_init); + else if ( op1->Tag() == EXPR_INDEX && op1->AsIndexExpr()->IsSlice() ) + return new IndexSliceAssignExpr(op1, op2, is_init); else return new AssignExpr(op1, op2, is_init); } diff --git a/src/Expr.h b/src/Expr.h index 820de2b876..4e929bdf16 100644 --- a/src/Expr.h +++ b/src/Expr.h @@ -49,7 +49,8 @@ typedef enum { EXPR_FLATTEN, EXPR_CAST, EXPR_IS, -#define NUM_EXPRS (int(EXPR_IS) + 1) + EXPR_INDEX_SLICE_ASSIGN, +#define NUM_EXPRS (int(EXPR_INDEX_SLICE_ASSIGN) + 1) } BroExprTag; extern const char* expr_name(BroExprTag t); @@ -58,6 +59,7 @@ class Stmt; class Frame; class ListExpr; class NameExpr; +class IndexExpr; class AssignExpr; class CallExpr; class EventExpr; @@ -84,7 +86,7 @@ public: const; // Assign to the given value, if appropriate. - virtual void Assign(Frame* f, Val* v, Opcode op = OP_ASSIGN); + virtual void Assign(Frame* f, Val* v); // Returns the type corresponding to this expression interpreted // as an initialization. 
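For orientation, the slice-assignment pieces meet in get_assign_expr() at the end of the Expr.cc hunk above: an index expression built with slice notation is routed to the new IndexSliceAssignExpr instead of a plain AssignExpr. An illustrative restatement of that dispatch (the function name here is hypothetical):

static Expr* build_assignment(Expr* lhs, Expr* rhs, int is_init)
	{
	if ( lhs->Type()->Tag() == TYPE_RECORD && rhs->Type()->Tag() == TYPE_LIST )
		return new RecordAssignExpr(lhs, rhs, is_init);

	if ( lhs->Tag() == EXPR_INDEX && lhs->AsIndexExpr()->IsSlice() )
		return new IndexSliceAssignExpr(lhs, rhs, is_init);

	return new AssignExpr(lhs, rhs, is_init);
	}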
The type should be Unref()'d when done @@ -187,10 +189,19 @@ public: return (AssignExpr*) this; } - void Describe(ODesc* d) const override; + const IndexExpr* AsIndexExpr() const + { + CHECK_TAG(tag, EXPR_INDEX, "ExprVal::AsIndexExpr", expr_name) + return (const IndexExpr*) this; + } - bool Serialize(SerialInfo* info) const; - static Expr* Unserialize(UnserialInfo* info, BroExprTag want = EXPR_ANY); + IndexExpr* AsIndexExpr() + { + CHECK_TAG(tag, EXPR_INDEX, "ExprVal::AsIndexExpr", expr_name) + return (IndexExpr*) this; + } + + void Describe(ODesc* d) const override; virtual TraversalCode Traverse(TraversalCallback* cb) const = 0; @@ -214,8 +225,6 @@ protected: void RuntimeErrorWithCallStack(const std::string& msg) const; - DECLARE_ABSTRACT_SERIAL(Expr); - BroExprTag tag; BroType* type; @@ -230,7 +239,7 @@ public: ID* Id() const { return id; } Val* Eval(Frame* f) const override; - void Assign(Frame* f, Val* v, Opcode op = OP_ASSIGN) override; + void Assign(Frame* f, Val* v) override; Expr* MakeLvalue() override; int IsPure() const override; @@ -242,8 +251,6 @@ protected: void ExprDescribe(ODesc* d) const override; - DECLARE_SERIAL(NameExpr); - ID* id; bool in_const_init; }; @@ -264,8 +271,6 @@ protected: ConstExpr() { val = 0; } void ExprDescribe(ODesc* d) const override; - DECLARE_SERIAL(ConstExpr); - Val* val; }; @@ -294,8 +299,6 @@ protected: // Returns the expression folded using the given constant. virtual Val* Fold(Val* v) const; - DECLARE_SERIAL(UnaryExpr); - Expr* op; }; @@ -357,8 +360,6 @@ protected: void ExprDescribe(ODesc* d) const override; - DECLARE_SERIAL(BinaryExpr); - Expr* op1; Expr* op2; }; @@ -373,8 +374,6 @@ protected: CloneExpr() { } Val* Fold(Val* v) const override; - - DECLARE_SERIAL(CloneExpr); }; class IncrExpr : public UnaryExpr { @@ -388,8 +387,6 @@ public: protected: friend class Expr; IncrExpr() { } - - DECLARE_SERIAL(IncrExpr); }; class ComplementExpr : public UnaryExpr { @@ -401,8 +398,6 @@ protected: ComplementExpr() { } Val* Fold(Val* v) const override; - - DECLARE_SERIAL(ComplementExpr); }; class NotExpr : public UnaryExpr { @@ -414,8 +409,6 @@ protected: NotExpr() { } Val* Fold(Val* v) const override; - - DECLARE_SERIAL(NotExpr); }; class PosExpr : public UnaryExpr { @@ -427,8 +420,6 @@ protected: PosExpr() { } Val* Fold(Val* v) const override; - - DECLARE_SERIAL(PosExpr); }; class NegExpr : public UnaryExpr { @@ -440,8 +431,6 @@ protected: NegExpr() { } Val* Fold(Val* v) const override; - - DECLARE_SERIAL(NegExpr); }; class SizeExpr : public UnaryExpr { @@ -454,7 +443,6 @@ protected: SizeExpr() { } Val* Fold(Val* v) const override; - DECLARE_SERIAL(SizeExpr); }; class AddExpr : public BinaryExpr { @@ -465,9 +453,6 @@ public: protected: friend class Expr; AddExpr() { } - - DECLARE_SERIAL(AddExpr); - }; class AddToExpr : public BinaryExpr { @@ -478,8 +463,6 @@ public: protected: friend class Expr; AddToExpr() { } - - DECLARE_SERIAL(AddToExpr); }; class RemoveFromExpr : public BinaryExpr { @@ -490,8 +473,6 @@ public: protected: friend class Expr; RemoveFromExpr() { } - - DECLARE_SERIAL(RemoveFromExpr); }; class SubExpr : public BinaryExpr { @@ -501,9 +482,6 @@ public: protected: friend class Expr; SubExpr() { } - - DECLARE_SERIAL(SubExpr); - }; class TimesExpr : public BinaryExpr { @@ -514,9 +492,6 @@ public: protected: friend class Expr; TimesExpr() { } - - DECLARE_SERIAL(TimesExpr); - }; class DivideExpr : public BinaryExpr { @@ -528,9 +503,6 @@ protected: DivideExpr() { } Val* AddrFold(Val* v1, Val* v2) const override; - - DECLARE_SERIAL(DivideExpr); - 
}; class ModExpr : public BinaryExpr { @@ -540,8 +512,6 @@ public: protected: friend class Expr; ModExpr() { } - - DECLARE_SERIAL(ModExpr); }; class BoolExpr : public BinaryExpr { @@ -554,8 +524,6 @@ public: protected: friend class Expr; BoolExpr() { } - - DECLARE_SERIAL(BoolExpr); }; class BitExpr : public BinaryExpr { @@ -565,8 +533,6 @@ public: protected: friend class Expr; BitExpr() { } - - DECLARE_SERIAL(BitExpr); }; class EqExpr : public BinaryExpr { @@ -579,8 +545,6 @@ protected: EqExpr() { } Val* Fold(Val* v1, Val* v2) const override; - - DECLARE_SERIAL(EqExpr); }; class RelExpr : public BinaryExpr { @@ -591,8 +555,6 @@ public: protected: friend class Expr; RelExpr() { } - - DECLARE_SERIAL(RelExpr); }; class CondExpr : public Expr { @@ -615,8 +577,6 @@ protected: void ExprDescribe(ODesc* d) const override; - DECLARE_SERIAL(CondExpr); - Expr* op1; Expr* op2; Expr* op3; @@ -626,14 +586,12 @@ class RefExpr : public UnaryExpr { public: explicit RefExpr(Expr* op); - void Assign(Frame* f, Val* v, Opcode op = OP_ASSIGN) override; + void Assign(Frame* f, Val* v) override; Expr* MakeLvalue() override; protected: friend class Expr; RefExpr() { } - - DECLARE_SERIAL(RefExpr); }; class AssignExpr : public BinaryExpr { @@ -657,12 +615,20 @@ protected: bool TypeCheck(attr_list* attrs = 0); bool TypeCheckArithmetics(TypeTag bt1, TypeTag bt2); - DECLARE_SERIAL(AssignExpr); - int is_init; Val* val; // optional }; +class IndexSliceAssignExpr : public AssignExpr { +public: + IndexSliceAssignExpr(Expr* op1, Expr* op2, int is_init); + Val* Eval(Frame* f) const override; + +protected: + friend class Expr; + IndexSliceAssignExpr() {} +}; + class IndexExpr : public BinaryExpr { public: IndexExpr(Expr* op1, ListExpr* op2, bool is_slice = false); @@ -673,7 +639,7 @@ public: void Add(Frame* f) override; void Delete(Frame* f) override; - void Assign(Frame* f, Val* v, Opcode op = OP_ASSIGN) override; + void Assign(Frame* f, Val* v) override; Expr* MakeLvalue() override; // Need to override Eval since it can take a vector arg but does @@ -682,6 +648,8 @@ public: TraversalCode Traverse(TraversalCallback* cb) const override; + bool IsSlice() const { return is_slice; } + protected: friend class Expr; IndexExpr() { } @@ -690,7 +658,7 @@ protected: void ExprDescribe(ODesc* d) const override; - DECLARE_SERIAL(IndexExpr); + bool is_slice; }; class FieldExpr : public UnaryExpr { @@ -703,7 +671,7 @@ public: int CanDel() const override; - void Assign(Frame* f, Val* v, Opcode op = OP_ASSIGN) override; + void Assign(Frame* f, Val* v) override; void Delete(Frame* f) override; Expr* MakeLvalue() override; @@ -716,8 +684,6 @@ protected: void ExprDescribe(ODesc* d) const override; - DECLARE_SERIAL(FieldExpr); - const char* field_name; const TypeDecl* td; int field; // -1 = attributes @@ -740,8 +706,6 @@ protected: void ExprDescribe(ODesc* d) const override; - DECLARE_SERIAL(HasFieldExpr); - const char* field_name; int field; }; @@ -759,8 +723,6 @@ protected: Val* Fold(Val* v) const override; void ExprDescribe(ODesc* d) const override; - - DECLARE_SERIAL(RecordConstructorExpr); }; class TableConstructorExpr : public UnaryExpr { @@ -781,8 +743,6 @@ protected: void ExprDescribe(ODesc* d) const override; - DECLARE_SERIAL(TableConstructorExpr); - Attributes* attrs; }; @@ -804,8 +764,6 @@ protected: void ExprDescribe(ODesc* d) const override; - DECLARE_SERIAL(SetConstructorExpr); - Attributes* attrs; }; @@ -822,8 +780,6 @@ protected: Val* InitVal(const BroType* t, Val* aggr) const override; void ExprDescribe(ODesc* d) const override; 
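The IndexSliceAssignExpr declared above feeds IndexExpr::Assign() in the Expr.cc hunk earlier, which treats a slice on the left-hand side as a splice: the elements in [first, last) are removed and the right-hand side, possibly of a different length, is inserted in their place. The same idea expressed on a std::vector, purely as an illustration and not part of the patch:

#include <cstddef>
#include <vector>

static void splice_assign(std::vector<int>& dst, std::size_t first, std::size_t last,
                          const std::vector<int>& src)
	{
	// Drop the old slice, then insert the replacement where it started.
	dst.erase(dst.begin() + first, dst.begin() + last);
	dst.insert(dst.begin() + first, src.begin(), src.end());
	}

Assigning a two-element vector into a three-element slice therefore shrinks the vector by one, matching the Remove()/Insert() loop in the patch.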
- - DECLARE_SERIAL(VectorConstructorExpr); }; class FieldAssignExpr : public UnaryExpr { @@ -841,8 +797,6 @@ protected: void ExprDescribe(ODesc* d) const override; - DECLARE_SERIAL(FieldAssignExpr); - string field_name; }; @@ -856,8 +810,6 @@ protected: Val* FoldSingleVal(Val* v, InternalTypeTag t) const; Val* Fold(Val* v) const override; - - DECLARE_SERIAL(ArithCoerceExpr); }; class RecordCoerceExpr : public UnaryExpr { @@ -872,8 +824,6 @@ protected: Val* InitVal(const BroType* t, Val* aggr) const override; Val* Fold(Val* v) const override; - DECLARE_SERIAL(RecordCoerceExpr); - // For each super-record slot, gives subrecord slot with which to // fill it. int* map; @@ -890,8 +840,6 @@ protected: TableCoerceExpr() { } Val* Fold(Val* v) const override; - - DECLARE_SERIAL(TableCoerceExpr); }; class VectorCoerceExpr : public UnaryExpr { @@ -904,8 +852,6 @@ protected: VectorCoerceExpr() { } Val* Fold(Val* v) const override; - - DECLARE_SERIAL(VectorCoerceExpr); }; // An internal operator for flattening array indices that are records @@ -920,8 +866,6 @@ protected: Val* Fold(Val* v) const override; - DECLARE_SERIAL(FlattenExpr); - int num_fields; }; @@ -937,7 +881,7 @@ public: protected: EventHandlerPtr event; - val_list* args; + val_list args; TimerMgr* tmgr; }; @@ -961,8 +905,6 @@ protected: void ExprDescribe(ODesc* d) const override; - DECLARE_SERIAL(ScheduleExpr); - Expr* when; EventExpr* event; }; @@ -977,8 +919,6 @@ protected: Val* Fold(Val* v1, Val* v2) const override; - DECLARE_SERIAL(InExpr); - }; class CallExpr : public Expr { @@ -1001,8 +941,6 @@ protected: void ExprDescribe(ODesc* d) const override; - DECLARE_SERIAL(CallExpr); - Expr* func; ListExpr* args; }; @@ -1026,8 +964,6 @@ protected: void ExprDescribe(ODesc* d) const override; - DECLARE_SERIAL(EventExpr); - string name; EventHandlerPtr handler; ListExpr* args; @@ -1055,7 +991,7 @@ public: BroType* InitType() const override; Val* InitVal(const BroType* t, Val* aggr) const override; Expr* MakeLvalue() override; - void Assign(Frame* f, Val* v, Opcode op = OP_ASSIGN) override; + void Assign(Frame* f, Val* v) override; TraversalCode Traverse(TraversalCallback* cb) const override; @@ -1064,8 +1000,6 @@ protected: void ExprDescribe(ODesc* d) const override; - DECLARE_SERIAL(ListExpr); - expr_list exprs; }; @@ -1079,8 +1013,6 @@ public: protected: friend class Expr; RecordAssignExpr() { } - - DECLARE_SERIAL(RecordAssignExpr); }; class CastExpr : public UnaryExpr { @@ -1093,8 +1025,6 @@ protected: Val* Eval(Frame* f) const override; void ExprDescribe(ODesc* d) const override; - - DECLARE_SERIAL(CastExpr); }; class IsExpr : public UnaryExpr { @@ -1108,7 +1038,6 @@ protected: Val* Fold(Val* v) const override; void ExprDescribe(ODesc* d) const override; - DECLARE_SERIAL(IsExpr); private: BroType* t; diff --git a/src/File.cc b/src/File.cc index 609ea4f0ac..8ac229b9fc 100644 --- a/src/File.cc +++ b/src/File.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include #ifdef TIME_WITH_SYS_TIME @@ -18,64 +18,16 @@ #include #include -#include -#include -#include - #include #include "File.h" #include "Type.h" -#include "Timer.h" #include "Expr.h" #include "NetVar.h" #include "Net.h" -#include "Serializer.h" #include "Event.h" #include "Reporter.h" -// Timer which on dispatching rotates the file. 
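The code being removed here (the RotateTimer class continuing below, plus the rotation and encryption plumbing further down in this file) was the in-core driver for interval- and size-based rotation. What the patch keeps on BroFile are the primitives Size() and Rotate(), so rotation policy now has to come from the caller. A hypothetical caller-side check built only from what remains, not part of the patch (the threshold logic is illustrative):

static void maybe_rotate(BroFile* f, double max_bytes)
	{
	if ( f->Size() < max_bytes )
		return;

	RecordVal* info = f->Rotate();	// yields the rotate_info record

	if ( info )
		Unref(info);	// real code would hand this to its post-processing logic first
	}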
-class RotateTimer : public Timer { -public: - RotateTimer(double t, BroFile* f, bool arg_raise) : Timer(t, TIMER_ROTATE) - { file = f; raise = arg_raise; name = copy_string(f->Name()); } - ~RotateTimer(); - - void Dispatch(double t, int is_expire); - -protected: - BroFile* file; - bool raise; - const char* name; -}; - -RotateTimer::~RotateTimer() - { - if ( file->rotate_timer == this ) - file->rotate_timer = 0; - - delete [] name; - } - -void RotateTimer::Dispatch(double t, int is_expire) - { - file->rotate_timer = 0; - - if ( ! is_expire ) - { - if ( raise ) - { - val_list* vl = new val_list; - Ref(file); - vl->append(new Val(file)); - mgr.QueueEvent(rotate_interval, vl); - } - - file->InstallRotateTimer(); - } - } - - // The following could in principle be part of a "file manager" object. #define MAX_FILE_CACHE_SIZE 512 @@ -83,9 +35,6 @@ static int num_files_in_cache = 0; static BroFile* head = 0; static BroFile* tail = 0; -double BroFile::default_rotation_interval = 0; -double BroFile::default_rotation_size = 0; - // Maximizes the number of open file descriptors and returns the number // that we should use for the cache. static int maximize_num_fds() @@ -121,9 +70,6 @@ BroFile::BroFile(FILE* arg_f) name = access = 0; t = base_type(TYPE_STRING); is_open = (f != 0); - - if ( f ) - UpdateFileSize(); } BroFile::BroFile(FILE* arg_f, const char* arg_name, const char* arg_access) @@ -134,9 +80,6 @@ BroFile::BroFile(FILE* arg_f, const char* arg_name, const char* arg_access) access = copy_string(arg_access); t = base_type(TYPE_STRING); is_open = (f != 0); - - if ( f ) - UpdateFileSize(); } BroFile::BroFile(const char* arg_name, const char* arg_access, BroType* arg_t) @@ -171,7 +114,7 @@ const char* BroFile::Name() const return name; if ( f == stdin ) - return"/dev/stdin"; + return "/dev/stdin"; if ( f == stdout ) return "/dev/stdout"; @@ -195,16 +138,6 @@ bool BroFile::Open(FILE* file, const char* mode) f = file; - if ( default_rotation_interval && - (! attrs || ! attrs->FindAttr(ATTR_ROTATE_INTERVAL)) ) - rotate_interval = default_rotation_interval; - - if ( default_rotation_size && - (! attrs || ! attrs->FindAttr(ATTR_ROTATE_SIZE)) ) - rotate_size = default_rotation_size; - - InstallRotateTimer(); - if ( ! f ) { if ( ! mode ) @@ -223,7 +156,6 @@ bool BroFile::Open(FILE* file, const char* mode) is_open = okay_to_manage = 1; InsertAtBeginning(); - UpdateFileSize(); } else { @@ -245,7 +177,6 @@ BroFile::~BroFile() delete [] name; delete [] access; - delete [] cipher_buffer; #ifdef USE_PERFTOOLS_DEBUG heap_checker->UnIgnoreObject(this); @@ -257,18 +188,11 @@ void BroFile::Init() is_open = okay_to_manage = is_in_cache = 0; position = 0; next = prev = 0; - rotate_timer = 0; - rotate_interval = 0.0; - rotate_size = current_size = 0.0; - open_time = 0; attrs = 0; buffered = true; print_hook = true; raw_output = false; t = 0; - pub_key = 0; - cipher_ctx = 0; - cipher_buffer = 0; #ifdef USE_PERFTOOLS_DEBUG heap_checker->IgnoreObject(this); @@ -318,9 +242,6 @@ FILE* BroFile::BringIntoCache() return 0; } - RaiseOpenEvent(); - UpdateFileSize(); - if ( fseek(f, position, SEEK_SET) < 0 ) { bro_strerror_r(errno, buf, sizeof(buf)); @@ -328,6 +249,7 @@ FILE* BroFile::BringIntoCache() } InsertAtBeginning(); + RaiseOpenEvent(); return f; } @@ -356,17 +278,9 @@ void BroFile::SetBuf(bool arg_buffered) int BroFile::Close() { - if ( rotate_timer ) - { - timer_mgr->Cancel(rotate_timer); - rotate_timer = 0; - } - if ( ! is_open ) return 1; - FinishEncrypt(); - // Do not close stdin/stdout/stderr. 
if ( f == stdin || f == stdout || f == stderr ) return 0; @@ -517,33 +431,8 @@ void BroFile::SetAttrs(Attributes* arg_attrs) attrs = arg_attrs; Ref(attrs); - Attr* ef = attrs->FindAttr(ATTR_ROTATE_INTERVAL); - if ( ef ) - rotate_interval = ef->AttrExpr()->ExprVal()->AsInterval(); - - ef = attrs->FindAttr(ATTR_ROTATE_SIZE); - if ( ef ) - rotate_size = ef->AttrExpr()->ExprVal()->AsDouble(); - - ef = attrs->FindAttr(ATTR_ENCRYPT); - if ( ef ) - { - if ( ef->AttrExpr() ) - InitEncrypt(ef->AttrExpr()->ExprVal()->AsString()->CheckString()); - else - InitEncrypt(opt_internal_string("log_encryption_key")->CheckString()); - } - if ( attrs->FindAttr(ATTR_RAW_OUTPUT) ) EnableRawOutput(); - - InstallRotateTimer(); - } - -void BroFile::SetRotateInterval(double secs) - { - rotate_interval = secs; - InstallRotateTimer(); } RecordVal* BroFile::Rotate() @@ -577,182 +466,17 @@ RecordVal* BroFile::Rotate() return info; } -void BroFile::InstallRotateTimer() - { - if ( terminating ) - return; - - if ( rotate_timer ) - { - timer_mgr->Cancel(rotate_timer); - rotate_timer = 0; - } - - if ( rotate_interval ) - { - // When this is called for the first time, network_time can - // still be zero. If so, we set a timer which fires - // immediately but doesn't rotate when it expires. - - if ( ! network_time ) - rotate_timer = new RotateTimer(1, this, false); - else - { - if ( ! open_time ) - open_time = network_time; - - const char* base_time = log_rotate_base_time ? - log_rotate_base_time->AsString()->CheckString() : 0; - - double base = parse_rotate_base_time(base_time); - double delta_t = - calc_next_rotate(network_time, rotate_interval, base); - rotate_timer = new RotateTimer(network_time + delta_t, - this, true); - } - - timer_mgr->Add(rotate_timer); - } - } - -void BroFile::SetDefaultRotation(double interval, double max_size) - { - for ( BroFile* f = head; f; f = f->next ) - { - if ( ! (f->attrs && f->attrs->FindAttr(ATTR_ROTATE_INTERVAL)) ) - { - f->rotate_interval = interval; - f->InstallRotateTimer(); - } - - if ( ! (f->attrs && f->attrs->FindAttr(ATTR_ROTATE_SIZE)) ) - f->rotate_size = max_size; - } - - default_rotation_interval = interval; - default_rotation_size = max_size; - } - void BroFile::CloseCachedFiles() { BroFile* next; for ( BroFile* f = head; f; f = next ) { - // Send final rotate events (immediately). - if ( f->rotate_interval ) - { - val_list* vl = new val_list; - Ref(f); - vl->append(new Val(f)); - Event* event = new Event(::rotate_interval, vl); - mgr.Dispatch(event, true); - } - - if ( f->rotate_size ) - { - val_list* vl = new val_list; - Ref(f); - vl->append(new Val(f)); - Event* event = new ::Event(::rotate_size, vl); - mgr.Dispatch(event, true); - } - next = f->next; if ( f->is_in_cache ) f->Close(); } } -void BroFile::InitEncrypt(const char* keyfile) - { - if ( ! (pub_key || keyfile) ) - return; - - if ( ! pub_key ) - { - FILE* key = fopen(keyfile, "r"); - - if ( ! key ) - { - reporter->Error("can't open key file %s: %s", keyfile, strerror(errno)); - Close(); - return; - } - - pub_key = PEM_read_PUBKEY(key, 0, 0, 0); - if ( ! pub_key ) - { - reporter->Error("can't read key from %s: %s", keyfile, - ERR_error_string(ERR_get_error(), 0)); - Close(); - return; - } - } - - // Depending on the OpenSSL version, EVP_*_cbc() - // returns a const or a non-const. 
- EVP_CIPHER* cipher_type = (EVP_CIPHER*) EVP_bf_cbc(); - cipher_ctx = EVP_CIPHER_CTX_new(); - - unsigned char secret[EVP_PKEY_size(pub_key)]; - unsigned char* psecret = secret; - unsigned int secret_len; - - int iv_len = EVP_CIPHER_iv_length(cipher_type); - unsigned char iv[iv_len]; - - if ( ! EVP_SealInit(cipher_ctx, cipher_type, &psecret, - (int*) &secret_len, iv, &pub_key, 1) ) - { - reporter->Error("can't init cipher context for %s: %s", keyfile, - ERR_error_string(ERR_get_error(), 0)); - Close(); - return; - } - - secret_len = htonl(secret_len); - - if ( fwrite("BROENC1", 7, 1, f) < 1 || - fwrite(&secret_len, sizeof(secret_len), 1, f) < 1 || - fwrite(secret, ntohl(secret_len), 1, f) < 1 || - fwrite(iv, iv_len, 1, f) < 1 ) - { - reporter->Error("can't write header to log file %s: %s", - name, strerror(errno)); - Close(); - return; - } - - int buf_size = MIN_BUFFER_SIZE + EVP_CIPHER_block_size(cipher_type); - cipher_buffer = new unsigned char[buf_size]; - } - -void BroFile::FinishEncrypt() - { - if ( ! is_open ) - return; - - if ( ! pub_key ) - return; - - if ( cipher_ctx ) - { - int outl; - EVP_SealFinal(cipher_ctx, cipher_buffer, &outl); - - if ( outl && fwrite(cipher_buffer, outl, 1, f) < 1 ) - { - reporter->Error("write error for %s: %s", - name, strerror(errno)); - return; - } - - EVP_CIPHER_CTX_free(cipher_ctx); - cipher_ctx = 0; - } - } - - int BroFile::Write(const char* data, int len) { if ( ! is_open ) @@ -764,52 +488,9 @@ int BroFile::Write(const char* data, int len) if ( ! len ) len = strlen(data); - if ( cipher_ctx ) - { - while ( len ) - { - int outl; - int inl = min(+MIN_BUFFER_SIZE, len); - - if ( ! EVP_SealUpdate(cipher_ctx, cipher_buffer, &outl, - (unsigned char*)data, inl) ) - { - reporter->Error("encryption error for %s: %s", - name, - ERR_error_string(ERR_get_error(), 0)); - Close(); - return 0; - } - - if ( outl && fwrite(cipher_buffer, outl, 1, f) < 1 ) - { - reporter->Error("write error for %s: %s", - name, strerror(errno)); - Close(); - return 0; - } - - data += inl; - len -= inl; - } - - return 1; - } - if ( fwrite(data, len, 1, f) < 1 ) return false; - if ( rotate_size && current_size < rotate_size && current_size + len >= rotate_size ) - { - val_list* vl = new val_list; - vl->append(new Val(this)); - mgr.QueueEvent(::rotate_size, vl); - } - - // This does not work if we seek around. But none of the logs does that - // and we avoid stat()'ing the file all the time. - current_size += len; - return true; } @@ -818,29 +499,22 @@ void BroFile::RaiseOpenEvent() if ( ! ::file_opened ) return; - val_list* vl = new val_list; Ref(this); - vl->append(new Val(this)); - Event* event = new ::Event(::file_opened, vl); + Event* event = new ::Event(::file_opened, {new Val(this)}); mgr.Dispatch(event, true); } -void BroFile::UpdateFileSize() +double BroFile::Size() { + fflush(f); struct stat s; if ( fstat(fileno(f), &s) < 0 ) { reporter->Error("can't stat fd for %s: %s", name, strerror(errno)); - current_size = 0; - return; + return 0; } - current_size = double(s.st_size); - } - -bool BroFile::Serialize(SerialInfo* info) const - { - return SerialObj::Serialize(info); + return s.st_size; } BroFile* BroFile::GetFile(const char* name) @@ -854,139 +528,3 @@ BroFile* BroFile::GetFile(const char* name) return new BroFile(name, "w", 0); } -BroFile* BroFile::Unserialize(UnserialInfo* info) - { - BroFile* file = (BroFile*) SerialObj::Unserialize(info, SER_BRO_FILE); - - if ( ! 
file ) - return 0; - - if ( file->is_open ) - return file; - - // If there is already an object for this file, return it. - if ( file->name ) - { - for ( BroFile* f = head; f; f = f->next ) - { - if ( f->name && streq(file->name, f->name) ) - { - Unref(file); - Ref(f); - return f; - } - } - } - - // Otherwise, open, but don't clobber. - if ( ! file->Open(0, "a") ) - { - info->s->Error(fmt("cannot open %s: %s", - file->name, strerror(errno))); - return 0; - } - - // Here comes a hack. This method will return a pointer to a newly - // instantiated file object. As soon as this pointer is Unref'ed, the - // file will be closed. That means that when we unserialize the same - // file next time, we will re-open it and thereby delete the first one, - // i.e., we will be keeping to delete what we've written just before. - // - // To avoid this loop, we do an extra Ref here, i.e., this file will - // *never* be closed anymore (as long the file cache does not overflow). - Ref(file); - - // We deliberately override log rotation attributes with our defaults. - file->rotate_interval = log_rotate_interval; - file->rotate_size = log_max_size; - file->InstallRotateTimer(); - file->SetBuf(file->buffered); - - return file; - } - -IMPLEMENT_SERIAL(BroFile, SER_BRO_FILE); - -bool BroFile::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_BRO_FILE, BroObj); - - const char* s = name; - - if ( ! okay_to_manage ) - { - // We can handle stdin/stdout/stderr but no others. - if ( f == stdin ) - s = "/dev/stdin"; - else if ( f == stdout ) - s = "/dev/stdout"; - else if ( f == stderr ) - s = "/dev/stderr"; - else - { - // We don't manage the file, and therefore don't - // really know how to pass it on to the other side. - // However, in order to not abort communication - // when this happens, we still send the name if we - // have one; or if we don't, we create a special - // "dont-have-a-file" file to be created on the - // receiver side. - if ( ! s ) - s = "unmanaged-bro-output-file.log"; - } - } - - if ( ! (SERIALIZE(s) && SERIALIZE(buffered)) ) - return false; - - SERIALIZE_OPTIONAL_STR(access); - - if ( ! t->Serialize(info) ) - return false; - - SERIALIZE_OPTIONAL(attrs); - return true; - } - -bool BroFile::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BroObj); - - if ( ! (UNSERIALIZE_STR(&name, 0) && UNSERIALIZE(&buffered)) ) - return false; - - UNSERIALIZE_OPTIONAL_STR(access); - - t = BroType::Unserialize(info); - if ( ! t ) - return false; - - UNSERIALIZE_OPTIONAL(attrs, Attributes::Unserialize(info)); - - // Parse attributes. - SetAttrs(attrs); - // SetAttrs() has ref'ed attrs again. - Unref(attrs); - - // Bind stdin/stdout/stderr. - FILE* file = 0; - is_open = false; - f = 0; - - if ( streq(name, "/dev/stdin") ) - file = stdin; - else if ( streq(name, "/dev/stdout") ) - file = stdout; - else if ( streq(name, "/dev/stderr") ) - file = stderr; - - if ( file ) - { - delete [] name; - name = 0; - f = file; - is_open = true; - } - - return true; - } diff --git a/src/File.h b/src/File.h index 3660d3caa4..48689b4617 100644 --- a/src/File.h +++ b/src/File.h @@ -12,13 +12,7 @@ # include # endif // NEED_KRB5_H -// From OpenSSL. We forward-declare these here to avoid introducing a -// dependency on OpenSSL headers just for this header file. 
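A small but recurring change in the File.cc hunk above: the file_opened event is now dispatched with a val_list built in place (new ::Event(::file_opened, {new Val(this)})) rather than a heap-allocated val_list handed over to the callee. A sketch of the same pattern for an arbitrary one-argument handler, not part of the patch (the wrapper is hypothetical; Event, mgr.Dispatch() and the brace-initialized argument list follow the usage shown in the patch):

static void dispatch_one_arg(EventHandlerPtr h, Val* arg)
	{
	if ( ! h )
		{
		Unref(arg);	// no handler registered: just drop our reference
		return;
		}

	mgr.Dispatch(new Event(h, {arg}), true);
	}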
-typedef struct evp_pkey_st EVP_PKEY; -typedef struct evp_cipher_ctx_st EVP_CIPHER_CTX; - class BroType; -class RotateTimer; class BroFile : public BroObj { public: @@ -51,21 +45,14 @@ public: void Describe(ODesc* d) const override; - void SetRotateInterval(double secs); - // Rotates the logfile. Returns rotate_info. RecordVal* Rotate(); - // Set &rotate_interval, &rotate_size, - // and &raw_output attributes. + // Set &raw_output attribute. void SetAttrs(Attributes* attrs); // Returns the current size of the file, after fresh stat'ing. - double Size() { fflush(f); UpdateFileSize(); return current_size; } - - // Set rotate/postprocessor for all files that don't define them - // by their own. (interval/max_size=0 for no rotation; size in bytes). - static void SetDefaultRotation(double interval, double max_size); + double Size(); // Close all files which are managed by us. static void CloseCachedFiles(); @@ -79,12 +66,7 @@ public: void EnableRawOutput() { raw_output = true; } bool IsRawOutput() const { return raw_output; } - bool Serialize(SerialInfo* info) const; - static BroFile* Unserialize(UnserialInfo* info); - protected: - friend class RotateTimer; - BroFile() { Init(); } void Init(); @@ -105,7 +87,6 @@ protected: void Unlink(); void InsertAtBeginning(); void MoveToBeginning(); - void InstallRotateTimer(); // Returns nil if the file is not active, was in error, etc. // (Protected because we do not want anyone to write directly @@ -113,19 +94,9 @@ protected: FILE* File(); FILE* BringIntoCache(); - // Stats the file to get its current size. - void UpdateFileSize(); - // Raises a file_opened event. void RaiseOpenEvent(); - // Initialize encryption with the given public key. - void InitEncrypt(const char* keyfile); - // Finalize encryption. - void FinishEncrypt(); - - DECLARE_SERIAL(BroFile); - FILE* f; BroType* t; char* name; @@ -137,28 +108,12 @@ protected: BroFile* next; // doubly-linked list of cached files BroFile* prev; Attributes* attrs; - double rotate_interval; bool buffered; - - // Sizes are double's so that it's easy to specify large - // ones with scientific notation, and so they can exceed 4GB. - double rotate_size; - double current_size; - - Timer* rotate_timer; double open_time; bool print_hook; bool raw_output; - static double default_rotation_interval; - static double default_rotation_size; - - EVP_PKEY* pub_key; - EVP_CIPHER_CTX* cipher_ctx; - static const int MIN_BUFFER_SIZE = 1024; - unsigned char* cipher_buffer; - }; #endif diff --git a/src/Frag.cc b/src/Frag.cc index 842059e218..c6a5b3ba0d 100644 --- a/src/Frag.cc +++ b/src/Frag.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include "util.h" #include "Hash.h" diff --git a/src/Frame.cc b/src/Frame.cc index f30312aaec..d065fb440a 100644 --- a/src/Frame.cc +++ b/src/Frame.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include "Frame.h" #include "Stmt.h" diff --git a/src/Func.cc b/src/Func.cc index cbbbef6fa5..ccd9793291 100644 --- a/src/Func.cc +++ b/src/Func.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. 
-#include "bro-config.h" +#include "zeek-config.h" #include #include @@ -41,8 +41,6 @@ #include "analyzer/protocol/login/Login.h" #include "Sessions.h" #include "RE.h" -#include "Serializer.h" -#include "RemoteSerializer.h" #include "Event.h" #include "Traverse.h" #include "Reporter.h" @@ -128,110 +126,6 @@ void Func::AddBody(Stmt* /* new_body */, id_list* /* new_inits */, Internal("Func::AddBody called"); } -bool Func::Serialize(SerialInfo* info) const - { - return SerialObj::Serialize(info); - } - -Func* Func::Unserialize(UnserialInfo* info) - { - Func* f = (Func*) SerialObj::Unserialize(info, SER_FUNC); - - // For builtins, we return a reference to the (hopefully) already - // existing function. - if ( f && f->kind == BUILTIN_FUNC ) - { - const char* name = ((BuiltinFunc*) f)->Name(); - ID* id = global_scope()->Lookup(name); - if ( ! id ) - { - info->s->Error(fmt("can't find built-in %s", name)); - return 0; - } - - if ( ! (id->HasVal() && id->ID_Val()->Type()->Tag() == TYPE_FUNC) ) - { - info->s->Error(fmt("ID %s is not a built-in", name)); - return 0; - } - - Unref(f); - f = id->ID_Val()->AsFunc(); - Ref(f); - } - - return f; - } - -bool Func::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_FUNC, BroObj); - - if ( ! SERIALIZE(int(bodies.size())) ) - return false; - - for ( unsigned int i = 0; i < bodies.size(); ++i ) - { - if ( ! bodies[i].stmts->Serialize(info) ) - return false; - if ( ! SERIALIZE(bodies[i].priority) ) - return false; - } - - if ( ! SERIALIZE(char(kind) ) ) - return false; - - if ( ! type->Serialize(info) ) - return false; - - if ( ! SERIALIZE(Name()) ) - return false; - - // We don't serialize scope as only global functions are considered here - // anyway. - return true; - } - -bool Func::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BroObj); - - int len; - if ( ! UNSERIALIZE(&len) ) - return false; - - while ( len-- ) - { - Body b; - b.stmts = Stmt::Unserialize(info); - if ( ! b.stmts ) - return false; - - if ( ! UNSERIALIZE(&b.priority) ) - return false; - - bodies.push_back(b); - } - - char c; - if ( ! UNSERIALIZE(&c) ) - return false; - - kind = (Kind) c; - - type = BroType::Unserialize(info); - if ( ! type ) - return false; - - const char* n; - if ( ! 
UNSERIALIZE_STR(&n, 0) ) - return false; - - name = n; - delete [] n; - - return true; - } void Func::DescribeDebug(ODesc* d, const val_list* args) const { @@ -585,21 +479,6 @@ Stmt* BroFunc::AddInits(Stmt* body, id_list* inits) return stmt_series; } -IMPLEMENT_SERIAL(BroFunc, SER_BRO_FUNC); - -bool BroFunc::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_BRO_FUNC, Func); - return SERIALIZE(frame_size); - } - -bool BroFunc::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Func); - - return UNSERIALIZE(&frame_size); - } - BuiltinFunc::BuiltinFunc(built_in_func arg_func, const char* arg_name, int arg_is_pure) : Func(BUILTIN_FUNC) @@ -682,20 +561,6 @@ void BuiltinFunc::Describe(ODesc* d) const d->AddCount(is_pure); } -IMPLEMENT_SERIAL(BuiltinFunc, SER_BUILTIN_FUNC); - -bool BuiltinFunc::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_BUILTIN_FUNC, Func); - return true; - } - -bool BuiltinFunc::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Func); - return true; - } - void builtin_error(const char* msg, BroObj* arg) { auto emit = [=](const CallExpr* ce) @@ -762,13 +627,13 @@ void builtin_error(const char* msg, BroObj* arg) emit(last_call.call); } -#include "bro.bif.func_h" +#include "zeek.bif.func_h" #include "stats.bif.func_h" #include "reporter.bif.func_h" #include "strings.bif.func_h" #include "option.bif.func_h" -#include "bro.bif.func_def" +#include "zeek.bif.func_def" #include "stats.bif.func_def" #include "reporter.bif.func_def" #include "strings.bif.func_def" @@ -795,7 +660,7 @@ void init_builtin_funcs() var_sizes = internal_type("var_sizes")->AsTableType(); -#include "bro.bif.func_init" +#include "zeek.bif.func_init" #include "stats.bif.func_init" #include "reporter.bif.func_init" #include "strings.bif.func_init" diff --git a/src/Func.h b/src/Func.h index 48e0c2e8b8..765d1ec499 100644 --- a/src/Func.h +++ b/src/Func.h @@ -59,10 +59,6 @@ public: void Describe(ODesc* d) const override = 0; virtual void DescribeDebug(ODesc* d, const val_list* args) const; - // This (un-)serializes only a single body (as given in SerialInfo). - bool Serialize(SerialInfo* info) const; - static Func* Unserialize(UnserialInfo* info); - virtual TraversalCode Traverse(TraversalCallback* cb) const; uint32 GetUniqueFuncID() const { return unique_id; } @@ -75,8 +71,6 @@ protected: // Helper function for handling result of plugin hook. std::pair HandlePluginResult(std::pair plugin_result, val_list* args, function_flavor flavor) const; - DECLARE_ABSTRACT_SERIAL(Func); - vector bodies; Scope* scope; Kind kind; @@ -106,8 +100,6 @@ protected: BroFunc() : Func(BRO_FUNC) {} Stmt* AddInits(Stmt* body, id_list* inits); - DECLARE_SERIAL(BroFunc); - int frame_size; }; @@ -127,8 +119,6 @@ public: protected: BuiltinFunc() { func = 0; is_pure = 0; } - DECLARE_SERIAL(BuiltinFunc); - built_in_func func; int is_pure; }; diff --git a/src/Hash.cc b/src/Hash.cc index bb1c103677..a40dc4d2f8 100644 --- a/src/Hash.cc +++ b/src/Hash.cc @@ -15,7 +15,7 @@ // for the adversary to construct conflicts, though I do not know if // HMAC/MD5 is provably universal. -#include "bro-config.h" +#include "zeek-config.h" #include "Hash.h" #include "Reporter.h" @@ -26,7 +26,7 @@ void init_hash_function() { // Make sure we have already called init_random_seed(). if ( ! 
(hmac_key_set && siphash_key_set) ) - reporter->InternalError("Bro's hash functions aren't fully initialized"); + reporter->InternalError("Zeek's hash functions aren't fully initialized"); } HashKey::HashKey(bro_int_t i) diff --git a/src/ID.cc b/src/ID.cc index fd99d7c937..ad7f345667 100644 --- a/src/ID.cc +++ b/src/ID.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include "ID.h" #include "Expr.h" @@ -9,12 +9,9 @@ #include "Func.h" #include "Scope.h" #include "File.h" -#include "Serializer.h" -#include "RemoteSerializer.h" -#include "PersistenceSerializer.h" #include "Scope.h" #include "Traverse.h" -#include "broxygen/Manager.h" +#include "zeekygen/Manager.h" ID::ID(const char* arg_name, IDScope arg_scope, bool arg_is_export) { @@ -62,50 +59,14 @@ void ID::ClearVal() val = 0; } -void ID::SetVal(Val* v, Opcode op, bool arg_weak_ref) +void ID::SetVal(Val* v, bool arg_weak_ref) { - if ( op != OP_NONE ) - { - if ( type && val && type->Tag() == TYPE_TABLE && - val->AsTableVal()->FindAttr(ATTR_MERGEABLE) && - v->AsTableVal()->FindAttr(ATTR_MERGEABLE) ) - { - StateAccess::Log(new StateAccess(OP_ASSIGN, this, - v, val)); - v->AsTableVal()->AddTo(val->AsTableVal(), 0, false); - return; - } - - MutableVal::Properties props = 0; - - if ( attrs && attrs->FindAttr(ATTR_SYNCHRONIZED) ) - props |= MutableVal::SYNCHRONIZED; - - if ( attrs && attrs->FindAttr(ATTR_PERSISTENT) ) - props |= MutableVal::PERSISTENT; - - if ( attrs && attrs->FindAttr(ATTR_TRACKED) ) - props |= MutableVal::TRACKED; - - if ( props ) - { - if ( v->IsMutableVal() ) - v->AsMutableVal()->AddProperties(props); - } - -#ifndef DEBUG - if ( props ) -#else - if ( debug_logger.IsVerbose() || props ) -#endif - StateAccess::Log(new StateAccess(op, this, v, val)); - } - if ( ! weak_ref ) Unref(val); val = v; weak_ref = arg_weak_ref; + Modified(); #ifdef DEBUG UpdateValID(); @@ -194,31 +155,6 @@ void ID::UpdateValAttrs() if ( ! attrs ) return; - MutableVal::Properties props = 0; - - if ( val && val->IsMutableVal() ) - { - if ( attrs->FindAttr(ATTR_SYNCHRONIZED) ) - props |= MutableVal::SYNCHRONIZED; - - if ( attrs->FindAttr(ATTR_PERSISTENT) ) - props |= MutableVal::PERSISTENT; - - if ( attrs->FindAttr(ATTR_TRACKED) ) - props |= MutableVal::TRACKED; - - val->AsMutableVal()->AddProperties(props); - } - - if ( ! 
IsInternalGlobal() ) - { - if ( attrs->FindAttr(ATTR_SYNCHRONIZED) ) - remote_serializer->Register(this); - - if ( attrs->FindAttr(ATTR_PERSISTENT) ) - persistence_serializer->Register(this); - } - if ( val && val->Type()->Tag() == TYPE_TABLE ) val->AsTableVal()->SetAttrs(attrs); @@ -253,16 +189,35 @@ void ID::UpdateValAttrs() } } -void ID::MakeDeprecated() +void ID::MakeDeprecated(Expr* deprecation) { if ( IsDeprecated() ) return; - attr_list* attr = new attr_list; - attr->append(new Attr(ATTR_DEPRECATED)); + attr_list* attr = new attr_list{new Attr(ATTR_DEPRECATED, deprecation)}; AddAttrs(new Attributes(attr, Type(), false)); } +string ID::GetDeprecationWarning() const + { + string result; + Attr* depr_attr = FindAttr(ATTR_DEPRECATED); + if ( depr_attr ) + { + ConstExpr* expr = static_cast(depr_attr->AttrExpr()); + if ( expr ) + { + StringVal* text = expr->Value()->AsStringVal(); + result = text->CheckString(); + } + } + + if ( result.empty() ) + return fmt("deprecated (%s)", Name()); + else + return fmt("deprecated (%s): %s", Name(), result.c_str()); + } + void ID::AddAttrs(Attributes* a) { if ( attrs ) @@ -277,22 +232,6 @@ void ID::RemoveAttr(attr_tag a) { if ( attrs ) attrs->RemoveAttr(a); - - if ( val && val->IsMutableVal() ) - { - MutableVal::Properties props = 0; - - if ( a == ATTR_SYNCHRONIZED ) - props |= MutableVal::SYNCHRONIZED; - - if ( a == ATTR_PERSISTENT ) - props |= MutableVal::PERSISTENT; - - if ( a == ATTR_TRACKED ) - props |= MutableVal::TRACKED; - - val->AsMutableVal()->RemoveProperties(props); - } } void ID::SetOption() @@ -305,8 +244,7 @@ void ID::SetOption() // option implied redefinable if ( ! IsRedefinable() ) { - attr_list* attr = new attr_list; - attr->append(new Attr(ATTR_REDEF)); + attr_list* attr = new attr_list{new Attr(ATTR_REDEF)}; AddAttrs(new Attributes(attr, Type(), false)); } } @@ -324,11 +262,6 @@ void ID::EvalFunc(Expr* ef, Expr* ev) Unref(ce); } -bool ID::Serialize(SerialInfo* info) const - { - return (ID*) SerialObj::Serialize(info); - } - #if 0 void ID::CopyFrom(const ID* id) { @@ -339,9 +272,6 @@ void ID::CopyFrom(const ID* id) offset = id->offset ; infer_return_type = id->infer_return_type; - if ( FindAttr(ATTR_PERSISTENT) ) - persistence_serializer->Unregister(this); - if ( id->type ) Ref(id->type); if ( id->val && ! id->weak_ref ) @@ -362,223 +292,8 @@ void ID::CopyFrom(const ID* id) #ifdef DEBUG UpdateValID(); #endif - - if ( FindAttr(ATTR_PERSISTENT) ) - persistence_serializer->Unregister(this); - } #endif -ID* ID::Unserialize(UnserialInfo* info) - { - ID* id = (ID*) SerialObj::Unserialize(info, SER_ID); - if ( ! id ) - return 0; - - if ( ! id->IsGlobal() ) - return id; - - // Globals. - ID* current = global_scope()->Lookup(id->name); - - if ( ! current ) - { - if ( ! info->install_globals ) - { - info->s->Error("undefined"); - Unref(id); - return 0; - } - - Ref(id); - global_scope()->Insert(id->Name(), id); -#ifdef USE_PERFTOOLS_DEBUG - heap_checker->IgnoreObject(id); -#endif - } - - else - { - if ( info->id_policy != UnserialInfo::InstantiateNew ) - { - persistence_serializer->Unregister(current); - remote_serializer->Unregister(current); - } - - switch ( info->id_policy ) { - - case UnserialInfo::Keep: - Unref(id); - Ref(current); - id = current; - break; - - case UnserialInfo::Replace: - Unref(current); - Ref(id); - global_scope()->Insert(id->Name(), id); - break; - - case UnserialInfo::CopyNewToCurrent: - if ( ! same_type(current->type, id->type) ) - { - info->s->Error("type mismatch"); - Unref(id); - return 0; - } - - if ( ! 
current->weak_ref ) - Unref(current->val); - - current->val = id->val; - current->weak_ref = id->weak_ref; - if ( current->val && ! current->weak_ref ) - Ref(current->val); - -#ifdef DEBUG - current->UpdateValID(); -#endif - - Unref(id); - Ref(current); - id = current; - - break; - - case UnserialInfo::CopyCurrentToNew: - if ( ! same_type(current->type, id->type) ) - { - info->s->Error("type mismatch"); - return 0; - } - if ( ! id->weak_ref ) - Unref(id->val); - id->val = current->val; - id->weak_ref = current->weak_ref; - if ( id->val && ! id->weak_ref ) - Ref(id->val); - -#ifdef DEBUG - id->UpdateValID(); -#endif - - Unref(current); - Ref(id); - global_scope()->Insert(id->Name(), id); - break; - - case UnserialInfo::InstantiateNew: - // Do nothing. - break; - - default: - reporter->InternalError("unknown type for UnserialInfo::id_policy"); - } - } - - if ( id->FindAttr(ATTR_PERSISTENT) ) - persistence_serializer->Register(id); - - if ( id->FindAttr(ATTR_SYNCHRONIZED) ) - remote_serializer->Register(id); - - return id; - - } - -IMPLEMENT_SERIAL(ID, SER_ID); - -bool ID::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE_WITH_SUSPEND(SER_ID, BroObj); - - if ( info->cont.NewInstance() ) - { - DisableSuspend suspend(info); - - info->s->WriteOpenTag("ID"); - - if ( ! (SERIALIZE(name) && - SERIALIZE(char(scope)) && - SERIALIZE(is_export) && - SERIALIZE(is_const) && - SERIALIZE(is_enum_const) && - SERIALIZE(is_type) && - SERIALIZE(offset) && - SERIALIZE(infer_return_type) && - SERIALIZE(weak_ref) && - type->Serialize(info)) ) - return false; - - SERIALIZE_OPTIONAL(attrs); - } - - SERIALIZE_OPTIONAL(val); - - return true; - } - -bool ID::DoUnserialize(UnserialInfo* info) - { - bool installed_tmp = false; - - DO_UNSERIALIZE(BroObj); - - char id_scope; - - if ( ! (UNSERIALIZE_STR(&name, 0) && - UNSERIALIZE(&id_scope) && - UNSERIALIZE(&is_export) && - UNSERIALIZE(&is_const) && - UNSERIALIZE(&is_enum_const) && - UNSERIALIZE(&is_type) && - UNSERIALIZE(&offset) && - UNSERIALIZE(&infer_return_type) && - UNSERIALIZE(&weak_ref) - ) ) - return false; - - scope = IDScope(id_scope); - - info->s->SetErrorDescr(fmt("unserializing ID %s", name)); - - type = BroType::Unserialize(info); - if ( ! type ) - return false; - - UNSERIALIZE_OPTIONAL(attrs, Attributes::Unserialize(info)); - - // If it's a global function not currently known, - // we temporarily install it in global scope. - // This is necessary for recursive functions. - if ( IsGlobal() && Type()->Tag() == TYPE_FUNC ) - { - ID* current = global_scope()->Lookup(name); - if ( ! current ) - { - installed_tmp = true; - global_scope()->Insert(Name(), this); - } - } - - UNSERIALIZE_OPTIONAL(val, Val::Unserialize(info)); -#ifdef DEBUG - UpdateValID(); -#endif - - if ( weak_ref ) - { - // At this point at least the serialization cache will hold a - // reference so this will not delete the val. - assert(val->RefCnt() > 1); - Unref(val); - } - - if ( installed_tmp && ! 
global_scope()->Remove(name) ) - reporter->InternalWarning("missing tmp ID in %s unserialization", name); - - return true; - } - TraversalCode ID::Traverse(TraversalCallback* cb) const { TraversalCode tc = cb->PreID(this); @@ -651,9 +366,9 @@ void ID::DescribeExtended(ODesc* d) const void ID::DescribeReSTShort(ODesc* d) const { if ( is_type ) - d->Add(":bro:type:`"); + d->Add(":zeek:type:`"); else - d->Add(":bro:id:`"); + d->Add(":zeek:id:`"); d->Add(name); d->Add("`"); @@ -661,7 +376,7 @@ void ID::DescribeReSTShort(ODesc* d) const if ( type ) { d->Add(": "); - d->Add(":bro:type:`"); + d->Add(":zeek:type:`"); if ( ! is_type && ! type->GetName().empty() ) d->Add(type->GetName().c_str()); @@ -682,7 +397,7 @@ void ID::DescribeReSTShort(ODesc* d) const if ( is_type ) d->Add(type_name(t)); else - d->Add(broxygen_mgr->GetEnumTypeName(Name()).c_str()); + d->Add(zeekygen_mgr->GetEnumTypeName(Name()).c_str()); break; default: @@ -706,18 +421,18 @@ void ID::DescribeReST(ODesc* d, bool roles_only) const if ( roles_only ) { if ( is_type ) - d->Add(":bro:type:`"); + d->Add(":zeek:type:`"); else - d->Add(":bro:id:`"); + d->Add(":zeek:id:`"); d->Add(name); d->Add("`"); } else { if ( is_type ) - d->Add(".. bro:type:: "); + d->Add(".. zeek:type:: "); else - d->Add(".. bro:id:: "); + d->Add(".. zeek:id:: "); d->Add(name); } @@ -730,7 +445,7 @@ void ID::DescribeReST(ODesc* d, bool roles_only) const if ( ! is_type && ! type->GetName().empty() ) { - d->Add(":bro:type:`"); + d->Add(":zeek:type:`"); d->Add(type->GetName()); d->Add("`"); } @@ -748,20 +463,26 @@ void ID::DescribeReST(ODesc* d, bool roles_only) const } if ( val && type && - type->Tag() != TYPE_FUNC && - type->InternalType() != TYPE_INTERNAL_VOID && - // Values within Version module are likely to include a - // constantly-changing version number and be a frequent - // source of error/desynchronization, so don't include them. - ModuleName() != "Version" ) + type->Tag() != TYPE_FUNC && + type->InternalType() != TYPE_INTERNAL_VOID && + // Values within Version module are likely to include a + // constantly-changing version number and be a frequent + // source of error/desynchronization, so don't include them. + ModuleName() != "Version" ) { d->Add(":Default:"); + auto ii = zeekygen_mgr->GetIdentifierInfo(Name()); + auto redefs = ii->GetRedefs(); + auto iv = val; + + if ( ! redefs.empty() && ii->InitialVal() ) + iv = ii->InitialVal(); if ( type->InternalType() == TYPE_INTERNAL_OTHER ) { switch ( type->Tag() ) { case TYPE_TABLE: - if ( val->AsTable()->Length() == 0 ) + if ( iv->AsTable()->Length() == 0 ) { d->Add(" ``{}``"); d->NL(); @@ -771,11 +492,12 @@ void ID::DescribeReST(ODesc* d, bool roles_only) const default: d->NL(); - d->NL(); + d->PushIndent(); d->Add("::"); d->NL(); d->PushIndent(); - val->DescribeReST(d); + iv->DescribeReST(d); + d->PopIndent(); d->PopIndent(); } } @@ -783,9 +505,45 @@ void ID::DescribeReST(ODesc* d, bool roles_only) const else { d->SP(); - val->DescribeReST(d); + iv->DescribeReST(d); d->NL(); } + + for ( auto& ir : redefs ) + { + if ( ! 
ir->init_expr ) + continue; + + if ( ir->ic == INIT_NONE ) + continue; + + std::string redef_str; + ODesc expr_desc; + ir->init_expr->Describe(&expr_desc); + redef_str = expr_desc.Description(); + redef_str = strreplace(redef_str, "\n", " "); + + d->Add(":Redefinition: "); + d->Add(fmt("from :doc:`/scripts/%s`", ir->from_script.data())); + d->NL(); + d->PushIndent(); + + if ( ir->ic == INIT_FULL ) + d->Add("``=``"); + else if ( ir->ic == INIT_EXTRA ) + d->Add("``+=``"); + else if ( ir->ic == INIT_REMOVE ) + d->Add("``-=``"); + else + assert(false); + + d->Add("::"); + d->NL(); + d->PushIndent(); + d->Add(redef_str.data()); + d->PopIndent(); + d->PopIndent(); + } } } diff --git a/src/ID.h b/src/ID.h index 18754584df..b90e5d9597 100644 --- a/src/ID.h +++ b/src/ID.h @@ -5,18 +5,17 @@ #include "Type.h" #include "Attr.h" -#include "StateAccess.h" +#include "Notifier.h" #include "TraverseTypes.h" #include class Val; -class SerialInfo; class Func; typedef enum { INIT_NONE, INIT_FULL, INIT_EXTRA, INIT_REMOVE, } init_class; typedef enum { SCOPE_FUNCTION, SCOPE_MODULE, SCOPE_GLOBAL } IDScope; -class ID : public BroObj { +class ID : public BroObj, public notifier::Modifiable { public: ID(const char* name, IDScope arg_scope, bool arg_is_export); ~ID() override; @@ -47,7 +46,7 @@ public: // reference to the Val, the Val will be destroyed (naturally, // you have to take care that it will not be accessed via // the ID afterwards). - void SetVal(Val* v, Opcode op = OP_ASSIGN, bool weak_ref = false); + void SetVal(Val* v, bool weak_ref = false); void SetVal(Val* v, init_class c); void SetVal(Expr* ev, init_class c); @@ -71,10 +70,6 @@ public: bool IsRedefinable() const { return FindAttr(ATTR_REDEF) != 0; } - // Returns true if ID is one of those internal globally unique IDs - // to which MutableVals are bound (there name start with a '#'). - bool IsInternalGlobal() const { return name && name[0] == '#'; } - void SetAttrs(Attributes* attr); void AddAttrs(Attributes* attr); void RemoveAttr(attr_tag a); @@ -87,7 +82,9 @@ public: bool IsDeprecated() const { return FindAttr(ATTR_DEPRECATED) != 0; } - void MakeDeprecated(); + void MakeDeprecated(Expr* deprecation); + + string GetDeprecationWarning() const; void Error(const char* msg, const BroObj* o2 = 0); @@ -98,9 +95,6 @@ public: void DescribeReST(ODesc* d, bool roles_only = false) const; void DescribeReSTShort(ODesc* d) const; - bool Serialize(SerialInfo* info) const; - static ID* Unserialize(UnserialInfo* info); - bool DoInferReturnType() const { return infer_return_type; } void SetInferReturnType(bool infer) @@ -124,8 +118,6 @@ protected: void UpdateValID(); #endif - DECLARE_SERIAL(ID); - const char* name; IDScope scope; bool is_export; diff --git a/src/IP.h b/src/IP.h index 8be2d3e609..3d5c7bfe96 100644 --- a/src/IP.h +++ b/src/IP.h @@ -3,7 +3,7 @@ #ifndef ip_h #define ip_h -#include "bro-config.h" +#include "zeek-config.h" #include "net_util.h" #include "IPAddr.h" #include "Reporter.h" diff --git a/src/IPAddr.cc b/src/IPAddr.cc index 7917e82c29..76aa34f79a 100644 --- a/src/IPAddr.cc +++ b/src/IPAddr.cc @@ -101,38 +101,44 @@ void IPAddr::ReverseMask(int top_bits_to_chop) p[i] &= mask_bits[i]; } -void IPAddr::Init(const std::string& s) +bool IPAddr::ConvertString(const char* s, in6_addr* result) { - if ( s.find(':') == std::string::npos ) // IPv4. 
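[Illustrative sketch, assuming the usual Zeek headers; not code from the patch.] The MakeDeprecated(Expr*) and GetDeprecationWarning() additions to src/ID.{cc,h} above let a message accompany the &deprecated attribute. A minimal way to exercise them might look as follows; the identifier name "old_timeout" and the message text are invented:

    #include "ID.h"
    #include "Expr.h"
    #include "Val.h"
    #include "Scope.h"
    #include "Reporter.h"

    // Hypothetical helper: deprecate a global with an explanatory message
    // and emit the combined warning text.
    static void deprecate_old_timeout()
        {
        ID* id = global_scope()->Lookup("old_timeout");

        if ( ! id )
            return;

        if ( ! id->IsDeprecated() )
            // GetDeprecationWarning() reads the attribute expression as a
            // ConstExpr holding a string, so that is what we attach here.
            id->MakeDeprecated(new ConstExpr(new StringVal("use new_timeout instead")));

        reporter->Warning("%s", id->GetDeprecationWarning().c_str());
        }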
+ for ( auto p = s; *p; ++p ) + if ( *p == ':' ) + // IPv6 + return (inet_pton(AF_INET6, s, result->s6_addr) == 1); + + // IPv4 + // Parse the address directly instead of using inet_pton since + // some platforms have more sensitive implementations than others + // that can't e.g. handle leading zeroes. + int a[4]; + int n = 0; + int match_count = sscanf(s, "%d.%d.%d.%d%n", a+0, a+1, a+2, a+3, &n); + + if ( match_count != 4 ) + return false; + + if ( s[n] != '\0' ) + return false; + + for ( auto i = 0; i < 4; ++i ) + if ( a[i] < 0 || a[i] > 255 ) + return false; + + uint32_t addr = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]; + addr = htonl(addr); + memcpy(result->s6_addr, v4_mapped_prefix, sizeof(v4_mapped_prefix)); + memcpy(&result->s6_addr[12], &addr, sizeof(uint32_t)); + return true; + } + +void IPAddr::Init(const char* s) + { + if ( ! ConvertString(s, &in6) ) { - memcpy(in6.s6_addr, v4_mapped_prefix, sizeof(v4_mapped_prefix)); - - // Parse the address directly instead of using inet_pton since - // some platforms have more sensitive implementations than others - // that can't e.g. handle leading zeroes. - int a[4]; - int n = sscanf(s.c_str(), "%d.%d.%d.%d", a+0, a+1, a+2, a+3); - - if ( n != 4 || a[0] < 0 || a[1] < 0 || a[2] < 0 || a[3] < 0 || - a[0] > 255 || a[1] > 255 || a[2] > 255 || a[3] > 255 ) - { - reporter->Error("Bad IP address: %s", s.c_str()); - memset(in6.s6_addr, 0, sizeof(in6.s6_addr)); - return; - } - - uint32_t addr = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]; - addr = htonl(addr); - memcpy(&in6.s6_addr[12], &addr, sizeof(uint32_t)); - } - - else - { - if ( inet_pton(AF_INET6, s.c_str(), in6.s6_addr) <=0 ) - { - reporter->Error("Bad IP address: %s", s.c_str()); - memset(in6.s6_addr, 0, sizeof(in6.s6_addr)); - } + reporter->Error("Bad IP address: %s", s); + memset(in6.s6_addr, 0, sizeof(in6.s6_addr)); } } @@ -275,4 +281,3 @@ string IPPrefix::AsString() const return prefix.AsString() +"/" + l; } - diff --git a/src/IPAddr.h b/src/IPAddr.h index 8ff258a860..1fdff9d979 100644 --- a/src/IPAddr.h +++ b/src/IPAddr.h @@ -68,7 +68,7 @@ public: */ IPAddr(const std::string& s) { - Init(s); + Init(s.data()); } /** @@ -366,6 +366,29 @@ public: unsigned int MemoryAllocation() const { return padded_sizeof(*this); } + /** + * Converts an IPv4 or IPv6 string into a network address structure + * (IPv6 or v4-to-v6-mapping in network bytes order). + * + * @param s the IPv4 or IPv6 string to convert (ASCII, NUL-terminated). + * + * @param result buffer that the caller supplies to store the result. + * + * @return whether the conversion was successful. + */ + static bool ConvertString(const char* s, in6_addr* result); + + /** + * @param s the IPv4 or IPv6 string to convert (ASCII, NUL-terminated). + * + * @return whether the string is a valid IP address + */ + static bool IsValid(const char* s) + { + in6_addr tmp; + return ConvertString(s, &tmp); + } + private: friend class IPPrefix; @@ -373,9 +396,9 @@ private: * Initializes an address instance from a string representation. * * @param s String containing an IP address as either a dotted IPv4 - * address or a hex IPv6 address. + * address or a hex IPv6 address (ASCII, NUL-terminated). 
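[Illustrative sketch, not code from the patch.] The new IPAddr::ConvertString() and IPAddr::IsValid() helpers above make it possible to check a string before constructing an IPAddr, and therefore without triggering a reporter error on bad input. A small example with arbitrary candidate strings:

    #include "IPAddr.h"

    // Count how many candidate strings parse as IP addresses. The inputs
    // are arbitrary examples.
    static int count_valid()
        {
        const char* candidates[] = { "192.168.0.1", "2001:db8::1", "256.1.1.1" };
        int n = 0;

        for ( auto s : candidates )
            if ( IPAddr::IsValid(s) )
                ++n;

        return n;   // 2: the last candidate fails the 0-255 octet check
        }

When given an IPv4 string, ConvertString() fills the caller's in6_addr with a v4-mapped address in network byte order, matching what IPAddr stores internally.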
*/ - void Init(const std::string& s); + void Init(const char* s); in6_addr in6; // IPv6 or v4-to-v6-mapped address diff --git a/src/IntSet.cc b/src/IntSet.cc index f5b004666c..afc538d6ff 100644 --- a/src/IntSet.cc +++ b/src/IntSet.cc @@ -1,4 +1,4 @@ -#include "bro-config.h" +#include "zeek-config.h" #ifdef HAVE_MEMORY_H #include diff --git a/src/List.cc b/src/List.cc index 0f7f706bcd..1b8c2fd5e5 100644 --- a/src/List.cc +++ b/src/List.cc @@ -1,4 +1,4 @@ -#include "bro-config.h" +#include "zeek-config.h" #include #include @@ -12,11 +12,13 @@ BaseList::BaseList(int size) { num_entries = 0; - max_entries = 0; - entry = 0; if ( size <= 0 ) + { + max_entries = 0; + entry = 0; return; + } max_entries = size; @@ -24,7 +26,7 @@ BaseList::BaseList(int size) } -BaseList::BaseList(BaseList& b) +BaseList::BaseList(const BaseList& b) { max_entries = b.max_entries; num_entries = b.num_entries; @@ -38,18 +40,34 @@ BaseList::BaseList(BaseList& b) entry[i] = b.entry[i]; } +BaseList::BaseList(BaseList&& b) + { + entry = b.entry; + num_entries = b.num_entries; + max_entries = b.max_entries; + + b.entry = 0; + b.num_entries = b.max_entries = 0; + } + +BaseList::BaseList(const ent* arr, int n) + { + num_entries = max_entries = n; + entry = (ent*) safe_malloc(max_entries * sizeof(ent)); + memcpy(entry, arr, n * sizeof(ent)); + } + void BaseList::sort(list_cmp_func cmp_func) { qsort(entry, num_entries, sizeof(ent), cmp_func); } -void BaseList::operator=(BaseList& b) +BaseList& BaseList::operator=(const BaseList& b) { if ( this == &b ) - return; // i.e., this already equals itself + return *this; - if ( entry ) - free(entry); + free(entry); max_entries = b.max_entries; num_entries = b.num_entries; @@ -61,6 +79,23 @@ void BaseList::operator=(BaseList& b) for ( int i = 0; i < num_entries; ++i ) entry[i] = b.entry[i]; + + return *this; + } + +BaseList& BaseList::operator=(BaseList&& b) + { + if ( this == &b ) + return *this; + + free(entry); + entry = b.entry; + num_entries = b.num_entries; + max_entries = b.max_entries; + + b.entry = 0; + b.num_entries = b.max_entries = 0; + return *this; } void BaseList::insert(ent a) @@ -145,12 +180,8 @@ ent BaseList::get() void BaseList::clear() { - if ( entry ) - { - free(entry); - entry = 0; - } - + free(entry); + entry = 0; num_entries = max_entries = 0; } diff --git a/src/List.h b/src/List.h index 6fb2bbcec6..15e99eb0dd 100644 --- a/src/List.h +++ b/src/List.h @@ -20,6 +20,8 @@ // Entries must be either a pointer to the data or nonzero data with // sizeof(data) <= sizeof(void*). +#include +#include #include #include "util.h" @@ -28,8 +30,6 @@ typedef int (*list_cmp_func)(const void* v1, const void* v2); class BaseList { public: - ~BaseList() { clear(); } - void clear(); // remove all entries int length() const { return num_entries; } int max() const { return max_entries; } @@ -41,8 +41,14 @@ public: { return padded_sizeof(*this) + pad_size(max_entries * sizeof(ent)); } protected: + ~BaseList() { free(entry); } explicit BaseList(int = 0); - BaseList(BaseList&); + BaseList(const BaseList&); + BaseList(BaseList&&); + BaseList(const ent* arr, int n); + + BaseList& operator=(const BaseList&); + BaseList& operator=(BaseList&&); void insert(ent); // add at head of list @@ -75,7 +81,29 @@ protected: return entry[i]; } - void operator=(BaseList&); + // This could essentially be an std::vector if we wanted. Some + // reasons to maybe not refactor to use std::vector ? + // + // - Harder to use a custom growth factor. 
Also, the growth + // factor would be implementation-specific, taking some control over + // performance out of our hands. + // + // - It won't ever take advantage of realloc's occasional ability to + // grow in-place. + // + // - Combine above point this with lack of control of growth + // factor means the common choice of 2x growth factor causes + // a growth pattern that crawls forward in memory with no possible + // re-use of previous chunks (the new capacity is always larger than + // all previously allocated chunks combined). This point and + // whether 2x is empirically an issue still seems debated (at least + // GCC seems to stand by 2x as empirically better). + // + // - Sketchy shrinking behavior: standard says that requests to + // shrink are non-binding (it's expected implementations heed, but + // still not great to have no guarantee). Also, it would not take + // advantage of realloc's ability to contract in-place, it would + // allocate-and-copy. ent* entry; int max_entries; @@ -103,10 +131,13 @@ struct List(type) : BaseList \ explicit List(type)(type ...); \ List(type)() : BaseList(0) {} \ explicit List(type)(int sz) : BaseList(sz) {} \ - List(type)(List(type)& l) : BaseList((BaseList&)l) {} \ + List(type)(const List(type)& l) : BaseList(l) {} \ + List(type)(List(type)&& l) : BaseList(std::move(l)) {} \ \ - void operator=(List(type)& l) \ - { BaseList::operator=((BaseList&)l); } \ + List(type)& operator=(const List(type)& l) \ + { return (List(type)&) BaseList::operator=(l); } \ + List(type)& operator=(List(type)&& l) \ + { return (List(type)&) BaseList::operator=(std::move(l)); } \ void insert(type a) { BaseList::insert(ent(a)); } \ void sortedinsert(type a, list_cmp_func cmp_func) \ { BaseList::sortedinsert(ent(a), cmp_func); } \ @@ -144,10 +175,14 @@ struct PList(type) : BaseList \ explicit PList(type)(type* ...); \ PList(type)() : BaseList(0) {} \ explicit PList(type)(int sz) : BaseList(sz) {} \ - PList(type)(PList(type)& l) : BaseList((BaseList&)l) {} \ + PList(type)(const PList(type)& l) : BaseList(l) {} \ + PList(type)(PList(type)&& l) : BaseList(std::move(l)) {} \ + PList(type)(std::initializer_list il) : BaseList((const ent*)il.begin(), il.size()) {} \ \ - void operator=(PList(type)& l) \ - { BaseList::operator=((BaseList&)l); } \ + PList(type)& operator=(const PList(type)& l) \ + { return (PList(type)&) BaseList::operator=(l); } \ + PList(type)& operator=(PList(type)&& l) \ + { return (PList(type)&) BaseList::operator=(std::move(l)); } \ void insert(type* a) { BaseList::insert(ent(a)); } \ void sortedinsert(type* a, list_cmp_func cmp_func) \ { BaseList::sortedinsert(ent(a), cmp_func); } \ diff --git a/src/NFA.cc b/src/NFA.cc index c53aa4304b..cf2650b21d 100644 --- a/src/NFA.cc +++ b/src/NFA.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include "NFA.h" #include "EquivClass.h" diff --git a/src/Net.cc b/src/Net.cc index d6cb6632b2..0b1a5346e6 100644 --- a/src/Net.cc +++ b/src/Net.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. 
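[Illustrative sketch, not code from the patch.] The comment block added to src/List.h above weighs keeping BaseList's realloc-based storage against switching to std::vector. For reference, a self-contained sketch of the realloc growth pattern that comment argues for:

    #include <cstdlib>

    // Minimal dynamic pointer buffer grown via realloc with a ~1.5x factor,
    // the in-place-friendly pattern the List.h comment describes. Error
    // handling for a failed realloc is omitted to keep the sketch short.
    struct GrowBuf
        {
        void** entry = nullptr;
        int num = 0;
        int cap = 0;

        void push(void* e)
            {
            if ( num == cap )
                {
                cap = cap ? cap + cap / 2 : 10;   // modest growth factor
                entry = (void**) realloc(entry, cap * sizeof(void*));
                }

            entry[num++] = e;
            }

        ~GrowBuf()    { free(entry); }
        };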
-#include "bro-config.h" +#include "zeek-config.h" #include #ifdef TIME_WITH_SYS_TIME @@ -27,7 +27,6 @@ #include "Reporter.h" #include "Net.h" #include "Anon.h" -#include "Serializer.h" #include "PacketDumper.h" #include "iosource/Manager.h" #include "iosource/PktSrc.h" @@ -49,8 +48,6 @@ int reading_live = 0; int reading_traces = 0; int have_pending_timers = 0; double pseudo_realtime = 0.0; -bool using_communication = false; - double network_time = 0.0; // time according to last packet timestamp // (or current time) double processing_start_time = 0.0; // time started working on current pkt @@ -188,7 +185,7 @@ void net_init(name_list& interfaces, name_list& readfiles, else // have_pending_timers = 1, possibly. We don't set // that here, though, because at this point we don't know - // whether the user's bro_init() event will indeed set + // whether the user's zeek_init() event will indeed set // a timer. reading_traces = reading_live = 0; @@ -309,7 +306,7 @@ void net_run() } #endif current_iosrc = src; - auto communication_enabled = using_communication || broker_mgr->Active(); + auto communication_enabled = broker_mgr->Active(); if ( src ) src->Process(); // which will call net_packet_dispatch() @@ -372,11 +369,6 @@ void net_run() // current packet and its related events. termination_signal(); -#ifdef DEBUG_COMMUNICATION - if ( signal_val == SIGPROF && remote_serializer ) - remote_serializer->DumpDebugData(); -#endif - if ( ! reading_traces ) // Check whether we have timers scheduled for // the future on which we need to wait. diff --git a/src/Net.h b/src/Net.h index bdc84ec74f..26a3d0f883 100644 --- a/src/Net.h +++ b/src/Net.h @@ -7,7 +7,6 @@ #include "util.h" #include "List.h" #include "Func.h" -#include "RemoteSerializer.h" #include "iosource/IOSource.h" #include "iosource/PktSrc.h" #include "iosource/PktDumper.h" @@ -67,9 +66,6 @@ extern double bro_start_network_time; // True if we're a in the process of cleaning-up just before termination. extern bool terminating; -// True if the remote serializer is to be activated. -extern bool using_communication; - // True if Bro is currently parsing scripts. extern bool is_parsing; @@ -83,8 +79,6 @@ extern iosource::PktDumper* pkt_dumper; // where to save packets extern char* writefile; -extern int old_comm_usage_count; - // Script file we have already scanned (or are in the process of scanning). // They are identified by inode number. struct ScannedFile { diff --git a/src/NetVar.cc b/src/NetVar.cc index 57a5452123..f7288847e7 100644 --- a/src/NetVar.cc +++ b/src/NetVar.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. 
-#include "bro-config.h" +#include "zeek-config.h" #include "Var.h" #include "NetVar.h" @@ -30,7 +30,6 @@ RecordType* mime_match; int watchdog_interval; int max_timer_expires; -int max_remote_events_processed; int ignore_checksums; int partial_connection_ok; @@ -78,7 +77,6 @@ bool udp_content_deliver_all_orig; bool udp_content_deliver_all_resp; double dns_session_timeout; -double ntp_session_timeout; double rpc_timeout; ListVal* skip_authentication; @@ -104,8 +102,6 @@ TableType* pm_mappings; RecordType* pm_port_request; RecordType* pm_callit_request; -RecordType* ntp_msg; - RecordType* geo_location; RecordType* entropy_test_result; @@ -144,9 +140,6 @@ RecordType* backdoor_endp_stats; RecordType* software; RecordType* software_version; -RecordType* OS_version; -EnumType* OS_version_inference; -TableVal* generate_OS_version_event; double table_expire_interval; double table_expire_delay; @@ -156,9 +149,6 @@ RecordType* packet_type; double connection_status_update_interval; -StringVal* state_dir; -double state_write_delay; - int orig_addr_anonymization, resp_addr_anonymization; int other_addr_anonymization; TableVal* preserve_orig_addr; @@ -166,23 +156,12 @@ TableVal* preserve_resp_addr; TableVal* preserve_other_addr; int max_files_in_cache; -double log_rotate_interval; -double log_max_size; RecordType* rotate_info; -StringVal* log_encryption_key; StringVal* log_rotate_base_time; StringVal* peer_description; -RecordType* peer; -int forward_remote_state_changes; -int forward_remote_events; -int remote_check_sync_consistency; bro_uint_t chunked_io_buffer_soft_cap; -StringVal* ssl_ca_certificate; -StringVal* ssl_private_key; -StringVal* ssl_passphrase; - Val* profiling_file; double profiling_interval; int expensive_profiling_multiple; @@ -199,8 +178,6 @@ int packet_filter_default; int sig_max_group_size; -int enable_syslog; - TableType* irc_join_list; RecordType* irc_join_info; TableVal* irc_servers; @@ -212,9 +189,6 @@ int dpd_ignore_ports; TableVal* likely_server_ports; -double remote_trace_sync_interval; -int remote_trace_sync_peers; - int check_for_unused_event_handlers; int dump_used_event_handlers; @@ -255,34 +229,17 @@ void init_general_global_var() table_expire_delay = opt_internal_double("table_expire_delay"); table_incremental_step = opt_internal_int("table_incremental_step"); - state_dir = internal_val("state_dir")->AsStringVal(); - state_write_delay = opt_internal_double("state_write_delay"); - max_files_in_cache = opt_internal_int("max_files_in_cache"); - log_rotate_interval = opt_internal_double("log_rotate_interval"); - log_max_size = opt_internal_double("log_max_size"); rotate_info = internal_type("rotate_info")->AsRecordType(); - log_encryption_key = opt_internal_string("log_encryption_key"); log_rotate_base_time = opt_internal_string("log_rotate_base_time"); peer_description = internal_val("peer_description")->AsStringVal(); - peer = internal_type("event_peer")->AsRecordType(); - forward_remote_state_changes = - opt_internal_int("forward_remote_state_changes"); - forward_remote_events = opt_internal_int("forward_remote_events"); - remote_check_sync_consistency = - opt_internal_int("remote_check_sync_consistency"); chunked_io_buffer_soft_cap = opt_internal_unsigned("chunked_io_buffer_soft_cap"); - ssl_ca_certificate = internal_val("ssl_ca_certificate")->AsStringVal(); - ssl_private_key = internal_val("ssl_private_key")->AsStringVal(); - ssl_passphrase = internal_val("ssl_passphrase")->AsStringVal(); - packet_filter_default = opt_internal_int("packet_filter_default"); 
sig_max_group_size = opt_internal_int("sig_max_group_size"); - enable_syslog = opt_internal_int("enable_syslog"); check_for_unused_event_handlers = opt_internal_int("check_for_unused_event_handlers"); @@ -389,14 +346,11 @@ void init_net_var() bool(internal_val("udp_content_deliver_all_resp")->AsBool()); dns_session_timeout = opt_internal_double("dns_session_timeout"); - ntp_session_timeout = opt_internal_double("ntp_session_timeout"); rpc_timeout = opt_internal_double("rpc_timeout"); watchdog_interval = int(opt_internal_double("watchdog_interval")); max_timer_expires = opt_internal_int("max_timer_expires"); - max_remote_events_processed = - opt_internal_int("max_remote_events_processed"); skip_authentication = internal_list_val("skip_authentication"); direct_login_prompts = internal_list_val("direct_login_prompts"); @@ -421,8 +375,6 @@ void init_net_var() pm_port_request = internal_type("pm_port_request")->AsRecordType(); pm_callit_request = internal_type("pm_callit_request")->AsRecordType(); - ntp_msg = internal_type("ntp_msg")->AsRecordType(); - geo_location = internal_type("geo_location")->AsRecordType(); entropy_test_result = internal_type("entropy_test_result")->AsRecordType(); @@ -462,10 +414,6 @@ void init_net_var() software = internal_type("software")->AsRecordType(); software_version = internal_type("software_version")->AsRecordType(); - OS_version = internal_type("OS_version")->AsRecordType(); - OS_version_inference = internal_type("OS_version_inference")->AsEnumType(); - generate_OS_version_event = - opt_internal_table("generate_OS_version_event"); packet_type = internal_type("packet")->AsRecordType(); @@ -498,10 +446,6 @@ void init_net_var() irc_join_list = internal_type("irc_join_list")->AsTableType(); irc_servers = internal_val("irc_servers")->AsTableVal(); - remote_trace_sync_interval = - opt_internal_double("remote_trace_sync_interval"); - remote_trace_sync_peers = opt_internal_int("remote_trace_sync_peers"); - dpd_reassemble_first_packets = opt_internal_int("dpd_reassemble_first_packets"); dpd_buffer_size = opt_internal_int("dpd_buffer_size"); diff --git a/src/NetVar.h b/src/NetVar.h index 1dee27f372..583589e5ff 100644 --- a/src/NetVar.h +++ b/src/NetVar.h @@ -33,7 +33,6 @@ extern RecordType* mime_match; extern int watchdog_interval; extern int max_timer_expires; -extern int max_remote_events_processed; extern int ignore_checksums; extern int partial_connection_ok; @@ -81,7 +80,6 @@ extern bool udp_content_deliver_all_orig; extern bool udp_content_deliver_all_resp; extern double dns_session_timeout; -extern double ntp_session_timeout; extern double rpc_timeout; extern ListVal* skip_authentication; @@ -107,8 +105,6 @@ extern TableType* pm_mappings; extern RecordType* pm_port_request; extern RecordType* pm_callit_request; -extern RecordType* ntp_msg; - extern RecordType* geo_location; extern RecordType* entropy_test_result; @@ -147,9 +143,6 @@ extern RecordType* backdoor_endp_stats; extern RecordType* software; extern RecordType* software_version; -extern RecordType* OS_version; -extern EnumType* OS_version_inference; -extern TableVal* generate_OS_version_event; extern double table_expire_interval; extern double table_expire_delay; @@ -165,27 +158,13 @@ extern TableVal* preserve_other_addr; extern double connection_status_update_interval; -extern StringVal* state_dir; -extern double state_write_delay; - extern int max_files_in_cache; -extern double log_rotate_interval; -extern double log_max_size; extern RecordType* rotate_info; -extern StringVal* log_encryption_key; extern 
StringVal* log_rotate_base_time; extern StringVal* peer_description; -extern RecordType* peer; -extern int forward_remote_state_changes; -extern int forward_remote_events; -extern int remote_check_sync_consistency; extern bro_uint_t chunked_io_buffer_soft_cap; -extern StringVal* ssl_ca_certificate; -extern StringVal* ssl_private_key; -extern StringVal* ssl_passphrase; - extern Val* profiling_file; extern double profiling_interval; extern int expensive_profiling_multiple; @@ -201,8 +180,6 @@ extern int packet_filter_default; extern int sig_max_group_size; -extern int enable_syslog; - extern TableType* irc_join_list; extern RecordType* irc_join_info; extern TableVal* irc_servers; @@ -214,9 +191,6 @@ extern int dpd_ignore_ports; extern TableVal* likely_server_ports; -extern double remote_trace_sync_interval; -extern int remote_trace_sync_peers; - extern int check_for_unused_event_handlers; extern int dump_used_event_handlers; diff --git a/src/Notifier.cc b/src/Notifier.cc new file mode 100644 index 0000000000..511eb33beb --- /dev/null +++ b/src/Notifier.cc @@ -0,0 +1,72 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#include "DebugLogger.h" +#include "Notifier.h" + +notifier::Registry notifier::registry; + +notifier::Receiver::Receiver() + { + DBG_LOG(DBG_NOTIFIERS, "creating receiver %p", this); + } + +notifier::Receiver::~Receiver() + { + DBG_LOG(DBG_NOTIFIERS, "deleting receiver %p", this); + } + +notifier::Registry::~Registry() + { + while ( registrations.begin() != registrations.end() ) + Unregister(registrations.begin()->first); + } + +void notifier::Registry::Register(Modifiable* m, notifier::Receiver* r) + { + DBG_LOG(DBG_NOTIFIERS, "registering object %p for receiver %p", m, r); + + registrations.insert({m, r}); + ++m->num_receivers; + } + +void notifier::Registry::Unregister(Modifiable* m, notifier::Receiver* r) + { + DBG_LOG(DBG_NOTIFIERS, "unregistering object %p from receiver %p", m, r); + + auto x = registrations.equal_range(m); + for ( auto i = x.first; i != x.second; i++ ) + { + if ( i->second == r ) + { + --i->first->num_receivers; + registrations.erase(i); + break; + } + } + } + +void notifier::Registry::Unregister(Modifiable* m) + { + DBG_LOG(DBG_NOTIFIERS, "unregistering object %p from all notifiers", m); + + auto x = registrations.equal_range(m); + for ( auto i = x.first; i != x.second; i++ ) + --i->first->num_receivers; + + registrations.erase(x.first, x.second); + } + +void notifier::Registry::Modified(Modifiable* m) + { + DBG_LOG(DBG_NOTIFIERS, "object %p has been modified", m); + + auto x = registrations.equal_range(m); + for ( auto i = x.first; i != x.second; i++ ) + i->second->Modified(m); + } + +notifier::Modifiable::~Modifiable() + { + if ( num_receivers ) + registry.Unregister(this); + } diff --git a/src/Notifier.h b/src/Notifier.h new file mode 100644 index 0000000000..59ed506599 --- /dev/null +++ b/src/Notifier.h @@ -0,0 +1,116 @@ +// See the file "COPYING" in the main distribution directory for copyright. +// +// A notification framework to inform interested parties of modifications to +// selected global objects. To get notified about a change, derive a class +// from notifier::Receiver and register the interesting objects with the +// notification::Registry. + +#ifndef NOTIFIER_H +#define NOTIFIER_H + +#include +#include +#include + +#include "util.h" +#include "DebugLogger.h" + +namespace notifier { + +class Modifiable; + +/** Interface class for receivers of notifications. 
*/ +class Receiver { +public: + Receiver(); + virtual ~Receiver(); + + /** + * Callback executed when a register object has been modified. + * + * @param m object that was modified + */ + virtual void Modified(Modifiable* m) = 0; +}; + +/** Singleton class tracking all notification requests globally. */ +class Registry { +public: + ~Registry(); + + /** + * Registers a receiver to be informed when a modifiable object has + * changed. + * + * @param m object to track. Does not take ownership, but the object + * will automatically unregister itself on destruction. + * + * @param r receiver to notify on changes. Does not take ownershop, + * the receiver must remain valid as long as the registration stays + * in place. + */ + void Register(Modifiable* m, Receiver* r); + + /** + * Cancels a receiver's request to be informed about an object's + * modification. The arguments to the method must match what was + * originally registered. + * + * @param m object to no loger track. + * + * @param r receiver to no longer notify. + */ + void Unregister(Modifiable* m, Receiver* Receiver); + + /** + * Cancels any active receiver requests to be informed about a + * partilar object's modifications. + * + * @param m object to no loger track. + */ + void Unregister(Modifiable* m); + +private: + friend class Modifiable; + + // Inform all registered receivers of a modification to an object. + // Will be called from the object itself. + void Modified(Modifiable* m); + + typedef std::unordered_multimap ModifiableMap; + ModifiableMap registrations; +}; + +/** + * Singleton object tracking all global notification requests. + */ +extern Registry registry; + +/** + * Base class for objects that can trigger notifications to receivers when + * modified. + */ +class Modifiable { +public: + /** + * Calling this method signals to all registered receivers that the + * object has been modified. + */ + void Modified() + { + if ( num_receivers ) + registry.Modified(this); + } + +protected: + friend class Registry; + + virtual ~Modifiable(); + + // Number of currently registered receivers. + uint64 num_receivers = 0; +}; + +} + +#endif diff --git a/src/OSFinger.cc b/src/OSFinger.cc deleted file mode 100644 index df5f30b0cc..0000000000 --- a/src/OSFinger.cc +++ /dev/null @@ -1,689 +0,0 @@ -/* - Taken with permission from: - - p0f - passive OS fingerprinting (GNU LESSER GENERAL PUBLIC LICENSE) - ------------------------------------------------------------------- - - "If you sit down at a poker game and don't see a sucker, - get up. You're the sucker." - - (C) Copyright 2000-2003 by Michal Zalewski -*/ - -// To make it easier to upgrade this file to newer releases of p0f, -// it remains in the coding style used by p0f rather than Bro. - -#include "OSFinger.h" -#include "net_util.h" -#include "util.h" -#include "Var.h" -#include -#include -#include - - -void int_delete_func(void* v) - { - delete (int*) v; - } - - -// Initializes data structures for fingerprinting in the given mode. 
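[Illustrative sketch, assuming the headers shown; not code from the patch.] The new notifier framework above (src/Notifier.{h,cc}) replaces the StateAccess-based mechanism that src/ID.h previously pulled in. A minimal sketch of how a receiver might track a modifiable object such as a global ID; the class and function names here are invented, and in Zeek itself the when-statement trigger code plays this role:

    #include "ID.h"
    #include "Notifier.h"

    // Hypothetical receiver: notified whenever a registered object calls
    // Modified() on itself (as ID::SetVal() now does).
    class GlobalWatcher : public notifier::Receiver
        {
    public:
        void Modified(notifier::Modifiable* m) override
            {
            // React to the change, e.g. re-evaluate a condition that
            // depends on the watched global.
            }
        };

    static void watch(ID* global, GlobalWatcher* w)
        {
        // ID now derives from notifier::Modifiable, so it can be registered
        // directly; the registry takes ownership of neither argument.
        notifier::registry.Register(global, w);
        }

    static void unwatch(ID* global, GlobalWatcher* w)
        {
        notifier::registry.Unregister(global, w);
        }

Per the destructors above, a Modifiable unregisters itself from all receivers when destroyed, and the Registry drops any remaining registrations when it is torn down.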
-OSFingerprint::OSFingerprint(FingerprintMode arg_mode) - { - err = 0; - mode = arg_mode; - - sigcnt=gencnt=0; - problems=0; - char* fname; - - memset(sig, 0, sizeof(struct fp_entry)*MAXSIGS); - memset(bh, 0, sizeof(struct fp_entry*)*OSHSIZE); - - os_matches.SetDeleteFunc(int_delete_func); - - if (mode == SYN_FINGERPRINT_MODE) - { - fname = copy_string(internal_val("passive_fingerprint_file")->AsString()->CheckString()); - load_config(fname); - delete [] fname; - } - else if (mode == SYN_ACK_FINGERPRINT_MODE) - {//not yet supported - load_config("p0fsynack.sig"); - } - else if (mode == RST_FINGERPRINT_MODE) - {//not yet supported - load_config("p0frst.sig"); - } - else - { - Error("OS fingerprinting: unknown mode!"); - } -} - -bool OSFingerprint::CacheMatch(const IPAddr& addr, int id) - { - HashKey* key = addr.GetHashKey(); - int* pid = new int; - *pid=id; - int* prev = os_matches.Insert(key, pid); - bool ret = (prev ? *prev != id : 1); - if (prev) - delete prev; - delete key; - return ret; - } - - -// Determines whether the signature file had any collisions. -void OSFingerprint::collide(uint32 id) - { - uint32 i,j; - uint32 cur; - - if (sig[id].ttl % 32 && sig[id].ttl != 255 && sig[id].ttl % 30) - { - problems=1; - reporter->Warning("OS fingerprinting: [!] Unusual TTL (%d) for signature '%s %s' (line %d).", - sig[id].ttl,sig[id].os,sig[id].desc,sig[id].line); - } - - for (i=0;iWarning("OS fingerprinting: [!] Duplicate signature name: '%s %s' (line %d and %d).", - sig[i].os,sig[i].desc,sig[i].line,sig[id].line); - } - - /* If TTLs are sufficiently away from each other, the risk of - a collision is lower. */ - if (abs((int)sig[id].ttl - (int)sig[i].ttl) > 25) continue; - - if (sig[id].df ^ sig[i].df) continue; - if (sig[id].zero_stamp ^ sig[i].zero_stamp) continue; - - /* Zero means >= PACKET_BIG */ - if (sig[id].size) { if (sig[id].size ^ sig[i].size) continue; } - else if (sig[i].size < PACKET_BIG) continue; - - if (sig[id].optcnt ^ sig[i].optcnt) continue; - if (sig[id].quirks ^ sig[i].quirks) continue; - - switch (sig[id].wsize_mod) { - - case 0: /* Current: const */ - - cur=sig[id].wsize; - -do_const: - - switch (sig[i].wsize_mod) { - - case 0: /* Previous is also const */ - - /* A problem if values match */ - if (cur ^ sig[i].wsize) continue; - break; - - case MOD_CONST: /* Current: const, prev: modulo (or *) */ - - /* A problem if current value is a multiple of that modulo */ - if (cur % sig[i].wsize) continue; - break; - - case MOD_MSS: /* Current: const, prev: mod MSS */ - - if (sig[i].mss_mod || sig[i].wsize * - (sig[i].mss ? sig[i].mss : 1460 ) != (int) cur) - continue; - - break; - - case MOD_MTU: /* Current: const, prev: mod MTU */ - - if (sig[i].mss_mod || sig[i].wsize * ( - (sig[i].mss ? sig[i].mss : 1460 )+40) != (int) cur) - continue; - - break; - - } - - break; - - case 1: /* Current signature is modulo something */ - - /* A problem only if this modulo is a multiple of the - previous modulo */ - - if (sig[i].wsize_mod != MOD_CONST) continue; - if (sig[id].wsize % sig[i].wsize) continue; - - break; - - case MOD_MSS: /* Current is modulo MSS */ - - /* There's likely a problem only if the previous one is close - to '*'; we do not check known MTUs, because this particular - signature can be made with some uncommon MTUs in mind. The - problem would also appear if current signature has a fixed - MSS. */ - - if (sig[i].wsize_mod != MOD_CONST || sig[i].wsize >= 8) { - if (!sig[id].mss_mod) { - cur = (sig[id].mss ? 
sig[id].mss : 1460 ) * sig[id].wsize; - goto do_const; - } - continue; - } - - break; - - case MOD_MTU: /* Current is modulo MTU */ - - if (sig[i].wsize_mod != MOD_CONST || sig[i].wsize <= 8) { - if (!sig[id].mss_mod) { - cur = ( (sig[id].mss ? sig[id].mss : 1460 ) +40) * sig[id].wsize; - goto do_const; - } - continue; - } - - break; - - } - - /* Same for wsc */ - switch (sig[id].wsc_mod) { - - case 0: /* Current: const */ - - cur=sig[id].wsc; - - switch (sig[i].wsc_mod) { - - case 0: /* Previous is also const */ - - /* A problem if values match */ - if (cur ^ sig[i].wsc) continue; - break; - - case 1: /* Current: const, prev: modulo (or *) */ - - /* A problem if current value is a multiple of that modulo */ - if (cur % sig[i].wsc) continue; - break; - - } - - break; - - case MOD_CONST: /* Current signature is modulo something */ - - /* A problem only if this modulo is a multiple of the - previous modulo */ - - if (!sig[i].wsc_mod) continue; - if (sig[id].wsc % sig[i].wsc) continue; - - break; - - } - - /* Same for mss */ - switch (sig[id].mss_mod) { - - case 0: /* Current: const */ - - cur=sig[id].mss; - - switch (sig[i].mss_mod) { - - case 0: /* Previous is also const */ - - /* A problem if values match */ - if (cur ^ sig[i].mss) continue; - break; - - case 1: /* Current: const, prev: modulo (or *) */ - - /* A problem if current value is a multiple of that modulo */ - if (cur % sig[i].mss) continue; - break; - - } - - break; - - case MOD_CONST: /* Current signature is modulo something */ - - /* A problem only if this modulo is a multiple of the - previous modulo */ - - if (!sig[i].mss_mod) continue; - if ((sig[id].mss ? sig[id].mss : 1460 ) % - (sig[i].mss ? sig[i].mss : 1460 )) continue; - - break; - - } - - /* Now check option sequence */ - - for (j=0;jWarning("OS fingerprinting: [!] Signature '%s %s' (line %d)\n" - " is already covered by '%s %s' (line %d).", - sig[id].os,sig[id].desc,sig[id].line,sig[i].os,sig[i].desc, - sig[i].line); - -reloop: - ; - } - } - -// Loads a given file into to classes data structures. 
-void OSFingerprint::load_config(const char* file) - { - uint32 ln=0; - char buf[MAXLINE]; - char* p; - - FILE* c = open_file(find_file(file, bro_path(), "osf")); - - if (!c) - { - Error("Can't open OS passive fingerprinting signature file", file); - return; - } - sigcnt=0; //every time we read config we reset it to 0; - while ((p=fgets(buf,sizeof(buf),c))) - { - uint32 l; - - char obuf[MAXLINE],genre[MAXLINE],desc[MAXLINE],quirks[MAXLINE]; - char w[MAXLINE],sb[MAXLINE]; - char* gptr = genre; - uint32 t,d,s; - struct fp_entry* e; - - ln++; - - /* Remove leading and trailing blanks */ - while (isspace(*p)) p++; - l=strlen(p); - while (l && isspace(*(p+l-1))) *(p+(l--)-1)=0; - - /* Skip empty lines and comments */ - if (!l) continue; - if (*p == '#') continue; - - if (sscanf(p,"%[0-9%*()ST]:%d:%d:%[0-9()*]:%[^:]:%[^ :]:%[^:]:%[^:]", - w, &t,&d,sb, obuf, quirks,genre,desc) != 8) - Error("OS fingerprinting: Syntax error in p0f signature config line %d.\n",(uint32)ln); - - gptr = genre; - - if (*sb != '*') s = atoi(sb); else s = 0; - -reparse_ptr: - - switch (*gptr) - { - case '-': sig[sigcnt].userland = 1; gptr++; goto reparse_ptr; - case '*': sig[sigcnt].no_detail = 1; gptr++; goto reparse_ptr; - case '@': sig[sigcnt].generic = 1; gptr++; gencnt++; goto reparse_ptr; - case 0: Error("OS fingerprinting: Empty OS genre in line",(uint32)ln); - } - - sig[sigcnt].os = strdup(gptr); - sig[sigcnt].desc = strdup(desc); - sig[sigcnt].ttl = t; - sig[sigcnt].size = s; - sig[sigcnt].df = d; - - if (w[0] == '*') - { - sig[sigcnt].wsize = 1; - sig[sigcnt].wsize_mod = MOD_CONST; - } - else if (tolower(w[0]) == 's') - { - sig[sigcnt].wsize_mod = MOD_MSS; - if (!isdigit(*(w+1))) - Error("OS fingerprinting: Bad Snn value in WSS in line",(uint32)ln); - sig[sigcnt].wsize = atoi(w+1); - } - else if (tolower(w[0]) == 't') - { - sig[sigcnt].wsize_mod = MOD_MTU; - if (!isdigit(*(w+1))) - Error("OS fingerprinting: Bad Tnn value in WSS in line",(uint32)ln); - sig[sigcnt].wsize = atoi(w+1); - } - else if (w[0] == '%') - { - if (!(sig[sigcnt].wsize = atoi(w+1))) - Error("OS fingerprinting: Null modulo for window size in config line",(uint32)ln); - sig[sigcnt].wsize_mod = MOD_CONST; - } - else - sig[sigcnt].wsize = atoi(w); - - /* Now let's parse options */ - - p=obuf; - - sig[sigcnt].zero_stamp = 1; - - if (*p=='.') p++; - - while (*p) - { - uint8 optcnt = sig[sigcnt].optcnt; - switch (tolower(*p)) - { - case 'n': sig[sigcnt].opt[optcnt] = TCPOPT_NOP; - break; - - case 'e': sig[sigcnt].opt[optcnt] = TCPOPT_EOL; - if (*(p+1)) - Error("OS fingerprinting: EOL not the last option, line",(uint32)ln); - break; - - case 's': sig[sigcnt].opt[optcnt] = TCPOPT_SACK_PERMITTED; - break; - - case 't': sig[sigcnt].opt[optcnt] = TCPOPT_TIMESTAMP; - if (*(p+1)!='0') - { - sig[sigcnt].zero_stamp=0; - if (isdigit(*(p+1))) - Error("OS fingerprinting: Bogus Tstamp specification in line",(uint32)ln); - } - break; - - case 'w': sig[sigcnt].opt[optcnt] = TCPOPT_WINDOW; - if (p[1] == '*') - { - sig[sigcnt].wsc = 1; - sig[sigcnt].wsc_mod = MOD_CONST; - } - else if (p[1] == '%') - { - if (!(sig[sigcnt].wsc = atoi(p+2))) - Error("OS fingerprinting: Null modulo for wscale in config line",(uint32)ln); - sig[sigcnt].wsc_mod = MOD_CONST; - } - else if (!isdigit(*(p+1))) - Error("OS fingerprinting: Incorrect W value in line",(uint32)ln); - else sig[sigcnt].wsc = atoi(p+1); - break; - - case 'm': sig[sigcnt].opt[optcnt] = TCPOPT_MAXSEG; - if (p[1] == '*') - { - sig[sigcnt].mss = 1; - sig[sigcnt].mss_mod = MOD_CONST; - } - else if (p[1] == '%') - { - if 
(!(sig[sigcnt].mss = atoi(p+2))) - Error("OS fingerprinting: Null modulo for MSS in config line",(uint32)ln); - sig[sigcnt].mss_mod = MOD_CONST; - } - else if (!isdigit(*(p+1))) - Error("OS fingerprinting: Incorrect M value in line",(uint32)ln); - else sig[sigcnt].mss = atoi(p+1); - break; - - /* Yuck! */ - case '?': if (!isdigit(*(p+1))) - Error("OS fingerprinting: Bogus ?nn value in line",(uint32)ln); - else sig[sigcnt].opt[optcnt] = atoi(p+1); - break; - - default: Error("OS fingerprinting: Unknown TCP option in config line",(uint32)ln); - } - - if (++sig[sigcnt].optcnt >= MAXOPT) - Error("OS fingerprinting: Too many TCP options specified in config line",(uint32)ln); - - /* Skip separators */ - do { p++; } while (*p && !isalpha(*p) && *p != '?'); - - } - - sig[sigcnt].line = ln; - - p = quirks; - - while (*p) - switch (toupper(*(p++))) - { - case 'E': - Error("OS fingerprinting: Quirk 'E' is obsolete. Remove it, append E to the options. Line",(uint32)ln); - break; - - case 'K': - if ( mode != RST_FINGERPRINT_MODE ) - Error("OS fingerprinting: Quirk 'K' is valid only in RST+ (-R) mode (wrong config file?). Line",(uint32)ln); - sig[sigcnt].quirks |= QUIRK_RSTACK; - break; - - case 'Q': sig[sigcnt].quirks |= QUIRK_SEQEQ; break; - case '0': sig[sigcnt].quirks |= QUIRK_SEQ0; break; - case 'P': sig[sigcnt].quirks |= QUIRK_PAST; break; - case 'Z': sig[sigcnt].quirks |= QUIRK_ZEROID; break; - case 'I': sig[sigcnt].quirks |= QUIRK_IPOPT; break; - case 'U': sig[sigcnt].quirks |= QUIRK_URG; break; - case 'X': sig[sigcnt].quirks |= QUIRK_X2; break; - case 'A': sig[sigcnt].quirks |= QUIRK_ACK; break; - case 'T': sig[sigcnt].quirks |= QUIRK_T2; break; - case 'F': sig[sigcnt].quirks |= QUIRK_FLAGS; break; - case 'D': sig[sigcnt].quirks |= QUIRK_DATA; break; - case '!': sig[sigcnt].quirks |= QUIRK_BROKEN; break; - case '.': break; - default: Error("OS fingerprinting: Bad quirk in line",(uint32)ln); - } - - e = bh[SIGHASH(s,sig[sigcnt].optcnt,sig[sigcnt].quirks,d)]; - - if (!e) - { - bh[SIGHASH(s,sig[sigcnt].optcnt,sig[sigcnt].quirks,d)] = &sig[sigcnt]; - } - else - { - while (e->next) e = e->next; - e->next = &sig[sigcnt]; - } - - collide(sigcnt); - if (++sigcnt >= MAXSIGS) - Error("OS fingerprinting: Maximum signature count exceeded.\n"); - - } - - fclose(c); - - if (!sigcnt) - Error("OS fingerprinting: no signatures loaded from config file."); - - } - -// Does the actual match between the packet and the signature database. -// Modifies retval and contains OS Type and other useful information. -// Returns config-file line of the matching signature as id. -int OSFingerprint::FindMatch(struct os_type* retval, uint16 tot,uint8 df, - uint8 ttl,uint16 wss,uint8 ocnt,uint8* op, - uint16 mss,uint8 wsc,uint32 tstamp, - uint32 quirks,uint8 ecn) const - { - uint32 j; //used for counter in loops - struct fp_entry* p; - uint8 orig_df = df; - - struct fp_entry* fuzzy = 0; - uint8 fuzzy_now = 0; - int id = 0; //return value: 0 indicates no match. - - retval->os="UNKNOWN"; - retval->desc=NULL; - retval->gadgets=0; - retval->match=0; - retval->uptime=0; - -re_lookup: - - p = bh[SIGHASH(tot,ocnt,quirks,df)]; - - while (p) - { - /* Cheap and specific checks first... 
*/ - /* psize set to zero means >= PACKET_BIG */ - if (p->size) { if (tot ^ p->size) { p = p->next; continue; } } - else if (tot < PACKET_BIG) { p = p->next; continue; } - - if (ocnt ^ p->optcnt) { p = p->next; continue; } - - if (p->zero_stamp ^ (!tstamp)) { p = p->next; continue; } - if (p->df ^ df) { p = p->next; continue; } - if (p->quirks ^ quirks) { p = p->next; continue; } - - /* Check MSS and WSCALE... */ - if (!p->mss_mod) { - if (mss ^ p->mss) { p = p->next; continue; } - } else if (mss % p->mss) { p = p->next; continue; } - - if (!p->wsc_mod) { - if (wsc ^ p->wsc) { p = p->next; continue; } - } else if (wsc % p->wsc) { p = p->next; continue; } - - /* Then proceed with the most complex WSS check... */ - switch (p->wsize_mod) - { - case 0: - if (wss ^ p->wsize) { p = p->next; continue; } - break; - case MOD_CONST: - if (wss % p->wsize) { p = p->next; continue; } - break; - case MOD_MSS: - if (mss && !(wss % mss)) - { - if ((wss / mss) ^ p->wsize) { p = p->next; continue; } - } - else if (!(wss % 1460)) - { - if ((wss / 1460) ^ p->wsize) { p = p->next; continue; } - } - else { p = p->next; continue; } - break; - case MOD_MTU: - if (mss && !(wss % (mss+40))) - { - if ((wss / (mss+40)) ^ p->wsize) { p = p->next; continue; } - } - else if (!(wss % 1500)) - { - if ((wss / 1500) ^ p->wsize) { p = p->next; continue; } - } - else { p = p->next; continue; } - break; - } - - /* Numbers agree. Let's check options */ - for (j=0;jopt[j] ^ op[j]) goto continue_search; - - /* Check TTLs last because we might want to go fuzzy. */ - if (p->ttl < ttl) - { - if ( mode != RST_FINGERPRINT_MODE )fuzzy = p; - p = p->next; - continue; - } - - /* Naah... can't happen ;-) */ - if (!p->no_detail) - if (p->ttl - ttl > MAXDIST) - { - if (mode != RST_FINGERPRINT_MODE ) fuzzy = p; - p = p->next; - continue; - } - -continue_fuzzy: - - /* Match! */ - id = p->line; - if (mss & wss) - { - if (p->wsize_mod == MOD_MSS) - { - if ((wss % mss) && !(wss % 1460)) retval->gadgets|=GADGETNAT; - } - else if (p->wsize_mod == MOD_MTU) - { - if ((wss % (mss+40)) && !(wss % 1500)) retval->gadgets|=GADGETNAT2; - } - } - - retval->os=p->os; - retval->desc=p->desc; - retval->dist=p->ttl-ttl; - - if (ecn) retval->gadgets|=GADGETECN; - if (orig_df ^ df) retval->gadgets|=GADGETFIREWALL; - - if (p->generic) retval->match=MATCHGENERIC; - if (fuzzy_now) retval->match=MATCHFUZZY; - - if (!p->no_detail && tstamp) - { - retval->uptime=tstamp/360000; - retval->gadgets|=GADGETUPTIME; - } - - return id; - -continue_search: - - p = p->next; - - } - - if (!df) { df = 1; goto re_lookup; } //not found with df=0 do df=1 - - if (fuzzy) - { - df = orig_df; - fuzzy_now = 1; - p = fuzzy; - fuzzy = 0; - goto continue_fuzzy; - } - - if (mss & wss) - { - if ((wss % mss) && !(wss % 1460)) retval->gadgets|=GADGETNAT; - else if ((wss % (mss+40)) && !(wss % 1500)) retval->gadgets|=GADGETNAT2; - } - - if (ecn) retval->gadgets|=GADGETECN; - - if (tstamp) - { - retval->uptime=tstamp/360000; - retval->gadgets|=GADGETUPTIME; - } - - return id; - } diff --git a/src/OSFinger.h b/src/OSFinger.h deleted file mode 100644 index b7c731900c..0000000000 --- a/src/OSFinger.h +++ /dev/null @@ -1,161 +0,0 @@ -// Taken with permission from: -// -// p0f - passive OS fingerprinting (GNU LESSER GENERAL PUBLIC LICENSE) -// ------------------------------------------------------------------- -// -// "If you sit down at a poker game and don't see a sucker, -// get up. You're the sucker." 
-// -// (C) Copyright 2000-2003 by Michal Zalewski - -#ifndef osfinger_h -#define osfinger_h - -#include "util.h" -#include "Dict.h" -#include "Reporter.h" -#include "IPAddr.h" - -// Size limit for size wildcards. -#define PACKET_BIG 100 - -// Maximum number of signatures allowed in the config file. -#define MAXSIGS 1024 - -// Max signature line length. -#define MAXLINE 1024 - -// Maximum distance from a host to be taken seriously. Between 35 and 64 -// is sane. Making it too high might result in some (very rare) false -// positives, too low will result in needless UNKNOWNs. -#define MAXDIST 40 - -// Maximum number of TCP options. A TCP packet can have at most 64 bytes -// of header, 20 of which are non-options. Thus, if a single option -// consumes 1 bytes (the minimum, there can only be 44 bytes of options. -// We err on the safe side. -#define MAXOPT 64 - -declare(PDict,int); - -struct os_type { - const char* os; - char* desc; - uint8 dist; - uint16 gadgets; - uint16 match; - uint32 uptime; -}; - -struct fp_entry { - struct fp_entry* next; - char* os; // OS genre - char* desc; // OS description - uint8 no_detail; // disable guesstimates - uint8 generic; // generic hit - uint8 userland; // userland stack - uint16 wsize; // window size - uint8 wsize_mod; // MOD_* for wsize - uint8 ttl; // TTL - uint8 df; // don't fragment bit - uint8 zero_stamp; // timestamp option but zero value? - uint16 size; // packet size - uint8 optcnt; // option count - uint8 opt[MAXOPT]; // TCPOPT_* - uint16 wsc; // window scaling option - uint16 mss; // MSS option - uint8 wsc_mod; // modulo for WSCALE (NONE or CONST) - uint8 mss_mod; // modulo for MSS (NONE or CONST) - uint32 quirks; // packet quirks and bugs - uint32 line; // config file line -}; - -struct mtu_def { - uint16 mtu; - char* dev; -}; - -enum FingerprintMode { - SYN_FINGERPRINT_MODE, SYN_ACK_FINGERPRINT_MODE, RST_FINGERPRINT_MODE, -}; - -class OSFingerprint { -public: - explicit OSFingerprint(FingerprintMode mode); - ~OSFingerprint() {} - - bool Error() const { return err; } - - int FindMatch(struct os_type* retval, uint16 tot, uint8 DF_flag, - uint8 TTL, uint16 WSS, uint8 ocnt, uint8* op, uint16 MSS, - uint8 win_scale, uint32 tstamp, uint32 quirks, uint8 ECN) const; - bool CacheMatch(const IPAddr& addr, int id); - void load_config(const char* file); - -protected: - void collide(uint32 id); - - void Error(const char* msg) - { - reporter->Error("%s", msg); - err = true; - } - - void Error(const char* msg, int n) - { - reporter->Error(msg, n); - err = true; - } - - void Error(const char* msg, const char* s) - { - reporter->Error(msg, s); - err = true; - } - -private: - bool err; // if true, a fatal error has occurred - unsigned int mode; - uint32 sigcnt, gencnt; - uint8 problems; - struct fp_entry sig[MAXSIGS]; - - /* By hash */ -#define OSHSIZE 16 - struct fp_entry* bh[OSHSIZE]; - - PDict(int) os_matches; -}; - -#define SIGHASH(tsize, optcnt, q, df) \ - ((uint8(((tsize) << 1) ^ ((optcnt) << 1) ^ (df) ^ (q) )) & 0x0f) - -#define MOD_NONE 0 -#define MOD_CONST 1 -#define MOD_MSS 2 -#define MOD_MTU 3 - -#define QUIRK_PAST 0x1 /* P */ -#define QUIRK_ZEROID 0x2 /* Z */ -#define QUIRK_IPOPT 0x4 /* I */ -#define QUIRK_URG 0x8 /* U */ -#define QUIRK_X2 0x10 /* X */ -#define QUIRK_ACK 0x20 /* A */ -#define QUIRK_T2 0x40 /* T */ -#define QUIRK_FLAGS 0x80 /* F */ -#define QUIRK_DATA 0x100 /* D */ -#define QUIRK_BROKEN 0x200 /* ! 
*/ -#define QUIRK_RSTACK 0x400 /* K */ -#define QUIRK_SEQEQ 0x800 /* Q */ -#define QUIRK_SEQ0 0x1000 /* 0 */ - -#define GADGETNAT 0x1 -#define GADGETNAT2 0x2 -#define GADGETFIREWALL 0x4 -#define GADGETECN 0x8 -#define GADGETUPTIME 0x10 - -#define MATCHGENERIC 0x1 -#define MATCHFUZZY 0x2 - -#endif diff --git a/src/Obj.cc b/src/Obj.cc index 023fa0d237..c2e47f50b8 100644 --- a/src/Obj.cc +++ b/src/Obj.cc @@ -1,11 +1,10 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include #include "Obj.h" -#include "Serializer.h" #include "Func.h" #include "File.h" #include "plugin/Manager.h" @@ -14,47 +13,6 @@ Location no_location("", 0, 0, 0, 0); Location start_location("", 0, 0, 0, 0); Location end_location("", 0, 0, 0, 0); -bool Location::Serialize(SerialInfo* info) const - { - return SerialObj::Serialize(info); - } - -Location* Location::Unserialize(UnserialInfo* info) - { - return (Location*) SerialObj::Unserialize(info, SER_LOCATION); - } - -IMPLEMENT_SERIAL(Location, SER_LOCATION); - -bool Location::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_LOCATION, SerialObj); - info->s->WriteOpenTag("Location"); - - if ( ! (SERIALIZE(filename) && - SERIALIZE(first_line) && - SERIALIZE(last_line) && - SERIALIZE(first_column) && - SERIALIZE(last_column)) ) - return false; - - info->s->WriteCloseTag("Location"); - return true; - } - -bool Location::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(SerialObj); - - delete_data = true; - - return UNSERIALIZE_STR(&filename, 0) - && UNSERIALIZE(&first_line) - && UNSERIALIZE(&last_line) - && UNSERIALIZE(&first_column) - && UNSERIALIZE(&last_column); - } - void Location::Describe(ODesc* d) const { if ( filename ) @@ -100,21 +58,21 @@ BroObj::~BroObj() delete location; } -void BroObj::Warn(const char* msg, const BroObj* obj2, int pinpoint_only) const +void BroObj::Warn(const char* msg, const BroObj* obj2, int pinpoint_only, const Location* expr_location) const { ODesc d; - DoMsg(&d, msg, obj2, pinpoint_only); + DoMsg(&d, msg, obj2, pinpoint_only, expr_location); reporter->Warning("%s", d.Description()); reporter->PopLocation(); } -void BroObj::Error(const char* msg, const BroObj* obj2, int pinpoint_only) const +void BroObj::Error(const char* msg, const BroObj* obj2, int pinpoint_only, const Location* expr_location) const { if ( suppress_errors ) return; ODesc d; - DoMsg(&d, msg, obj2, pinpoint_only); + DoMsg(&d, msg, obj2, pinpoint_only, expr_location); reporter->Error("%s", d.Description()); reporter->PopLocation(); } @@ -200,7 +158,7 @@ void BroObj::UpdateLocationEndInfo(const Location& end) } void BroObj::DoMsg(ODesc* d, const char s1[], const BroObj* obj2, - int pinpoint_only) const + int pinpoint_only, const Location* expr_location) const { d->SetShort(); @@ -211,6 +169,8 @@ void BroObj::DoMsg(ODesc* d, const char s1[], const BroObj* obj2, if ( obj2 && obj2->GetLocationInfo() != &no_location && *obj2->GetLocationInfo() != *GetLocationInfo() ) loc2 = obj2->GetLocationInfo(); + else if ( expr_location ) + loc2 = expr_location; reporter->PushLocation(GetLocationInfo(), loc2); } @@ -228,29 +188,6 @@ void BroObj::PinPoint(ODesc* d, const BroObj* obj2, int pinpoint_only) const d->Add(")"); } -bool BroObj::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_BRO_OBJ, SerialObj); - - info->s->WriteOpenTag("Object"); - - Location* loc = info->include_locations ? 
location : 0; - SERIALIZE_OPTIONAL(loc); - info->s->WriteCloseTag("Object"); - - return true; - } - -bool BroObj::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(SerialObj); - - delete location; - - UNSERIALIZE_OPTIONAL(location, Location::Unserialize(info)); - return true; - } - void print(const BroObj* obj) { static BroFile fstderr(stderr); diff --git a/src/Obj.h b/src/Obj.h index 047eec0856..78f8d018d3 100644 --- a/src/Obj.h +++ b/src/Obj.h @@ -7,12 +7,8 @@ #include "input.h" #include "Desc.h" -#include "SerialObj.h" -class Serializer; -class SerialInfo; - -class Location : SerialObj { +class Location { public: Location(const char* fname, int line_f, int line_l, int col_f, int col_l) { @@ -36,7 +32,7 @@ public: text = 0; } - ~Location() override + virtual ~Location() { if ( delete_data ) delete [] filename; @@ -44,9 +40,6 @@ public: void Describe(ODesc* d) const; - bool Serialize(SerialInfo* info) const; - static Location* Unserialize(UnserialInfo* info); - bool operator==(const Location& l) const; bool operator!=(const Location& l) const { return ! (*this == l); } @@ -59,8 +52,6 @@ public: // Timestamp and text for compatibility with Bison's default yyltype. int timestamp; char* text; -protected: - DECLARE_SERIAL(Location); }; #define YYLTYPE yyltype @@ -86,7 +77,7 @@ inline void set_location(const Location start, const Location end) end_location = end; } -class BroObj : public SerialObj { +class BroObj { public: BroObj() { @@ -112,15 +103,15 @@ public: SetLocationInfo(&start_location, &end_location); } - ~BroObj() override; + virtual ~BroObj(); // Report user warnings/errors. If obj2 is given, then it's // included in the message, though if pinpoint_only is non-zero, // then obj2 is only used to pinpoint the location. void Warn(const char* msg, const BroObj* obj2 = 0, - int pinpoint_only = 0) const; + int pinpoint_only = 0, const Location* expr_location = 0) const; void Error(const char* msg, const BroObj* obj2 = 0, - int pinpoint_only = 0) const; + int pinpoint_only = 0, const Location* expr_location = 0) const; // Report internal errors. void BadTag(const char* msg, const char* t1 = 0, @@ -168,17 +159,13 @@ public: bool in_ser_cache; protected: - friend class SerializationCache; - - DECLARE_ABSTRACT_SERIAL(BroObj); - Location* location; // all that matters in real estate private: friend class SuppressErrors; void DoMsg(ODesc* d, const char s1[], const BroObj* obj2 = 0, - int pinpoint_only = 0) const; + int pinpoint_only = 0, const Location* expr_location = 0) const; void PinPoint(ODesc* d, const BroObj* obj2 = 0, int pinpoint_only = 0) const; diff --git a/src/Op.h b/src/Op.h deleted file mode 100644 index a628a6bb68..0000000000 --- a/src/Op.h +++ /dev/null @@ -1,23 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. - -#ifndef op_h -#define op_h - -// BRO operations. 
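The new expr_location parameter added to BroObj::Warn() and BroObj::Error() above is forwarded to DoMsg(), which uses it as the secondary reporter location whenever obj2 does not contribute one. A minimal call-site sketch with hypothetical names (attrs and init_expr are stand-ins for whatever objects a real caller has in scope; they are not part of this patch):

    // Hypothetical caller: warn about use of a deprecated identifier and
    // pinpoint the initializer expression that triggered the warning.
    // "attrs" is the BroObj being diagnosed; "init_expr" only supplies the
    // extra Location passed through to the reporter.
    attrs->Warn("use of deprecated identifier", nullptr, 0,
                init_expr->GetLocationInfo());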
- -typedef enum { - OP_INCR, OP_DECR, OP_NOT, OP_NEGATE, - OP_PLUS, OP_MINUS, OP_TIMES, OP_DIVIDE, OP_MOD, - OP_AND, OP_OR, - OP_LT, OP_LE, OP_EQ, OP_NE, OP_GE, OP_GT, - OP_MATCH, - OP_ASSIGN, - OP_INDEX, OP_FIELD, - OP_IN, - OP_LIST, - OP_CALL, - OP_SCHED, - OP_NAME, OP_CONST, OP_THIS -} BroOP; - -#endif diff --git a/src/OpaqueVal.cc b/src/OpaqueVal.cc index ce25ea5475..d372f525a5 100644 --- a/src/OpaqueVal.cc +++ b/src/OpaqueVal.cc @@ -3,10 +3,153 @@ #include "OpaqueVal.h" #include "NetVar.h" #include "Reporter.h" -#include "Serializer.h" #include "probabilistic/BloomFilter.h" #include "probabilistic/CardinalityCounter.h" +#include + +// Helper to retrieve a broker value out of a broker::vector at a specified +// index, and casted to the expected destination type. +template +inline bool get_vector_idx(const V& v, unsigned int i, D* dst) + { + if ( i >= v.size() ) + return false; + + auto x = caf::get_if(&v[i]); + if ( ! x ) + return false; + + *dst = static_cast(*x); + return true; + } + +OpaqueMgr* OpaqueMgr::mgr() + { + static OpaqueMgr mgr; + return &mgr; + } + +OpaqueVal::OpaqueVal(OpaqueType* t) : Val(t) + { + } + +OpaqueVal::~OpaqueVal() + { + } + +const std::string& OpaqueMgr::TypeID(const OpaqueVal* v) const + { + auto x = _types.find(v->OpaqueName()); + + if ( x == _types.end() ) + reporter->InternalError("OpaqueMgr::TypeID: opaque type %s not registered", + v->OpaqueName()); + + return x->first; + } + +OpaqueVal* OpaqueMgr::Instantiate(const std::string& id) const + { + auto x = _types.find(id); + return x != _types.end() ? (*x->second)() : nullptr; + } + +broker::expected OpaqueVal::Serialize() const + { + auto type = OpaqueMgr::mgr()->TypeID(this); + + auto d = DoSerialize(); + if ( ! d ) + return d.error(); + + return {broker::vector{std::move(type), std::move(*d)}}; + } + +OpaqueVal* OpaqueVal::Unserialize(const broker::data& data) + { + auto v = caf::get_if(&data); + + if ( ! (v && v->size() == 2) ) + return nullptr; + + auto type = caf::get_if(&(*v)[0]); + if ( ! type ) + return nullptr; + + auto val = OpaqueMgr::mgr()->Instantiate(*type); + if ( ! val ) + return nullptr; + + if ( ! val->DoUnserialize((*v)[1]) ) + { + Unref(val); + return nullptr; + } + + return val; + } + +broker::expected OpaqueVal::SerializeType(BroType* t) + { + if ( t->InternalType() == TYPE_INTERNAL_ERROR ) + return broker::ec::invalid_data; + + if ( t->InternalType() == TYPE_INTERNAL_OTHER ) + { + // Serialize by name. + assert(t->GetName().size()); + return {broker::vector{true, t->GetName()}}; + } + + // A base type. + return {broker::vector{false, static_cast(t->Tag())}}; + } + +BroType* OpaqueVal::UnserializeType(const broker::data& data) + { + auto v = caf::get_if(&data); + if ( ! (v && v->size() == 2) ) + return nullptr; + + auto by_name = caf::get_if(&(*v)[0]); + if ( ! by_name ) + return nullptr; + + if ( *by_name ) + { + auto name = caf::get_if(&(*v)[1]); + if ( ! name ) + return nullptr; + + ID* id = global_scope()->Lookup(name->c_str()); + if ( ! id ) + return nullptr; + + BroType* t = id->AsType(); + if ( ! t ) + return nullptr; + + return t->Ref(); + } + + auto tag = caf::get_if(&(*v)[1]); + if ( ! tag ) + return nullptr; + + return base_type(static_cast(*tag)); + } + +Val* OpaqueVal::DoClone(CloneState* state) + { + auto d = OpaqueVal::Serialize(); + if ( ! 
d ) + return nullptr; + + auto rval = OpaqueVal::Unserialize(std::move(*d)); + return state->NewClone(this, rval); + } + bool HashVal::IsValid() const { return valid; @@ -63,20 +206,6 @@ HashVal::HashVal(OpaqueType* t) : OpaqueVal(t) valid = false; } -IMPLEMENT_SERIAL(HashVal, SER_HASH_VAL); - -bool HashVal::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_HASH_VAL, OpaqueVal); - return SERIALIZE(valid); - } - -bool HashVal::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(OpaqueVal); - return UNSERIALIZE(&valid); - } - MD5Val::MD5Val() : HashVal(md5_type) { } @@ -87,6 +216,19 @@ MD5Val::~MD5Val() EVP_MD_CTX_free(ctx); } +Val* MD5Val::DoClone(CloneState* state) + { + auto out = new MD5Val(); + if ( IsValid() ) + { + if ( ! out->Init() ) + return nullptr; + EVP_MD_CTX_copy_ex(out->ctx, ctx); + } + + return state->NewClone(this, out); + } + void MD5Val::digest(val_list& vlist, u_char result[MD5_DIGEST_LENGTH]) { EVP_MD_CTX* h = hash_init(Hash_MD5); @@ -147,64 +289,72 @@ StringVal* MD5Val::DoGet() return new StringVal(md5_digest_print(digest)); } -IMPLEMENT_SERIAL(MD5Val, SER_MD5_VAL); +IMPLEMENT_OPAQUE_VALUE(MD5Val) -bool MD5Val::DoSerialize(SerialInfo* info) const +broker::expected MD5Val::DoSerialize() const { - DO_SERIALIZE(SER_MD5_VAL, HashVal); - if ( ! IsValid() ) - return true; + return {broker::vector{false}}; MD5_CTX* md = (MD5_CTX*) EVP_MD_CTX_md_data(ctx); - if ( ! (SERIALIZE(md->A) && - SERIALIZE(md->B) && - SERIALIZE(md->C) && - SERIALIZE(md->D) && - SERIALIZE(md->Nl) && - SERIALIZE(md->Nh)) ) - return false; + broker::vector d = { + true, + static_cast(md->A), + static_cast(md->B), + static_cast(md->C), + static_cast(md->D), + static_cast(md->Nl), + static_cast(md->Nh), + static_cast(md->num) + }; for ( int i = 0; i < MD5_LBLOCK; ++i ) - { - if ( ! SERIALIZE(md->data[i]) ) - return false; - } + d.emplace_back(static_cast(md->data[i])); - if ( ! SERIALIZE(md->num) ) - return false; - - return true; + return {std::move(d)}; } -bool MD5Val::DoUnserialize(UnserialInfo* info) +bool MD5Val::DoUnserialize(const broker::data& data) { - DO_UNSERIALIZE(HashVal); + auto d = caf::get_if(&data); + if ( ! d ) + return false; - if ( ! IsValid() ) + auto valid = caf::get_if(&(*d)[0]); + if ( ! valid ) + return false; + + if ( ! *valid ) + { + assert(! IsValid()); // default set by ctor return true; + } - ctx = hash_init(Hash_MD5); + Init(); MD5_CTX* md = (MD5_CTX*) EVP_MD_CTX_md_data(ctx); - if ( ! (UNSERIALIZE(&md->A) && - UNSERIALIZE(&md->B) && - UNSERIALIZE(&md->C) && - UNSERIALIZE(&md->D) && - UNSERIALIZE(&md->Nl) && - UNSERIALIZE(&md->Nh)) ) + if ( ! get_vector_idx(*d, 1, &md->A) ) + return false; + if ( ! get_vector_idx(*d, 2, &md->B) ) + return false; + if ( ! get_vector_idx(*d, 3, &md->C) ) + return false; + if ( ! get_vector_idx(*d, 4, &md->D) ) + return false; + if ( ! get_vector_idx(*d, 5, &md->Nl) ) + return false; + if ( ! get_vector_idx(*d, 6, &md->Nh) ) + return false; + if ( ! get_vector_idx(*d, 7, &md->num) ) return false; for ( int i = 0; i < MD5_LBLOCK; ++i ) { - if ( ! UNSERIALIZE(&md->data[i]) ) + if ( ! get_vector_idx(*d, 8 + i, &md->data[i]) ) return false; } - if ( ! UNSERIALIZE(&md->num) ) - return false; - return true; } @@ -218,6 +368,19 @@ SHA1Val::~SHA1Val() EVP_MD_CTX_free(ctx); } +Val* SHA1Val::DoClone(CloneState* state) + { + auto out = new SHA1Val(); + if ( IsValid() ) + { + if ( ! 
out->Init() ) + return nullptr; + EVP_MD_CTX_copy_ex(out->ctx, ctx); + } + + return state->NewClone(this, out); + } + void SHA1Val::digest(val_list& vlist, u_char result[SHA_DIGEST_LENGTH]) { EVP_MD_CTX* h = hash_init(Hash_SHA1); @@ -267,66 +430,75 @@ StringVal* SHA1Val::DoGet() return new StringVal(sha1_digest_print(digest)); } -IMPLEMENT_SERIAL(SHA1Val, SER_SHA1_VAL); +IMPLEMENT_OPAQUE_VALUE(SHA1Val) -bool SHA1Val::DoSerialize(SerialInfo* info) const +broker::expected SHA1Val::DoSerialize() const { - DO_SERIALIZE(SER_SHA1_VAL, HashVal); - if ( ! IsValid() ) - return true; + return {broker::vector{false}}; SHA_CTX* md = (SHA_CTX*) EVP_MD_CTX_md_data(ctx); - if ( ! (SERIALIZE(md->h0) && - SERIALIZE(md->h1) && - SERIALIZE(md->h2) && - SERIALIZE(md->h3) && - SERIALIZE(md->h4) && - SERIALIZE(md->Nl) && - SERIALIZE(md->Nh)) ) - return false; + broker::vector d = { + true, + static_cast(md->h0), + static_cast(md->h1), + static_cast(md->h2), + static_cast(md->h3), + static_cast(md->h4), + static_cast(md->Nl), + static_cast(md->Nh), + static_cast(md->num) + }; for ( int i = 0; i < SHA_LBLOCK; ++i ) - { - if ( ! SERIALIZE(md->data[i]) ) - return false; - } + d.emplace_back(static_cast(md->data[i])); - if ( ! SERIALIZE(md->num) ) - return false; - - return true; + return {std::move(d)}; } -bool SHA1Val::DoUnserialize(UnserialInfo* info) +bool SHA1Val::DoUnserialize(const broker::data& data) { - DO_UNSERIALIZE(HashVal); + auto d = caf::get_if(&data); + if ( ! d ) + return false; - if ( ! IsValid() ) + auto valid = caf::get_if(&(*d)[0]); + if ( ! valid ) + return false; + + if ( ! *valid ) + { + assert(! IsValid()); // default set by ctor return true; + } - ctx = hash_init(Hash_SHA1); + Init(); SHA_CTX* md = (SHA_CTX*) EVP_MD_CTX_md_data(ctx); - if ( ! (UNSERIALIZE(&md->h0) && - UNSERIALIZE(&md->h1) && - UNSERIALIZE(&md->h2) && - UNSERIALIZE(&md->h3) && - UNSERIALIZE(&md->h4) && - UNSERIALIZE(&md->Nl) && - UNSERIALIZE(&md->Nh)) ) + if ( ! get_vector_idx(*d, 1, &md->h0) ) + return false; + if ( ! get_vector_idx(*d, 2, &md->h1) ) + return false; + if ( ! get_vector_idx(*d, 3, &md->h2) ) + return false; + if ( ! get_vector_idx(*d, 4, &md->h3) ) + return false; + if ( ! get_vector_idx(*d, 5, &md->h4) ) + return false; + if ( ! get_vector_idx(*d, 6, &md->Nl) ) + return false; + if ( ! get_vector_idx(*d, 7, &md->Nh) ) + return false; + if ( ! get_vector_idx(*d, 8, &md->num) ) return false; for ( int i = 0; i < SHA_LBLOCK; ++i ) { - if ( ! UNSERIALIZE(&md->data[i]) ) + if ( ! get_vector_idx(*d, 9 + i, &md->data[i]) ) return false; } - if ( ! UNSERIALIZE(&md->num) ) - return false; - return true; } @@ -340,6 +512,19 @@ SHA256Val::~SHA256Val() EVP_MD_CTX_free(ctx); } +Val* SHA256Val::DoClone(CloneState* state) + { + auto out = new SHA256Val(); + if ( IsValid() ) + { + if ( ! out->Init() ) + return nullptr; + EVP_MD_CTX_copy_ex(out->ctx, ctx); + } + + return state->NewClone(this, out); + } + void SHA256Val::digest(val_list& vlist, u_char result[SHA256_DIGEST_LENGTH]) { EVP_MD_CTX* h = hash_init(Hash_SHA256); @@ -389,71 +574,72 @@ StringVal* SHA256Val::DoGet() return new StringVal(sha256_digest_print(digest)); } -IMPLEMENT_SERIAL(SHA256Val, SER_SHA256_VAL); +IMPLEMENT_OPAQUE_VALUE(SHA256Val) -bool SHA256Val::DoSerialize(SerialInfo* info) const +broker::expected SHA256Val::DoSerialize() const { - DO_SERIALIZE(SER_SHA256_VAL, HashVal); - if ( ! IsValid() ) - return true; + return {broker::vector{false}}; SHA256_CTX* md = (SHA256_CTX*) EVP_MD_CTX_md_data(ctx); - for ( int i = 0; i < 8; ++i ) - { - if ( ! 
SERIALIZE(md->h[i]) ) - return false; - } + broker::vector d = { + true, + static_cast(md->Nl), + static_cast(md->Nh), + static_cast(md->num), + static_cast(md->md_len) + }; - if ( ! (SERIALIZE(md->Nl) && - SERIALIZE(md->Nh)) ) - return false; + for ( int i = 0; i < 8; ++i ) + d.emplace_back(static_cast(md->h[i])); for ( int i = 0; i < SHA_LBLOCK; ++i ) - { - if ( ! SERIALIZE(md->data[i]) ) - return false; - } + d.emplace_back(static_cast(md->data[i])); - if ( ! (SERIALIZE(md->num) && - SERIALIZE(md->md_len)) ) - return false; - - return true; + return {std::move(d)}; } -bool SHA256Val::DoUnserialize(UnserialInfo* info) +bool SHA256Val::DoUnserialize(const broker::data& data) { - DO_UNSERIALIZE(HashVal); + auto d = caf::get_if(&data); + if ( ! d ) + return false; - if ( ! IsValid() ) + auto valid = caf::get_if(&(*d)[0]); + if ( ! valid ) + return false; + + if ( ! *valid ) + { + assert(! IsValid()); // default set by ctor return true; + } - ctx = hash_init(Hash_SHA256); + Init(); SHA256_CTX* md = (SHA256_CTX*) EVP_MD_CTX_md_data(ctx); + if ( ! get_vector_idx(*d, 1, &md->Nl) ) + return false; + if ( ! get_vector_idx(*d, 2, &md->Nh) ) + return false; + if ( ! get_vector_idx(*d, 3, &md->num) ) + return false; + if ( ! get_vector_idx(*d, 4, &md->md_len) ) + return false; + for ( int i = 0; i < 8; ++i ) { - if ( ! UNSERIALIZE(&md->h[i]) ) + if ( ! get_vector_idx(*d, 5 + i, &md->h[i]) ) return false; } - if ( ! (UNSERIALIZE(&md->Nl) && - UNSERIALIZE(&md->Nh)) ) - return false; - for ( int i = 0; i < SHA_LBLOCK; ++i ) { - if ( ! UNSERIALIZE(&md->data[i]) ) + if ( ! get_vector_idx(*d, 13 + i, &md->data[i]) ) return false; } - - if ( ! (UNSERIALIZE(&md->num) && - UNSERIALIZE(&md->md_len)) ) - return false; - return true; } @@ -474,79 +660,86 @@ bool EntropyVal::Get(double *r_ent, double *r_chisq, double *r_mean, return true; } -IMPLEMENT_SERIAL(EntropyVal, SER_ENTROPY_VAL); +IMPLEMENT_OPAQUE_VALUE(EntropyVal) -bool EntropyVal::DoSerialize(SerialInfo* info) const +broker::expected EntropyVal::DoSerialize() const { - DO_SERIALIZE(SER_ENTROPY_VAL, OpaqueVal); + broker::vector d = + { + static_cast(state.totalc), + static_cast(state.mp), + static_cast(state.sccfirst), + static_cast(state.inmont), + static_cast(state.mcount), + static_cast(state.cexp), + static_cast(state.montex), + static_cast(state.montey), + static_cast(state.montepi), + static_cast(state.sccu0), + static_cast(state.scclast), + static_cast(state.scct1), + static_cast(state.scct2), + static_cast(state.scct3), + }; + + d.reserve(256 + 3 + RT_MONTEN + 11); for ( int i = 0; i < 256; ++i ) - { - if ( ! SERIALIZE(state.ccount[i]) ) - return false; - } + d.emplace_back(static_cast(state.ccount[i])); - if ( ! (SERIALIZE(state.totalc) && - SERIALIZE(state.mp) && - SERIALIZE(state.sccfirst)) ) - return false; + for ( int i = 0; i < RT_MONTEN; ++i ) + d.emplace_back(static_cast(state.monte[i])); - for ( int i = 0; i < RT_MONTEN; ++i ) - { - if ( ! SERIALIZE(state.monte[i]) ) - return false; - } - - if ( ! 
(SERIALIZE(state.inmont) && - SERIALIZE(state.mcount) && - SERIALIZE(state.cexp) && - SERIALIZE(state.montex) && - SERIALIZE(state.montey) && - SERIALIZE(state.montepi) && - SERIALIZE(state.sccu0) && - SERIALIZE(state.scclast) && - SERIALIZE(state.scct1) && - SERIALIZE(state.scct2) && - SERIALIZE(state.scct3)) ) - return false; - - return true; + return {std::move(d)}; } -bool EntropyVal::DoUnserialize(UnserialInfo* info) +bool EntropyVal::DoUnserialize(const broker::data& data) { - DO_UNSERIALIZE(OpaqueVal); + auto d = caf::get_if(&data); + if ( ! d ) + return false; + + if ( ! get_vector_idx(*d, 0, &state.totalc) ) + return false; + if ( ! get_vector_idx(*d, 1, &state.mp) ) + return false; + if ( ! get_vector_idx(*d, 2, &state.sccfirst) ) + return false; + if ( ! get_vector_idx(*d, 3, &state.inmont) ) + return false; + if ( ! get_vector_idx(*d, 4, &state.mcount) ) + return false; + if ( ! get_vector_idx(*d, 5, &state.cexp) ) + return false; + if ( ! get_vector_idx(*d, 6, &state.montex) ) + return false; + if ( ! get_vector_idx(*d, 7, &state.montey) ) + return false; + if ( ! get_vector_idx(*d, 8, &state.montepi) ) + return false; + if ( ! get_vector_idx(*d, 9, &state.sccu0) ) + return false; + if ( ! get_vector_idx(*d, 10, &state.scclast) ) + return false; + if ( ! get_vector_idx(*d, 11, &state.scct1) ) + return false; + if ( ! get_vector_idx(*d, 12, &state.scct2) ) + return false; + if ( ! get_vector_idx(*d, 13, &state.scct3) ) + return false; for ( int i = 0; i < 256; ++i ) { - if ( ! UNSERIALIZE(&state.ccount[i]) ) + if ( ! get_vector_idx(*d, 14 + i, &state.ccount[i]) ) return false; } - if ( ! (UNSERIALIZE(&state.totalc) && - UNSERIALIZE(&state.mp) && - UNSERIALIZE(&state.sccfirst)) ) - return false; - for ( int i = 0; i < RT_MONTEN; ++i ) { - if ( ! UNSERIALIZE(&state.monte[i]) ) + if ( ! get_vector_idx(*d, 14 + 256 + i, &state.monte[i]) ) return false; } - if ( ! (UNSERIALIZE(&state.inmont) && - UNSERIALIZE(&state.mcount) && - UNSERIALIZE(&state.cexp) && - UNSERIALIZE(&state.montex) && - UNSERIALIZE(&state.montey) && - UNSERIALIZE(&state.montepi) && - UNSERIALIZE(&state.sccu0) && - UNSERIALIZE(&state.scclast) && - UNSERIALIZE(&state.scct1) && - UNSERIALIZE(&state.scct2) && - UNSERIALIZE(&state.scct3)) ) - return false; - return true; } @@ -574,6 +767,18 @@ BloomFilterVal::BloomFilterVal(probabilistic::BloomFilter* bf) bloom_filter = bf; } +Val* BloomFilterVal::DoClone(CloneState* state) + { + if ( bloom_filter ) + { + auto bf = new BloomFilterVal(bloom_filter->Clone()); + bf->Typify(type); + return state->NewClone(this, bf); + } + + return state->NewClone(this, new BloomFilterVal()); + } + bool BloomFilterVal::Typify(BroType* arg_type) { if ( type ) @@ -668,42 +873,52 @@ BloomFilterVal::~BloomFilterVal() delete bloom_filter; } -IMPLEMENT_SERIAL(BloomFilterVal, SER_BLOOMFILTER_VAL); +IMPLEMENT_OPAQUE_VALUE(BloomFilterVal) -bool BloomFilterVal::DoSerialize(SerialInfo* info) const +broker::expected BloomFilterVal::DoSerialize() const { - DO_SERIALIZE(SER_BLOOMFILTER_VAL, OpaqueVal); + broker::vector d; - bool is_typed = (type != 0); + if ( type ) + { + auto t = SerializeType(type); + if ( ! t ) + return broker::ec::invalid_data; - if ( ! SERIALIZE(is_typed) ) - return false; + d.emplace_back(std::move(*t)); + } + else + d.emplace_back(broker::none()); - if ( is_typed && ! type->Serialize(info) ) - return false; + auto bf = bloom_filter->Serialize(); + if ( ! 
bf ) + return broker::ec::invalid_data; // Cannot serialize; - return bloom_filter->Serialize(info); + d.emplace_back(*bf); + return {std::move(d)}; } -bool BloomFilterVal::DoUnserialize(UnserialInfo* info) +bool BloomFilterVal::DoUnserialize(const broker::data& data) { - DO_UNSERIALIZE(OpaqueVal); + auto v = caf::get_if(&data); - bool is_typed; - if ( ! UNSERIALIZE(&is_typed) ) + if ( ! (v && v->size() == 2) ) return false; - if ( is_typed ) + auto no_type = caf::get_if(&(*v)[0]); + if ( ! no_type ) { - BroType* t = BroType::Unserialize(info); - if ( ! Typify(t) ) + BroType* t = UnserializeType((*v)[0]); + if ( ! (t && Typify(t)) ) return false; - - Unref(t); } - bloom_filter = probabilistic::BloomFilter::Unserialize(info); - return bloom_filter != 0; + auto bf = probabilistic::BloomFilter::Unserialize((*v)[1]); + if ( ! bf ) + return false; + + bloom_filter = bf.release(); + return true; } CardinalityVal::CardinalityVal() : OpaqueVal(cardinality_type) @@ -728,42 +943,10 @@ CardinalityVal::~CardinalityVal() delete hash; } -IMPLEMENT_SERIAL(CardinalityVal, SER_CARDINALITY_VAL); - -bool CardinalityVal::DoSerialize(SerialInfo* info) const +Val* CardinalityVal::DoClone(CloneState* state) { - DO_SERIALIZE(SER_CARDINALITY_VAL, OpaqueVal); - - bool valid = true; - bool is_typed = (type != 0); - - valid &= SERIALIZE(is_typed); - - if ( is_typed ) - valid &= type->Serialize(info); - - return c->Serialize(info); - } - -bool CardinalityVal::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(OpaqueVal); - - bool is_typed; - if ( ! UNSERIALIZE(&is_typed) ) - return false; - - if ( is_typed ) - { - BroType* t = BroType::Unserialize(info); - if ( ! Typify(t) ) - return false; - - Unref(t); - } - - c = probabilistic::CardinalityCounter::Unserialize(info); - return c != 0; + return state->NewClone(this, + new CardinalityVal(new probabilistic::CardinalityCounter(*c))); } bool CardinalityVal::Typify(BroType* arg_type) @@ -794,3 +977,135 @@ void CardinalityVal::Add(const Val* val) delete key; } +IMPLEMENT_OPAQUE_VALUE(CardinalityVal) + +broker::expected CardinalityVal::DoSerialize() const + { + broker::vector d; + + if ( type ) + { + auto t = SerializeType(type); + if ( ! t ) + return broker::ec::invalid_data; + + d.emplace_back(std::move(*t)); + } + else + d.emplace_back(broker::none()); + + auto cs = c->Serialize(); + if ( ! cs ) + return broker::ec::invalid_data; + + d.emplace_back(*cs); + return {std::move(d)}; + } + +bool CardinalityVal::DoUnserialize(const broker::data& data) + { + auto v = caf::get_if(&data); + + if ( ! (v && v->size() == 2) ) + return false; + + auto no_type = caf::get_if(&(*v)[0]); + if ( ! no_type ) + { + BroType* t = UnserializeType((*v)[0]); + if ( ! (t && Typify(t)) ) + return false; + } + + auto cu = probabilistic::CardinalityCounter::Unserialize((*v)[1]); + if ( ! 
cu ) + return false; + + c = cu.release(); + return true; + } + +ParaglobVal::ParaglobVal(std::unique_ptr p) +: OpaqueVal(paraglob_type) + { + this->internal_paraglob = std::move(p); + } + +VectorVal* ParaglobVal::Get(StringVal* &pattern) + { + VectorVal* rval = new VectorVal(internal_type("string_vec")->AsVectorType()); + std::string string_pattern (reinterpret_cast(pattern->Bytes()), pattern->Len()); + + std::vector matches = this->internal_paraglob->get(string_pattern); + for (unsigned int i = 0; i < matches.size(); i++) + rval->Assign(i, new StringVal(matches.at(i))); + + return rval; + } + +bool ParaglobVal::operator==(const ParaglobVal& other) const + { + return *(this->internal_paraglob) == *(other.internal_paraglob); + } + +IMPLEMENT_OPAQUE_VALUE(ParaglobVal) + +broker::expected ParaglobVal::DoSerialize() const + { + broker::vector d; + std::unique_ptr> iv = this->internal_paraglob->serialize(); + for (uint8_t a : *(iv.get())) + d.emplace_back(static_cast(a)); + return {std::move(d)}; + } + +bool ParaglobVal::DoUnserialize(const broker::data& data) + { + auto d = caf::get_if(&data); + if ( ! d ) + return false; + + std::unique_ptr> iv (new std::vector); + iv->resize(d->size()); + + for (std::vector::size_type i = 0; i < d->size(); ++i) + { + if ( ! get_vector_idx(*d, i, iv.get()->data() + i) ) + return false; + } + + try + { + this->internal_paraglob = build_unique(std::move(iv)); + } + catch (const paraglob::underflow_error& e) + { + reporter->Error("Paraglob underflow error -> %s", e.what()); + return false; + } + catch (const paraglob::overflow_error& e) + { + reporter->Error("Paraglob overflow error -> %s", e.what()); + return false; + } + + return true; + } + +Val* ParaglobVal::DoClone(CloneState* state) + { + try { + return new ParaglobVal + (build_unique(this->internal_paraglob->serialize())); + } + catch (const paraglob::underflow_error& e) + { + reporter->Error("Paraglob underflow error while cloning -> %s", e.what()); + return nullptr; + } + catch (const paraglob::overflow_error& e) + { + reporter->Error("Paraglob overflow error while cloning -> %s", e.what()); + return nullptr; + } + } diff --git a/src/OpaqueVal.h b/src/OpaqueVal.h index 89c7b2a8d2..ad9edc28ba 100644 --- a/src/OpaqueVal.h +++ b/src/OpaqueVal.h @@ -3,11 +3,154 @@ #ifndef OPAQUEVAL_H #define OPAQUEVAL_H -#include +#include +#include #include "RandTest.h" #include "Val.h" #include "digest.h" +#include "src/paraglob.h" + +class OpaqueVal; + +/** + * Singleton that registers all available all available types of opaque + * values. This faciliates their serialization into Broker values. + */ +class OpaqueMgr { +public: + using Factory = OpaqueVal* (); + + /** + * Return's a unique ID for the type of an opaque value. + * @param v opaque value to return type for; its class must have been + * registered with the manager, otherwise this method will abort + * execution. + * + * @return type ID, which can used with *Instantiate()* to create a + * new instance of the same type. + */ + const std::string& TypeID(const OpaqueVal* v) const; + + /** + * Instantiates a new opaque value of a specific opaque type. + * + * @param id unique type ID for the class to instantiate; this will + * normally have been returned earlier by *TypeID()*. + * + * @return A freshly instantiated value of the OpaqueVal-derived + * classes that *id* specifies, with reference count at +1. If *id* + * is unknown, this will return null. 
+ * + */ + OpaqueVal* Instantiate(const std::string& id) const; + + /** Returns the global manager singleton object. */ + static OpaqueMgr* mgr(); + + /** + * Internal helper class to register an OpaqueVal-derived classes + * with the manager. + */ + template + class Register { + public: + Register(const char* id) + { OpaqueMgr::mgr()->_types.emplace(id, &T::OpaqueInstantiate); } + }; + +private: + std::unordered_map _types; +}; + +/** Macro to insert into an OpaqueVal-derived class's declaration. */ +#define DECLARE_OPAQUE_VALUE(T) \ + friend class OpaqueMgr::Register; \ + broker::expected DoSerialize() const override; \ + bool DoUnserialize(const broker::data& data) override; \ + const char* OpaqueName() const override { return #T; } \ + static OpaqueVal* OpaqueInstantiate() { return new T(); } + +#define __OPAQUE_MERGE(a, b) a ## b +#define __OPAQUE_ID(x) __OPAQUE_MERGE(_opaque, x) + +/** Macro to insert into an OpaqueVal-derived class's implementation file. */ +#define IMPLEMENT_OPAQUE_VALUE(T) static OpaqueMgr::Register __OPAQUE_ID(__LINE__)(#T); + +/** + * Base class for all opaque values. Opaque values are types that are managed + * completely internally, with no further script-level operators provided + * (other than bif functions). See OpaqueVal.h for derived classes. + */ +class OpaqueVal : public Val { +public: + explicit OpaqueVal(OpaqueType* t); + ~OpaqueVal() override; + + /** + * Serializes the value into a Broker representation. + * + * @return the broker representation, or an error if serialization + * isn't supported or failed. + */ + broker::expected Serialize() const; + + /** + * Reinstantiates a value from its serialized Broker representation. + * + * @param data Broker representation as returned by *Serialize()*. + * @return unserialized instances with reference count at +1 + */ + static OpaqueVal* Unserialize(const broker::data& data); + +protected: + friend class Val; + friend class OpaqueMgr; + OpaqueVal() { } + + /** + * Must be overridden to provide a serialized version of the derived + * class' state. + * + * @return the serialized data or an error if serialization + * isn't supported or failed. + */ + virtual broker::expected DoSerialize() const = 0; + + /** + * Must be overridden to recreate the the derived class' state from a + * serialization. + * + * @return true if successful. + */ + virtual bool DoUnserialize(const broker::data& data) = 0; + + /** + * Internal helper for the serialization machinery. Automatically + * overridden by the `DECLARE_OPAQUE_VALUE` macro. + */ + virtual const char* OpaqueName() const = 0; + + /** + * Provides an implementation of *Val::DoClone()* that leverages the + * serialization methods to deep-copy an instance. Derived classes + * may also override this with a more efficient custom clone + * implementation of their own. + */ + Val* DoClone(CloneState* state) override; + + /** + * Helper function for derived class that need to record a type + * during serialization. + */ + static broker::expected SerializeType(BroType* t); + + /** + * Helper function for derived class that need to restore a type + * during unserialization. Returns the type at reference count +1. 
+ */ + static BroType* UnserializeType(const broker::data& data); +}; namespace probabilistic { class BloomFilter; @@ -22,15 +165,13 @@ public: virtual StringVal* Get(); protected: - HashVal() { }; + HashVal() { valid = false; } explicit HashVal(OpaqueType* t); virtual bool DoInit(); virtual bool DoFeed(const void* data, size_t size); virtual StringVal* DoGet(); - DECLARE_SERIAL(HashVal); - private: // This flag exists because Get() can only be called once. bool valid; @@ -47,6 +188,8 @@ public: MD5Val(); ~MD5Val(); + Val* DoClone(CloneState* state) override; + protected: friend class Val; @@ -54,8 +197,7 @@ protected: bool DoFeed(const void* data, size_t size) override; StringVal* DoGet() override; - DECLARE_SERIAL(MD5Val); - + DECLARE_OPAQUE_VALUE(MD5Val) private: EVP_MD_CTX* ctx; }; @@ -67,6 +209,8 @@ public: SHA1Val(); ~SHA1Val(); + Val* DoClone(CloneState* state) override; + protected: friend class Val; @@ -74,8 +218,7 @@ protected: bool DoFeed(const void* data, size_t size) override; StringVal* DoGet() override; - DECLARE_SERIAL(SHA1Val); - + DECLARE_OPAQUE_VALUE(SHA1Val) private: EVP_MD_CTX* ctx; }; @@ -87,6 +230,8 @@ public: SHA256Val(); ~SHA256Val(); + Val* DoClone(CloneState* state) override; + protected: friend class Val; @@ -94,8 +239,7 @@ protected: bool DoFeed(const void* data, size_t size) override; StringVal* DoGet() override; - DECLARE_SERIAL(SHA256Val); - + DECLARE_OPAQUE_VALUE(SHA256Val) private: EVP_MD_CTX* ctx; }; @@ -111,8 +255,7 @@ public: protected: friend class Val; - DECLARE_SERIAL(EntropyVal); - + DECLARE_OPAQUE_VALUE(EntropyVal) private: RandTest state; }; @@ -122,6 +265,8 @@ public: explicit BloomFilterVal(probabilistic::BloomFilter* bf); ~BloomFilterVal() override; + Val* DoClone(CloneState* state) override; + BroType* Type() const; bool Typify(BroType* type); @@ -139,8 +284,7 @@ protected: BloomFilterVal(); explicit BloomFilterVal(OpaqueType* t); - DECLARE_SERIAL(BloomFilterVal); - + DECLARE_OPAQUE_VALUE(BloomFilterVal) private: // Disable. BloomFilterVal(const BloomFilterVal&); @@ -149,7 +293,7 @@ private: BroType* type; CompositeHash* hash; probabilistic::BloomFilter* bloom_filter; - }; +}; class CardinalityVal: public OpaqueVal { @@ -157,6 +301,8 @@ public: explicit CardinalityVal(probabilistic::CardinalityCounter*); ~CardinalityVal() override; + Val* DoClone(CloneState* state) override; + void Add(const Val* val); BroType* Type() const; @@ -167,12 +313,27 @@ public: protected: CardinalityVal(); + DECLARE_OPAQUE_VALUE(CardinalityVal) private: BroType* type; CompositeHash* hash; probabilistic::CardinalityCounter* c; +}; - DECLARE_SERIAL(CardinalityVal); +class ParaglobVal : public OpaqueVal { +public: + explicit ParaglobVal(std::unique_ptr p); + VectorVal* Get(StringVal* &pattern); + Val* DoClone(CloneState* state) override; + bool operator==(const ParaglobVal& other) const; + +protected: + ParaglobVal() : OpaqueVal(paraglob_type) {} + + DECLARE_OPAQUE_VALUE(ParaglobVal) + +private: + std::unique_ptr internal_paraglob; }; #endif diff --git a/src/PacketDumper.cc b/src/PacketDumper.cc index 1a53550dfd..0d64c89290 100644 --- a/src/PacketDumper.cc +++ b/src/PacketDumper.cc @@ -1,7 +1,7 @@ // See the file "COPYING" in the main distribution directory for copyright. 
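Taken together, the OpaqueVal.h declarations above define the full contract a serializable opaque type has to meet: put DECLARE_OPAQUE_VALUE(T) in the class declaration, IMPLEMENT_OPAQUE_VALUE(T) in the implementation file, and express DoSerialize()/DoUnserialize() in terms of broker::data. A minimal sketch follows for a hypothetical CounterVal; counter_type stands in for an OpaqueType* along the lines of the existing md5_type or paraglob_type globals, and none of these names exist in the tree.

    class CounterVal : public OpaqueVal {
    public:
        CounterVal() : OpaqueVal(counter_type) { }

        void Increment()          { ++count; }
        uint64_t Count() const    { return count; }

    protected:
        // Declares DoSerialize()/DoUnserialize() overrides, OpaqueName()
        // returning "CounterVal", and the OpaqueInstantiate() factory.
        DECLARE_OPAQUE_VALUE(CounterVal)

    private:
        uint64_t count = 0;
    };

    // In the .cc file: registers the type with OpaqueMgr under "CounterVal".
    IMPLEMENT_OPAQUE_VALUE(CounterVal)

    broker::expected<broker::data> CounterVal::DoSerialize() const
        {
        // A single scalar suffices here; richer types pack a broker::vector.
        return {broker::data(count)};
        }

    bool CounterVal::DoUnserialize(const broker::data& data)
        {
        auto c = caf::get_if<uint64_t>(&data);  // broker::count
        if ( ! c )
            return false;

        count = *c;
        return true;
        }

With that in place, OpaqueVal::Serialize() prefixes the payload with the registered type name, OpaqueVal::Unserialize() re-instantiates the class via OpaqueMgr, and the inherited DoClone() yields a working deep copy without any further code.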
-#include "bro-config.h" +#include "zeek-config.h" #include #include diff --git a/src/PersistenceSerializer.cc b/src/PersistenceSerializer.cc deleted file mode 100644 index ae5c531aa7..0000000000 --- a/src/PersistenceSerializer.cc +++ /dev/null @@ -1,576 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -#include "PersistenceSerializer.h" -#include "RemoteSerializer.h" -#include "Conn.h" -#include "Event.h" -#include "Reporter.h" -#include "Net.h" - -static void persistence_serializer_delete_func(void* val) - { - time_t* t = reinterpret_cast(val); - free(t); - } - -class IncrementalWriteTimer : public Timer { -public: - IncrementalWriteTimer(double t, PersistenceSerializer::SerialStatus* s) - : Timer(t, TIMER_INCREMENTAL_WRITE), status(s) {} - - void Dispatch(double t, int is_expire); - - PersistenceSerializer::SerialStatus* status; -}; - -void IncrementalWriteTimer::Dispatch(double t, int is_expire) - { - // Never suspend when we're finishing up. - if ( terminating ) - status->info.may_suspend = false; - - persistence_serializer->RunSerialization(status); - } - -PersistenceSerializer::PersistenceSerializer() - { - dir = 0; - files.SetDeleteFunc(persistence_serializer_delete_func); - } - -PersistenceSerializer::~PersistenceSerializer() - { - } - -void PersistenceSerializer::Register(ID* id) - { - if ( id->Type()->Tag() == TYPE_FUNC ) - { - Error("can't register functions as persistent ID"); - return; - } - - DBG_LOG(DBG_STATE, "&persistent %s", id->Name()); - - HashKey key(id->Name()); - if ( persistent_ids.Lookup(&key) ) - return; - - Ref(id); - persistent_ids.Insert(&key, id); - } - -void PersistenceSerializer::Unregister(ID* id) - { - HashKey key(id->Name()); - Unref((ID*) persistent_ids.Remove(&key)); - } - -void PersistenceSerializer::Register(Connection* conn) - { - if ( persistent_conns.Lookup(conn->Key()) ) - return; - - Ref(conn); - HashKey* k = conn->Key(); - HashKey* new_key = new HashKey(k->Key(), k->Size(), k->Hash()); - persistent_conns.Insert(new_key, conn); - delete new_key; - } - -void PersistenceSerializer::Unregister(Connection* conn) - { - Unref(persistent_conns.RemoveEntry(conn->Key())); - } - -bool PersistenceSerializer::CheckTimestamp(const char* file) - { - struct stat s; - if ( stat(file, &s) < 0 ) - return false; - - if ( ! S_ISREG(s.st_mode) ) - return false; - - bool changed = true; - - HashKey* key = new HashKey(file, strlen(file)); - time_t* t = files.Lookup(key); - - if ( ! t ) - { - t = (time_t*) malloc(sizeof(time_t)); - if ( ! t ) - out_of_memory("saving file timestamp"); - files.Insert(key, t); - } - - else if ( *t >= s.st_mtime ) - changed = false; - - *t = s.st_mtime; - - delete key; - return changed; - } - -bool PersistenceSerializer::CheckForFile(UnserialInfo* info, const char* file, - bool delete_file) - { - bool ret = true; - if ( CheckTimestamp(file) ) - { - // Need to copy the filename here, as it may be passed - // in via fmt(). - const char* f = copy_string(file); - - bool ret = Read(info, f); - - if ( delete_file && unlink(f) < 0 ) - Error(fmt("can't delete file %s: %s", f, strerror(errno))); - - delete [] f; - } - - return ret; - } - -bool PersistenceSerializer::ReadAll(bool is_init, bool delete_files) - { -#ifdef USE_PERFTOOLS_DEBUG - HeapLeakChecker::Disabler disabler; -#endif - - assert(dir); - - UnserialInfo config_info(this); - config_info.id_policy = is_init ? - UnserialInfo::Replace : UnserialInfo::CopyCurrentToNew; - - if ( ! 
CheckForFile(&config_info, fmt("%s/config.bst", dir), - delete_files) ) - return false; - - UnserialInfo state_info(this); - state_info.id_policy = UnserialInfo::CopyNewToCurrent; - if ( ! CheckForFile(&state_info, fmt("%s/state.bst", dir), - delete_files) ) - return false; - - return true; - } - -bool PersistenceSerializer::MoveFileUp(const char* dir, const char* file) - { - char oldname[PATH_MAX]; - char newname[PATH_MAX]; - - safe_snprintf(oldname, PATH_MAX, "%s/.tmp/%s", dir, file ); - safe_snprintf(newname, PATH_MAX, "%s/%s", dir, file ); - - if ( rename(oldname, newname) < 0 ) - { - Error(fmt("can't move %s to %s: %s", oldname, newname, - strerror(errno))); - return false; - } - - CheckTimestamp(newname); - return true; - } - -#if 0 -void PersistenceSerializer::RaiseFinishedSendState() - { - val_list* vl = new val_list; - vl->append(new AddrVal(htonl(remote_host))); - vl->append(val_mgr->GetPort(remote_port)); - - mgr.QueueEvent(finished_send_state, vl); - reporter->Log("Serialization done."); - } -#endif - -void PersistenceSerializer::GotEvent(const char* name, double time, - EventHandlerPtr event, val_list* args) - { - mgr.QueueEvent(event, args); - } - -void PersistenceSerializer::GotFunctionCall(const char* name, double time, - Func* func, val_list* args) - { - try - { - func->Call(args); - } - - catch ( InterpreterException& e ) - { /* Already reported. */ } - } - -void PersistenceSerializer::GotStateAccess(StateAccess* s) - { - s->Replay(); - delete s; - } - -void PersistenceSerializer::GotTimer(Timer* s) - { - reporter->Error("PersistenceSerializer::GotTimer not implemented"); - } - -void PersistenceSerializer::GotConnection(Connection* c) - { - Unref(c); - } - -void PersistenceSerializer::GotID(ID* id, Val* /* val */) - { - Unref(id); - } - -void PersistenceSerializer::GotPacket(Packet* p) - { - reporter->Error("PersistenceSerializer::GotPacket not implemented"); - } - -bool PersistenceSerializer::LogAccess(const StateAccess& s) - { - if ( ! IsSerializationRunning() ) - return true; - - loop_over_list(running, i) - { - running[i]->accesses.append(new StateAccess(s)); - } - - return true; - } - -bool PersistenceSerializer::WriteState(bool may_suspend) - { - SerialStatus* status = - new SerialStatus(this, SerialStatus::WritingState); - - status->info.may_suspend = may_suspend; - - status->ids = &persistent_ids; - status->conns = &persistent_conns; - status->filename = "state.bst"; - - return RunSerialization(status); - } - -bool PersistenceSerializer::WriteConfig(bool may_suspend) - { - if ( mgr.IsDraining() && may_suspend ) - // Events which trigger checkpoint are flushed. Ignore; we'll - // checkpoint at termination in any case. 
- return true; - - SerialStatus* status = - new SerialStatus(this, SerialStatus::WritingConfig); - - status->info.may_suspend = may_suspend; - status->info.clear_containers = true; - status->ids = global_scope()->GetIDs(); - status->filename = "config.bst"; - - return RunSerialization(status); - } - -bool PersistenceSerializer::SendState(SourceID peer, bool may_suspend) - { - SerialStatus* status = - new SerialStatus(remote_serializer, SerialStatus::SendingState); - - status->info.may_suspend = may_suspend; - status->ids = &persistent_ids; - status->conns = &persistent_conns; - status->peer = peer; - - reporter->Info("Sending state..."); - - return RunSerialization(status); - } - -bool PersistenceSerializer::SendConfig(SourceID peer, bool may_suspend) - { - SerialStatus* status = - new SerialStatus(remote_serializer, SerialStatus::SendingConfig); - - status->info.may_suspend = may_suspend; - status->info.clear_containers = true; - status->ids = global_scope()->GetIDs(); - status->peer = peer; - - reporter->Info("Sending config..."); - - return RunSerialization(status); - } - -bool PersistenceSerializer::RunSerialization(SerialStatus* status) - { - Continuation* cont = &status->info.cont; - - if ( cont->NewInstance() ) - { - // Serialization is starting. Initialize. - - // See if there is already a serialization of this type running. - loop_over_list(running, i) - { - if ( running[i]->type == status->type ) - { - reporter->Warning("Serialization of type %d already running.", status->type); - return false; - } - } - - running.append(status); - - // Initialize. - if ( ! (ensure_dir(dir) && ensure_dir(fmt("%s/.tmp", dir))) ) - return false; - - if ( ! OpenFile(fmt("%s/.tmp/%s", dir, status->filename), false) ) - return false; - - if ( ! PrepareForWriting() ) - return false; - - if ( status->ids ) - { - status->id_cookie = status->ids->InitForIteration(); - status->ids->MakeRobustCookie(status->id_cookie); - } - - if ( status->conns ) - { - status->conn_cookie = status->conns->InitForIteration(); - status->conns->MakeRobustCookie(status->conn_cookie); - } - } - - else if ( cont->ChildSuspended() ) - { - // One of our former Serialize() calls suspended itself. - // We have to call it again. - - if ( status->id_cookie ) - { - if ( ! DoIDSerialization(status, status->current.id) ) - return false; - - if ( cont->ChildSuspended() ) - { - // Oops, it did it again. - timer_mgr->Add(new IncrementalWriteTimer(network_time + state_write_delay, status)); - return true; - } - } - - else if ( status->conn_cookie ) - { - if ( ! DoConnSerialization(status, status->current.conn) ) - return false; - - if ( cont->ChildSuspended() ) - { - // Oops, it did it again. - timer_mgr->Add(new IncrementalWriteTimer(network_time + state_write_delay, status)); - return true; - } - } - - else - reporter->InternalError("unknown suspend state"); - } - - else if ( cont->Resuming() ) - cont->Resume(); - - else - reporter->InternalError("unknown continuation state"); - - if ( status->id_cookie ) - { - ID* id; - - while ( (id = status->ids->NextEntry(status->id_cookie)) ) - { - if ( ! DoIDSerialization(status, id) ) - return false; - - if ( cont->ChildSuspended() ) - { - timer_mgr->Add(new IncrementalWriteTimer(network_time + state_write_delay, status)); - return true; - } - - if ( status->info.may_suspend ) - { - timer_mgr->Add(new IncrementalWriteTimer(network_time + state_write_delay, status)); - cont->Suspend(); - return true; - } - } - - // Cookie has been set to 0 by NextEntry(). 
- } - - if ( status->conn_cookie ) - { - Connection* conn; - while ( (conn = status->conns->NextEntry(status->conn_cookie)) ) - { - if ( ! DoConnSerialization(status, conn) ) - return false; - - if ( cont->ChildSuspended() ) - { - timer_mgr->Add(new IncrementalWriteTimer(network_time + state_write_delay, status)); - return true; - } - - if ( status->info.may_suspend ) - { - timer_mgr->Add(new IncrementalWriteTimer(network_time + state_write_delay, status)); - cont->Suspend(); - return true; - } - - } - - // Cookie has been set to 0 by NextEntry(). - } - - DBG_LOG(DBG_STATE, "finished serialization; %d accesses pending", - status->accesses.length()); - - if ( status->accesses.length() ) - { - // Serialize pending state accesses. - // FIXME: Does this need to suspend? - StateAccess* access; - loop_over_list(status->accesses, i) - { - // Serializing a StateAccess will not suspend. - if ( ! DoAccessSerialization(status, status->accesses[i]) ) - return false; - - delete status->accesses[i]; - } - } - - // Finalize. - CloseFile(); - - bool ret = MoveFileUp(dir, status->filename); - - loop_over_list(running, i) - { - if ( running[i]->type == status->type ) - { - running.remove_nth(i); - break; - } - } - - delete status; - return ret; - } - -bool PersistenceSerializer::DoIDSerialization(SerialStatus* status, ID* id) - { - bool success = false; - Continuation* cont = &status->info.cont; - - status->current.id = id; - - switch ( status->type ) { - case SerialStatus::WritingState: - case SerialStatus::WritingConfig: - cont->SaveContext(); - success = Serialize(&status->info, *id); - cont->RestoreContext(); - break; - - case SerialStatus::SendingState: - case SerialStatus::SendingConfig: - cont->SaveContext(); - success = remote_serializer->SendID(&status->info, - status->peer, *id); - cont->RestoreContext(); - break; - - default: - reporter->InternalError("unknown serialization type"); - } - - return success; - } - -bool PersistenceSerializer::DoConnSerialization(SerialStatus* status, - Connection* conn) - { - bool success = false; - Continuation* cont = &status->info.cont; - - status->current.conn = conn; - - switch ( status->type ) { - case SerialStatus::WritingState: - case SerialStatus::WritingConfig: - cont->SaveContext(); - success = Serialize(&status->info, *conn); - cont->RestoreContext(); - break; - - case SerialStatus::SendingState: - case SerialStatus::SendingConfig: - cont->SaveContext(); - success = remote_serializer->SendConnection(&status->info, - status->peer, *conn); - cont->RestoreContext(); - break; - - default: - reporter->InternalError("unknown serialization type"); - } - - return success; - } - -bool PersistenceSerializer::DoAccessSerialization(SerialStatus* status, - StateAccess* access) - { - bool success = false; - DisableSuspend suspend(&status->info); - - switch ( status->type ) { - case SerialStatus::WritingState: - case SerialStatus::WritingConfig: - success = Serialize(&status->info, *access); - break; - - case SerialStatus::SendingState: - case SerialStatus::SendingConfig: - success = remote_serializer->SendAccess(&status->info, - status->peer, *access); - break; - - default: - reporter->InternalError("unknown serialization type"); - } - - return success; - } diff --git a/src/PersistenceSerializer.h b/src/PersistenceSerializer.h deleted file mode 100644 index 99d8da88c4..0000000000 --- a/src/PersistenceSerializer.h +++ /dev/null @@ -1,165 +0,0 @@ -// Implements persistance for Bro's data structures. 
- -#ifndef persistence_serializer_h -#define persistence_serializer_h - -#include "Serializer.h" -#include "List.h" - -class StateAccess; - -class PersistenceSerializer : public FileSerializer { -public: - PersistenceSerializer(); - - ~PersistenceSerializer() override; - - // Define the directory where to store the data. - void SetDir(const char* arg_dir) { dir = copy_string(arg_dir); } - - // Register/unregister the ID/connection to be saved by WriteAll(). - void Register(ID* id); - void Unregister(ID* id); - void Register(Connection* conn); - void Unregister(Connection* conn); - - // Read all data that has been changed since last scan of directory. - // is_init should be true for the first read upon start-up. All existing - // state will be cleared. If delete_files is true, file which have been - // read are removed (even if the read was unsuccessful!). - bool ReadAll(bool is_init, bool delete_files); - - // Each of the following four methods may suspend operation. - // If they do, they install a Timer which resumes after some - // amount of time. If a function is called again before it - // has completely finished its task, it will do nothing and - // return false. - - bool WriteState(bool may_suspend); - - // Writes Bro's configuration (w/o dynamic state). - bool WriteConfig(bool may_suspend); - - // Sends all registered state to remote host - // (by leveraging the remote_serializer). - bool SendState(SourceID peer, bool may_suspend); - - // Sends Bro's config to remote host - // (by leveraging the remote_serializer). - bool SendConfig(SourceID peer, bool may_suspend); - - // Returns true if a serialization is currently running. - bool IsSerializationRunning() const { return running.length(); } - - // Tells the serializer that this access was performed. If a - // serialization is going on, it may store it. (Need only be called if - // IsSerializationRunning() returns true.) - bool LogAccess(const StateAccess& s); - -protected: - friend class RemoteSerializer; - friend class IncrementalWriteTimer; - - void GotID(ID* id, Val* val) override; - void GotEvent(const char* name, double time, - EventHandlerPtr event, val_list* args) override; - void GotFunctionCall(const char* name, double time, - Func* func, val_list* args) override; - void GotStateAccess(StateAccess* s) override; - void GotTimer(Timer* t) override; - void GotConnection(Connection* c) override; - void GotPacket(Packet* packet) override; - - // If file has changed since last check, read it. - bool CheckForFile(UnserialInfo* info, const char* file, - bool delete_file); - - // Returns true if it's a regular file and has a more recent timestamp - // than last time we checked it. - bool CheckTimestamp(const char* file); - - // Move file from /tmp/ to /. Afterwards, call - // CheckTimestamp() with /. - bool MoveFileUp(const char* dir, const char* file); - - // Generates an error message, terminates current serialization, - // and returns false. - bool SerialError(const char* msg); - - // Start a new serialization. - struct SerialStatus; - bool RunSerialization(SerialStatus* status); - - // Helpers for RunSerialization. 
- bool DoIDSerialization(SerialStatus* status, ID* id); - bool DoConnSerialization(SerialStatus* status, Connection* conn); - bool DoAccessSerialization(SerialStatus* status, StateAccess* access); - - typedef PDict(ID) id_map; - - declare(PDict, Connection); - typedef PDict(Connection) conn_map; - - struct SerialStatus { - enum Type { - WritingState, WritingConfig, - SendingState, SendingConfig, - }; - - SerialStatus(Serializer* s, Type arg_type) : info(s) - { - type = arg_type; - ids = 0; - id_cookie = 0; - conns = 0; - conn_cookie = 0; - peer = SOURCE_LOCAL; - filename = 0; - } - - Type type; - SerialInfo info; - - // IDs to serialize. - id_map* ids; - IterCookie* id_cookie; - - // Connections to serialize. - conn_map* conns; - IterCookie* conn_cookie; - - // Accesses performed while we're serializing. - declare(PList,StateAccess); - typedef PList(StateAccess) state_access_list; - state_access_list accesses; - - // The ID/Conn we're currently serializing. - union { - ID* id; - Connection* conn; - } current; - - // Only set if type is Writing{State,Config}. - const char* filename; - - // Only set if type is Sending{State,Config}. - SourceID peer; - }; - - const char* dir; - - declare(PList, SerialStatus); - PList(SerialStatus) running; - - id_map persistent_ids; - conn_map persistent_conns; - - // To keep track of files' modification times. - declare(PDict, time_t); - typedef PDict(time_t) file_map; - file_map files; -}; - -extern PersistenceSerializer* persistence_serializer; - -#endif diff --git a/src/PolicyFile.cc b/src/PolicyFile.cc index 22f09e6970..a6f93c8d88 100644 --- a/src/PolicyFile.cc +++ b/src/PolicyFile.cc @@ -1,4 +1,4 @@ -#include "bro-config.h" +#include "zeek-config.h" #include #include diff --git a/src/PriorityQueue.cc b/src/PriorityQueue.cc index 5fe0cbef81..9d5278108b 100644 --- a/src/PriorityQueue.cc +++ b/src/PriorityQueue.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include #include diff --git a/src/Queue.cc b/src/Queue.cc index 587e37063f..90f63a85be 100644 --- a/src/Queue.cc +++ b/src/Queue.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include diff --git a/src/RE.cc b/src/RE.cc index 517fab4c91..98f120efe0 100644 --- a/src/RE.cc +++ b/src/RE.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include #include @@ -9,7 +9,7 @@ #include "DFA.h" #include "CCL.h" #include "EquivClass.h" -#include "Serializer.h" +#include "Reporter.h" CCL* curr_ccl = 0; @@ -469,57 +469,6 @@ int RE_Matcher::Compile(int lazy) return re_anywhere->Compile(lazy) && re_exact->Compile(lazy); } -bool RE_Matcher::Serialize(SerialInfo* info) const - { - return SerialObj::Serialize(info); - } - -RE_Matcher* RE_Matcher::Unserialize(UnserialInfo* info) - { - return (RE_Matcher*) SerialObj::Unserialize(info, SER_RE_MATCHER); - } - -IMPLEMENT_SERIAL(RE_Matcher, SER_RE_MATCHER); - -bool RE_Matcher::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_RE_MATCHER, SerialObj); - return SERIALIZE(re_anywhere->PatternText()) - && SERIALIZE(re_exact->PatternText()); - } - -bool RE_Matcher::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(SerialObj); - - re_anywhere = new Specific_RE_Matcher(MATCH_ANYWHERE); - re_exact = new Specific_RE_Matcher(MATCH_EXACTLY); - - const char* pat; - if ( ! 
UNSERIALIZE_STR(&pat, 0) ) - return false; - - re_anywhere->SetPat(pat); - if ( ! re_anywhere->Compile() ) - { - info->s->Error(fmt("Can't compile regexp '%s'", pat)); - return false; - } - - if ( ! UNSERIALIZE_STR(&pat, 0) ) - return false; - - re_exact->SetPat(pat); - if ( ! re_exact->Compile() ) - { - info->s->Error(fmt("Can't compile regexp '%s'", pat)); - return false; - } - - return true; - } - - static RE_Matcher* matcher_merge(const RE_Matcher* re1, const RE_Matcher* re2, const char* merge_op) { diff --git a/src/RE.h b/src/RE.h index 06b0699864..9386aa6f5f 100644 --- a/src/RE.h +++ b/src/RE.h @@ -171,12 +171,12 @@ protected: int current_pos; }; -class RE_Matcher : SerialObj { +class RE_Matcher { public: RE_Matcher(); explicit RE_Matcher(const char* pat); RE_Matcher(const char* exact_pat, const char* anywhere_pat); - virtual ~RE_Matcher() override; + virtual ~RE_Matcher(); void AddPat(const char* pat); @@ -212,9 +212,6 @@ public: const char* PatternText() const { return re_exact->PatternText(); } const char* AnywherePatternText() const { return re_anywhere->PatternText(); } - bool Serialize(SerialInfo* info) const; - static RE_Matcher* Unserialize(UnserialInfo* info); - unsigned int MemoryAllocation() const { return padded_sizeof(*this) @@ -223,15 +220,10 @@ public: } protected: - DECLARE_SERIAL(RE_Matcher); - Specific_RE_Matcher* re_anywhere; Specific_RE_Matcher* re_exact; }; -declare(PList, RE_Matcher); -typedef PList(RE_Matcher) re_matcher_list; - extern RE_Matcher* RE_Matcher_conjunction(const RE_Matcher* re1, const RE_Matcher* re2); extern RE_Matcher* RE_Matcher_disjunction(const RE_Matcher* re1, const RE_Matcher* re2); diff --git a/src/Reassem.cc b/src/Reassem.cc index 0cdeadf80d..d952f6e839 100644 --- a/src/Reassem.cc +++ b/src/Reassem.cc @@ -3,10 +3,9 @@ #include #include -#include "bro-config.h" +#include "zeek-config.h" #include "Reassem.h" -#include "Serializer.h" static const bool DEBUG_reassem = false; @@ -357,37 +356,3 @@ uint64 Reassembler::MemoryAllocation(ReassemblerType rtype) return Reassembler::sizes[rtype]; } -bool Reassembler::Serialize(SerialInfo* info) const - { - return SerialObj::Serialize(info); - } - -Reassembler* Reassembler::Unserialize(UnserialInfo* info) - { - return (Reassembler*) SerialObj::Unserialize(info, SER_REASSEMBLER); - } - -bool Reassembler::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_REASSEMBLER, BroObj); - - // I'm not sure if it makes sense to actually save the buffered data. - // For now, we just remember the seq numbers so that we don't get - // complaints about missing content. - return SERIALIZE(trim_seq) && SERIALIZE(int(0)); - } - -bool Reassembler::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BroObj); - - blocks = last_block = 0; - - int dummy; // For backwards compatibility. - if ( ! UNSERIALIZE(&trim_seq) || ! UNSERIALIZE(&dummy) ) - return false; - - last_reassem_seq = trim_seq; - - return true; - } diff --git a/src/Reassem.h b/src/Reassem.h index 501cd23a18..ee5e4d42b6 100644 --- a/src/Reassem.h +++ b/src/Reassem.h @@ -62,9 +62,6 @@ public: void Describe(ODesc* d) const override; - bool Serialize(SerialInfo* info) const; - static Reassembler* Unserialize(UnserialInfo* info); - // Sum over all data buffered in some reassembler. 
static uint64 TotalMemoryAllocation() { return total_size; } @@ -76,8 +73,6 @@ public: protected: Reassembler() { } - DECLARE_ABSTRACT_SERIAL(Reassembler); - friend class DataBlock; virtual void Undelivered(uint64 up_to_seq); diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc deleted file mode 100644 index f55fba167c..0000000000 --- a/src/RemoteSerializer.cc +++ /dev/null @@ -1,4583 +0,0 @@ -// Processes involved in the communication: -// -// (Local-Parent) <-> (Local-Child) <-> (Remote-Child) <-> (Remote-Parent) -// -// Message types (for parent<->child communication the CMsg's peer indicates -// about whom we're talking). -// -// Communication protocol version -// VERSION -// [] -// -// Send serialization -// SERIAL -// -// Terminate(d) connection -// CLOSE -// -// Close(d) all connections -// CLOSE_ALL -// -// Connect to remote side -// CONNECT_TO -// -// Connected to remote side -// CONNECTED -// -// Request events from remote side -// REQUEST_EVENTS -// -// Request synchronization of IDs with remote side -// REQUEST_SYNC -// -// Listen for connection on ip/port (ip may be INADDR_ANY) -// LISTEN -// -// Close listen ports. -// LISTEN_STOP -// -// Error caused by host -// ERROR -// -// Some statistics about the given peer connection -// STATS -// -// Requests to set a new capture_filter -// CAPTURE_FILTER -// -// Ping to peer -// PING -// -// Pong from peer -// PONG -// -// Announce our capabilities -// CAPS -// -// Activate compression (parent->child) -// COMPRESS -// -// Indicate that all following blocks are compressed (child->child) -// COMPRESS -// -// Synchronize for pseudo-realtime processing. -// Signals that we have reached sync-point number . -// SYNC_POINT -// -// Signals the child that we want to terminate. Anything sent after this may -// get lost. When the child answers with another TERMINATE it is safe to -// shutdown. -// TERMINATE -// -// Debug-only: tell child to dump recently received/sent data to disk. -// DEBUG_DUMP -// -// Valid messages between processes: -// -// Main -> Child -// CONNECT_TO -// REQUEST_EVENTS -// SERIAL -// CLOSE -// CLOSE_ALL -// LISTEN -// LISTEN_STOP -// CAPTURE_FILTER -// VERSION -// REQUEST_SYNC -// PHASE_DONE -// PING -// PONG -// CAPS -// COMPRESS -// SYNC_POINT -// DEBUG_DUMP -// REMOTE_PRINT -// -// Child -> Main -// CONNECTED -// REQUEST_EVENTS -// SERIAL -// CLOSE -// ERROR -// STATS -// VERSION -// CAPTURE_FILTER -// REQUEST_SYNC -// PHASE_DONE -// PING -// PONG -// CAPS -// LOG -// SYNC_POINT -// REMOTE_PRINT -// -// Child <-> Child -// VERSION -// SERIAL -// REQUEST_EVENTS -// CAPTURE_FILTER -// REQUEST_SYNC -// PHASE_DONE -// PING -// PONG -// CAPS -// COMPRESS -// SYNC_POINT -// REMOTE_PRINT -// -// A connection between two peers has four phases: -// -// Setup: -// Initial phase. -// VERSION messages must be exchanged. -// Ends when both peers have sent VERSION. -// Handshake: -// REQUEST_EVENTS/REQUEST_SYNC/CAPTURE_FILTER/CAPS/selected SERIALs -// may be exchanged. -// Phase ends when both peers have sent PHASE_DONE. -// State synchronization: -// Entered iff at least one of the peers has sent REQUEST_SYNC. -// The peer with the smallest runtime (incl. in VERSION msg) sends -// SERIAL messages compromising all of its state. -// Phase ends when peer sends another PHASE_DONE. -// Running: -// Peers exchange SERIAL (and PING/PONG) messages. -// Phase ends with connection tear-down by one of the peers. 
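The four phases listed above form a small per-peer state machine: a connection leaves Setup once both sides have sent VERSION, leaves Handshake once both sides have sent PHASE_DONE, passes through state synchronization only if at least one side sent REQUEST_SYNC, and then stays in Running until tear-down. A minimal sketch of that progression, with illustrative names rather than the removed implementation's:

    #include <cstdio>

    // Illustrative only: the Setup -> Handshake -> (Sync) -> Running progression
    // described above, reduced to a tiny state machine with hypothetical names.
    enum class Phase { Setup, Handshake, Sync, Running, Closed };

    struct PeerState {
        Phase phase = Phase::Setup;
        bool we_done = false;        // we sent PHASE_DONE
        bool peer_done = false;      // peer sent PHASE_DONE
        bool sync_requested = false;

        // Setup ends once both sides have exchanged VERSION.
        void VersionExchanged() { if ( phase == Phase::Setup ) phase = Phase::Handshake; }

        // REQUEST_SYNC is only legal during the handshake.
        void RequestSync() { if ( phase == Phase::Handshake ) sync_requested = true; }

        // PHASE_DONE ends the handshake for one side; once both are done we
        // either synchronize state first or go straight to Running.  A further
        // PHASE_DONE from the state sender then ends the Sync phase.
        void PhaseDone(bool from_peer) {
            if ( from_peer )
                peer_done = true;
            else
                we_done = true;

            if ( phase == Phase::Handshake && we_done && peer_done )
                phase = sync_requested ? Phase::Sync : Phase::Running;
            else if ( phase == Phase::Sync )
                phase = Phase::Running;
            }

        void Close() { phase = Phase::Closed; }
        };

    int main() {
        PeerState p;
        p.VersionExchanged();
        p.RequestSync();
        p.PhaseDone(false);          // our handshake is done
        p.PhaseDone(true);           // peer's too -> Sync
        p.PhaseDone(true);           // full state received -> Running
        std::printf("running: %d\n", p.phase == Phase::Running);
        return 0;
        }

The removed implementation additionally negotiates which side dumps its state during the synchronization phase (based on authority flags and the peers' runtimes); the sketch leaves that decision out.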
- -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "bro-config.h" -#ifdef TIME_WITH_SYS_TIME -# include -# include -#else -# ifdef HAVE_SYS_TIME_H -# include -# else -# include -# endif -#endif -#include - -#include -#include -#include -#include - -#include "RemoteSerializer.h" -#include "Func.h" -#include "EventRegistry.h" -#include "Event.h" -#include "Net.h" -#include "NetVar.h" -#include "Scope.h" -#include "Sessions.h" -#include "File.h" -#include "Conn.h" -#include "Reporter.h" -#include "IPAddr.h" -#include "bro_inet_ntop.h" -#include "iosource/Manager.h" -#include "logging/Manager.h" -#include "logging/logging.bif.h" - -extern "C" { -#include "setsignal.h" -}; - -// Gets incremented each time there's an incompatible change -// to the communication internals. -static const unsigned short PROTOCOL_VERSION = 0x07; - -static const char MSG_NONE = 0x00; -static const char MSG_VERSION = 0x01; -static const char MSG_SERIAL = 0x02; -static const char MSG_CLOSE = 0x03; -static const char MSG_CLOSE_ALL = 0x04; -static const char MSG_ERROR = 0x05; -static const char MSG_CONNECT_TO = 0x06; -static const char MSG_CONNECTED = 0x07; -static const char MSG_REQUEST_EVENTS = 0x08; -static const char MSG_LISTEN = 0x09; -static const char MSG_LISTEN_STOP = 0x0a; -static const char MSG_STATS = 0x0b; -static const char MSG_CAPTURE_FILTER = 0x0c; -static const char MSG_REQUEST_SYNC = 0x0d; -static const char MSG_PHASE_DONE = 0x0e; -static const char MSG_PING = 0x0f; -static const char MSG_PONG = 0x10; -static const char MSG_CAPS = 0x11; -static const char MSG_COMPRESS = 0x12; -static const char MSG_LOG = 0x13; -static const char MSG_SYNC_POINT = 0x14; -static const char MSG_TERMINATE = 0x15; -static const char MSG_DEBUG_DUMP = 0x16; -static const char MSG_REMOTE_PRINT = 0x17; -static const char MSG_LOG_CREATE_WRITER = 0x18; -static const char MSG_LOG_WRITE = 0x19; -static const char MSG_REQUEST_LOGS = 0x20; - -// Update this one whenever adding a new ID: -static const char MSG_ID_MAX = MSG_REQUEST_LOGS; - -static const uint32 FINAL_SYNC_POINT = /* UINT32_MAX */ 4294967295U; - -// Buffer size for remote-print data -static const int PRINT_BUFFER_SIZE = 10 * 1024; -static const int SOCKBUF_SIZE = 1024 * 1024; - -// Buffer size for remote-log data. -static const int LOG_BUFFER_SIZE = 50 * 1024; - -struct ping_args { - uint32 seq; - double time1; // Round-trip time parent1<->parent2 - double time2; // Round-trip time child1<->parent2 - double time3; // Round-trip time child2<->parent2 -}; - -#ifdef DEBUG -# define DEBUG_COMM(msg) DBG_LOG(DBG_COMM, "%s", msg) -#else -# define DEBUG_COMM(msg) -#endif - -#define READ_CHUNK(i, c, do_if_eof, kill_me) \ - { \ - if ( ! i->Read(&c) ) \ - { \ - if ( i->Eof() ) \ - { \ - do_if_eof; \ - } \ - else \ - Error(fmt("can't read data chunk: %s", io->Error()), kill_me); \ - return false; \ - } \ - \ - if ( ! c ) \ - return true; \ - } - -#define READ_CHUNK_FROM_CHILD(c) \ - { \ - if ( ! io->Read(&c) ) \ - { \ - if ( io->Eof() ) \ - ChildDied(); \ - else \ - Error(fmt("can't read data chunk: %s", io->Error())); \ - return false; \ - } \ - \ - if ( ! 
c ) \ - { \ - SetIdle(io->IsIdle());\ - return true; \ - } \ - SetIdle(false); \ - } - -static const char* msgToStr(int msg) - { -# define MSG_STR(x) case x: return #x; - switch ( msg ) { - MSG_STR(MSG_VERSION) - MSG_STR(MSG_NONE) - MSG_STR(MSG_SERIAL) - MSG_STR(MSG_CLOSE) - MSG_STR(MSG_CLOSE_ALL) - MSG_STR(MSG_ERROR) - MSG_STR(MSG_CONNECT_TO) - MSG_STR(MSG_CONNECTED) - MSG_STR(MSG_REQUEST_EVENTS) - MSG_STR(MSG_LISTEN) - MSG_STR(MSG_LISTEN_STOP) - MSG_STR(MSG_STATS) - MSG_STR(MSG_CAPTURE_FILTER) - MSG_STR(MSG_REQUEST_SYNC) - MSG_STR(MSG_PHASE_DONE) - MSG_STR(MSG_PING) - MSG_STR(MSG_PONG) - MSG_STR(MSG_CAPS) - MSG_STR(MSG_COMPRESS) - MSG_STR(MSG_LOG) - MSG_STR(MSG_SYNC_POINT) - MSG_STR(MSG_TERMINATE) - MSG_STR(MSG_DEBUG_DUMP) - MSG_STR(MSG_REMOTE_PRINT) - MSG_STR(MSG_LOG_CREATE_WRITER) - MSG_STR(MSG_LOG_WRITE) - MSG_STR(MSG_REQUEST_LOGS) - default: - return "UNKNOWN_MSG"; - } - } - -static vector tokenize(const string& s, char delim) - { - vector tokens; - stringstream ss(s); - string token; - - while ( std::getline(ss, token, delim) ) - tokens.push_back(token); - - return tokens; - } - -// Start of every message between two processes. We do the low-level work -// ourselves to make this 64-bit safe. (The actual layout is an artifact of -// an earlier design that depended on how a 32-bit GCC lays out its structs ...) -class CMsg { -public: - CMsg(char type, RemoteSerializer::PeerID peer) - { - buffer[0] = type; - uint32 tmp = htonl(peer); - memcpy(buffer + 4, &tmp, sizeof(tmp)); - } - - char Type() { return buffer[0]; } - - RemoteSerializer::PeerID Peer() - { - uint32 tmp; - memcpy(&tmp, buffer + 4, sizeof(tmp)); - return ntohl(tmp); - } - - const char* Raw() { return buffer; } - -private: - char buffer[8]; -}; - -static bool sendCMsg(ChunkedIO* io, char msg_type, RemoteSerializer::PeerID id) - { - // We use the new[] operator here to avoid mismatches - // when deleting the data. - CMsg* msg = (CMsg*) new char[sizeof(CMsg)]; - new (msg) CMsg(msg_type, id); - - ChunkedIO::Chunk* c = new ChunkedIO::Chunk((char*)msg, sizeof(CMsg)); - return io->Write(c); - } - -static ChunkedIO::Chunk* makeSerialMsg(RemoteSerializer::PeerID id) - { - // We use the new[] operator here to avoid mismatches - // when deleting the data. - CMsg* msg = (CMsg*) new char[sizeof(CMsg)]; - new (msg) CMsg(MSG_SERIAL, id); - - ChunkedIO::Chunk* c = new ChunkedIO::Chunk((char*)msg, sizeof(CMsg)); - return c; - } - -inline void RemoteSerializer::SetupSerialInfo(SerialInfo* info, Peer* peer) - { - info->chunk = makeSerialMsg(peer->id); - if ( peer->caps & Peer::NO_CACHING ) - info->cache = false; - - if ( ! (peer->caps & Peer::PID_64BIT) || peer->phase != Peer::RUNNING ) - info->pid_32bit = true; - - if ( (peer->caps & Peer::NEW_CACHE_STRATEGY) && - peer->phase == Peer::RUNNING ) - info->new_cache_strategy = true; - - if ( (peer->caps & Peer::BROCCOLI_PEER) ) - info->broccoli_peer = true; - - info->include_locations = false; - } - -static bool sendToIO(ChunkedIO* io, ChunkedIO::Chunk* c) - { - if ( ! io->Write(c) ) - { - reporter->Warning("can't send chunk: %s", io->Error()); - return false; - } - - return true; - } - -static bool sendToIO(ChunkedIO* io, char msg_type, RemoteSerializer::PeerID id, - const char* str, int len = -1, bool delete_with_free = false) - { - if ( ! sendCMsg(io, msg_type, id) ) - { - reporter->Warning("can't send message of type %d: %s", msg_type, io->Error()); - return false; - } - - uint32 sz = len >= 0 ? 
len : strlen(str) + 1; - ChunkedIO::Chunk* c = new ChunkedIO::Chunk(const_cast(str), sz); - - if ( delete_with_free ) - c->free_func = ChunkedIO::Chunk::free_func_free; - else - c->free_func = ChunkedIO::Chunk::free_func_delete; - - return sendToIO(io, c); - } - -static bool sendToIO(ChunkedIO* io, char msg_type, RemoteSerializer::PeerID id, - int nargs, va_list ap) - { - if ( ! sendCMsg(io, msg_type, id) ) - { - reporter->Warning("can't send message of type %d: %s", msg_type, io->Error()); - return false; - } - - if ( nargs == 0 ) - return true; - - uint32* args = new uint32[nargs]; - - for ( int i = 0; i < nargs; i++ ) - args[i] = htonl(va_arg(ap, uint32)); - - ChunkedIO::Chunk* c = new ChunkedIO::Chunk((char*)args, - sizeof(uint32) * nargs); - return sendToIO(io, c); - } - -#ifdef DEBUG -static inline char* fmt_uint32s(int nargs, va_list ap) - { - static char buf[512]; - char* p = buf; - *p = '\0'; - for ( int i = 0; i < nargs; i++ ) - p += snprintf(p, sizeof(buf) - (p - buf), - " 0x%08x", va_arg(ap, uint32)); - buf[511] = '\0'; - return buf; - } -#endif - -static pid_t child_pid = 0; - -// Return true if message type is sent by a peer (rather than the child -// process itself). -static inline bool is_peer_msg(int msg) - { - return msg == MSG_VERSION || - msg == MSG_SERIAL || - msg == MSG_REQUEST_EVENTS || - msg == MSG_REQUEST_SYNC || - msg == MSG_CAPTURE_FILTER || - msg == MSG_PHASE_DONE || - msg == MSG_PING || - msg == MSG_PONG || - msg == MSG_CAPS || - msg == MSG_COMPRESS || - msg == MSG_SYNC_POINT || - msg == MSG_REMOTE_PRINT || - msg == MSG_LOG_CREATE_WRITER || - msg == MSG_LOG_WRITE || - msg == MSG_REQUEST_LOGS; - } - -bool RemoteSerializer::IsConnectedPeer(PeerID id) - { - if ( id == PEER_NONE ) - return true; - - return LookupPeer(id, true) != 0; - } - -class IncrementalSendTimer : public Timer { -public: - IncrementalSendTimer(double t, RemoteSerializer::Peer* p, SerialInfo* i) - : Timer(t, TIMER_INCREMENTAL_SEND), info(i), peer(p) {} - virtual void Dispatch(double t, int is_expire) - { - // Never suspend when we're finishing up. - if ( terminating ) - info->may_suspend = false; - - remote_serializer->SendAllSynchronized(peer, info); - } - - SerialInfo* info; - RemoteSerializer::Peer* peer; -}; - -RemoteSerializer::RemoteSerializer() - { - initialized = false; - current_peer = 0; - msgstate = TYPE; - id_counter = 1; - listening = false; - ignore_accesses = false; - propagate_accesses = 1; - current_sync_point = 0; - syncing_times = false; - io = 0; - terminating = false; - in_sync = 0; - last_flush = 0; - received_logs = 0; - current_id = 0; - current_msgtype = 0; - current_args = 0; - source_peer = 0; - - // Register as a "dont-count" source first, we may change that later. - iosource_mgr->Register(this, true); - } - -RemoteSerializer::~RemoteSerializer() - { - if ( child_pid ) - { - if ( kill(child_pid, SIGKILL) < 0 ) - reporter->Warning("warning: cannot kill child (pid %d), %s", child_pid, strerror(errno)); - - else if ( waitpid(child_pid, 0, 0) < 0 ) - reporter->Warning("warning: error encountered during waitpid(%d), %s", child_pid, strerror(errno)); - } - - delete io; - } - -void RemoteSerializer::Enable() - { - if ( initialized ) - return; - - if ( reading_traces && ! 
pseudo_realtime ) - { - using_communication = 0; - return; - } - - Fork(); - - Log(LogInfo, fmt("communication started, parent pid is %d, child pid is %d", getpid(), child_pid)); - initialized = 1; - } - -void RemoteSerializer::SetSocketBufferSize(int fd, int opt, const char *what, int size, int verbose) - { - int defsize = 0; - socklen_t len = sizeof(defsize); - - if ( getsockopt(fd, SOL_SOCKET, opt, (void *)&defsize, &len) < 0 ) - { - if ( verbose ) - Log(LogInfo, fmt("warning: cannot get socket buffer size (%s): %s", what, strerror(errno))); - return; - } - - for ( int trysize = size; trysize > defsize; trysize -= 1024 ) - { - if ( setsockopt(fd, SOL_SOCKET, opt, &trysize, sizeof(trysize)) >= 0 ) - { - if ( verbose ) - { - if ( trysize == size ) - Log(LogInfo, fmt("raised pipe's socket buffer size from %dK to %dK", defsize / 1024, trysize / 1024)); - else - Log(LogInfo, fmt("raised pipe's socket buffer size from %dK to %dK (%dK was requested)", defsize / 1024, trysize / 1024, size / 1024)); - } - return; - } - } - - Log(LogInfo, fmt("warning: cannot increase %s socket buffer size from %dK (%dK was requested)", what, defsize / 1024, size / 1024)); - } - -void RemoteSerializer::Fork() - { - if ( child_pid ) - return; - - // Register as a "does-count" source now. - iosource_mgr->Register(this, false); - - // If we are re-forking, remove old entries - loop_over_list(peers, i) - RemovePeer(peers[i]); - - // Create pipe for communication between parent and child. - int pipe[2]; - - if ( socketpair(AF_UNIX, SOCK_STREAM, 0, pipe) < 0 ) - { - Error(fmt("can't create pipe: %s", strerror(errno))); - return; - } - - // Try to increase the size of the socket send and receive buffers. - SetSocketBufferSize(pipe[0], SO_SNDBUF, "SO_SNDBUF", SOCKBUF_SIZE, 1); - SetSocketBufferSize(pipe[0], SO_RCVBUF, "SO_RCVBUF", SOCKBUF_SIZE, 0); - SetSocketBufferSize(pipe[1], SO_SNDBUF, "SO_SNDBUF", SOCKBUF_SIZE, 0); - SetSocketBufferSize(pipe[1], SO_RCVBUF, "SO_RCVBUF", SOCKBUF_SIZE, 0); - - child_pid = 0; - - int pid = fork(); - - if ( pid < 0 ) - { - Error(fmt("can't fork: %s", strerror(errno))); - return; - } - - if ( pid > 0 ) - { - // Parent - child_pid = pid; - - io = new ChunkedIOFd(pipe[0], "parent->child", child_pid); - if ( ! io->Init() ) - { - Error(fmt("can't init child io: %s", io->Error())); - exit(1); // FIXME: Better way to handle this? - } - - safe_close(pipe[1]); - - return; - } - else - { // child - SocketComm child; - - ChunkedIOFd* io = - new ChunkedIOFd(pipe[1], "child->parent", getppid()); - if ( ! io->Init() ) - { - Error(fmt("can't init parent io: %s", io->Error())); - exit(1); - } - - child.SetParentIO(io); - safe_close(pipe[0]); - - // Close file descriptors. - safe_close(0); - safe_close(1); - safe_close(2); - - // Be nice. - setpriority(PRIO_PROCESS, 0, 5); - - child.Run(); - reporter->InternalError("cannot be reached"); - } - } - -RemoteSerializer::PeerID RemoteSerializer::Connect(const IPAddr& ip, - const string& zone_id, uint16 port, const char* our_class, double retry, - bool use_ssl) - { - if ( ! using_communication ) - return true; - - if ( ! initialized ) - reporter->InternalError("remote serializer not initialized"); - - if ( ! 
child_pid ) - Fork(); - - Peer* p = AddPeer(ip, port); - p->orig = true; - - if ( our_class ) - p->our_class = our_class; - - const size_t BUFSIZE = 1024; - char* data = new char[BUFSIZE]; - snprintf(data, BUFSIZE, - "%" PRI_PTR_COMPAT_UINT",%s,%s,%" PRIu16",%" PRIu32",%d", p->id, - ip.AsString().c_str(), zone_id.c_str(), port, uint32(retry), - use_ssl); - - if ( ! SendToChild(MSG_CONNECT_TO, p, data) ) - { - RemovePeer(p); - return false; - } - - p->state = Peer::PENDING; - return p->id; - } - -bool RemoteSerializer::CloseConnection(PeerID id) - { - if ( ! using_communication ) - return true; - - Peer* peer = LookupPeer(id, true); - if ( ! peer ) - { - reporter->Error("unknown peer id %d for closing connection", int(id)); - return false; - } - - return CloseConnection(peer); - } - -bool RemoteSerializer::CloseConnection(Peer* peer) - { - if ( peer->suspended_processing ) - { - net_continue_processing(); - peer->suspended_processing = false; - } - - if ( peer->state == Peer::CLOSING ) - return true; - - FlushPrintBuffer(peer); - FlushLogBuffer(peer); - - Log(LogInfo, "closing connection", peer); - - peer->state = Peer::CLOSING; - return SendToChild(MSG_CLOSE, peer, 0); - } - -bool RemoteSerializer::RequestSync(PeerID id, bool auth) - { - if ( ! using_communication ) - return true; - - Peer* peer = LookupPeer(id, true); - if ( ! peer ) - { - reporter->Error("unknown peer id %d for request sync", int(id)); - return false; - } - - if ( peer->phase != Peer::HANDSHAKE ) - { - reporter->Error("can't request sync from peer; wrong phase %d", - peer->phase); - return false; - } - - if ( ! SendToChild(MSG_REQUEST_SYNC, peer, 1, auth ? 1 : 0) ) - return false; - - peer->sync_requested |= Peer::WE | (auth ? Peer::AUTH_WE : 0); - - return true; - } - -bool RemoteSerializer::RequestLogs(PeerID id) - { - if ( ! using_communication ) - return true; - - Peer* peer = LookupPeer(id, true); - if ( ! peer ) - { - reporter->Error("unknown peer id %d for request logs", int(id)); - return false; - } - - if ( peer->phase != Peer::HANDSHAKE ) - { - reporter->Error("can't request logs from peer; wrong phase %d", - peer->phase); - return false; - } - - if ( ! SendToChild(MSG_REQUEST_LOGS, peer, 0) ) - return false; - - return true; - } - -bool RemoteSerializer::RequestEvents(PeerID id, RE_Matcher* pattern) - { - if ( ! using_communication ) - return true; - - Peer* peer = LookupPeer(id, true); - if ( ! peer ) - { - reporter->Error("unknown peer id %d for request sync", int(id)); - return false; - } - - if ( peer->phase != Peer::HANDSHAKE ) - { - reporter->Error("can't request events from peer; wrong phase %d", - peer->phase); - return false; - } - - EventRegistry::string_list* handlers = event_registry->Match(pattern); - - // Concat the handlers' names. - int len = 0; - loop_over_list(*handlers, i) - len += strlen((*handlers)[i]) + 1; - - if ( ! len ) - { - Log(LogInfo, "warning: no events to request"); - delete handlers; - return true; - } - - char* data = new char[len]; - char* d = data; - loop_over_list(*handlers, j) - { - for ( const char* p = (*handlers)[j]; *p; *d++ = *p++ ) - ; - *d++ = '\0'; - } - - delete handlers; - - return SendToChild(MSG_REQUEST_EVENTS, peer, data, len); - } - -bool RemoteSerializer::SetAcceptState(PeerID id, bool accept) - { - Peer* p = LookupPeer(id, false); - if ( ! p ) - return true; - - p->accept_state = accept; - return true; - } - -bool RemoteSerializer::SetCompressionLevel(PeerID id, int level) - { - Peer* p = LookupPeer(id, false); - if ( ! 
p ) - return true; - - p->comp_level = level; - return true; - } - -bool RemoteSerializer::CompleteHandshake(PeerID id) - { - Peer* p = LookupPeer(id, false); - if ( ! p ) - return true; - - if ( p->phase != Peer::HANDSHAKE ) - { - reporter->Error("can't complete handshake; wrong phase %d", - p->phase); - return false; - } - - p->handshake_done |= Peer::WE; - - if ( ! SendToChild(MSG_PHASE_DONE, p, 0) ) - return false; - - if ( p->handshake_done == Peer::BOTH ) - HandshakeDone(p); - - return true; - } - -bool RemoteSerializer::SendCall(SerialInfo* info, PeerID id, - const char* name, val_list* vl) - { - if ( ! using_communication || terminating ) - return true; - - Peer* peer = LookupPeer(id, true); - if ( ! peer ) - return false; - - return SendCall(info, peer, name, vl); - } - -bool RemoteSerializer::SendCall(SerialInfo* info, Peer* peer, - const char* name, val_list* vl) - { - if ( peer->phase != Peer::RUNNING || terminating ) - return false; - - ++stats.events.out; - SetCache(peer->cache_out); - SetupSerialInfo(info, peer); - - if ( ! Serialize(info, name, vl) ) - { - FatalError(io->Error()); - return false; - } - - return true; - } - -bool RemoteSerializer::SendCall(SerialInfo* info, const char* name, - val_list* vl) - { - if ( ! IsOpen() || ! PropagateAccesses() || terminating ) - return true; - - loop_over_list(peers, i) - { - // Do not send event back to originating peer. - if ( peers[i] == current_peer ) - continue; - - SerialInfo new_info(*info); - if ( ! SendCall(&new_info, peers[i], name, vl) ) - return false; - } - - return true; - } - -bool RemoteSerializer::SendAccess(SerialInfo* info, Peer* peer, - const StateAccess& access) - { - if ( ! (peer->sync_requested & Peer::PEER) || terminating ) - return true; - -#ifdef DEBUG - ODesc desc; - access.Describe(&desc); - DBG_LOG(DBG_COMM, "Sending %s", desc.Description()); -#endif - - ++stats.accesses.out; - SetCache(peer->cache_out); - SetupSerialInfo(info, peer); - info->globals_as_names = true; - - if ( ! Serialize(info, access) ) - { - FatalError(io->Error()); - return false; - } - - return true; - } - -bool RemoteSerializer::SendAccess(SerialInfo* info, PeerID pid, - const StateAccess& access) - { - Peer* p = LookupPeer(pid, false); - if ( ! p ) - return true; - - return SendAccess(info, p, access); - } - -bool RemoteSerializer::SendAccess(SerialInfo* info, const StateAccess& access) - { - if ( ! IsOpen() || ! PropagateAccesses() || terminating ) - return true; - - // A real broadcast would be nice here. But the different peers have - // different serialization caches, so we cannot simply send the same - // serialization to all of them ... - loop_over_list(peers, i) - { - // Do not send access back to originating peer. - if ( peers[i] == source_peer ) - continue; - - // Only sent accesses for fully setup peers. - if ( peers[i]->phase != Peer::RUNNING ) - continue; - - SerialInfo new_info(*info); - if ( ! SendAccess(&new_info, peers[i], access) ) - return false; - } - - return true; - } - -bool RemoteSerializer::SendAllSynchronized(Peer* peer, SerialInfo* info) - { - // FIXME: When suspending ID serialization works, remove! - DisableSuspend suspend(info); - - current_peer = peer; - - Continuation* cont = &info->cont; - ptr_compat_int index; - - if ( info->cont.NewInstance() ) - { - Log(LogInfo, "starting to send full state", peer); - index = 0; - } - - else - { - index = int(ptr_compat_int(cont->RestoreState())); - if ( ! cont->ChildSuspended() ) - cont->Resume(); - } - - for ( ; index < sync_ids.length(); ++index ) - { - if ( ! 
sync_ids[index]->ID_Val() ) - { -#ifdef DEBUG - DBG_LOG(DBG_COMM, "Skip sync of ID with null value: %s\n", - sync_ids[index]->Name()); -#endif - continue; - } - cont->SaveContext(); - - StateAccess sa(OP_ASSIGN, sync_ids[index], - sync_ids[index]->ID_Val()); - // FIXME: When suspending ID serialization works, we need to - // addsupport to StateAccesses, too. - bool result = SendAccess(info, peer, sa); - cont->RestoreContext(); - - if ( ! result ) - return false; - - if ( cont->ChildSuspended() || info->may_suspend ) - { - double t = network_time + state_write_delay; - timer_mgr->Add(new IncrementalSendTimer(t, peer, info)); - - cont->SaveState((void*) index); - if ( info->may_suspend ) - cont->Suspend(); - - return true; - } - } - - if ( ! SendToChild(MSG_PHASE_DONE, peer, 0) ) - return false; - - suspend.Release(); - delete info; - - Log(LogInfo, "done sending full state", peer); - - return EnterPhaseRunning(peer); - } - -bool RemoteSerializer::SendID(SerialInfo* info, Peer* peer, const ID& id) - { - if ( terminating ) - return true; - - // FIXME: When suspending ID serialization works, remove! - DisableSuspend suspend(info); - - if ( info->cont.NewInstance() ) - ++stats.ids.out; - - SetCache(peer->cache_out); - SetupSerialInfo(info, peer); - info->cont.SaveContext(); - bool result = Serialize(info, id); - info->cont.RestoreContext(); - - if ( ! result ) - { - FatalError(io->Error()); - return false; - } - - return true; - } - -bool RemoteSerializer::SendID(SerialInfo* info, PeerID pid, const ID& id) - { - if ( ! using_communication || terminating ) - return true; - - Peer* peer = LookupPeer(pid, true); - if ( ! peer ) - return false; - - if ( peer->phase != Peer::RUNNING ) - return false; - - return SendID(info, peer, id); - } - -bool RemoteSerializer::SendConnection(SerialInfo* info, PeerID id, - const Connection& c) - { - if ( ! using_communication || terminating ) - return true; - - Peer* peer = LookupPeer(id, true); - if ( ! peer ) - return false; - - if ( peer->phase != Peer::RUNNING ) - return false; - - ++stats.conns.out; - SetCache(peer->cache_out); - SetupSerialInfo(info, peer); - - if ( ! Serialize(info, c) ) - { - FatalError(io->Error()); - return false; - } - - return true; - } - -bool RemoteSerializer::SendCaptureFilter(PeerID id, const char* filter) - { - if ( ! using_communication || terminating ) - return true; - - Peer* peer = LookupPeer(id, true); - if ( ! peer ) - return false; - - if ( peer->phase != Peer::HANDSHAKE ) - { - reporter->Error("can't sent capture filter to peer; wrong phase %d", peer->phase); - return false; - } - - return SendToChild(MSG_CAPTURE_FILTER, peer, copy_string(filter)); - } - -bool RemoteSerializer::SendPacket(SerialInfo* info, const Packet& p) - { - if ( ! IsOpen() || !PropagateAccesses() || terminating ) - return true; - - loop_over_list(peers, i) - { - // Only sent packet for fully setup peers. - if ( peers[i]->phase != Peer::RUNNING ) - continue; - - SerialInfo new_info(*info); - if ( ! SendPacket(&new_info, peers[i], p) ) - return false; - } - - return true; - } - -bool RemoteSerializer::SendPacket(SerialInfo* info, PeerID id, const Packet& p) - { - if ( ! using_communication || terminating ) - return true; - - Peer* peer = LookupPeer(id, true); - if ( ! peer ) - return false; - - return SendPacket(info, peer, p); - } - -bool RemoteSerializer::SendPacket(SerialInfo* info, Peer* peer, const Packet& p) - { - ++stats.packets.out; - SetCache(peer->cache_out); - SetupSerialInfo(info, peer); - - if ( ! 
Serialize(info, p) ) - { - FatalError(io->Error()); - return false; - } - - return true; - } - -bool RemoteSerializer::SendPing(PeerID id, uint32 seq) - { - if ( ! using_communication || terminating ) - return true; - - Peer* peer = LookupPeer(id, true); - if ( ! peer ) - return false; - - char* data = new char[sizeof(ping_args)]; - - ping_args* args = (ping_args*) data; - args->seq = htonl(seq); - args->time1 = htond(current_time(true)); - args->time2 = 0; - args->time3 = 0; - - return SendToChild(MSG_PING, peer, data, sizeof(ping_args)); - } - -bool RemoteSerializer::SendCapabilities(Peer* peer) - { - if ( peer->phase != Peer::HANDSHAKE ) - { - reporter->Error("can't sent capabilties to peer; wrong phase %d", - peer->phase); - return false; - } - - uint32 caps = 0; - - caps |= Peer::COMPRESSION; - caps |= Peer::PID_64BIT; - caps |= Peer::NEW_CACHE_STRATEGY; - - return SendToChild(MSG_CAPS, peer, 3, caps, 0, 0); - } - -bool RemoteSerializer::Listen(const IPAddr& ip, uint16 port, bool expect_ssl, - bool ipv6, const string& zone_id, double retry) - { - if ( ! using_communication ) - return true; - - if ( ! initialized ) - reporter->InternalError("remote serializer not initialized"); - - if ( ! ipv6 && ip.GetFamily() == IPv6 && - ip != IPAddr("0.0.0.0") && ip != IPAddr("::") ) - reporter->FatalError("Attempt to listen on address %s, but IPv6 " - "communication disabled", ip.AsString().c_str()); - - const size_t BUFSIZE = 1024; - char* data = new char[BUFSIZE]; - snprintf(data, BUFSIZE, "%s,%" PRIu16",%d,%d,%s,%" PRIu32, - ip.AsString().c_str(), port, expect_ssl, ipv6, zone_id.c_str(), - (uint32) retry); - - if ( ! SendToChild(MSG_LISTEN, 0, data) ) - return false; - - listening = true; - SetClosed(false); - return true; - } - -void RemoteSerializer::SendSyncPoint(uint32 point) - { - if ( ! (remote_trace_sync_interval && pseudo_realtime) || terminating ) - return; - - current_sync_point = point; - - loop_over_list(peers, i) - if ( peers[i]->phase == Peer::RUNNING && - ! SendToChild(MSG_SYNC_POINT, peers[i], - 1, current_sync_point) ) - return; - - if ( ! syncing_times ) - { - Log(LogInfo, "waiting for peers"); - syncing_times = true; - - loop_over_list(peers, i) - { - // Need to do this once per peer to correctly - // track the number of suspend calls. - net_suspend_processing(); - peers[i]->suspended_processing = true; - } - } - - CheckSyncPoints(); - } - -uint32 RemoteSerializer::SendSyncPoint() - { - Log(LogInfo, fmt("reached sync-point %u", current_sync_point)); - SendSyncPoint(current_sync_point + 1); - return current_sync_point; - } - -void RemoteSerializer::SendFinalSyncPoint() - { - Log(LogInfo, fmt("reached end of trace, sending final sync point")); - SendSyncPoint(FINAL_SYNC_POINT); - } - -bool RemoteSerializer::Terminate() - { - loop_over_list(peers, i) - { - FlushPrintBuffer(peers[i]); - FlushLogBuffer(peers[i]); - } - - Log(LogInfo, fmt("terminating...")); - - return terminating = SendToChild(MSG_TERMINATE, 0, 0); - } - -bool RemoteSerializer::StopListening() - { - if ( ! listening ) - return true; - - if ( ! SendToChild(MSG_LISTEN_STOP, 0, 0) ) - return false; - - listening = false; - SetClosed(! 
IsActive()); - return true; - } - -void RemoteSerializer::Register(ID* id) - { - DBG_LOG(DBG_STATE, "&synchronized %s", id->Name()); - Unregister(id); - Ref(id); - sync_ids.append(id); - } - -void RemoteSerializer::Unregister(ID* id) - { - loop_over_list(sync_ids, i) - if ( streq(sync_ids[i]->Name(), id->Name()) ) - { - Unref(sync_ids[i]); - sync_ids.remove_nth(i); - break; - } - } - -void RemoteSerializer::GetFds(iosource::FD_Set* read, iosource::FD_Set* write, - iosource::FD_Set* except) - { - read->Insert(io->Fd()); - read->Insert(io->ExtraReadFDs()); - - if ( io->CanWrite() ) - write->Insert(io->Fd()); - } - -double RemoteSerializer::NextTimestamp(double* local_network_time) - { - Poll(false); - - if ( received_logs > 0 ) - { - // If we processed logs last time, assume there's more. - SetIdle(false); - received_logs = 0; - return timer_mgr->Time(); - } - - double et = events.length() ? events[0]->time : -1; - double pt = packets.length() ? packets[0]->time : -1; - - if ( ! et ) - et = timer_mgr->Time(); - - if ( ! pt ) - pt = timer_mgr->Time(); - - if ( packets.length() ) - SetIdle(false); - - if ( et >= 0 && (et < pt || pt < 0) ) - return et; - - if ( pt >= 0 ) - { - // Return packet time as network time. - *local_network_time = packets[0]->p->time; - return pt; - } - - return -1; - } - -TimerMgr::Tag* RemoteSerializer::GetCurrentTag() - { - return packets.length() ? &packets[0]->p->tag : 0; - } - -void RemoteSerializer::Process() - { - Poll(false); - - int i = 0; - while ( events.length() ) - { - if ( max_remote_events_processed && - ++i > max_remote_events_processed ) - break; - - BufferedEvent* be = events[0]; - ::Event* event = new ::Event(be->handler, be->args, be->src); - - Peer* old_current_peer = current_peer; - // Prevent the source peer from getting the event back. - current_peer = LookupPeer(be->src, true); // may be null. - mgr.Dispatch(event, ! forward_remote_events); - current_peer = old_current_peer; - - assert(events[0] == be); - delete be; - events.remove_nth(0); - } - - // We shouldn't pass along more than one packet, as otherwise the - // timer mgr will not advance. - if ( packets.length() ) - { - BufferedPacket* bp = packets[0]; - const Packet* p = bp->p; - - // FIXME: The following chunk of code is copied from - // net_packet_dispatch(). We should change that function - // to accept an IOSource instead of the PktSrc. - net_update_time(p->time); - - SegmentProfiler(segment_logger, "expiring-timers"); - TimerMgr* tmgr = sessions->LookupTimerMgr(GetCurrentTag()); - current_dispatched = - tmgr->Advance(network_time, max_timer_expires); - - current_pkt = p; - current_pktsrc = 0; - current_iosrc = this; - sessions->NextPacket(p->time, p); - mgr.Drain(); - - current_pkt = 0; - current_iosrc = 0; - - delete p; - delete bp; - packets.remove_nth(0); - } - - if ( packets.length() ) - SetIdle(false); - } - -void RemoteSerializer::Finish() - { - if ( ! using_communication ) - return; - - do - Poll(true); - while ( io->CanWrite() ); - - loop_over_list(peers, i) - { - CloseConnection(peers[i]); - } - } - -bool RemoteSerializer::Poll(bool may_block) - { - if ( ! child_pid ) - return true; - - // See if there's any peer waiting for initial state synchronization. - if ( sync_pending.length() && ! 
in_sync ) - { - Peer* p = sync_pending[0]; - sync_pending.remove_nth(0); - HandshakeDone(p); - } - - io->Flush(); - SetIdle(false); - - switch ( msgstate ) { - case TYPE: - { - current_peer = 0; - current_msgtype = MSG_NONE; - - // CMsg follows - ChunkedIO::Chunk* c; - READ_CHUNK_FROM_CHILD(c); - - CMsg* msg = (CMsg*) c->data; - current_peer = LookupPeer(msg->Peer(), false); - current_id = msg->Peer(); - current_msgtype = msg->Type(); - current_args = 0; - - delete c; - - switch ( current_msgtype ) { - case MSG_CLOSE: - case MSG_CLOSE_ALL: - case MSG_LISTEN_STOP: - case MSG_PHASE_DONE: - case MSG_TERMINATE: - case MSG_DEBUG_DUMP: - case MSG_REQUEST_LOGS: - { - // No further argument chunk. - msgstate = TYPE; - return DoMessage(); - } - case MSG_VERSION: - case MSG_SERIAL: - case MSG_ERROR: - case MSG_CONNECT_TO: - case MSG_CONNECTED: - case MSG_REQUEST_EVENTS: - case MSG_REQUEST_SYNC: - case MSG_LISTEN: - case MSG_STATS: - case MSG_CAPTURE_FILTER: - case MSG_PING: - case MSG_PONG: - case MSG_CAPS: - case MSG_COMPRESS: - case MSG_LOG: - case MSG_SYNC_POINT: - case MSG_REMOTE_PRINT: - case MSG_LOG_CREATE_WRITER: - case MSG_LOG_WRITE: - { - // One further argument chunk. - msgstate = ARGS; - return Poll(may_block); - } - - case MSG_NONE: - InternalCommError(fmt("unexpected msg type %d", - current_msgtype)); - return true; - - default: - InternalCommError(fmt("unknown msg type %d in Poll()", - current_msgtype)); - return true; - } - } - - case ARGS: - { - // Argument chunk follows. - ChunkedIO::Chunk* c; - READ_CHUNK_FROM_CHILD(c); - - current_args = c; - msgstate = TYPE; - bool result = DoMessage(); - - delete current_args; - current_args = 0; - - return result; - } - - default: - reporter->InternalError("unknown msgstate"); - } - - reporter->InternalError("cannot be reached"); - return false; - } - -bool RemoteSerializer::DoMessage() - { - if ( current_peer && - (current_peer->state == Peer::CLOSING || - current_peer->state == Peer::CLOSED) && - is_peer_msg(current_msgtype) ) - { - // We shut the connection to this peer down, - // so we ignore all further messages. - DEBUG_COMM(fmt("parent: ignoring %s due to shutdown of peer #%" PRI_SOURCE_ID, - msgToStr(current_msgtype), - current_peer ? current_peer->id : 0)); - return true; - } - - DEBUG_COMM(fmt("parent: %s from child; peer is #%" PRI_SOURCE_ID, - msgToStr(current_msgtype), - current_peer ? current_peer->id : 0)); - - if ( current_peer && - (current_msgtype < 0 || current_msgtype > MSG_ID_MAX) ) - { - Log(LogError, "garbage message from peer, shutting down", - current_peer); - CloseConnection(current_peer); - return true; - } - - // As long as we haven't finished the version - // handshake, no other messages than MSG_VERSION - // are allowed from peer. 
- if ( current_peer && current_peer->phase == Peer::SETUP && - is_peer_msg(current_msgtype) && current_msgtype != MSG_VERSION ) - { - Log(LogError, "peer did not send version", current_peer); - CloseConnection(current_peer); - return true; - } - - switch ( current_msgtype ) { - case MSG_CLOSE: - PeerDisconnected(current_peer); - return true; - - case MSG_CONNECTED: - return ProcessConnected(); - - case MSG_SERIAL: - return ProcessSerialization(); - - case MSG_REQUEST_EVENTS: - return ProcessRequestEventsMsg(); - - case MSG_REQUEST_SYNC: - return ProcessRequestSyncMsg(); - - case MSG_PHASE_DONE: - return ProcessPhaseDone(); - - case MSG_ERROR: - return ProcessLogMsg(true); - - case MSG_LOG: - return ProcessLogMsg(false); - - case MSG_STATS: - return ProcessStatsMsg(); - - case MSG_CAPTURE_FILTER: - return ProcessCaptureFilterMsg(); - - case MSG_VERSION: - return ProcessVersionMsg(); - - case MSG_PING: - return ProcessPingMsg(); - - case MSG_PONG: - return ProcessPongMsg(); - - case MSG_CAPS: - return ProcessCapsMsg(); - - case MSG_SYNC_POINT: - return ProcessSyncPointMsg(); - - case MSG_TERMINATE: - assert(terminating); - iosource_mgr->Terminate(); - return true; - - case MSG_REMOTE_PRINT: - return ProcessRemotePrint(); - - case MSG_LOG_CREATE_WRITER: - return ProcessLogCreateWriter(); - - case MSG_LOG_WRITE: - return ProcessLogWrite(); - - case MSG_REQUEST_LOGS: - return ProcessRequestLogs(); - - default: - DEBUG_COMM(fmt("unexpected msg type: %d", - int(current_msgtype))); - InternalCommError(fmt("unexpected msg type in DoMessage(): %d", - int(current_msgtype))); - return true; // keep going - } - - reporter->InternalError("cannot be reached"); - return false; - } - -void RemoteSerializer::PeerDisconnected(Peer* peer) - { - assert(peer); - - if ( peer->suspended_processing ) - { - net_continue_processing(); - peer->suspended_processing = false; - } - - if ( peer->state == Peer::CLOSED || peer->state == Peer::INIT ) - return; - - if ( peer->state == Peer::PENDING ) - { - peer->state = Peer::CLOSED; - Log(LogError, "could not connect", peer); - return; - } - - Log(LogInfo, "peer disconnected", peer); - - if ( peer->phase != Peer::SETUP ) - RaiseEvent(remote_connection_closed, peer); - - if ( in_sync == peer ) - in_sync = 0; - - peer->state = Peer::CLOSED; - peer->phase = Peer::UNKNOWN; - peer->cache_in->Clear(); - peer->cache_out->Clear(); - UnregisterHandlers(peer); - } - -void RemoteSerializer::PeerConnected(Peer* peer) - { - if ( peer->state == Peer::CONNECTED ) - return; - - peer->state = Peer::CONNECTED; - peer->phase = Peer::SETUP; - peer->sent_version = Peer::NONE; - peer->sync_requested = Peer::NONE; - peer->handshake_done = Peer::NONE; - - peer->cache_in->Clear(); - peer->cache_out->Clear(); - peer->our_runtime = int(current_time(true) - bro_start_time); - peer->sync_point = 0; - peer->logs_requested = false; - - if ( ! SendCMsgToChild(MSG_VERSION, peer) ) - return; - - int len = 4 * sizeof(uint32) + peer->our_class.size() + 1; - char* data = new char[len]; - uint32* args = (uint32*) data; - - *args++ = htonl(PROTOCOL_VERSION); - *args++ = htonl(peer->cache_out->GetMaxCacheSize()); - *args++ = htonl(DATA_FORMAT_VERSION); - *args++ = htonl(peer->our_runtime); - strcpy((char*) args, peer->our_class.c_str()); - - ChunkedIO::Chunk* c = new ChunkedIO::Chunk(data, len); - - if ( peer->our_class.size() ) - Log(LogInfo, fmt("sending class \"%s\"", peer->our_class.c_str()), peer); - - if ( ! 
SendToChild(c) ) - { - Log(LogError, "can't send version message"); - CloseConnection(peer); - return; - } - - peer->sent_version |= Peer::WE; - Log(LogInfo, "peer connected", peer); - Log(LogInfo, "phase: version", peer); - } - -RecordVal* RemoteSerializer::MakePeerVal(Peer* peer) - { - RecordVal* v = new RecordVal(::peer); - v->Assign(0, val_mgr->GetCount(uint32(peer->id))); - // Sic! Network order for AddrVal, host order for PortVal. - v->Assign(1, new AddrVal(peer->ip)); - v->Assign(2, val_mgr->GetPort(peer->port, TRANSPORT_TCP)); - v->Assign(3, val_mgr->GetFalse()); - v->Assign(4, val_mgr->GetEmptyString()); // set when received - v->Assign(5, peer->peer_class.size() ? - new StringVal(peer->peer_class.c_str()) : 0); - return v; - } - -RemoteSerializer::Peer* RemoteSerializer::AddPeer(const IPAddr& ip, uint16 port, - PeerID id) - { - Peer* peer = new Peer; - peer->id = id != PEER_NONE ? id : id_counter++; - peer->ip = ip; - peer->port = port; - peer->state = Peer::INIT; - peer->phase = Peer::UNKNOWN; - peer->sent_version = Peer::NONE; - peer->sync_requested = Peer::NONE; - peer->handshake_done = Peer::NONE; - peer->orig = false; - peer->accept_state = false; - peer->send_state = false; - peer->logs_requested = false; - peer->caps = 0; - peer->comp_level = 0; - peer->suspended_processing = false; - peer->caps = 0; - peer->val = MakePeerVal(peer); - peer->cache_in = new SerializationCache(MAX_CACHE_SIZE); - peer->cache_out = new SerializationCache(MAX_CACHE_SIZE); - peer->sync_point = 0; - peer->print_buffer = 0; - peer->print_buffer_used = 0; - peer->log_buffer = new char[LOG_BUFFER_SIZE]; - peer->log_buffer_used = 0; - - peers.append(peer); - Log(LogInfo, "added peer", peer); - - return peer; - } - -void RemoteSerializer::UnregisterHandlers(Peer* peer) - { - // Unregister the peers for the EventHandlers. - loop_over_list(peer->handlers, i) - { - peer->handlers[i]->RemoveRemoteHandler(peer->id); - } - } - -void RemoteSerializer::RemovePeer(Peer* peer) - { - if ( peer->suspended_processing ) - { - net_continue_processing(); - peer->suspended_processing = false; - } - - peers.remove(peer); - UnregisterHandlers(peer); - - Log(LogInfo, "removed peer", peer); - - int id = peer->id; - Unref(peer->val); - delete [] peer->print_buffer; - delete [] peer->log_buffer; - delete peer->cache_in; - delete peer->cache_out; - delete peer; - - SetClosed(! IsActive()); - - if ( in_sync == peer ) - in_sync = 0; - } - -RemoteSerializer::Peer* RemoteSerializer::LookupPeer(PeerID id, - bool only_if_connected) - { - Peer* peer = 0; - loop_over_list(peers, i) - if ( peers[i]->id == id ) - { - peer = peers[i]; - break; - } - - if ( ! only_if_connected || (peer && peer->state == Peer::CONNECTED) ) - return peer; - else - return 0; - } - -bool RemoteSerializer::ProcessVersionMsg() - { - uint32* args = (uint32*) current_args->data; - uint32 version = ntohl(args[0]); - uint32 data_version = ntohl(args[2]); - - if ( PROTOCOL_VERSION != version ) - { - Log(LogError, fmt("remote protocol version mismatch: got %d, but expected %d", - version, PROTOCOL_VERSION), current_peer); - CloseConnection(current_peer); - return true; - } - - // For backwards compatibility, data_version may be null. 
- if ( data_version && DATA_FORMAT_VERSION != data_version ) - { - Log(LogError, fmt("remote data version mismatch: got %d, but expected %d", - data_version, DATA_FORMAT_VERSION), - current_peer); - CloseConnection(current_peer); - return true; - } - - uint32 cache_size = ntohl(args[1]); - current_peer->cache_in->SetMaxCacheSize(cache_size); - current_peer->runtime = ntohl(args[3]); - - current_peer->sent_version |= Peer::PEER; - - if ( current_args->len > 4 * sizeof(uint32) ) - { - // The peer sends us a class string. - const char* pclass = (const char*) &args[4]; - current_peer->peer_class = pclass; - if ( *pclass ) - Log(LogInfo, fmt("peer sent class \"%s\"", pclass), current_peer); - if ( current_peer->val ) - current_peer->val->Assign(5, new StringVal(pclass)); - } - - assert(current_peer->sent_version == Peer::BOTH); - current_peer->phase = Peer::HANDSHAKE; - Log(LogInfo, "phase: handshake", current_peer); - - if ( ! SendCapabilities(current_peer) ) - return false; - - RaiseEvent(remote_connection_established, current_peer); - - return true; - } - -bool RemoteSerializer::EnterPhaseRunning(Peer* peer) - { - if ( in_sync == peer ) - in_sync = 0; - - peer->phase = Peer::RUNNING; - Log(LogInfo, "phase: running", peer); - RaiseEvent(remote_connection_handshake_done, peer); - - if ( remote_trace_sync_interval ) - { - loop_over_list(peers, i) - { - if ( ! SendToChild(MSG_SYNC_POINT, peers[i], - 1, current_sync_point) ) - return false; - } - } - - return true; - } - -bool RemoteSerializer::ProcessConnected() - { - // IP and port follow. - vector args = tokenize(current_args->data, ','); - - if ( args.size() != 2 ) - { - InternalCommError("ProcessConnected() bad number of arguments"); - return false; - } - - IPAddr host = IPAddr(args[0]); - uint16 port; - - if ( ! atoi_n(args[1].size(), args[1].c_str(), 0, 10, port) ) - { - InternalCommError("ProcessConnected() bad peer port string"); - return false; - } - - if ( ! current_peer ) - { - // The other side connected to one of our listening ports. - current_peer = AddPeer(host, port, current_id); - current_peer->orig = false; - } - else if ( current_peer->orig ) - { - // It's a successful retry. - current_peer->port = port; - current_peer->accept_state = false; - Unref(current_peer->val); - current_peer->val = MakePeerVal(current_peer); - } - - PeerConnected(current_peer); - - ID* descr = global_scope()->Lookup("peer_description"); - if ( ! descr ) - reporter->InternalError("peer_description not defined"); - - SerialInfo info(this); - SendID(&info, current_peer, *descr); - - return true; - } - -bool RemoteSerializer::ProcessRequestEventsMsg() - { - if ( ! current_peer ) - return false; - - // Register new handlers. - char* p = current_args->data; - while ( p < current_args->data + current_args->len ) - { - EventHandler* handler = event_registry->Lookup(p); - if ( handler ) - { - handler->AddRemoteHandler(current_peer->id); - current_peer->handlers.append(handler); - RaiseEvent(remote_event_registered, current_peer, p); - Log(LogInfo, fmt("registered for event %s", p), - current_peer); - - // If the other side requested the print_hook event, - // we initialize the buffer. 
- if ( current_peer->print_buffer == 0 && - streq(p, "print_hook") ) - { - current_peer->print_buffer = - new char[PRINT_BUFFER_SIZE]; - current_peer->print_buffer_used = 0; - Log(LogInfo, "initialized print buffer", - current_peer); - } - } - else - Log(LogInfo, fmt("request for unknown event %s", p), - current_peer); - - p += strlen(p) + 1; - } - - return true; - } - -bool RemoteSerializer::ProcessRequestSyncMsg() - { - if ( ! current_peer ) - return false; - - int auth = 0; - uint32* args = (uint32*) current_args->data; - if ( ntohl(args[0]) != 0 ) - { - Log(LogInfo, "peer considers its state authoritative", current_peer); - auth = Peer::AUTH_PEER; - } - - current_peer->sync_requested |= Peer::PEER | auth; - return true; - } - -bool RemoteSerializer::ProcessRequestLogs() - { - if ( ! current_peer ) - return false; - - Log(LogInfo, "peer requested logs", current_peer); - - current_peer->logs_requested = true; - return true; - } - -bool RemoteSerializer::ProcessPhaseDone() - { - switch ( current_peer->phase ) { - case Peer::HANDSHAKE: - { - current_peer->handshake_done |= Peer::PEER; - - if ( current_peer->handshake_done == Peer::BOTH ) - HandshakeDone(current_peer); - break; - } - - case Peer::SYNC: - { - // Make sure that the other side is supposed to sent us this. - if ( current_peer->send_state ) - { - Log(LogError, "unexpected phase_done in sync phase from peer", current_peer); - CloseConnection(current_peer); - return false; - } - - if ( ! EnterPhaseRunning(current_peer) ) - { - if ( current_peer->suspended_processing ) - { - net_continue_processing(); - current_peer->suspended_processing = false; - } - - return false; - } - - if ( current_peer->suspended_processing ) - { - net_continue_processing(); - current_peer->suspended_processing = false; - } - - break; - } - - default: - Log(LogError, "unexpected phase_done", current_peer); - CloseConnection(current_peer); - } - - return true; - } - -bool RemoteSerializer::HandshakeDone(Peer* peer) - { - if ( peer->caps & Peer::COMPRESSION && peer->comp_level > 0 ) - if ( ! SendToChild(MSG_COMPRESS, peer, 1, peer->comp_level) ) - return false; - - if ( ! (peer->caps & Peer::PID_64BIT) ) - Log(LogInfo, "peer does not support 64bit PIDs; using compatibility mode", peer); - - if ( (peer->caps & Peer::NEW_CACHE_STRATEGY) ) - Log(LogInfo, "peer supports keep-in-cache; using that", peer); - - if ( (peer->caps & Peer::BROCCOLI_PEER) ) - Log(LogInfo, "peer is a Broccoli", peer); - - if ( peer->logs_requested ) - log_mgr->SendAllWritersTo(peer->id); - - if ( peer->sync_requested != Peer::NONE ) - { - if ( in_sync ) - { - Log(LogInfo, "another sync in progress, waiting...", - peer); - sync_pending.append(peer); - return true; - } - - if ( (peer->sync_requested & Peer::AUTH_PEER) && - (peer->sync_requested & Peer::AUTH_WE) ) - { - Log(LogError, "misconfiguration: authoritative state on both sides", - current_peer); - CloseConnection(peer); - return false; - } - - in_sync = peer; - peer->phase = Peer::SYNC; - - // If only one side has requested state synchronization, - // it will get all the state from the peer. - // - // If both sides have shown interest, the one considering - // itself authoritative will send the state. If none is - // authoritative, the peer which is running longest sends - // its state. - // - if ( (peer->sync_requested & Peer::BOTH) != Peer::BOTH ) - { - // One side. 
- if ( peer->sync_requested & Peer::PEER ) - peer->send_state = true; - else if ( peer->sync_requested & Peer::WE ) - peer->send_state = false; - else - reporter->InternalError("illegal sync_requested value"); - } - else - { - // Both. - if ( peer->sync_requested & Peer::AUTH_WE ) - peer->send_state = true; - else if ( peer->sync_requested & Peer::AUTH_PEER ) - peer->send_state = false; - else - { - if ( peer->our_runtime == peer->runtime ) - peer->send_state = peer->orig; - else - peer->send_state = (peer->our_runtime > - peer->runtime); - } - } - - Log(LogInfo, fmt("phase: sync (%s)", (peer->send_state ? "sender" : "receiver")), peer); - - if ( peer->send_state ) - { - SerialInfo* info = new SerialInfo(this); - SendAllSynchronized(peer, info); - } - - else - { - // Suspend until we got everything. - net_suspend_processing(); - peer->suspended_processing = true; - } - } - else - return EnterPhaseRunning(peer); - - return true; - } - -bool RemoteSerializer::ProcessPingMsg() - { - if ( ! current_peer ) - return false; - - if ( ! SendToChild(MSG_PONG, current_peer, - current_args->data, current_args->len) ) - return false; - - return true; - } - -bool RemoteSerializer::ProcessPongMsg() - { - if ( ! current_peer ) - return false; - - ping_args* args = (ping_args*) current_args->data; - - val_list* vl = new val_list; - vl->append(current_peer->val->Ref()); - vl->append(val_mgr->GetCount((unsigned int) ntohl(args->seq))); - vl->append(new Val(current_time(true) - ntohd(args->time1), - TYPE_INTERVAL)); - vl->append(new Val(ntohd(args->time2), TYPE_INTERVAL)); - vl->append(new Val(ntohd(args->time3), TYPE_INTERVAL)); - mgr.QueueEvent(remote_pong, vl); - return true; - } - -bool RemoteSerializer::ProcessCapsMsg() - { - if ( ! current_peer ) - return false; - - uint32* args = (uint32*) current_args->data; - current_peer->caps = ntohl(args[0]); - return true; - } - -bool RemoteSerializer::ProcessLogMsg(bool is_error) - { - Log(is_error ? LogError : LogInfo, current_args->data, 0, LogChild); - return true; - } - -bool RemoteSerializer::ProcessStatsMsg() - { - // Take the opportunity to log our stats, too. - LogStats(); - - // Split the concatenated child stats into indiviual log messages. - int count = 0; - for ( char* p = current_args->data; - p < current_args->data + current_args->len; p += strlen(p) + 1 ) - Log(LogInfo, fmt("child statistics: [%d] %s", count++, p), - current_peer); - - return true; - } - -bool RemoteSerializer::ProcessCaptureFilterMsg() - { - if ( ! current_peer ) - return false; - - RaiseEvent(remote_capture_filter, current_peer, current_args->data); - return true; - } - -bool RemoteSerializer::CheckSyncPoints() - { - if ( ! current_sync_point ) - return false; - - int ready = 0; - - loop_over_list(peers, i) - if ( peers[i]->sync_point >= current_sync_point ) - ready++; - - if ( ready < remote_trace_sync_peers ) - return false; - - if ( current_sync_point == FINAL_SYNC_POINT ) - { - Log(LogInfo, fmt("all peers reached final sync-point, going to finish")); - Terminate(); - } - else - Log(LogInfo, fmt("all peers reached sync-point %u", - current_sync_point)); - - if ( syncing_times ) - { - loop_over_list(peers, i) - { - if ( peers[i]->suspended_processing ) - { - net_continue_processing(); - peers[i]->suspended_processing = false; - } - } - - syncing_times = false; - } - - return true; - } - -bool RemoteSerializer::ProcessSyncPointMsg() - { - if ( ! 
current_peer ) - return false; - - uint32* args = (uint32*) current_args->data; - uint32 count = ntohl(args[0]); - - current_peer->sync_point = max(current_peer->sync_point, count); - - if ( current_peer->sync_point == FINAL_SYNC_POINT ) - Log(LogInfo, fmt("reached final sync-point"), current_peer); - else - Log(LogInfo, fmt("reached sync-point %u", current_peer->sync_point), current_peer); - - if ( syncing_times ) - CheckSyncPoints(); - - return true; - } - -bool RemoteSerializer::ProcessSerialization() - { - if ( current_peer->state == Peer::CLOSING ) - return false; - - SetCache(current_peer->cache_in); - UnserialInfo info(this); - - bool accept_state = current_peer->accept_state; - -#if 0 - // If processing is suspended, we unserialize the data but throw - // it away. - if ( current_peer->phase == Peer::RUNNING && - net_is_processing_suspended() ) - accept_state = false; -#endif - - assert(current_args); - info.chunk = current_args; - - info.install_globals = accept_state; - info.install_conns = accept_state; - info.ignore_callbacks = ! accept_state; - - if ( current_peer->phase != Peer::RUNNING ) - info.id_policy = UnserialInfo::InstantiateNew; - else - info.id_policy = accept_state ? - UnserialInfo::CopyNewToCurrent : - UnserialInfo::Keep; - - if ( ! (current_peer->caps & Peer::PID_64BIT) || - current_peer->phase != Peer::RUNNING ) - info.pid_32bit = true; - - if ( (current_peer->caps & Peer::NEW_CACHE_STRATEGY) && - current_peer->phase == Peer::RUNNING ) - info.new_cache_strategy = true; - - if ( current_peer->caps & Peer::BROCCOLI_PEER ) - info.broccoli_peer = true; - - if ( ! forward_remote_state_changes ) - ignore_accesses = true; - - source_peer = current_peer; - int i = Unserialize(&info); - source_peer = 0; - - if ( ! forward_remote_state_changes ) - ignore_accesses = false; - - if ( i < 0 ) - { - Log(LogError, "unserialization error", current_peer); - CloseConnection(current_peer); - // Error - return false; - } - - return true; - } - -bool RemoteSerializer::FlushPrintBuffer(Peer* p) - { - if ( p->state == Peer::CLOSING ) - return false; - - if ( ! (p->print_buffer && p->print_buffer_used) ) - return true; - - SendToChild(MSG_REMOTE_PRINT, p, p->print_buffer, p->print_buffer_used); - - p->print_buffer = new char[PRINT_BUFFER_SIZE]; - p->print_buffer_used = 0; - return true; - } - -bool RemoteSerializer::SendPrintHookEvent(BroFile* f, const char* txt, size_t len) - { - loop_over_list(peers, i) - { - Peer* p = peers[i]; - - if ( ! p->print_buffer ) - continue; - - const char* fname = f->Name(); - if ( ! fname ) - continue; // not a managed file. - - // We cut off everything after the max buffer size. That - // makes the code a bit easier, and we shouldn't have such - // long lines anyway. - len = min(len, PRINT_BUFFER_SIZE - strlen(fname) - 2); - - // If there's not enough space in the buffer, flush it. - - int need = strlen(fname) + 1 + len + 1; - if ( p->print_buffer_used + need > PRINT_BUFFER_SIZE ) - { - if ( ! 
FlushPrintBuffer(p) ) - return false; - } - - assert(p->print_buffer_used + need <= PRINT_BUFFER_SIZE); - - char* dst = p->print_buffer + p->print_buffer_used; - strcpy(dst, fname); - dst += strlen(fname) + 1; - memcpy(dst, txt, len); - dst += len; - *dst++ = '\0'; - - p->print_buffer_used = dst - p->print_buffer; - } - - return true; - } - -bool RemoteSerializer::ProcessRemotePrint() - { - if ( current_peer->state == Peer::CLOSING ) - return false; - - const char* p = current_args->data; - while ( p < current_args->data + current_args->len ) - { - const char* fname = p; - p += strlen(p) + 1; - const char* txt = p; - p += strlen(p) + 1; - - val_list* vl = new val_list(2); - BroFile* f = BroFile::GetFile(fname); - Ref(f); - vl->append(new Val(f)); - vl->append(new StringVal(txt)); - GotEvent("print_hook", -1.0, print_hook, vl); - } - - return true; - } - -bool RemoteSerializer::SendLogCreateWriter(EnumVal* id, EnumVal* writer, const logging::WriterBackend::WriterInfo& info, int num_fields, const threading::Field* const * fields) - { - loop_over_list(peers, i) - { - SendLogCreateWriter(peers[i]->id, id, writer, info, num_fields, fields); - } - - return true; - } - -bool RemoteSerializer::SendLogCreateWriter(PeerID peer_id, EnumVal* id, EnumVal* writer, const logging::WriterBackend::WriterInfo& info, int num_fields, const threading::Field* const * fields) - { - SetErrorDescr("logging"); - - ChunkedIO::Chunk* c = 0; - - Peer* peer = LookupPeer(peer_id, true); - if ( ! peer ) - return false; - - if ( peer->phase != Peer::HANDSHAKE && peer->phase != Peer::RUNNING ) - return false; - - if ( ! peer->logs_requested ) - return false; - - BinarySerializationFormat fmt; - - fmt.StartWrite(); - - bool success = fmt.Write(id->AsEnum(), "id") && - fmt.Write(writer->AsEnum(), "writer") && - fmt.Write(num_fields, "num_fields") && - info.Write(&fmt); - - if ( ! success ) - goto error; - - for ( int i = 0; i < num_fields; i++ ) - { - if ( ! fields[i]->Write(&fmt) ) - goto error; - } - - if ( ! SendToChild(MSG_LOG_CREATE_WRITER, peer, 0) ) - goto error; - - c = new ChunkedIO::Chunk; - c->len = fmt.EndWrite(&c->data); - c->free_func = ChunkedIO::Chunk::free_func_free; - - if ( ! SendToChild(c) ) - goto error; - - return true; - -error: - delete c; - - FatalError(io->Error()); - return false; - } - -bool RemoteSerializer::SendLogWrite(EnumVal* id, EnumVal* writer, string path, int num_fields, const threading::Value* const * vals) - { - loop_over_list(peers, i) - { - SendLogWrite(peers[i], id, writer, path, num_fields, vals); - } - - return true; - } - -bool RemoteSerializer::SendLogWrite(Peer* peer, EnumVal* id, EnumVal* writer, string path, int num_fields, const threading::Value* const * vals) - { - if ( peer->phase != Peer::HANDSHAKE && peer->phase != Peer::RUNNING ) - return false; - - if ( ! peer->logs_requested ) - return false; - - if ( ! peer->log_buffer ) - // Peer shutting down. - return false; - - // Serialize the log record entry. - - BinarySerializationFormat fmt; - - fmt.StartWrite(); - - bool success = fmt.Write(id->AsEnum(), "id") && - fmt.Write(writer->AsEnum(), "writer") && - fmt.Write(path, "path") && - fmt.Write(num_fields, "num_fields"); - - if ( ! success ) - goto error; - - for ( int i = 0; i < num_fields; i++ ) - { - if ( ! vals[i]->Write(&fmt) ) - goto error; - } - - // Ok, we have the binary data now. - char* data; - int len; - - len = fmt.EndWrite(&data); - - assert(len > 10); - - // Do we have not enough space in the buffer, or was the last flush a - // while ago? If so, flush first. 
- if ( len > (LOG_BUFFER_SIZE - peer->log_buffer_used) || (network_time - last_flush > 1.0) ) - { - if ( ! FlushLogBuffer(peer) ) - { - free(data); - return false; - } - } - - // If the data is actually larger than our complete buffer, just send it out. - if ( len > LOG_BUFFER_SIZE ) - return SendToChild(MSG_LOG_WRITE, peer, data, len, true); - - // Now we have space in the buffer, copy it into there. - memcpy(peer->log_buffer + peer->log_buffer_used, data, len); - peer->log_buffer_used += len; - assert(peer->log_buffer_used <= LOG_BUFFER_SIZE); - - free(data); - - return true; - -error: - FatalError(io->Error()); - return false; - } - -bool RemoteSerializer::FlushLogBuffer(Peer* p) - { - if ( ! p->logs_requested ) - return false; - - last_flush = network_time; - - if ( p->state == Peer::CLOSING ) - return false; - - if ( ! (p->log_buffer && p->log_buffer_used) ) - return true; - - char* data = new char[p->log_buffer_used]; - memcpy(data, p->log_buffer, p->log_buffer_used); - SendToChild(MSG_LOG_WRITE, p, data, p->log_buffer_used); - - p->log_buffer_used = 0; - return true; - } - -bool RemoteSerializer::ProcessLogCreateWriter() - { - if ( current_peer->state == Peer::CLOSING ) - return false; - -#ifdef USE_PERFTOOLS_DEBUG - // Don't track allocations here, they'll be released only after the - // main loop exists. And it's just a tiny amount anyway. - HeapLeakChecker::Disabler disabler; -#endif - - assert(current_args); - - EnumVal* id_val = 0; - EnumVal* writer_val = 0; - threading::Field** fields = 0; - int delete_fields_up_to = -1; - - BinarySerializationFormat fmt; - fmt.StartRead(current_args->data, current_args->len); - - int id, writer; - int num_fields; - logging::WriterBackend::WriterInfo* info = new logging::WriterBackend::WriterInfo(); - - bool success = fmt.Read(&id, "id") && - fmt.Read(&writer, "writer") && - fmt.Read(&num_fields, "num_fields") && - info->Read(&fmt); - - if ( ! success ) - goto error; - - fields = new threading::Field* [num_fields]; - - for ( int i = 0; i < num_fields; i++ ) - { - fields[i] = new threading::Field; - if ( ! fields[i]->Read(&fmt) ) - { - delete_fields_up_to = i + 1; - goto error; - } - } - - fmt.EndRead(); - - id_val = internal_type("Log::ID")->AsEnumType()->GetVal(id); - writer_val = internal_type("Log::Writer")->AsEnumType()->GetVal(writer); - - if ( ! log_mgr->CreateWriterForRemoteLog(id_val, writer_val, info, num_fields, fields) ) - { - info = 0; - fields = 0; - goto error; - } - - Unref(id_val); - Unref(writer_val); - - return true; - -error: - Unref(id_val); - Unref(writer_val); - delete info; - - for ( int i = 0; i < delete_fields_up_to; ++i ) - delete fields[i]; - - delete [] fields; - Error("write error for creating writer"); - return false; - } - -bool RemoteSerializer::ProcessLogWrite() - { - if ( current_peer->state == Peer::CLOSING ) - return false; - - assert(current_args); - - BinarySerializationFormat fmt; - fmt.StartRead(current_args->data, current_args->len); - - while ( fmt.BytesRead() != (int)current_args->len ) - { - // Unserialize one entry. - EnumVal* id_val = 0; - EnumVal* writer_val = 0; - threading::Value** vals = 0; - - int id, writer; - string path; - int num_fields; - - bool success = fmt.Read(&id, "id") && - fmt.Read(&writer, "writer") && - fmt.Read(&path, "path") && - fmt.Read(&num_fields, "num_fields"); - - if ( ! success ) - goto error; - - vals = new threading::Value* [num_fields]; - - for ( int i = 0; i < num_fields; i++ ) - { - vals[i] = new threading::Value; - - if ( ! 
vals[i]->Read(&fmt) ) - { - for ( int j = 0; j <= i; ++j ) - delete vals[j]; - - delete [] vals; - goto error; - } - } - - id_val = internal_type("Log::ID")->AsEnumType()->GetVal(id); - writer_val = internal_type("Log::Writer")->AsEnumType()->GetVal(writer); - - success = log_mgr->WriteFromRemote(id_val, writer_val, path, num_fields, vals); - - Unref(id_val); - Unref(writer_val); - - if ( ! success ) - goto error; - - } - - fmt.EndRead(); - - ++received_logs; - - return true; - -error: - Error("write error for log entry"); - return false; - } - -void RemoteSerializer::GotEvent(const char* name, double time, - EventHandlerPtr event, val_list* args) - { - if ( time >= 0 ) - { - // Marker for being called from ProcessRemotePrint(). - DEBUG_COMM("parent: got event"); - ++stats.events.in; - } - - if ( ! current_peer ) - { - Error("unserialized event from unknown peer"); - delete_vals(args); - return; - } - - BufferedEvent* e = new BufferedEvent; - - // Our time, not the time when the event was generated. - e->time = iosource_mgr->GetPktSrcs().size() ? - time_t(network_time) : time_t(timer_mgr->Time()); - - e->src = current_peer->id; - e->handler = event; - e->args = args; - - // If needed, coerce received record arguments to the expected record type. - if ( e->handler->FType() ) - { - const type_list* arg_types = e->handler->FType()->ArgTypes()->Types(); - loop_over_list(*args, i) - { - Val* v = (*args)[i]; - BroType* v_t = v->Type(); - BroType* arg_t = (*arg_types)[i]; - if ( v_t->Tag() == TYPE_RECORD && arg_t->Tag() == TYPE_RECORD ) - { - if ( ! same_type(v_t, arg_t) ) - { - Val* nv = v->AsRecordVal()->CoerceTo(arg_t->AsRecordType()); - if ( nv ) - { - args->replace(i, nv); - Unref(v); - } - } - } - } - } - - events.append(e); - } - -void RemoteSerializer::GotFunctionCall(const char* name, double time, - Func* function, val_list* args) - { - DEBUG_COMM("parent: got function call"); - ++stats.events.in; - - if ( ! current_peer ) - { - Error("unserialized function from unknown peer"); - delete_vals(args); - return; - } - - try - { - function->Call(args); - } - - catch ( InterpreterException& e ) - { /* Already reported. */ } - } - -void RemoteSerializer::GotID(ID* id, Val* val) - { - ++stats.ids.in; - - if ( ! current_peer ) - { - Error("unserialized id from unknown peer"); - Unref(id); - return; - } - - if ( current_peer->phase == Peer::HANDSHAKE && - streq(id->Name(), "peer_description") ) - { - if ( val->Type()->Tag() != TYPE_STRING ) - { - Error("peer_description not a string"); - Unref(id); - return; - } - - const char* desc = val->AsString()->CheckString(); - current_peer->val->Assign(4, new StringVal(desc)); - - Log(LogInfo, fmt("peer_description is %s", *desc ? desc : "not set"), - current_peer); - - Unref(id); - return; - } - - if ( id->Name()[0] == '#' ) - { - // This is a globally unique, non-user-visible ID. - - // Only MutableVals can be bound to names starting with '#'. - assert(val->IsMutableVal()); - - // It must be already installed in the global namespace: - // either we saw it before, or MutableVal::Unserialize() - // installed it. - assert(global_scope()->Lookup(id->Name())); - - // Only synchronized values can arrive here. - assert(((MutableVal*) val)->GetProperties() & MutableVal::SYNCHRONIZED); - - DBG_LOG(DBG_COMM, "got ID %s from peer\n", id->Name()); - } - - Unref(id); - } - -void RemoteSerializer::GotConnection(Connection* c) - { - ++stats.conns.in; - - // Nothing else to-do. Connection will be installed automatically - // (if allowed). 
- - Unref(c); - } - -void RemoteSerializer::GotStateAccess(StateAccess* s) - { - ++stats.accesses.in; - - ODesc d; - DBG_LOG(DBG_COMM, "got StateAccess: %s", (s->Describe(&d), d.Description())); - - if ( ! current_peer ) - { - Error("unserialized function from unknown peer"); - return; - } - - if ( current_peer->sync_requested & Peer::WE ) - s->Replay(); - - delete s; - } - -void RemoteSerializer::GotTimer(Timer* s) - { - reporter->Error("RemoteSerializer::GotTimer not implemented"); - } - -void RemoteSerializer::GotPacket(Packet* p) - { - ++stats.packets.in; - - BufferedPacket* bp = new BufferedPacket; - bp->time = time_t(timer_mgr->Time()); - bp->p = p; - packets.append(bp); - } - -void RemoteSerializer::Log(LogLevel level, const char* msg) - { - Log(level, msg, 0, LogParent); - } - -void RemoteSerializer::Log(LogLevel level, const char* msg, Peer* peer, - LogSrc src) - { - if ( peer ) - { - val_list* vl = new val_list(); - vl->append(peer->val->Ref()); - vl->append(val_mgr->GetCount(level)); - vl->append(val_mgr->GetCount(src)); - vl->append(new StringVal(msg)); - mgr.QueueEvent(remote_log_peer, vl); - } - else - { - val_list* vl = new val_list(); - vl->append(val_mgr->GetCount(level)); - vl->append(val_mgr->GetCount(src)); - vl->append(new StringVal(msg)); - mgr.QueueEvent(remote_log, vl); - } - -#ifdef DEBUG - const int BUFSIZE = 1024; - char buffer[BUFSIZE]; - int len = 0; - - if ( peer ) - len += snprintf(buffer + len, sizeof(buffer) - len, "[#%d/%s:%d] ", - int(peer->id), peer->ip.AsURIString().c_str(), - peer->port); - - len += safe_snprintf(buffer + len, sizeof(buffer) - len, "%s", msg); - - DEBUG_COMM(fmt("parent: %.6f %s", current_time(), buffer)); -#endif - } - -void RemoteSerializer::RaiseEvent(EventHandlerPtr event, Peer* peer, - const char* arg) - { - val_list* vl = new val_list; - - if ( peer ) - { - Ref(peer->val); - vl->append(peer->val); - } - else - { - Val* v = mgr.GetLocalPeerVal(); - v->Ref(); - vl->append(v); - } - - if ( arg ) - vl->append(new StringVal(arg)); - - // If we only have remote sources, the network time - // will not increase as long as no peers are connected. - // Therefore, we send these events immediately. - mgr.Dispatch(new Event(event, vl, PEER_LOCAL)); - } - -void RemoteSerializer::LogStats() - { - if ( ! io ) - return; - - char buffer[512]; - io->Stats(buffer, 512); - Log(LogInfo, fmt("parent statistics: %s events=%lu/%lu operations=%lu/%lu", - buffer, stats.events.in, stats.events.out, - stats.accesses.in, stats.accesses.out)); - } - -RecordVal* RemoteSerializer::GetPeerVal(PeerID id) - { - Peer* peer = LookupPeer(id, true); - if ( ! peer ) - return 0; - - Ref(peer->val); - return peer->val; - } - -void RemoteSerializer::ChildDied() - { - Log(LogError, "child died"); - SetClosed(true); - child_pid = 0; - - // Shut down the main process as well. - terminate_processing(); - } - -bool RemoteSerializer::SendCMsgToChild(char msg_type, Peer* peer) - { - if ( ! sendCMsg(io, msg_type, peer ? peer->id : PEER_NONE) ) - { - reporter->Warning("can't send message of type %d: %s", - msg_type, io->Error()); - return false; - } - return true; - } - -bool RemoteSerializer::SendToChild(char type, Peer* peer, char* str, int len, - bool delete_with_free) - { - DEBUG_COMM(fmt("parent: (->child) %s (#%" PRI_SOURCE_ID ", %s)", msgToStr(type), peer ? peer->id : PEER_NONE, str)); - - if ( child_pid && sendToIO(io, type, peer ? peer->id : PEER_NONE, str, len, - delete_with_free) ) - return true; - - if ( delete_with_free ) - free(str); - else - delete [] str; - - if ( ! 
child_pid ) - return false; - - if ( io->Eof() ) - ChildDied(); - - FatalError(io->Error()); - return false; - } - -bool RemoteSerializer::SendToChild(char type, Peer* peer, int nargs, ...) - { - va_list ap; - -#ifdef DEBUG - va_start(ap, nargs); - DEBUG_COMM(fmt("parent: (->child) %s (#%" PRI_SOURCE_ID ",%s)", - msgToStr(type), peer ? peer->id : PEER_NONE, fmt_uint32s(nargs, ap))); - va_end(ap); -#endif - - if ( child_pid ) - { - va_start(ap, nargs); - bool ret = sendToIO(io, type, peer ? peer->id : PEER_NONE, nargs, ap); - va_end(ap); - - if ( ret ) - return true; - } - - if ( ! child_pid ) - return false; - - if ( io->Eof() ) - ChildDied(); - - FatalError(io->Error()); - return false; - } - -bool RemoteSerializer::SendToChild(ChunkedIO::Chunk* c) - { - DEBUG_COMM(fmt("parent: (->child) chunk of size %d", c->len)); - - if ( child_pid && sendToIO(io, c) ) - return true; - - c->free_func(c->data); - c->data = 0; - - if ( ! child_pid ) - return false; - - if ( io->Eof() ) - ChildDied(); - - FatalError(io->Error()); - return false; - } - -void RemoteSerializer::FatalError(const char* msg) - { - msg = fmt("fatal error, shutting down communication: %s", msg); - Log(LogError, msg); - reporter->Error("%s", msg); - - SetClosed(true); - - if ( kill(child_pid, SIGQUIT) < 0 ) - reporter->Warning("warning: cannot kill child pid %d, %s", child_pid, strerror(errno)); - - child_pid = 0; - using_communication = false; - io->Clear(); - - loop_over_list(peers, i) - { - // Make perftools happy. - Peer* p = peers[i]; - delete [] p->log_buffer; - delete [] p->print_buffer; - p->log_buffer = p->print_buffer = 0; - } - } - -bool RemoteSerializer::IsActive() - { - if ( listening ) - return true; - - loop_over_list(peers, i) - if ( peers[i]->state == Peer::PENDING || - peers[i]->state == Peer::CONNECTED ) - return true; - - return false; - } - -void RemoteSerializer::ReportError(const char* msg) - { - if ( current_peer && current_peer->phase != Peer::SETUP ) - RaiseEvent(remote_connection_error, current_peer, msg); - Log(LogError, msg, current_peer); - } - -void RemoteSerializer::InternalCommError(const char* msg) - { -#ifdef DEBUG_COMMUNICATION - DumpDebugData(); -#else - reporter->InternalError("%s", msg); -#endif - } - -#ifdef DEBUG_COMMUNICATION - -void RemoteSerializer::DumpDebugData() - { - Log(LogError, "dumping debug data and terminating ..."); - io->DumpDebugData("comm-dump.parent", true); - io->DumpDebugData("comm-dump.parent", false); - SendToChild(MSG_DEBUG_DUMP, 0, 0); - Terminate(); - } - -static ChunkedIO* openDump(const char* file) - { - int fd = open(file, O_RDONLY, 0600); - - if ( fd < 0 ) - { - reporter->Error("cannot open %s: %s\n", file, strerror(errno)); - return 0; - } - - return new ChunkedIOFd(fd, "dump-file"); - } - -void RemoteSerializer::ReadDumpAsMessageType(const char* file) - { - ChunkedIO* io = openDump(file); - if ( ! io ) - return; - - ChunkedIO::Chunk* chunk; - - if ( ! io->Read(&chunk, true ) ) - { - reporter->Error("cannot read %s: %s\n", file, strerror(errno)); - return; - } - - CMsg* msg = (CMsg*) chunk->data; - - delete [] chunk->data; - delete io; - } - -void RemoteSerializer::ReadDumpAsSerialization(const char* file) - { - FileSerializer s; - UnserialInfo info(&s); - info.print = stdout; - info.install_uniques = info.ignore_callbacks = true; - s.Read(&info, file, false); - } - -#endif - -//////////////////////////// - -// If true (set by signal handler), we will log some stats to parent. 
-static bool log_stats = false; -static bool log_prof = false; - -// How often stats are sent (in seconds). -// Perhaps we should make this configurable... -const int STATS_INTERVAL = 60; - -static RETSIGTYPE sig_handler_log(int signo) - { - // SIGALRM is the only one we get. - log_stats = true; - } - -static RETSIGTYPE sig_handler_prof(int signo) - { - log_prof = true; - } - -SocketComm::SocketComm() - { - io = 0; - - // We start the ID counter high so that IDs assigned by us - // (hopefully) don't conflict with those of our parent. - id_counter = 10000; - parent_peer = 0; - parent_msgstate = TYPE; - parent_id = RemoteSerializer::PEER_NONE; - parent_msgtype = 0; - parent_args = 0; - shutting_conns_down = false; - terminating = false; - killing = false; - - listen_port = 0; - listen_ssl = false; - enable_ipv6 = false; - bind_retry_interval = 0; - listen_next_try = 0; - - // We don't want to use the signal handlers of our parent. - (void) setsignal(SIGTERM, SIG_DFL); - (void) setsignal(SIGINT, SIG_DFL); - (void) setsignal(SIGUSR1, SIG_DFL); - (void) setsignal(SIGUSR2, SIG_DFL); - (void) setsignal(SIGCONT, SIG_DFL); - (void) setsignal(SIGCHLD, SIG_DFL); - - // Raping SIGPROF for profiling - (void) setsignal(SIGPROF, sig_handler_prof); - (void) setsignal(SIGALRM, sig_handler_log); - alarm(STATS_INTERVAL); - } - -SocketComm::~SocketComm() - { - loop_over_list(peers, i) - delete peers[i]->io; - - delete io; - CloseListenFDs(); - } - -static unsigned int first_rtime = 0; - -static void fd_vector_set(const std::vector& fds, fd_set* set, int* max) - { - for ( size_t i = 0; i < fds.size(); ++i ) - { - FD_SET(fds[i], set); - *max = ::max(fds[i], *max); - } - } - -void SocketComm::Run() - { - first_rtime = (unsigned int) current_time(true); - - while ( true ) - { - // Logging signaled? - if ( log_stats ) - LogStats(); - - // Termination signaled - if ( terminating ) - CheckFinished(); - - // Build FDSets for select. - fd_set fd_read, fd_write, fd_except; - - FD_ZERO(&fd_read); - FD_ZERO(&fd_write); - FD_ZERO(&fd_except); - - int max_fd = io->Fd(); - FD_SET(io->Fd(), &fd_read); - max_fd = std::max(max_fd, io->ExtraReadFDs().Set(&fd_read)); - - loop_over_list(peers, i) - { - if ( peers[i]->connected ) - { - FD_SET(peers[i]->io->Fd(), &fd_read); - if ( peers[i]->io->Fd() > max_fd ) - max_fd = peers[i]->io->Fd(); - max_fd = std::max(max_fd, - peers[i]->io->ExtraReadFDs().Set(&fd_read)); - } - else - { - if ( peers[i]->next_try > 0 && - time(0) > peers[i]->next_try ) - // Try reconnect. - Connect(peers[i]); - } - } - - if ( listen_next_try && time(0) > listen_next_try ) - Listen(); - - for ( size_t i = 0; i < listen_fds.size(); ++i ) - { - FD_SET(listen_fds[i], &fd_read); - if ( listen_fds[i] > max_fd ) - max_fd = listen_fds[i]; - } - - if ( io->IsFillingUp() && ! shutting_conns_down ) - { - Error("queue to parent filling up; shutting down heaviest connection"); - - const ChunkedIO::Statistics* stats = 0; - unsigned long max = 0; - Peer* max_peer = 0; - - loop_over_list(peers, i) - { - if ( ! peers[i]->connected ) - continue; - - stats = peers[i]->io->Stats(); - if ( stats->bytes_read > max ) - { - max = stats->bytes_read; - max_peer = peers[i]; - } - } - - if ( max_peer ) - CloseConnection(max_peer, true); - - shutting_conns_down = true; - } - - if ( ! 
io->IsFillingUp() && shutting_conns_down ) - shutting_conns_down = false; - - static long selects = 0; - static long canwrites = 0; - - ++selects; - if ( io->CanWrite() ) - ++canwrites; - - struct timeval timeout; - timeout.tv_sec = 1; - timeout.tv_usec = 0; - - int a = select(max_fd + 1, &fd_read, &fd_write, &fd_except, &timeout); - - if ( selects % 100000 == 0 ) - Log(fmt("selects=%ld canwrites=%ld pending=%lu", - selects, canwrites, io->Stats()->pending)); - - if ( a < 0 ) - // Ignore errors for now. - continue; - - if ( io->CanRead() ) - ProcessParentMessage(); - - io->Flush(); - - loop_over_list(peers, j) - { - // We have to be careful here as the peer may - // be removed when an error occurs. - Peer* current = peers[j]; - int round = 0; - while ( ++round <= 10 && j < peers.length() && - peers[j] == current && current->connected && - current->io->CanRead() ) - { - ProcessRemoteMessage(current); - } - } - - for ( size_t i = 0; i < listen_fds.size(); ++i ) - if ( FD_ISSET(listen_fds[i], &fd_read) ) - AcceptConnection(listen_fds[i]); - - // Hack to display CPU usage of the child, triggered via - // SIGPROF. - static unsigned int first_rtime = 0; - if ( first_rtime == 0 ) - first_rtime = (unsigned int) current_time(true); - - if ( log_prof ) - { - LogProf(); - log_prof = false; - } - } - } - -bool SocketComm::ProcessParentMessage() - { - switch ( parent_msgstate ) { - case TYPE: - { - parent_peer = 0; - parent_msgtype = MSG_NONE; - - // CMsg follows - ChunkedIO::Chunk* c; - if ( ! io->Read(&c) ) - { - if ( io->Eof() ) - Error("parent died", true); - - Error(fmt("can't read parent's cmsg: %s", - io->Error()), true); - return false; - } - - if ( ! c ) - return true; - - CMsg* msg = (CMsg*) c->data; - parent_peer = LookupPeer(msg->Peer(), false); - parent_id = msg->Peer(); - parent_msgtype = msg->Type(); - parent_args = 0; - - delete c; - - switch ( parent_msgtype ) { - case MSG_LISTEN_STOP: - case MSG_CLOSE: - case MSG_CLOSE_ALL: - case MSG_TERMINATE: - case MSG_PHASE_DONE: - case MSG_DEBUG_DUMP: - case MSG_REQUEST_LOGS: - { - // No further argument chunk. - parent_msgstate = TYPE; - return DoParentMessage(); - } - - case MSG_LISTEN: - case MSG_CONNECT_TO: - case MSG_COMPRESS: - case MSG_PING: - case MSG_PONG: - case MSG_REQUEST_EVENTS: - case MSG_REQUEST_SYNC: - case MSG_SERIAL: - case MSG_CAPTURE_FILTER: - case MSG_VERSION: - case MSG_CAPS: - case MSG_SYNC_POINT: - case MSG_REMOTE_PRINT: - case MSG_LOG_CREATE_WRITER: - case MSG_LOG_WRITE: - { - // One further argument chunk. - parent_msgstate = ARGS; - return ProcessParentMessage(); - } - - default: - InternalError(fmt("unknown msg type %d", parent_msgtype)); - return true; - } - } - - case ARGS: - { - // Argument chunk follows. - ChunkedIO::Chunk* c = 0; - READ_CHUNK(io, c, Error("parent died", true), true); - parent_args = c; - parent_msgstate = TYPE; - bool result = DoParentMessage(); - - if ( parent_args ) - { - delete parent_args; - parent_args = 0; - } - - return result; - } - - default: - InternalError("unknown msgstate"); - } - - // Cannot be reached. 
- return false; - } - -bool SocketComm::DoParentMessage() - { - switch ( parent_msgtype ) { - - case MSG_LISTEN_STOP: - { - CloseListenFDs(); - - Log("stopped listening"); - - return true; - } - - case MSG_CLOSE: - { - if ( parent_peer && parent_peer->connected ) - CloseConnection(parent_peer, false); - return true; - } - - case MSG_CLOSE_ALL: - { - loop_over_list(peers, i) - { - if ( peers[i]->connected ) - CloseConnection(peers[i], false); - } - return true; - } - - case MSG_TERMINATE: - { - terminating = true; - CheckFinished(); - return true; - } - - case MSG_DEBUG_DUMP: - { -#ifdef DEBUG_COMMUNICATION - io->DumpDebugData("comm-dump.child.pipe", true); - io->DumpDebugData("comm-dump.child.pipe", false); - - loop_over_list(peers, j) - { - RemoteSerializer::PeerID id = peers[j]->id; - peers[j]->io->DumpDebugData(fmt("comm-dump.child.peer.%d", id), true); - peers[j]->io->DumpDebugData(fmt("comm-dump.child.peer.%d", id), false); - } -#else - InternalError("DEBUG_DUMP support not compiled in"); -#endif - return true; - } - - case MSG_LISTEN: - return ProcessListen(); - - case MSG_CONNECT_TO: - return ProcessConnectTo(); - - case MSG_COMPRESS: - return ProcessParentCompress(); - - case MSG_PING: - { - // Set time2. - assert(parent_args); - ping_args* args = (ping_args*) parent_args->data; - args->time2 = htond(current_time(true)); - return ForwardChunkToPeer(); - } - - case MSG_PONG: - { - assert(parent_args); - // Calculate time delta. - ping_args* args = (ping_args*) parent_args->data; - args->time3 = htond(current_time(true) - ntohd(args->time3)); - return ForwardChunkToPeer(); - } - - case MSG_PHASE_DONE: - case MSG_REQUEST_LOGS: - { - // No argument block follows. - if ( parent_peer && parent_peer->connected ) - { - DEBUG_COMM(fmt("child: forwarding %s to peer", msgToStr(parent_msgtype))); - if ( ! SendToPeer(parent_peer, parent_msgtype, 0) ) - return false; - } - - return true; - } - - case MSG_REQUEST_EVENTS: - case MSG_REQUEST_SYNC: - case MSG_SERIAL: - case MSG_CAPTURE_FILTER: - case MSG_VERSION: - case MSG_CAPS: - case MSG_SYNC_POINT: - case MSG_REMOTE_PRINT: - case MSG_LOG_CREATE_WRITER: - case MSG_LOG_WRITE: - assert(parent_args); - return ForwardChunkToPeer(); - - default: - InternalError("ProcessParentMessage: unexpected state"); - } - - InternalError("cannot be reached"); - return false; - } - -bool SocketComm::ForwardChunkToPeer() - { - char state = parent_msgtype; - - if ( parent_peer && parent_peer->connected ) - { - DEBUG_COMM("child: forwarding with 1 arg to peer"); - - if ( ! SendToPeer(parent_peer, state, 0) ) - return false; - - if ( ! SendToPeer(parent_peer, parent_args) ) - return false; - - parent_args = 0; - } - else - { -#ifdef DEBUG - if ( parent_peer ) - DEBUG_COMM(fmt("child: not connected to #%" PRI_SOURCE_ID, parent_id)); -#endif - } - - return true; - } - -bool SocketComm::ProcessConnectTo() - { - assert(parent_args); - vector args = tokenize(parent_args->data, ','); - - if ( args.size() != 6 ) - { - Error(fmt("ProcessConnectTo() bad number of arguments")); - return false; - } - - Peer* peer = new Peer; - - if ( ! atoi_n(args[0].size(), args[0].c_str(), 0, 10, peer->id) ) - { - Error(fmt("ProccessConnectTo() bad peer id string")); - delete peer; - return false; - } - - peer->ip = IPAddr(args[1]); - peer->zone_id = args[2]; - - if ( ! atoi_n(args[3].size(), args[3].c_str(), 0, 10, peer->port) ) - { - Error(fmt("ProcessConnectTo() bad peer port string")); - delete peer; - return false; - } - - if ( ! 
atoi_n(args[4].size(), args[4].c_str(), 0, 10, peer->retry) ) - { - Error(fmt("ProcessConnectTo() bad peer retry string")); - delete peer; - return false; - } - - peer->ssl = false; - if ( args[5] != "0" ) - peer->ssl = true; - - return Connect(peer); - } - -bool SocketComm::ProcessListen() - { - assert(parent_args); - vector args = tokenize(parent_args->data, ','); - - if ( args.size() != 6 ) - { - Error(fmt("ProcessListen() bad number of arguments")); - return false; - } - - listen_if = args[0]; - - if ( ! atoi_n(args[1].size(), args[1].c_str(), 0, 10, listen_port) ) - { - Error(fmt("ProcessListen() bad peer port string")); - return false; - } - - listen_ssl = false; - if ( args[2] != "0" ) - listen_ssl = true; - - enable_ipv6 = false; - if ( args[3] != "0" ) - enable_ipv6 = true; - - listen_zone_id = args[4]; - - if ( ! atoi_n(args[5].size(), args[5].c_str(), 0, 10, bind_retry_interval) ) - { - Error(fmt("ProcessListen() bad peer port string")); - return false; - } - - return Listen(); - } - -bool SocketComm::ProcessParentCompress() - { - assert(parent_args); - uint32* args = (uint32*) parent_args->data; - - uint32 level = ntohl(args[0]); - - if ( ! parent_peer->compressor ) - { - parent_peer->io = new CompressedChunkedIO(parent_peer->io); - parent_peer->io->Init(); - parent_peer->compressor = true; - } - - // Signal compression to peer. - if ( ! SendToPeer(parent_peer, MSG_COMPRESS, 0) ) - return false; - - // This cast is safe. - CompressedChunkedIO* comp_io = (CompressedChunkedIO*) parent_peer->io; - comp_io->EnableCompression(level); - - Log(fmt("enabling compression (level %d)", level), parent_peer); - - return true; - } - -bool SocketComm::ProcessRemoteMessage(SocketComm::Peer* peer) - { - assert(peer); - - peer->io->Flush(); - - switch ( peer->state ) { - case MSG_NONE: - { // CMsg follows - ChunkedIO::Chunk* c; - READ_CHUNK(peer->io, c, - (CloseConnection(peer, true), peer), false) - - CMsg* msg = (CMsg*) c->data; - - DEBUG_COMM(fmt("child: %s from peer #%" PRI_SOURCE_ID, - msgToStr(msg->Type()), peer->id)); - - switch ( msg->Type() ) { - case MSG_PHASE_DONE: - case MSG_REQUEST_LOGS: - // No further argument block. - DEBUG_COMM("child: forwarding with 0 args to parent"); - if ( ! SendToParent(msg->Type(), peer, 0) ) - return false; - break; - - default: - peer->state = msg->Type(); - } - - delete c; - - break; - } - - case MSG_COMPRESS: - ProcessPeerCompress(peer); - break; - - case MSG_PING: - { - // Messages with one further argument block which we simply - // forward to our parent. - ChunkedIO::Chunk* c; - READ_CHUNK(peer->io, c, - (CloseConnection(peer, true), peer), false) - - // Set time3. - ping_args* args = (ping_args*) c->data; - args->time3 = htond(current_time(true)); - return ForwardChunkToParent(peer, c); - } - - case MSG_PONG: - { - // Messages with one further argument block which we simply - // forward to our parent. - ChunkedIO::Chunk* c; - READ_CHUNK(peer->io, c, - (CloseConnection(peer, true), peer), false) - - // Calculate time delta. - ping_args* args = (ping_args*) c->data; - args->time2 = htond(current_time(true) - ntohd(args->time2)); - return ForwardChunkToParent(peer, c); - } - - case MSG_REQUEST_EVENTS: - case MSG_REQUEST_SYNC: - case MSG_SERIAL: - case MSG_CAPTURE_FILTER: - case MSG_VERSION: - case MSG_CAPS: - case MSG_SYNC_POINT: - case MSG_REMOTE_PRINT: - case MSG_LOG_CREATE_WRITER: - case MSG_LOG_WRITE: - { - // Messages with one further argument block which we simply - // forward to our parent. 
- ChunkedIO::Chunk* c; - READ_CHUNK(peer->io, c, - (CloseConnection(peer, true), peer), false) - - return ForwardChunkToParent(peer, c); - } - - default: - InternalError("ProcessRemoteMessage: unexpected state"); - } - - return true; - } - -bool SocketComm::ForwardChunkToParent(Peer* peer, ChunkedIO::Chunk* c) - { - char state = peer->state; - peer->state = MSG_NONE; - - DEBUG_COMM("child: forwarding message with 1 arg to parent"); - - if ( ! SendToParent(state, peer, 0) ) - return false; - - if ( ! SendToParent(c) ) - return false; - - io->Flush(); // FIXME: Needed? - return true; - } - -bool SocketComm::ProcessPeerCompress(Peer* peer) - { - peer->state = MSG_NONE; - - if ( ! parent_peer->compressor ) - { - parent_peer->io = new CompressedChunkedIO(parent_peer->io); - parent_peer->io->Init(); - parent_peer->compressor = true; - } - - // This cast is safe here. - ((CompressedChunkedIO*) peer->io)->EnableDecompression(); - Log("enabling decompression", peer); - return true; - } - -bool SocketComm::Connect(Peer* peer) - { - int status; - addrinfo hints, *res, *res0; - memset(&hints, 0, sizeof(hints)); - - hints.ai_family = PF_UNSPEC; - hints.ai_protocol = IPPROTO_TCP; - hints.ai_socktype = SOCK_STREAM; - hints.ai_flags = AI_NUMERICHOST; - - char port_str[16]; - modp_uitoa10(peer->port, port_str); - - string gaihostname(peer->ip.AsString()); - if ( peer->zone_id != "" ) - gaihostname.append("%").append(peer->zone_id); - - status = getaddrinfo(gaihostname.c_str(), port_str, &hints, &res0); - if ( status != 0 ) - { - Error(fmt("getaddrinfo error: %s", gai_strerror(status))); - return false; - } - - int sockfd = -1; - for ( res = res0; res; res = res->ai_next ) - { - sockfd = socket(res->ai_family, res->ai_socktype, res->ai_protocol); - if ( sockfd < 0 ) - { - Error(fmt("can't create connect socket, %s", strerror(errno))); - continue; - } - - if ( connect(sockfd, res->ai_addr, res->ai_addrlen) < 0 ) - { - Error(fmt("connect failed: %s", strerror(errno)), peer); - safe_close(sockfd); - sockfd = -1; - continue; - } - - break; - } - - freeaddrinfo(res0); - - bool connected = sockfd != -1; - - if ( ! (connected || peer->retry) ) - { - CloseConnection(peer, false); - return false; - } - - Peer* existing_peer = LookupPeer(peer->id, false); - if ( existing_peer ) - { - *existing_peer = *peer; - peer = existing_peer; - } - else - peers.append(peer); - - peer->connected = connected; - peer->next_try = connected ? 0 : time(0) + peer->retry; - peer->state = MSG_NONE; - peer->io = 0; - peer->compressor = false; - - if ( connected ) - { - if ( peer->ssl ) - peer->io = new ChunkedIOSSL(sockfd, false); - else - peer->io = new ChunkedIOFd(sockfd, "child->peer"); - - if ( ! peer->io->Init() ) - { - Error(fmt("can't init peer io: %s", - peer->io->Error()), false); - return 0; - } - } - - if ( connected ) - { - Log("connected", peer); - - const size_t BUFSIZE = 1024; - char* data = new char[BUFSIZE]; - snprintf(data, BUFSIZE, "%s,%" PRIu32, peer->ip.AsString().c_str(), - peer->port); - - if ( ! SendToParent(MSG_CONNECTED, peer, data) ) - return false; - } - - return connected; - } - -bool SocketComm::CloseConnection(Peer* peer, bool reconnect) - { - if ( ! SendToParent(MSG_CLOSE, peer, 0) ) - return false; - - Log("connection closed", peer); - - if ( ! peer->retry || ! reconnect ) - { - peers.remove(peer); - delete peer->io; // This will close the fd. - delete peer; - } - else - { - delete peer->io; // This will close the fd. 
- peer->io = 0; - peer->connected = false; - peer->next_try = time(0) + peer->retry; - } - - if ( parent_peer == peer ) - { - parent_peer = 0; - parent_id = RemoteSerializer::PEER_NONE; - } - - return true; - } - -bool SocketComm::Listen() - { - int status, on = 1; - addrinfo hints, *res, *res0; - memset(&hints, 0, sizeof(hints)); - - IPAddr listen_ip(listen_if); - - if ( enable_ipv6 ) - { - if ( listen_ip == IPAddr("0.0.0.0") || listen_ip == IPAddr("::") ) - hints.ai_family = PF_UNSPEC; - else - hints.ai_family = (listen_ip.GetFamily() == IPv4 ? PF_INET : PF_INET6); - } - else - hints.ai_family = PF_INET; - - hints.ai_protocol = IPPROTO_TCP; - hints.ai_socktype = SOCK_STREAM; - hints.ai_flags = AI_PASSIVE | AI_NUMERICHOST; - - char port_str[16]; - modp_uitoa10(listen_port, port_str); - - string scoped_addr(listen_if); - if ( listen_zone_id != "" ) - scoped_addr.append("%").append(listen_zone_id); - - const char* addr_str = 0; - if ( listen_ip != IPAddr("0.0.0.0") && listen_ip != IPAddr("::") ) - addr_str = scoped_addr.c_str(); - - CloseListenFDs(); - - if ( (status = getaddrinfo(addr_str, port_str, &hints, &res0)) != 0 ) - { - Error(fmt("getaddrinfo error: %s", gai_strerror(status))); - return false; - } - - for ( res = res0; res; res = res->ai_next ) - { - if ( res->ai_family != AF_INET && res->ai_family != AF_INET6 ) - { - Error(fmt("can't create listen socket: unknown address family, %d", - res->ai_family)); - continue; - } - - IPAddr a = (res->ai_family == AF_INET) ? - IPAddr(((sockaddr_in*)res->ai_addr)->sin_addr) : - IPAddr(((sockaddr_in6*)res->ai_addr)->sin6_addr); - - string l_addr_str(a.AsURIString()); - if ( listen_zone_id != "") - l_addr_str.append("%").append(listen_zone_id); - - int fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol); - if ( fd < 0 ) - { - Error(fmt("can't create listen socket, %s", strerror(errno))); - continue; - } - - if ( setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) < 0 ) - Error(fmt("can't set SO_REUSEADDR, %s", strerror(errno))); - - // For IPv6 listening sockets, we don't want do dual binding to also - // get IPv4-mapped addresses because that's not as portable. e.g. - // many BSDs don't allow that. - if ( res->ai_family == AF_INET6 && - setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on)) < 0 ) - Error(fmt("can't set IPV6_V6ONLY, %s", strerror(errno))); - - if ( ::bind(fd, res->ai_addr, res->ai_addrlen) < 0 ) - { - Error(fmt("can't bind to %s:%s, %s", l_addr_str.c_str(), - port_str, strerror(errno))); - - if ( errno == EADDRINUSE ) - { - // Abandon completely this attempt to set up listening sockets, - // try again later. - safe_close(fd); - CloseListenFDs(); - listen_next_try = time(0) + bind_retry_interval; - freeaddrinfo(res0); - return false; - } - - safe_close(fd); - continue; - } - - if ( listen(fd, 50) < 0 ) - { - Error(fmt("can't listen on %s:%s, %s", l_addr_str.c_str(), - port_str, strerror(errno))); - safe_close(fd); - continue; - } - - listen_fds.push_back(fd); - Log(fmt("listening on %s:%s (%s)", l_addr_str.c_str(), port_str, - listen_ssl ? 
"ssl" : "clear")); - } - - freeaddrinfo(res0); - - listen_next_try = 0; - return listen_fds.size() > 0; - } - -bool SocketComm::AcceptConnection(int fd) - { - union { - sockaddr_storage ss; - sockaddr_in s4; - sockaddr_in6 s6; - } client; - - socklen_t len = sizeof(client.ss); - - int clientfd = accept(fd, (sockaddr*) &client.ss, &len); - if ( clientfd < 0 ) - { - Error(fmt("accept failed, %s %d", strerror(errno), errno)); - return false; - } - - if ( client.ss.ss_family != AF_INET && client.ss.ss_family != AF_INET6 ) - { - Error(fmt("accept fail, unknown address family %d", - client.ss.ss_family)); - safe_close(clientfd); - return false; - } - - Peer* peer = new Peer; - peer->id = id_counter++; - peer->ip = client.ss.ss_family == AF_INET ? - IPAddr(client.s4.sin_addr) : - IPAddr(client.s6.sin6_addr); - - peer->port = client.ss.ss_family == AF_INET ? - ntohs(client.s4.sin_port) : - ntohs(client.s6.sin6_port); - - peer->connected = true; - peer->ssl = listen_ssl; - peer->compressor = false; - - if ( peer->ssl ) - peer->io = new ChunkedIOSSL(clientfd, true); - else - peer->io = new ChunkedIOFd(clientfd, "child->peer"); - - if ( ! peer->io->Init() ) - { - Error(fmt("can't init peer io: %s", peer->io->Error()), false); - delete peer->io; - delete peer; - return false; - } - - peers.append(peer); - - Log(fmt("accepted %s connection", peer->ssl ? "SSL" : "clear"), peer); - - const size_t BUFSIZE = 1024; - char* data = new char[BUFSIZE]; - snprintf(data, BUFSIZE, "%s,%" PRIu32, peer->ip.AsString().c_str(), - peer->port); - - if ( ! SendToParent(MSG_CONNECTED, peer, data) ) - return false; - - return true; - } - -const char* SocketComm::MakeLogString(const char* msg, Peer* peer) - { - const int BUFSIZE = 1024; - static char* buffer = 0; - - if ( ! buffer ) - buffer = new char[BUFSIZE]; - - int len = 0; - - if ( peer ) - { - string scoped_addr(peer->ip.AsURIString()); - if ( peer->zone_id != "" ) - scoped_addr.append("%").append(peer->zone_id); - - len = snprintf(buffer, BUFSIZE, "[#%d/%s:%d] ", int(peer->id), - scoped_addr.c_str(), peer->port); - } - - len += safe_snprintf(buffer + len, BUFSIZE - len, "%s", msg); - return buffer; - } - -void SocketComm::CloseListenFDs() - { - for ( size_t i = 0; i < listen_fds.size(); ++i ) - safe_close(listen_fds[i]); - - listen_fds.clear(); - } - -void SocketComm::Error(const char* msg, bool kill_me) - { - if ( kill_me ) - { - fprintf(stderr, "fatal error in child: %s\n", msg); - Kill(); - } - else - { - if ( io->Eof() ) - // Can't send to parent, so fall back to stderr. - fprintf(stderr, "error in child: %s", msg); - else - SendToParent(MSG_ERROR, 0, copy_string(msg)); - } - - DEBUG_COMM(fmt("child: %s", msg)); - } - -bool SocketComm::Error(const char* msg, Peer* peer) - { - const char* buffer = MakeLogString(msg, peer); - Error(buffer); - - // If a remote peer causes an error, we shutdown the connection - // as resynchronizing is in general not possible. But we may - // try again later. - if ( peer->connected ) - CloseConnection(peer, true); - - return true; - } - -void SocketComm::Log(const char* msg, Peer* peer) - { - const char* buffer = MakeLogString(msg, peer); - SendToParent(MSG_LOG, 0, copy_string(buffer)); - DEBUG_COMM(fmt("child: %s", buffer)); - } - -void SocketComm::InternalError(const char* msg) - { - fprintf(stderr, "internal error in child: %s\n", msg); - Kill(); - } - -void SocketComm::Kill() - { - if ( killing ) - // Ignore recursive calls. 
- return; - - killing = true; - - LogProf(); - Log("terminating"); - - CloseListenFDs(); - - if ( kill(getpid(), SIGTERM) < 0 ) - Log(fmt("warning: cannot kill SocketComm pid %d, %s", getpid(), strerror(errno))); - - while ( 1 ) - ; // loop until killed - } - -SocketComm::Peer* SocketComm::LookupPeer(RemoteSerializer::PeerID id, - bool only_if_connected) - { - loop_over_list(peers, i) - if ( peers[i]->id == id ) - return ! only_if_connected || - peers[i]->connected ? peers[i] : 0; - return 0; - } - -bool SocketComm::LogStats() - { - if ( ! peers.length() ) - return true; - - // Concat stats of all peers into single buffer. - char* buffer = new char[peers.length() * 512]; - int pos = 0; - - loop_over_list(peers, i) - { - if ( peers[i]->connected ) - peers[i]->io->Stats(buffer+pos, 512); - else - strcpy(buffer+pos, "not connected"); - pos += strlen(buffer+pos) + 1; - } - - // Send it. - if ( ! SendToParent(MSG_STATS, 0, buffer, pos) ) - return false; - - log_stats = false; - alarm(STATS_INTERVAL); - return true; - } - -bool SocketComm::LogProf() - { - static struct rusage cld_res; - getrusage(RUSAGE_SELF, &cld_res); - - double Utime = cld_res.ru_utime.tv_sec + cld_res.ru_utime.tv_usec / 1e6; - double Stime = cld_res.ru_stime.tv_sec + cld_res.ru_stime.tv_usec / 1e6; - double Rtime = current_time(true); - - SocketComm::Log(fmt("CPU usage: user %.03f sys %.03f real %0.03f", - Utime, Stime, Rtime - first_rtime)); - - return true; - } - -void SocketComm::CheckFinished() - { - assert(terminating); - - loop_over_list(peers, i) - { - if ( ! peers[i]->connected ) - continue; - if ( ! peers[i]->io->IsIdle() ) - return; - } - - LogProf(); - Log("terminating"); - - // All done. - SendToParent(MSG_TERMINATE, 0, 0); - } - -bool SocketComm::SendToParent(char type, Peer* peer, const char* str, int len) - { -#ifdef DEBUG - // str may already by constructed with fmt() - const char* tmp = copy_string(str); - DEBUG_COMM(fmt("child: (->parent) %s (#%" PRI_SOURCE_ID ", %s)", msgToStr(type), peer ? peer->id : RemoteSerializer::PEER_NONE, tmp)); - delete [] tmp; -#endif - if ( sendToIO(io, type, peer ? peer->id : RemoteSerializer::PEER_NONE, - str, len) ) - return true; - - if ( io->Eof() ) - Error("parent died", true); - - return false; - } - -bool SocketComm::SendToParent(char type, Peer* peer, int nargs, ...) - { - va_list ap; - -#ifdef DEBUG - va_start(ap,nargs); - DEBUG_COMM(fmt("child: (->parent) %s (#%" PRI_SOURCE_ID ",%s)", msgToStr(type), peer ? peer->id : RemoteSerializer::PEER_NONE, fmt_uint32s(nargs, ap))); - va_end(ap); -#endif - - va_start(ap, nargs); - bool ret = sendToIO(io, type, - peer ? peer->id : RemoteSerializer::PEER_NONE, - nargs, ap); - va_end(ap); - - if ( ret ) - return true; - - if ( io->Eof() ) - Error("parent died", true); - - return false; - } - -bool SocketComm::SocketComm::SendToParent(ChunkedIO::Chunk* c) - { - DEBUG_COMM(fmt("child: (->parent) chunk of size %d", c->len)); - if ( sendToIO(io, c) ) - return true; - - if ( io->Eof() ) - Error("parent died", true); - - return false; - } - -bool SocketComm::SendToPeer(Peer* peer, char type, const char* str, int len) - { -#ifdef DEBUG - // str may already by constructed with fmt() - const char* tmp = copy_string(str); - DEBUG_COMM(fmt("child: (->peer) %s to #%" PRI_SOURCE_ID " (%s)", msgToStr(type), peer->id, tmp)); - delete [] tmp; -#endif - - if ( ! 
sendToIO(peer->io, type, RemoteSerializer::PEER_NONE, str, len) ) - { - Error(fmt("child: write error %s", io->Error()), peer); - return false; - } - - return true; - } - -bool SocketComm::SendToPeer(Peer* peer, char type, int nargs, ...) - { - va_list ap; - -#ifdef DEBUG - va_start(ap,nargs); - DEBUG_COMM(fmt("child: (->peer) %s to #%" PRI_SOURCE_ID " (%s)", - msgToStr(type), peer->id, fmt_uint32s(nargs, ap))); - va_end(ap); -#endif - - va_start(ap, nargs); - bool ret = sendToIO(peer->io, type, RemoteSerializer::PEER_NONE, - nargs, ap); - va_end(ap); - - if ( ! ret ) - { - Error(fmt("child: write error %s", io->Error()), peer); - return false; - } - - return true; - } - -bool SocketComm::SendToPeer(Peer* peer, ChunkedIO::Chunk* c) - { - DEBUG_COMM(fmt("child: (->peer) chunk of size %d to #%" PRI_SOURCE_ID, c->len, peer->id)); - if ( ! sendToIO(peer->io, c) ) - { - Error(fmt("child: write error %s", io->Error()), peer); - return false; - } - - return true; - } diff --git a/src/RemoteSerializer.h b/src/RemoteSerializer.h deleted file mode 100644 index 28ca495f17..0000000000 --- a/src/RemoteSerializer.h +++ /dev/null @@ -1,525 +0,0 @@ -// Communication between two Bro's. - -#ifndef REMOTE_SERIALIZER -#define REMOTE_SERIALIZER - -#include "Dict.h" -#include "List.h" -#include "Serializer.h" -#include "iosource/IOSource.h" -#include "Stats.h" -#include "File.h" -#include "logging/WriterBackend.h" - -#include -#include - -class IncrementalSendTimer; - -namespace threading { - struct Field; - struct Value; -} - -// This class handles the communication done in Bro's main loop. -class RemoteSerializer : public Serializer, public iosource::IOSource { -public: - RemoteSerializer(); - ~RemoteSerializer() override; - - // Initialize the remote serializer (calling this will fork). - void Enable(); - - // FIXME: Use SourceID directly (or rename everything to Peer*). - typedef SourceID PeerID; - static const PeerID PEER_LOCAL = SOURCE_LOCAL; - static const PeerID PEER_NONE = SOURCE_LOCAL; - - // Connect to host (returns PEER_NONE on error). - PeerID Connect(const IPAddr& ip, const string& zone_id, uint16 port, - const char* our_class, double retry, bool use_ssl); - - // Close connection to host. - bool CloseConnection(PeerID peer); - - // Request all events matching pattern from remote side. - bool RequestEvents(PeerID peer, RE_Matcher* pattern); - - // Request synchronization of IDs with remote side. If auth is true, - // we consider our current state to authoritative and send it to - // the peer right after the handshake. - bool RequestSync(PeerID peer, bool auth); - - // Requests logs from the remote side. - bool RequestLogs(PeerID id); - - // Sets flag whether we're accepting state from this peer - // (default: yes). - bool SetAcceptState(PeerID peer, bool accept); - - // Sets compression level (0-9, 0 is defaults and means no compression) - bool SetCompressionLevel(PeerID peer, int level); - - // Signal the other side that we have finished our part of - // the initial handshake. - bool CompleteHandshake(PeerID peer); - - // Start to listen. - bool Listen(const IPAddr& ip, uint16 port, bool expect_ssl, bool ipv6, - const string& zone_id, double retry); - - // Stop it. - bool StopListening(); - - // Broadcast the event/function call. - bool SendCall(SerialInfo* info, const char* name, val_list* vl); - - // Send the event/function call (only if handshake completed). 
- bool SendCall(SerialInfo* info, PeerID peer, const char* name, val_list* vl); - - // Broadcasts the access (only if handshake completed). - bool SendAccess(SerialInfo* info, const StateAccess& access); - - // Send the access. - bool SendAccess(SerialInfo* info, PeerID pid, const StateAccess& access); - - // Sends ID. - bool SendID(SerialInfo* info, PeerID peer, const ID& id); - - // Sends the internal connection state. - bool SendConnection(SerialInfo* info, PeerID peer, const Connection& c); - - // Send capture filter. - bool SendCaptureFilter(PeerID peer, const char* filter); - - // Send packet. - bool SendPacket(SerialInfo* info, PeerID peer, const Packet& p); - - // Broadcast packet. - bool SendPacket(SerialInfo* info, const Packet& p); - - // Broadcast ping. - bool SendPing(PeerID peer, uint32 seq); - - // Broadcast remote print. - bool SendPrintHookEvent(BroFile* f, const char* txt, size_t len); - - // Send a request to create a writer on a remote side. - bool SendLogCreateWriter(PeerID peer, EnumVal* id, EnumVal* writer, const logging::WriterBackend::WriterInfo& info, int num_fields, const threading::Field* const * fields); - - // Broadcasts a request to create a writer. - bool SendLogCreateWriter(EnumVal* id, EnumVal* writer, const logging::WriterBackend::WriterInfo& info, int num_fields, const threading::Field* const * fields); - - // Broadcast a log entry to everybody interested. - bool SendLogWrite(EnumVal* id, EnumVal* writer, string path, int num_fields, const threading::Value* const * vals); - - // Synchronzizes time with all connected peers. Returns number of - // current sync-point, or -1 on error. - uint32 SendSyncPoint(); - void SendFinalSyncPoint(); - - // Registers the ID to be &synchronized. - void Register(ID* id); - void Unregister(ID* id); - - // Stop/restart propagating state updates. - void SuspendStateUpdates() { --propagate_accesses; } - void ResumeStateUpdates() { ++propagate_accesses; } - - // Check for incoming events and queue them. - bool Poll(bool may_block); - - // Returns the corresponding record (already ref'ed). - RecordVal* GetPeerVal(PeerID id); - - // Log some statistics. - void LogStats(); - - // Tries to sent out all remaining data. - // FIXME: Do we still need this? - void Finish(); - - // Overidden from IOSource: - void GetFds(iosource::FD_Set* read, iosource::FD_Set* write, - iosource::FD_Set* except) override; - double NextTimestamp(double* local_network_time) override; - void Process() override; - TimerMgr::Tag* GetCurrentTag() override; - const char* Tag() override { return "RemoteSerializer"; } - - // Gracefully finishes communication by first making sure that all - // remaining data (parent & child) has been sent out. - virtual bool Terminate(); - -#ifdef DEBUG_COMMUNICATION - // Dump data recently read/written into files. - void DumpDebugData(); - - // Read dump file and interpret as message block. - void ReadDumpAsMessageType(const char* file); - - // Read dump file and interpret as serialization. - void ReadDumpAsSerialization(const char* file); -#endif - - enum LogLevel { LogInfo = 1, LogError = 2, }; - static void Log(LogLevel level, const char* msg); - -protected: - friend class PersistenceSerializer; - friend class IncrementalSendTimer; - - // Maximum size of serialization caches. - static const unsigned int MAX_CACHE_SIZE = 3000; - - // When syncing traces in pseudo-realtime mode, we wait this many - // seconds after the final sync-point to make sure that all - // remaining I/O gets propagated. 
- static const unsigned int FINAL_SYNC_POINT_DELAY = 5; - - declare(PList, EventHandler); - typedef PList(EventHandler) handler_list; - - struct Peer { - PeerID id; // Unique ID (non-zero) per peer. - - IPAddr ip; - - uint16 port; - handler_list handlers; - RecordVal* val; // Record of type event_source. - SerializationCache* cache_in; // One cache for each direction. - SerializationCache* cache_out; - - // TCP-level state of the connection to the peer. - // State of the connection to the peer. - enum { INIT, PENDING, CONNECTED, CLOSING, CLOSED } state; - - // Current protocol phase of the connection (see RemoteSerializer.cc) - enum { UNKNOWN, SETUP, HANDSHAKE, SYNC, RUNNING } phase; - - // Capabilities. - static const int COMPRESSION = 1; - static const int NO_CACHING = 2; - static const int PID_64BIT = 4; - static const int NEW_CACHE_STRATEGY = 8; - static const int BROCCOLI_PEER = 16; - - // Constants to remember to who did something. - static const int NONE = 0; - static const int WE = 1; - static const int PEER = 2; - static const int BOTH = WE | PEER; - - static const int AUTH_WE = 4; - static const int AUTH_PEER = 8; - - int sent_version; // Who has sent the VERSION. - int handshake_done; // Who finished its handshake phase. - int sync_requested; // Who requested sync'ed state. - - bool orig; // True if we connected to the peer. - bool accept_state; // True if we accept state from peer. - bool send_state; // True if we're supposed to initially sent our state. - int comp_level; // Compression level. - bool logs_requested; // True if the peer has requested logs. - - // True if this peer triggered a net_suspend_processing(). - bool suspended_processing; - - uint32 caps; // Capabilities announced by peer. - int runtime; // Runtime we got from the peer. - int our_runtime; // Our runtime as we told it to this peer. - string peer_class; // Class from peer ("" = no class). - string our_class; // Class we send the peer. - uint32 sync_point; // Highest sync-point received so far - char* print_buffer; // Buffer for remote print or null. - int print_buffer_used; // Number of bytes used in buffer. - char* log_buffer; // Buffer for remote log or null. - int log_buffer_used; // Number of bytes used in buffer. - }; - - // Shuts down remote serializer. 
- void FatalError(const char* msg); - - enum LogSrc { LogChild = 1, LogParent = 2, LogScript = 3, }; - - static void Log(LogLevel level, const char* msg, Peer* peer, LogSrc src = LogParent); - - void ReportError(const char* msg) override; - - void GotEvent(const char* name, double time, - EventHandlerPtr event, val_list* args) override; - void GotFunctionCall(const char* name, double time, - Func* func, val_list* args) override; - void GotID(ID* id, Val* val) override; - void GotStateAccess(StateAccess* s) override; - void GotTimer(Timer* t) override; - void GotConnection(Connection* c) override; - void GotPacket(Packet* packet) override; - - void Fork(); - - bool DoMessage(); - bool ProcessConnected(); - bool ProcessSerialization(); - bool ProcessRequestEventsMsg(); - bool ProcessRequestSyncMsg(); - bool ProcessVersionMsg(); - bool ProcessLogMsg(bool is_error); - bool ProcessStatsMsg(); - bool ProcessCaptureFilterMsg(); - bool ProcessPhaseDone(); - bool ProcessPingMsg(); - bool ProcessPongMsg(); - bool ProcessCapsMsg(); - bool ProcessSyncPointMsg(); - bool ProcessRemotePrint(); - bool ProcessLogCreateWriter(); - bool ProcessLogWrite(); - bool ProcessRequestLogs(); - - Peer* AddPeer(const IPAddr& ip, uint16 port, PeerID id = PEER_NONE); - Peer* LookupPeer(PeerID id, bool only_if_connected); - void RemovePeer(Peer* peer); - bool IsConnectedPeer(PeerID id); - void PeerDisconnected(Peer* peer); - void PeerConnected(Peer* peer); - RecordVal* MakePeerVal(Peer* peer); - bool HandshakeDone(Peer* peer); - bool IsActive(); - void SetupSerialInfo(SerialInfo* info, Peer* peer); - bool CheckSyncPoints(); - void SendSyncPoint(uint32 syncpoint); - bool PropagateAccesses() - { - return ignore_accesses ? - propagate_accesses > 1 : propagate_accesses > 0; - } - - bool CloseConnection(Peer* peer); - - bool SendAllSynchronized(Peer* peer, SerialInfo* info); - bool SendCall(SerialInfo* info, Peer* peer, const char* name, val_list* vl); - bool SendAccess(SerialInfo* info, Peer* peer, const StateAccess& access); - bool SendID(SerialInfo* info, Peer* peer, const ID& id); - bool SendCapabilities(Peer* peer); - bool SendPacket(SerialInfo* info, Peer* peer, const Packet& p); - bool SendLogWrite(Peer* peer, EnumVal* id, EnumVal* writer, string path, int num_fields, const threading::Value* const * vals); - - void UnregisterHandlers(Peer* peer); - void RaiseEvent(EventHandlerPtr event, Peer* peer, const char* arg = 0); - bool EnterPhaseRunning(Peer* peer); - bool FlushPrintBuffer(Peer* p); - bool FlushLogBuffer(Peer* p); - - void ChildDied(); - void InternalCommError(const char* msg); - - // Communication helpers - bool SendCMsgToChild(char msg_type, Peer* peer); - bool SendToChild(char type, Peer* peer, char* str, int len = -1, - bool delete_with_free = false); - bool SendToChild(char type, Peer* peer, int nargs, ...); // can send uints32 only - bool SendToChild(ChunkedIO::Chunk* c); - - void SetSocketBufferSize(int fd, int opt, const char *what, int size, int verbose); - -private: - enum { TYPE, ARGS } msgstate; // current state of reading comm. - Peer* current_peer; - PeerID current_id; - char current_msgtype; - ChunkedIO::Chunk* current_args; - double last_flush; - - id_list sync_ids; - - // FIXME: Check which of these are necessary... - bool initialized; - bool listening; - int propagate_accesses; - bool ignore_accesses; - bool terminating; - int received_logs; - Peer* source_peer; - PeerID id_counter; // Keeps track of assigned IDs. 
- uint32 current_sync_point; - bool syncing_times; - - declare(PList, Peer); - typedef PList(Peer) peer_list; - peer_list peers; - - Peer* in_sync; // Peer we're currently syncing state with. - peer_list sync_pending; // List of peers waiting to sync state. - - // Event buffer - struct BufferedEvent { - time_t time; - PeerID src; - EventHandlerPtr handler; - val_list* args; - }; - - declare(PList, BufferedEvent); - typedef PList(BufferedEvent) EventQueue; - EventQueue events; - - // Packet buffer - struct BufferedPacket { - time_t time; - Packet* p; - }; - - declare(PList, BufferedPacket); - typedef PList(BufferedPacket) PacketQueue; - PacketQueue packets; - - // Some stats - struct Statistics { - struct Pair { - Pair() : in(0), out(0) {} - unsigned long in; - unsigned long out; - }; - - Pair events; // actually events and function calls - Pair accesses; - Pair conns; - Pair packets; - Pair ids; - } stats; - -}; - -// This class handles the communication done in the forked child. -class SocketComm { -public: - SocketComm(); - ~SocketComm(); - - void SetParentIO(ChunkedIO* arg_io) { io = arg_io; } - - void Run(); // does not return - - // Log some statistics (via pipe to parent). - bool LogStats(); - - // Log CPU usage (again via pipe to parent). - bool LogProf(); - -protected: - struct Peer { - Peer() - { - id = 0; - io = 0; - port = 0; - state = 0; - connected = false; - ssl = false; - retry = 0; - next_try = 0; - compressor = false; - } - - RemoteSerializer::PeerID id; - ChunkedIO* io; - IPAddr ip; - string zone_id; - uint16 port; - char state; - bool connected; - bool ssl; - // If we get disconnected, reconnect after this many seconds. - int retry; - // Time of next connection attempt (0 if none). - time_t next_try; - // True if io is a CompressedChunkedIO. - bool compressor; - }; - - bool Listen(); - bool AcceptConnection(int listen_fd); - bool Connect(Peer* peer); - bool CloseConnection(Peer* peer, bool reconnect); - - Peer* LookupPeer(RemoteSerializer::PeerID id, bool only_if_connected); - - bool ProcessRemoteMessage(Peer* peer); - bool ProcessParentMessage(); - bool DoParentMessage(); - - bool ProcessListen(); - bool ProcessConnectTo(); - bool ProcessCompress(); - - void Log(const char* msg, Peer* peer = 0); - - // The connection to the peer will be closed. - bool Error(const char* msg, Peer* peer); - - // If kill is true, this is a fatal error and we kill ourselves. - void Error(const char* msg, bool kill = false); - - // Kill the current process. - void Kill(); - - // Check whether everything has been sent out. - void CheckFinished(); - - // Reports the error and terminates the process. - void InternalError(const char* msg); - - // Communication helpers. - bool SendToParent(char type, Peer* peer, const char* str, int len = -1); - bool SendToParent(char type, Peer* peer, int nargs, ...); // can send uints32 only - bool SendToParent(ChunkedIO::Chunk* c); - bool SendToPeer(Peer* peer, char type, const char* str, int len = -1); - bool SendToPeer(Peer* peer, char type, int nargs, ...); // can send uints32 only - bool SendToPeer(Peer* peer, ChunkedIO::Chunk* c); - bool ProcessParentCompress(); - bool ProcessPeerCompress(Peer* peer); - bool ForwardChunkToParent(Peer* p, ChunkedIO::Chunk* c); - bool ForwardChunkToPeer(); - const char* MakeLogString(const char* msg, Peer *peer); - - // Closes all file descriptors associated with listening sockets. 
- void CloseListenFDs(); - - // Peers we are communicating with: - declare(PList, Peer); - typedef PList(Peer) peer_list; - - RemoteSerializer::PeerID id_counter; - peer_list peers; - - ChunkedIO* io; // I/O to parent - - // Current state of reading from parent. - enum { TYPE, ARGS } parent_msgstate; - Peer* parent_peer; - RemoteSerializer::PeerID parent_id; - char parent_msgtype; - ChunkedIO::Chunk* parent_args; - - vector listen_fds; - - // If the port we're trying to bind to is already in use, we will retry - // it regularly. - string listen_if; - string listen_zone_id; // RFC 4007 IPv6 zone_id - uint16 listen_port; - bool listen_ssl; // use SSL for IO - bool enable_ipv6; // allow IPv6 listen sockets - uint32 bind_retry_interval; // retry interval for already-in-use sockets - time_t listen_next_try; // time at which to try another bind - bool shutting_conns_down; - bool terminating; - bool killing; -}; - -extern RemoteSerializer* remote_serializer; - -#endif diff --git a/src/Reporter.cc b/src/Reporter.cc index 413f89b9ea..a40ddb9a3d 100644 --- a/src/Reporter.cc +++ b/src/Reporter.cc @@ -4,7 +4,7 @@ #include -#include "bro-config.h" +#include "zeek-config.h" #include "Reporter.h" #include "Event.h" #include "NetVar.h" @@ -31,12 +31,14 @@ Reporter::Reporter() via_events = false; in_error_handler = 0; - // Always use stderr at startup/init before scripts have been fully parsed. + // Always use stderr at startup/init before scripts have been fully parsed + // and zeek_init() processed. // Messages may otherwise be missed if an error occurs that prevents events // from ever being dispatched. info_to_stderr = true; warnings_to_stderr = true; errors_to_stderr = true; + after_zeek_init = false; weird_count = 0; weird_sampling_rate = 0; @@ -80,7 +82,7 @@ void Reporter::Info(const char* fmt, ...) { va_list ap; va_start(ap, fmt); - FILE* out = info_to_stderr ? stderr : 0; + FILE* out = EmitToStderr(info_to_stderr) ? stderr : 0; DoLog("", reporter_info, out, 0, 0, true, true, 0, fmt, ap); va_end(ap); } @@ -89,7 +91,7 @@ void Reporter::Warning(const char* fmt, ...) { va_list ap; va_start(ap, fmt); - FILE* out = warnings_to_stderr ? stderr : 0; + FILE* out = EmitToStderr(warnings_to_stderr) ? stderr : 0; DoLog("warning", reporter_warning, out, 0, 0, true, true, 0, fmt, ap); va_end(ap); } @@ -99,7 +101,7 @@ void Reporter::Error(const char* fmt, ...) ++errors; va_list ap; va_start(ap, fmt); - FILE* out = errors_to_stderr ? stderr : 0; + FILE* out = EmitToStderr(errors_to_stderr) ? stderr : 0; DoLog("error", reporter_error, out, 0, 0, true, true, 0, fmt, ap); va_end(ap); } @@ -142,7 +144,7 @@ void Reporter::ExprRuntimeError(const Expr* expr, const char* fmt, ...) PushLocation(expr->GetLocationInfo()); va_list ap; va_start(ap, fmt); - FILE* out = errors_to_stderr ? stderr : 0; + FILE* out = EmitToStderr(errors_to_stderr) ? stderr : 0; DoLog("expression error", reporter_error, out, 0, 0, true, true, d.Description(), fmt, ap); va_end(ap); @@ -156,7 +158,7 @@ void Reporter::RuntimeError(const Location* location, const char* fmt, ...) PushLocation(location); va_list ap; va_start(ap, fmt); - FILE* out = errors_to_stderr ? stderr : 0; + FILE* out = EmitToStderr(errors_to_stderr) ? stderr : 0; DoLog("runtime error", reporter_error, out, 0, 0, true, true, "", fmt, ap); va_end(ap); PopLocation(); @@ -196,7 +198,7 @@ void Reporter::InternalWarning(const char* fmt, ...) { va_list ap; va_start(ap, fmt); - FILE* out = warnings_to_stderr ? stderr : 0; + FILE* out = EmitToStderr(warnings_to_stderr) ? 
stderr : 0; // TODO: would be nice to also log a call stack. DoLog("internal warning", reporter_warning, out, 0, 0, true, true, 0, fmt, ap); @@ -216,36 +218,30 @@ void Reporter::Syslog(const char* fmt, ...) void Reporter::WeirdHelper(EventHandlerPtr event, Val* conn_val, file_analysis::File* f, const char* addl, const char* fmt_name, ...) { - val_list* vl = new val_list(1); + val_list vl(2); if ( conn_val ) - vl->append(conn_val); + vl.append(conn_val); else if ( f ) - vl->append(f->GetVal()->Ref()); + vl.append(f->GetVal()->Ref()); if ( addl ) - vl->append(new StringVal(addl)); + vl.append(new StringVal(addl)); va_list ap; va_start(ap, fmt_name); - DoLog("weird", event, 0, 0, vl, false, false, 0, fmt_name, ap); + DoLog("weird", event, 0, 0, &vl, false, false, 0, fmt_name, ap); va_end(ap); - - delete vl; } void Reporter::WeirdFlowHelper(const IPAddr& orig, const IPAddr& resp, const char* fmt_name, ...) { - val_list* vl = new val_list(2); - vl->append(new AddrVal(orig)); - vl->append(new AddrVal(resp)); + val_list vl{new AddrVal(orig), new AddrVal(resp)}; va_list ap; va_start(ap, fmt_name); - DoLog("weird", flow_weird, 0, 0, vl, false, false, 0, fmt_name, ap); + DoLog("weird", flow_weird, 0, 0, &vl, false, false, 0, fmt_name, ap); va_end(ap); - - delete vl; } void Reporter::UpdateWeirdStats(const char* name) @@ -489,29 +485,32 @@ void Reporter::DoLog(const char* prefix, EventHandlerPtr event, FILE* out, if ( raise_event && event && via_events && ! in_error_handler ) { - val_list* vl = new val_list; + auto vl_size = 1 + (bool)time + (bool)location + (bool)conn + + (addl ? addl->length() : 0); + + val_list vl(vl_size); if ( time ) - vl->append(new Val((bro_start_network_time != 0.0) ? network_time : 0, TYPE_TIME)); + vl.append(new Val((bro_start_network_time != 0.0) ? network_time : 0, TYPE_TIME)); - vl->append(new StringVal(buffer)); + vl.append(new StringVal(buffer)); if ( location ) - vl->append(new StringVal(loc_str.c_str())); + vl.append(new StringVal(loc_str.c_str())); if ( conn ) - vl->append(conn->BuildConnVal()); + vl.append(conn->BuildConnVal()); if ( addl ) { loop_over_list(*addl, i) - vl->append((*addl)[i]); + vl.append((*addl)[i]); } if ( conn ) - conn->ConnectionEvent(event, 0, vl); + conn->ConnectionEventFast(event, 0, std::move(vl)); else - mgr.QueueEvent(event, vl); + mgr.QueueEventFast(event, std::move(vl)); } else { diff --git a/src/Reporter.h b/src/Reporter.h index 88270a9dba..dce075de64 100644 --- a/src/Reporter.h +++ b/src/Reporter.h @@ -233,6 +233,13 @@ public: this->weird_sampling_duration = weird_sampling_duration; } + /** + * Called after zeek_init() and toggles whether messages may stop being + * emitted to stderr. + */ + void ZeekInitDone() + { after_zeek_init = true; } + private: void DoLog(const char* prefix, EventHandlerPtr event, FILE* out, Connection* conn, val_list* addl, bool location, bool time, @@ -248,12 +255,16 @@ private: bool PermitNetWeird(const char* name); bool PermitFlowWeird(const char* name, const IPAddr& o, const IPAddr& r); + bool EmitToStderr(bool flag) + { return flag || ! 
after_zeek_init; } + int errors; bool via_events; int in_error_handler; bool info_to_stderr; bool warnings_to_stderr; bool errors_to_stderr; + bool after_zeek_init; std::list > locations; diff --git a/src/Rule.cc b/src/Rule.cc index c483527c63..57cb82f65e 100644 --- a/src/Rule.cc +++ b/src/Rule.cc @@ -1,4 +1,4 @@ -#include "bro-config.h" +#include "zeek-config.h" #include "Rule.h" #include "RuleMatcher.h" diff --git a/src/RuleAction.cc b/src/RuleAction.cc index e67c51b514..edfe2497a2 100644 --- a/src/RuleAction.cc +++ b/src/RuleAction.cc @@ -1,7 +1,7 @@ #include using std::string; -#include "bro-config.h" +#include "zeek-config.h" #include "RuleAction.h" #include "RuleMatcher.h" @@ -17,16 +17,11 @@ void RuleActionEvent::DoAction(const Rule* parent, RuleEndpointState* state, { if ( signature_match ) { - val_list* vl = new val_list; - vl->append(rule_matcher->BuildRuleStateValue(parent, state)); - vl->append(new StringVal(msg)); - - if ( data ) - vl->append(new StringVal(len, (const char*)data)); - else - vl->append(val_mgr->GetEmptyString()); - - mgr.QueueEvent(signature_match, vl); + mgr.QueueEventFast(signature_match, { + rule_matcher->BuildRuleStateValue(parent, state), + new StringVal(msg), + data ? new StringVal(len, (const char*)data) : val_mgr->GetEmptyString(), + }); } } diff --git a/src/RuleCondition.cc b/src/RuleCondition.cc index 0534570ed7..6cd2e9e4c1 100644 --- a/src/RuleCondition.cc +++ b/src/RuleCondition.cc @@ -1,4 +1,4 @@ -#include "bro-config.h" +#include "zeek-config.h" #include "RuleCondition.h" #include "analyzer/protocol/tcp/TCP.h" @@ -162,7 +162,7 @@ bool RuleConditionEval::DoMatch(Rule* rule, RuleEndpointState* state, return id->ID_Val()->AsBool(); // Call function with a signature_state value as argument. - val_list args; + val_list args(2); args.append(rule_matcher->BuildRuleStateValue(rule, state)); if ( data ) diff --git a/src/RuleMatcher.cc b/src/RuleMatcher.cc index 54228d58dd..6fd13d2db7 100644 --- a/src/RuleMatcher.cc +++ b/src/RuleMatcher.cc @@ -1,7 +1,7 @@ #include #include -#include "bro-config.h" +#include "zeek-config.h" #include "analyzer/Analyzer.h" #include "RuleMatcher.h" @@ -235,7 +235,7 @@ bool RuleMatcher::ReadFiles(const name_list& files) for ( int i = 0; i < files.length(); ++i ) { - rules_in = open_file(find_file(files[i], bro_path(), "sig")); + rules_in = open_file(find_file(files[i], bro_path(), ".sig")); if ( ! rules_in ) { diff --git a/src/Scope.cc b/src/Scope.cc index a707336381..5107bd8e9a 100644 --- a/src/Scope.cc +++ b/src/Scope.cc @@ -1,12 +1,15 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include "ID.h" #include "Val.h" #include "Scope.h" #include "Reporter.h" +declare(PList,Scope); +typedef PList(Scope) scope_list; + static scope_list scopes; static Scope* top_scope; diff --git a/src/SerialInfo.h b/src/SerialInfo.h deleted file mode 100644 index de2d9eeb61..0000000000 --- a/src/SerialInfo.h +++ /dev/null @@ -1,182 +0,0 @@ -// Helper classes to pass data between serialization methods. 
- -#ifndef serialinfo_h -#define serialinfo_h - -class SerialInfo { -public: - SerialInfo(Serializer* arg_s) - { - chunk = 0; - s = arg_s; - may_suspend = clear_containers = false; - cache = globals_as_names = true; - type = SER_NONE; - pid_32bit = false; - include_locations = true; - new_cache_strategy = false; - broccoli_peer = false; - } - - SerialInfo(const SerialInfo& info) - { - chunk = info.chunk; - s = info.s; - may_suspend = info.may_suspend; - cache = info.cache; - type = info.type; - clear_containers = info.clear_containers; - globals_as_names = info.globals_as_names; - pid_32bit = info.pid_32bit; - include_locations = info.include_locations; - new_cache_strategy = info.new_cache_strategy; - broccoli_peer = info.broccoli_peer; - } - - // Parameters that control serialization. - Serializer* s; // serializer to use - bool cache; // true if object caching is ok - bool may_suspend; // if true, suspending serialization is ok - bool clear_containers; // if true, store container values as empty - bool include_locations; // if true, include locations in serialization - - // If true, for NameExpr's serialize just the names of globals, just - // their value. - bool globals_as_names; - - bool pid_32bit; // if true, use old-style 32-bit permanent IDs - - // If true, we support keeping objs in cache permanently. - bool new_cache_strategy; - - // If true, we're connecting to a Broccoli. If so, serialization - // specifics may be adapted for functionality Broccoli does not - // support. - bool broccoli_peer; - - ChunkedIO::Chunk* chunk; // chunk written right before the serialization - - // Attributes set during serialization. - SerialType type; // type of currently serialized object - - // State for suspending/resuming serialization - Continuation cont; -}; - -class UnserialInfo { -public: - UnserialInfo(Serializer* arg_s) - { - s = arg_s; - cache = true; - type = SER_NONE; - chunk = 0; - install_globals = install_conns = true; - install_uniques = false; - ignore_callbacks = false; - id_policy = Replace; - print = 0; - pid_32bit = false; - new_cache_strategy = false; - broccoli_peer = false; - } - - UnserialInfo(const UnserialInfo& info) - { - s = info.s; - cache = info.cache; - type = info.type; - chunk = info.chunk; - install_globals = info.install_globals; - install_uniques = info.install_uniques; - install_conns = info.install_conns; - ignore_callbacks = info.ignore_callbacks; - id_policy = info.id_policy; - print = info.print; - pid_32bit = info.pid_32bit; - new_cache_strategy = info.new_cache_strategy; - broccoli_peer = info.broccoli_peer; - } - - // Parameters that control unserialization. - Serializer* s; // serializer to use - bool cache; // if true, object caching is ok - FILE* print; // print read objects to given file (human-readable) - - ChunkedIO::Chunk* chunk; // chunk to parse (rather than reading one) - - bool install_globals; // if true, install unknown globals - // in global scope - bool install_conns; // if true, add connections to session table - bool install_uniques; // if true, install unknown globally - // unique IDs in global scope - bool ignore_callbacks; // if true, don't call Got*() callbacks - bool pid_32bit; // if true, use old-style 32-bit permanent IDs. - - // If true, we support keeping objs in cache permanently. - bool new_cache_strategy; - - // If true, we're connecting to a Broccoli. If so, serialization - // specifics may be adapted for functionality Broccoli does not - // support. 
- bool broccoli_peer; - - // If a global ID already exits, of these policies is used. - enum { - Keep, // keep the old ID and ignore the new - Replace, // install the new ID (default) - - // Keep current ID instance but copy the new value into it - // (types have to match). - CopyNewToCurrent, - - // Install the new ID instance but replace its value - // with that of the old one (types have to match). - CopyCurrentToNew, - - // Instantiate a new ID, but do not insert it into the global - // space. - InstantiateNew, - } id_policy; - - // Attributes set during unserialization. - SerialType type; // type of currently unserialized object -}; - -// Helper class to temporarily disable suspending for all next-level calls -// using the given SerialInfo. It saves the current value of info.may_suspend -// and then sets it to false. When it goes out of scope, the original value -// is restored. -// -// We need this because not all classes derived from SerialObj are -// suspension-aware yet, i.e., they don't work correctly if one of the -// next-level functions suspends. Eventually this may change, but actually -// it's not very important: most classes don't need to suspend anyway as -// their data volume is very small. We have to make sure though that those -// which do (e.g. TableVals) support suspension. -class DisableSuspend { -public: - DisableSuspend(SerialInfo* arg_info) - { - info = arg_info; - old_may_suspend = info->may_suspend; - info->may_suspend = false; - } - - ~DisableSuspend() { Restore(); } - - void Release() { info = 0; } - - // Restores the suspension-state to its original value. - void Restore() - { - if ( info ) - info->may_suspend = old_may_suspend; - } - -private: - SerialInfo* info; - bool old_may_suspend; -}; - -#endif diff --git a/src/SerialObj.cc b/src/SerialObj.cc deleted file mode 100644 index ab7f63e823..0000000000 --- a/src/SerialObj.cc +++ /dev/null @@ -1,277 +0,0 @@ -#include "SerialObj.h" -#include "Serializer.h" - -TransientID::ID TransientID::counter = 0; - -SerialObj::FactoryMap* SerialObj::factories = 0; -SerialObj::ClassNameMap* SerialObj::names = 0; -uint64 SerialObj::time_counter = NEVER + ALWAYS + 1; - -SerialObj* SerialObj::Instantiate(SerialType type) - { - FactoryMap::iterator f = factories->find(type & SER_TYPE_MASK_EXACT); - if ( f != factories->end() ) - { - SerialObj* o = (SerialObj*) (*f->second)(); -#ifdef DEBUG - o->serial_type = o->GetSerialType(); -#endif - return o; - } - - reporter->Error("Unknown object type 0x%08x", type); - return 0; - } - -const char* SerialObj::ClassName(SerialType type) - { - ClassNameMap::iterator f = names->find(type); - if ( f != names->end() ) - return f->second; - - reporter->Error("Unknown object type 0x%08x", type); - return ""; - } - -void SerialObj::Register(SerialType type, FactoryFunc f, const char* name) - { - if ( ! factories ) - { - factories = new FactoryMap; - names = new ClassNameMap; - } - - type = type & SER_TYPE_MASK_EXACT; - - FactoryMap::iterator i = factories->find(type); - if ( i != factories->end() ) - reporter->InternalError("SerialType 0x%08x registered twice", type); - - (*factories)[type] = f; - (*names)[type] = name; - } - -inline bool SerializePID(SerialInfo* info, bool full, SerializationCache::PermanentID pid) - { - if ( ! SERIALIZE(full) ) - return false; - - if ( ! info->pid_32bit ) - return SERIALIZE(pid); - - // Broccoli compatibility mode with 32bit pids. 
- uint32 tmp = uint32(pid); - return SERIALIZE(tmp); - } - -bool SerialObj::Serialize(SerialInfo* info) const - { - assert(info); - - if ( info->cont.NewInstance() ) - { - SerializationCache::PermanentID pid = SerializationCache::NONE; - - const TransientID* tid = GetTID(); - - if ( ! tid ) - reporter->InternalError("no tid - missing DECLARE_SERIAL?"); - - if ( info->cache ) - pid = info->s->Cache()->Lookup(*tid); - - if ( pid != SerializationCache::NONE && info->cache ) - { - DBG_LOG(DBG_SERIAL, "%s [%p, ref pid %lld, tid %lld]", __PRETTY_FUNCTION__, this, (long long) pid, tid->Value() ); - - DBG_LOG(DBG_SERIAL, "-- Caching"); - DBG_PUSH(DBG_SERIAL); - - if ( ! SerializePID(info, false, pid) ) - { - DBG_POP(DBG_SERIAL); - return false; - } - - DBG_POP(DBG_SERIAL); - return true; - } - - if ( info->cache ) - pid = info->s->Cache()->Register(this, - SerializationCache::NONE, - info->new_cache_strategy); - - DBG_LOG(DBG_SERIAL, "%s [%p, new pid %lld, tid %lld]", __PRETTY_FUNCTION__, this, (long long) pid, tid->Value() ); - DBG_LOG(DBG_SERIAL, "-- Caching"); - DBG_PUSH(DBG_SERIAL); - - if ( ! SerializePID(info, true, pid) ) - { - DBG_POP(DBG_SERIAL); - return false; - } - - info->type = SER_NONE; - DBG_POP(DBG_SERIAL); - } - - DBG_PUSH(DBG_SERIAL); - info->cont.SaveContext(); - bool ret = DoSerialize(info); - info->cont.RestoreContext(); - DBG_POP(DBG_SERIAL); - - if ( info->cont.ChildSuspended() ) - return ret; - -#ifdef DEBUG - if ( debug_logger.IsEnabled(DBG_SERIAL) && IsBroObj(serial_type) ) - { - ODesc desc(DESC_READABLE); - ((BroObj*)this)->Describe(&desc); - DBG_LOG(DBG_SERIAL, "-- Desc: %s", desc.Description()); - } -#endif - - return ret; - } - -SerialObj* SerialObj::Unserialize(UnserialInfo* info, SerialType type) - { - SerializationCache::PermanentID pid = SerializationCache::NONE; - - DBG_LOG(DBG_SERIAL, "%s", __PRETTY_FUNCTION__); - - bool full_obj; - - DBG_LOG(DBG_SERIAL, "-- Caching"); - DBG_PUSH(DBG_SERIAL); - - bool result; - - if ( ! info->pid_32bit ) - result = UNSERIALIZE(&full_obj) && UNSERIALIZE(&pid); - else - { - // Broccoli compatibility mode with 32bit pids. - uint32 tmp = 0; - result = UNSERIALIZE(&full_obj) && UNSERIALIZE(&tmp); - pid = tmp; - } - - if ( ! result ) - { - DBG_POP(DBG_SERIAL); - return 0; - } - - DBG_POP(DBG_SERIAL); - - DBG_LOG(DBG_SERIAL, "-- [%s pid %lld]", full_obj ? "obj" : "ref", (long long) pid); - - if ( ! full_obj ) - { - // FIXME: Yet another const_cast to check eventually... - SerialObj* obj = - const_cast(info->s->Cache()->Lookup(pid)); - if ( obj ) - { - if ( obj->IsBroObj() ) - Ref((BroObj*) obj); - return obj; - } - - // In the following we'd like the format specifier to match - // the type of pid; but pid is uint64, for which there's - // no portable format specifier. So we upcast it to long long, - // which is at least that size, and use a matching format. - info->s->Error(fmt("unknown object %lld referenced", - (long long) pid)); - return 0; - } - - uint16 stype; - if ( ! UNSERIALIZE(&stype) ) - return 0; - - SerialObj* obj = Instantiate(SerialType(stype)); - - if ( ! obj ) - { - info->s->Error("unknown object type"); - return 0; - } - -#ifdef DEBUG - obj->serial_type = stype; -#endif - - const TransientID* tid = obj->GetTID(); - if ( ! tid ) - reporter->InternalError("no tid - missing DECLARE_SERIAL?"); - - if ( info->cache ) - info->s->Cache()->Register(obj, pid, info->new_cache_strategy); - - info->type = stype; - - DBG_PUSH(DBG_SERIAL); - if ( ! 
obj->DoUnserialize(info) ) - { - DBG_POP(DBG_SERIAL); - return 0; - } - - DBG_POP(DBG_SERIAL); - - if ( ! SerialObj::CheckTypes(stype, type) ) - { - info->s->Error("type mismatch"); - return 0; - } - -#ifdef DEBUG - if ( debug_logger.IsEnabled(DBG_SERIAL) && IsBroObj(stype) ) - { - ODesc desc(DESC_READABLE); - ((BroObj*)obj)->Describe(&desc); - DBG_LOG(DBG_SERIAL, "-- Desc: %s", desc.Description()); - } -#endif - - assert(obj); - return obj; - } - -bool SerialObj::DoSerialize(SerialInfo* info) const - { - assert(info->type != SER_NONE); - -#ifdef DEBUG - const_cast(this)->serial_type = info->type; -#endif - - DBG_LOG(DBG_SERIAL, __PRETTY_FUNCTION__); - DBG_PUSH(DBG_SERIAL); - - uint16 stype = uint16(info->type); - - if ( ! info->new_cache_strategy ) - { - // This is a bit unfortunate: to make sure we're sending - // out the same types as in the past, we need to strip out - // the new cache stable bit. - stype &= ~SER_IS_CACHE_STABLE; - } - - bool ret = SERIALIZE(stype); - DBG_POP(DBG_SERIAL); - return ret; - } - -bool SerialObj::DoUnserialize(UnserialInfo* info) - { - DBG_LOG(DBG_SERIAL, __PRETTY_FUNCTION__); - return true; - } diff --git a/src/SerialObj.h b/src/SerialObj.h deleted file mode 100644 index b502414f71..0000000000 --- a/src/SerialObj.h +++ /dev/null @@ -1,382 +0,0 @@ -// Infrastructure for serializable objects. -// -// How to make objects of class Foo serializable: -// -// 1. Derive Foo (directly or indirectly) from SerialObj. -// 2. Add a SER_FOO constant to SerialTypes in SerialTypes.h. -// 3. Add DECLARE_SERIAL(Foo) into class definition. -// 4. Add a (preferably protected) default ctor if it doesn't already exist. -// 5. For non-abstract classes, add IMPLEMENT_SERIAL(Foo, SER_FOO) to *.cc -// 6. Add two methods like this to *.cc (keep names of arguments!) -// -// bool Foo::DoSerialize(SerialInfo* info) const -// { -// DO_SERIALIZE(SER_FOO, ParentClassOfFoo); -// <... serialize class members via methods in Serializer ...> -// return true if everything ok; -// } -// -// bool Foo::DoUnserialize(UnserialInfo* info) -// { -// DO_UNSERIALIZE(ParentClassOfFoo); -// <... unserialize class members via methods in Serializer ...> -// return true if everything ok; -// } -// -// (7. If no parent class of Foo already contains Serialize()/Unserialize() -// methods, these need to be added somewhere too. But most of the various -// parts of the class hierarchy already have them.) - - -#ifndef SERIALOBJ_H -#define SERIALOBJ_H - -#include -#include - -#include "DebugLogger.h" -#include "Continuation.h" -#include "SerialTypes.h" -#include "bro-config.h" - -#if SIZEOF_LONG_LONG < 8 -# error "Serialization requires that sizeof(long long) is at least 8. (Remove this message only if you know what you're doing.)" -#endif - -class Serializer; -class SerialInfo; -class UnserialInfo; -class SerializationCache; - -// Per-process unique ID. -class TransientID { -public: - TransientID() { id = ++counter; } - - typedef unsigned long long ID; - ID Value() const { return id; } - -private: - ID id; - static ID counter; -}; - -// Abstract base class for serializable objects. -class SerialObj { -public: - virtual ~SerialObj() { } - - virtual const TransientID* GetTID() const { return 0; } - - virtual SerialType GetSerialType() const { return 0; } - - bool IsBroObj() const { return IsBroObj(GetSerialType()); } - bool IsCacheStable() const { return IsCacheStable(GetSerialType()); } - - static const uint64 NEVER = 0; - static const uint64 ALWAYS = 1; - - // Returns time of last modification. 
This "time" is a monotonically - // increasing counter which is incremented each time a modification is - // performed (more precisely: each time an object is modified which - // returns something different than NEVER). Such times can thus be - // compared to see whether some modification took place before another. - // - // There are two special values: - // NEVER: This object will never change. - // ALWAYS: Always consider this object as changed, i.e., don't - // cache it. - virtual uint64 LastModified() const { return NEVER; } - - // Instantiate an object of the given type. Return nil - // if unknown. - static SerialObj* Instantiate(SerialType type); - - static const char* ClassName(SerialType type); - - // Associate a "factory" function with the given type. - // A factory is a class or function that creates instances - // of a certain type. - - typedef SerialObj* (*FactoryFunc)(); - static void Register(SerialType type, FactoryFunc f, - const char* class_name); - - static bool IsBroObj(SerialType type) - { return type & SER_IS_BRO_OBJ; } - - static bool IsCacheStable(SerialType type) - { return type & SER_IS_CACHE_STABLE; } - - static bool CheckTypes(SerialType type1, SerialType type2) - { return (type1 & SER_TYPE_MASK_PARENT) == - (type2 & SER_TYPE_MASK_PARENT); } - -protected: - friend class SerializationCache; - - SerialObj() - { -#ifdef DEBUG - serial_type = 0; -#endif - } - - // Serializes this object. If info->cache is false, we can use - // DECLARE_NON_CACHEABLE_SERIAL (instead of DECLARE_SERIAL) which - // avoids storing a per-object id. - bool Serialize(SerialInfo* info) const; - - // Unserializes next object. - static SerialObj* Unserialize(UnserialInfo* info, - SerialType type); - - virtual bool DoSerialize(SerialInfo* info) const; - virtual bool DoUnserialize(UnserialInfo* info); - - typedef std::map FactoryMap; - static FactoryMap* factories; - - typedef std::map ClassNameMap; - static ClassNameMap* names; - - static uint64 time_counter; - static uint64 IncreaseTimeCounter() { return ++time_counter; } - static uint64 GetTimeCounter() { return time_counter; } - -#ifdef DEBUG - SerialType serial_type; -#endif -}; - -// A class that registers a factory function upon instantiation. -class SerialTypeRegistrator { -public: - SerialTypeRegistrator(SerialType type, SerialObj::FactoryFunc func, - const char* class_name) - { - SerialObj::Register(type, func, class_name); - } -}; - - -// Macro helpers. - -#define DECLARE_ABSTRACT_SERIAL(classname) \ - bool DoSerialize(SerialInfo*) const override; \ - bool DoUnserialize(UnserialInfo*) override; \ - -#define DECLARE_SERIAL(classname) \ - static classname* Instantiate(); \ - static SerialTypeRegistrator register_type; \ - bool DoSerialize(SerialInfo*) const override; \ - bool DoUnserialize(UnserialInfo*) override; \ - const TransientID* GetTID() const override { return &tid; } \ - SerialType GetSerialType() const override; \ - TransientID tid; - -// Only needed (and usable) for non-abstract classes. -#define IMPLEMENT_SERIAL(classname, classtype) \ - SerialTypeRegistrator classname::register_type(classtype, \ - FactoryFunc(&classname::Instantiate), #classname); \ - SerialType classname::GetSerialType() const { return classtype; }; \ - classname* classname::Instantiate() { return new classname(); } \ - -// Pushes debug level on instantiation and pops when it goes out of scope. -class AutoPush { -public: - AutoPush() { DBG_PUSH(DBG_SERIAL); } - ~AutoPush() { DBG_POP(DBG_SERIAL); } -}; - -// Note that by default we disable suspending. 
Use DO_SERIALIZE_WITH_SUSPEND -// to enable, but be careful to make sure that whomever calls us is aware of -// the fact (or has already disabled suspension itself). -#define DO_SERIALIZE(classtype, super) \ - DBG_LOG(DBG_SERIAL, __PRETTY_FUNCTION__); \ - if ( info->type == SER_NONE ) \ - info->type = classtype; \ - DisableSuspend suspend(info); \ - AutoPush auto_push; \ - if ( ! super::DoSerialize(info) ) \ - return false; - -// Unfortunately, this is getting quite long. :-( -#define DO_SERIALIZE_WITH_SUSPEND(classtype, super) \ - DBG_LOG(DBG_SERIAL, __PRETTY_FUNCTION__); \ - if ( info->type == SER_NONE ) \ - info->type = classtype; \ - AutoPush auto_push; \ - \ - bool call_super = info->cont.NewInstance(); \ - \ - if ( info->cont.ChildSuspended() ) \ - { \ - void* user_ptr = info->cont.RestoreState(); \ - if ( user_ptr == &call_super ) \ - call_super = true; \ - } \ - \ - if ( call_super ) \ - { \ - info->cont.SaveState(&call_super); \ - info->cont.SaveContext(); \ - bool result = super::DoSerialize(info); \ - info->cont.RestoreContext(); \ - if ( ! result ) \ - return false; \ - if ( info->cont.ChildSuspended() ) \ - return true; \ - info->cont.SaveState(0); \ - } \ - -#define DO_UNSERIALIZE(super) \ - DBG_LOG(DBG_SERIAL, __PRETTY_FUNCTION__); \ - AutoPush auto_push; \ - if ( ! super::DoUnserialize(info) ) \ - return false; - -#define SERIALIZE(x) \ - info->s->Write(x, #x) - -#define SERIALIZE_STR(x, y) \ - info->s->Write(x, y, #x) - -#define SERIALIZE_BIT(bit) \ - info->s->Write(bool(bit), #bit) - -#define UNSERIALIZE(x) \ - info->s->Read(x, #x) - -#define UNSERIALIZE_STR(x, y) \ - info->s->Read(x, y, #x) - -#define UNSERIALIZE_BIT(bit) \ - { \ - bool tmp; \ - if ( ! info->s->Read(&tmp, #bit) ) \ - return false; \ - bit = (unsigned int) tmp; \ - } - -// Some helpers for pointers which may be nil. -#define SERIALIZE_OPTIONAL(ptr) \ - { \ - if ( ptr ) \ - { \ - if ( ! info->cont.ChildSuspended() ) \ - if ( ! info->s->Write(true, "has_" #ptr) ) \ - return false; \ - \ - info->cont.SaveContext(); \ - bool result = ptr->Serialize(info); \ - info->cont.RestoreContext(); \ - if ( ! result ) \ - return false; \ - \ - if ( info->cont.ChildSuspended() ) \ - return true; \ - } \ - \ - else if ( ! info->s->Write(false, "has_" #ptr) ) \ - return false; \ - } - -#define SERIALIZE_OPTIONAL_STR(str) \ - { \ - if ( str ) \ - { \ - if ( ! (info->s->Write(true, "has_" #str) && info->s->Write(str, "str")) ) \ - return false; \ - } \ - \ - else if ( ! info->s->Write(false, "has_" #str) ) \ - return false; \ - } - -#define UNSERIALIZE_OPTIONAL(dst, unserialize) \ - { \ - bool has_it; \ - if ( ! info->s->Read(&has_it, "has_" #dst) ) \ - return false; \ - \ - if ( has_it ) \ - { \ - dst = unserialize; \ - if ( ! dst ) \ - return false; \ - } \ - \ - else \ - dst = 0; \ - } - -#define UNSERIALIZE_OPTIONAL_STR(dst) \ - { \ - bool has_it; \ - if ( ! info->s->Read(&has_it, "has_" #dst) ) \ - return false; \ - \ - if ( has_it ) \ - { \ - if ( ! info->s->Read(&dst, 0, "has_" #dst) ) \ - return false; \ - if ( ! dst ) \ - return false; \ - } \ - \ - else \ - dst = 0; \ - } - -#define UNSERIALIZE_OPTIONAL_STR_DEL(dst, del) \ - { \ - bool has_it; \ - if ( ! info->s->Read(&has_it, "has_" #dst) ) \ - { \ - delete del; \ - return 0; \ - } \ - \ - if ( has_it ) \ - { \ - if ( ! info->s->Read(&dst, 0, "has_" #dst) ) \ - { \ - delete del; \ - return 0; \ - } \ - if ( ! 
dst ) \ - { \ - delete del; \ - return 0; \ - } \ - } \ - \ - else \ - dst = 0; \ - } - -#define UNSERIALIZE_OPTIONAL_STATIC(dst, unserialize, del) \ - { \ - bool has_it; \ - if ( ! info->s->Read(&has_it, "has_" #dst) ) \ - { \ - delete del; \ - return 0; \ - } \ - \ - if ( has_it ) \ - { \ - dst = unserialize; \ - if ( ! dst ) \ - { \ - delete del; \ - return 0; \ - } \ - } \ - \ - else \ - dst = 0; \ - } - -#endif diff --git a/src/SerialTypes.h b/src/SerialTypes.h deleted file mode 100644 index 029048a80f..0000000000 --- a/src/SerialTypes.h +++ /dev/null @@ -1,235 +0,0 @@ -#ifndef serialtypes_h -#define serialtypes_h - -// Each serializable class gets a type. -// -// The type enables a form of poor man's type-checking: -// Bit 0-7: Number (unique relative to main parent (see below)). -// Bit 8-12: Main parent class (SER_IS_*) -// Bit 13: unused -// Bit 14: 1 if preference is to keep in cache. -// Bit 15: 1 if derived from BroObj. - -typedef uint16 SerialType; - -static const SerialType SER_TYPE_MASK_EXACT = 0x1fff; -static const SerialType SER_TYPE_MASK_PARENT = 0x1f00; -static const SerialType SER_IS_CACHE_STABLE = 0x4000; -static const SerialType SER_IS_BRO_OBJ = 0x8000; - -#define SERIAL_CONST(name, val, type) \ - const SerialType SER_ ## name = val | SER_IS_ ## type; - -#define SERIAL_CONST2(name) SERIAL_CONST(name, 1, name) - -#define SERIAL_IS(name, val) \ - static const SerialType SER_IS_ ## name = val; -#define SERIAL_IS_BO(name, val) \ - static const SerialType SER_IS_ ## name = val | SER_IS_BRO_OBJ; -#define SERIAL_IS_BO_AND_CACHE_STABLE(name, val) \ - static const SerialType SER_IS_ ## name = val | (SER_IS_BRO_OBJ | SER_IS_CACHE_STABLE); - -SERIAL_IS_BO(CONNECTION, 0x0100) -SERIAL_IS(TIMER, 0x0200) -SERIAL_IS(TCP_ENDPOINT, 0x0300) -SERIAL_IS_BO(TCP_ANALYZER, 0x0400) -SERIAL_IS_BO(TCP_ENDPOINT_ANALYZER, 0x0500) -SERIAL_IS(TCP_CONTENTS, 0x0600) -SERIAL_IS(REASSEMBLER, 0x0700) -SERIAL_IS_BO(VAL, 0x0800) -SERIAL_IS_BO_AND_CACHE_STABLE(EXPR, 0x0900) -SERIAL_IS_BO_AND_CACHE_STABLE(BRO_TYPE, 0x0a00) -SERIAL_IS_BO_AND_CACHE_STABLE(STMT, 0x0b00) -SERIAL_IS_BO_AND_CACHE_STABLE(ATTRIBUTES, 0x0c00) -SERIAL_IS_BO_AND_CACHE_STABLE(EVENT_HANDLER, 0x0d00) -SERIAL_IS_BO_AND_CACHE_STABLE(BRO_FILE, 0x0e00) -SERIAL_IS_BO_AND_CACHE_STABLE(FUNC, 0x0f00) -SERIAL_IS_BO(ID, 0x1000) -SERIAL_IS(STATE_ACCESS, 0x1100) -SERIAL_IS_BO(CASE, 0x1200) -SERIAL_IS(LOCATION, 0x1300) -SERIAL_IS(RE_MATCHER, 0x1400) -SERIAL_IS(BITVECTOR, 0x1500) -SERIAL_IS(COUNTERVECTOR, 0x1600) -SERIAL_IS(BLOOMFILTER, 0x1700) -SERIAL_IS(HASHER, 0x1800) - -// These are the externally visible types. -const SerialType SER_NONE = 0; - -SERIAL_CONST2(BRO_OBJ) - -#define SERIAL_CONN(name, val) SERIAL_CONST(name, val, CONNECTION) -SERIAL_CONN(CONNECTION, 1) -SERIAL_CONN(ICMP_ANALYZER, 2) -// We use ICMP_Echo here rather than ICMP_ECHO because the latter gets -// macro expanded :-(. 
-SERIAL_CONN(ICMP_Echo, 3) -SERIAL_CONN(ICMP_CONTEXT, 4) -SERIAL_CONN(TCP_CONNECTION, 5) -SERIAL_CONN(TCP_CONNECTION_CONTENTS, 6) -SERIAL_CONN(FTP_CONN, 7) -SERIAL_CONN(UDP_CONNECTION, 8) - -#define SERIAL_TIMER(name, val) SERIAL_CONST(name, val, TIMER) -SERIAL_TIMER(TIMER, 1) -SERIAL_TIMER(CONNECTION_TIMER, 2) - -SERIAL_CONST2(TCP_ENDPOINT) -SERIAL_CONST2(TCP_ANALYZER) -SERIAL_CONST2(TCP_ENDPOINT_ANALYZER) - -#define SERIAL_TCP_CONTENTS(name, val) SERIAL_CONST(name, val, TCP_CONTENTS) -SERIAL_TCP_CONTENTS(TCP_CONTENTS, 1) -SERIAL_TCP_CONTENTS(TCP_CONTENT_LINE, 2) -SERIAL_TCP_CONTENTS(TCP_NVT, 3) - -#define SERIAL_REASSEMBLER(name, val) SERIAL_CONST(name, val, REASSEMBLER) -SERIAL_REASSEMBLER(REASSEMBLER, 1) -SERIAL_REASSEMBLER(TCP_REASSEMBLER, 2) -SERIAL_REASSEMBLER(FILE_REASSEMBLER, 3) - -#define SERIAL_VAL(name, val) SERIAL_CONST(name, val, VAL) -SERIAL_VAL(VAL, 1) -SERIAL_VAL(INTERVAL_VAL, 2) -SERIAL_VAL(PORT_VAL, 3) -SERIAL_VAL(ADDR_VAL, 4) -SERIAL_VAL(SUBNET_VAL, 5) -SERIAL_VAL(STRING_VAL, 6) -SERIAL_VAL(PATTERN_VAL, 7) -SERIAL_VAL(LIST_VAL, 8) -SERIAL_VAL(TABLE_VAL, 9) -SERIAL_VAL(RECORD_VAL, 10) -SERIAL_VAL(ENUM_VAL, 11) -SERIAL_VAL(VECTOR_VAL, 12) -SERIAL_VAL(MUTABLE_VAL, 13) -SERIAL_VAL(OPAQUE_VAL, 14) -SERIAL_VAL(HASH_VAL, 15) -SERIAL_VAL(MD5_VAL, 16) -SERIAL_VAL(SHA1_VAL, 17) -SERIAL_VAL(SHA256_VAL, 18) -SERIAL_VAL(ENTROPY_VAL, 19) -SERIAL_VAL(TOPK_VAL, 20) -SERIAL_VAL(BLOOMFILTER_VAL, 21) -SERIAL_VAL(CARDINALITY_VAL, 22) -SERIAL_VAL(X509_VAL, 23) -SERIAL_VAL(COMM_STORE_HANDLE_VAL, 24) -SERIAL_VAL(COMM_DATA_VAL, 25) -SERIAL_VAL(OCSP_RESP_VAL, 26) - -#define SERIAL_EXPR(name, val) SERIAL_CONST(name, val, EXPR) -SERIAL_EXPR(EXPR, 1) -SERIAL_EXPR(NAME_EXPR, 2) -SERIAL_EXPR(CONST_EXPR, 3) -SERIAL_EXPR(UNARY_EXPR, 4) -SERIAL_EXPR(BINARY_EXPR, 5) -SERIAL_EXPR(INCR_EXPR, 6) -SERIAL_EXPR(NOT_EXPR, 7) -SERIAL_EXPR(POS_EXPR, 8) -SERIAL_EXPR(NEG_EXPR, 9) -SERIAL_EXPR(ADD_EXPR, 10) -SERIAL_EXPR(SUB_EXPR, 11) -SERIAL_EXPR(TIMES_EXPR, 12) -SERIAL_EXPR(DIVIDE_EXPR, 13) -SERIAL_EXPR(MOD_EXPR, 14) -SERIAL_EXPR(BOOL_EXPR, 15) -SERIAL_EXPR(EQ_EXPR, 16) -SERIAL_EXPR(REL_EXPR, 17) -SERIAL_EXPR(COND_EXPR, 18) -SERIAL_EXPR(REF_EXPR, 19) -SERIAL_EXPR(ASSIGN_EXPR, 20) -SERIAL_EXPR(INDEX_EXPR, 21) -SERIAL_EXPR(FIELD_EXPR, 22) -SERIAL_EXPR(HAS_FIELD_EXPR, 23) -SERIAL_EXPR(RECORD_CONSTRUCTOR_EXPR, 24) -SERIAL_EXPR(FIELD_ASSIGN_EXPR, 25) -// There used to be a SERIAL_EXPR(RECORD_MATCH_EXPR, 26) here -SERIAL_EXPR(ARITH_COERCE_EXPR, 27) -SERIAL_EXPR(RECORD_COERCE_EXPR, 28) -SERIAL_EXPR(FLATTEN_EXPR, 29) -SERIAL_EXPR(SCHEDULE_EXPR, 30) -SERIAL_EXPR(IN_EXPR, 31) -SERIAL_EXPR(CALL_EXPR, 32) -SERIAL_EXPR(EVENT_EXPR, 33) -SERIAL_EXPR(LIST_EXPR, 34) -SERIAL_EXPR(RECORD_ASSIGN_EXPR, 35) -SERIAL_EXPR(ADD_TO_EXPR, 36) -SERIAL_EXPR(REMOVE_FROM_EXPR, 37) -SERIAL_EXPR(SIZE_EXPR, 38) -SERIAL_EXPR(CLONE_EXPR, 39) -SERIAL_EXPR(TABLE_CONSTRUCTOR_EXPR, 40) -SERIAL_EXPR(SET_CONSTRUCTOR_EXPR, 41) -SERIAL_EXPR(VECTOR_CONSTRUCTOR_EXPR, 42) -SERIAL_EXPR(TABLE_COERCE_EXPR, 43) -SERIAL_EXPR(VECTOR_COERCE_EXPR, 44) -SERIAL_EXPR(CAST_EXPR, 45) -SERIAL_EXPR(IS_EXPR_, 46) // Name conflict with internal SER_IS_EXPR constant. -SERIAL_EXPR(BIT_EXPR, 47) -SERIAL_EXPR(COMPLEMENT_EXPR, 48) - -#define SERIAL_STMT(name, val) SERIAL_CONST(name, val, STMT) -SERIAL_STMT(STMT, 1) -SERIAL_STMT(EXPR_LIST_STMT, 2) -// There used to be ALARM_STMT (3) here. 
-SERIAL_STMT(PRINT_STMT, 4) -SERIAL_STMT(EXPR_STMT, 5) -SERIAL_STMT(IF_STMT, 6) -SERIAL_STMT(SWITCH_STMT, 7) -SERIAL_STMT(ADD_STMT, 8) -SERIAL_STMT(DEL_STMT, 9) -SERIAL_STMT(EVENT_STMT, 10) -SERIAL_STMT(FOR_STMT, 11) -SERIAL_STMT(NEXT_STMT, 12) -SERIAL_STMT(BREAK_STMT, 13) -SERIAL_STMT(RETURN_STMT, 14) -SERIAL_STMT(STMT_LIST, 15) -SERIAL_STMT(EVENT_BODY_LIST, 16) -SERIAL_STMT(INIT_STMT, 17) -SERIAL_STMT(NULL_STMT, 18) -SERIAL_STMT(WHEN_STMT, 19) -SERIAL_STMT(FALLTHROUGH_STMT, 20) -SERIAL_STMT(WHILE_STMT, 21) - -#define SERIAL_TYPE(name, val) SERIAL_CONST(name, val, BRO_TYPE) -SERIAL_TYPE(BRO_TYPE, 1) -SERIAL_TYPE(TYPE_LIST, 2) -SERIAL_TYPE(INDEX_TYPE, 3) -SERIAL_TYPE(TABLE_TYPE, 4) -SERIAL_TYPE(SET_TYPE, 5) -SERIAL_TYPE(FUNC_TYPE, 6) -SERIAL_TYPE(RECORD_TYPE, 7) -SERIAL_TYPE(SUBNET_TYPE, 8) -SERIAL_TYPE(FILE_TYPE, 9) -SERIAL_TYPE(ENUM_TYPE, 10) -SERIAL_TYPE(VECTOR_TYPE, 11) -SERIAL_TYPE(OPAQUE_TYPE, 12) - -SERIAL_CONST2(ATTRIBUTES) -SERIAL_CONST2(EVENT_HANDLER) -SERIAL_CONST2(BRO_FILE) - -#define SERIAL_FUNC(name, val) SERIAL_CONST(name, val, FUNC) -SERIAL_FUNC(FUNC, 1) -SERIAL_FUNC(BRO_FUNC, 2) -SERIAL_FUNC(DEBUG_FUNC, 3) -SERIAL_FUNC(BUILTIN_FUNC, 4) - -#define SERIAL_BLOOMFILTER(name, val) SERIAL_CONST(name, val, BLOOMFILTER) -SERIAL_BLOOMFILTER(BLOOMFILTER, 1) -SERIAL_BLOOMFILTER(BASICBLOOMFILTER, 2) -SERIAL_BLOOMFILTER(COUNTINGBLOOMFILTER, 3) - -#define SERIAL_HASHER(name, val) SERIAL_CONST(name, val, HASHER) -SERIAL_HASHER(HASHER, 1) -SERIAL_HASHER(DEFAULTHASHER, 2) -SERIAL_HASHER(DOUBLEHASHER, 3) - -SERIAL_CONST2(ID) -SERIAL_CONST2(STATE_ACCESS) -SERIAL_CONST2(CASE) -SERIAL_CONST2(LOCATION) -SERIAL_CONST2(RE_MATCHER) -SERIAL_CONST2(BITVECTOR) -SERIAL_CONST2(COUNTERVECTOR) - -#endif diff --git a/src/SerializationFormat.cc b/src/SerializationFormat.cc index d5f366f7fd..6505598fc8 100644 --- a/src/SerializationFormat.cc +++ b/src/SerializationFormat.cc @@ -2,7 +2,7 @@ #include "net_util.h" #include "SerializationFormat.h" -#include "Serializer.h" +#include "DebugLogger.h" #include "Reporter.h" const float SerializationFormat::GROWTH_FACTOR = 2.5; diff --git a/src/Serializer.cc b/src/Serializer.cc deleted file mode 100644 index 0366c36c81..0000000000 --- a/src/Serializer.cc +++ /dev/null @@ -1,1061 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -#include "Serializer.h" -#include "Scope.h" -#include "Stmt.h" -#include "Reporter.h" -#include "Func.h" -#include "Event.h" -#include "EventRegistry.h" -#include "SerializationFormat.h" -#include "NetVar.h" -#include "Conn.h" -#include "Timer.h" -#include "RemoteSerializer.h" -#include "iosource/Manager.h" - -Serializer::Serializer(SerializationFormat* arg_format) - { - if ( arg_format ) - format = arg_format; - else - format = new BinarySerializationFormat(); - - io = 0; - error_descr = 0; - current_cache = 0; - } - -Serializer::~Serializer() - { - delete format; - delete [] error_descr; - } - -bool Serializer::Read(string* s, const char* tag) - { - char* cstr; - int len; - if ( format->Read(&cstr, &len, tag) ) - { - s->assign(cstr, len); - delete [] cstr; - return true; - } - else - return false; - } - -bool Serializer::StartSerialization(SerialInfo* info, const char* descr, - char tag) - { - format->StartWrite(); - assert(current_cache); - SetErrorDescr(fmt("serializing %s", descr)); - if ( ! 
Write(tag, "tag") ) - { - Error(io->Error()); - return false; - } - - current_cache->Begin(info->new_cache_strategy); - return true; - } - -bool Serializer::EndSerialization(SerialInfo* info) - { - if ( info->chunk ) - { - if ( ! io->Write(info->chunk) ) - { - Error(io->Error()); - return false; - } - } - - ChunkedIO::Chunk* chunk = new ChunkedIO::Chunk; - chunk->len = format->EndWrite(&chunk->data); - chunk->free_func = ChunkedIO::Chunk::free_func_free; - - if ( ! io->Write(chunk) ) - { - Error(io->Error()); - return false; - } - - current_cache->End(info->new_cache_strategy); - return true; - } - -bool Serializer::Serialize(SerialInfo* info, const ID& id) - { - if ( info->cont.NewInstance() ) - { - if ( ! (id.IsGlobal() || id.IsEnumConst()) ) - { - Error("non-global identifiers cannot be serialized"); - return false; - } - - if ( ! StartSerialization(info, "ID", 'i') ) - return false; - } - - info->cont.SaveContext(); - bool result = id.Serialize(info); - info->cont.RestoreContext(); - - if ( ! result ) - { - Error("failed"); - return false; - } - - if ( info->cont.ChildSuspended() ) - return true; - - WriteSeparator(); - return EndSerialization(info); - } - -bool Serializer::Serialize(SerialInfo* info, const char* func, val_list* args) - { - DisableSuspend suspend(info); - - if ( ! StartSerialization(info, "call", 'e') ) - return false; - - WriteOpenTag("call"); - int a = args->length(); - Write(func, "name"); - Write(network_time, "time"); - Write(a, "len"); - - loop_over_list(*args, i) - { - if ( ! (*args)[i]->Serialize(info) ) - { - Error("failed"); - return false; - } - } - - WriteCloseTag("call"); - WriteSeparator(); - - return EndSerialization(info); - } - -bool Serializer::Serialize(SerialInfo* info, const StateAccess& s) - { - DisableSuspend suspend(info); - - if ( ! StartSerialization(info, "state access", 's') ) - return false; - - if ( ! s.Serialize(info) ) - { - Error("failed"); - return false; - } - - return EndSerialization(info); - } - -bool Serializer::Serialize(SerialInfo* info, const Timer& t) - { - DisableSuspend suspend(info); - - if ( ! StartSerialization(info, "timer", 't') ) - return false; - - if ( ! t.Serialize(info) ) - { - Error("failed"); - return false; - } - - return EndSerialization(info); - } - -bool Serializer::Serialize(SerialInfo* info, const Connection& c) - { - DisableSuspend suspend(info); - - if ( ! StartSerialization(info, "connection", 'c') ) - return false; - - if ( ! c.Serialize(info) ) - { - Error("failed"); - return false; - } - - return EndSerialization(info); - } - -bool Serializer::Serialize(SerialInfo* info, const Packet& p) - { - DisableSuspend suspend(info); - - if ( ! StartSerialization(info, "packet", 'p') ) - return false; - - if ( ! p.Serialize(info) ) - { - Error("failed"); - return false; - } - - return EndSerialization(info); - } - -int Serializer::Unserialize(UnserialInfo* info, bool block) - { - assert(current_cache); - - SetErrorDescr("unserializing"); - - current_cache->Begin(info->new_cache_strategy); - - ChunkedIO::Chunk* chunk = info->chunk; - - while ( ! chunk ) - { - if ( ! io->Read(&chunk) ) - { - if ( io->Eof() ) - return 0; - Error(io->Error()); - return -1; - } - - if ( ! chunk && ! block ) - return 0; - } - - format->StartRead(chunk->data, chunk->len); - - char type; - if ( ! 
format->Read(&type, "tag") ) - return -1; - -// DEBUG(fmt("parent: serialization of size %d", ); - - bool result; - switch ( type ) { - case 'i': - result = UnserializeID(info); - break; - - case 'e': - result = UnserializeCall(info); - break; - - case 's': - result = UnserializeStateAccess(info); - break; - - case 'c': - result = UnserializeConnection(info); - break; - - case 't': - result = UnserializeTimer(info); - break; - - case 'p': - result = UnserializePacket(info); - break; - - default: - Error(fmt("unknown serialization type %x", (int) type)); - result = false; - } - - format->EndRead(); - - if ( ! info->chunk ) - { // only delete if we allocated it ourselves - delete chunk; - } - - current_cache->End(info->new_cache_strategy); - - return result ? 1 : -1; - } - -bool Serializer::UnserializeID(UnserialInfo* info) - { - SetErrorDescr("unserializing ID"); - - ID* id = ID::Unserialize(info); - - if ( ! id ) - return false; - - if ( info->print ) - { - ODesc d; - d.SetQuotes(true); - d.SetIncludeStats(true); - d.SetShort(); - id->DescribeExtended(&d); - fprintf(info->print, "ID %s\n", d.Description()); - } - - if ( ! info->ignore_callbacks ) - GotID(id, id->ID_Val()); - else - Unref(id); - - return true; - } - -bool Serializer::UnserializeCall(UnserialInfo* info) - { - char* name; - int len; - double time; - - if ( ! (UNSERIALIZE_STR(&name, 0) && UNSERIALIZE(&time) && UNSERIALIZE(&len)) ) - return false; - - SetErrorDescr(fmt("unserializing event/function %s", name)); - - bool ignore = false; - FuncType* functype = 0; - type_list* types = 0; - - ID* id = global_scope()->Lookup(name); - - if ( id ) - { - if ( id->Type()->Tag() == TYPE_FUNC ) - { - functype = id->Type()->AsFuncType(); - types = functype->ArgTypes()->Types(); - if ( types->length() != len ) - { - Error("wrong number of arguments, ignoring"); - ignore = true; - } - } - else - { - Error("not a function/event, ignoring"); - ignore = true; - } - } - else - { - Error("unknown event/function, ignoring"); - ignore = true; - } - - ODesc d; - d.SetQuotes(true); - d.SetIncludeStats(true); - d.SetShort(); - - val_list* args = new val_list; - for ( int i = 0; i < len; ++i ) - { - Val* v = Val::Unserialize(info); - - if ( ! v ) - { - delete [] name; - delete_vals(args); - return false; - } - - if ( ! ignore ) - { - if ( v->Type()->Tag() != (*types)[i]->Tag() && - (*types)[i]->Tag() != TYPE_ANY ) - { - Error("mismatch in argument types; ignoring"); - ignore = true; - } - - if ( info->print && ! ignore ) - v->Describe(&d); - } - - args->append(v); - } - - if ( ! ignore ) - { - if ( info->print ) - fprintf(info->print, "%s [%.06f] %s(%s)\n", - functype->FlavorString().c_str(), - time, name, types ? d.Description() : ""); - - switch ( functype->Flavor() ) { - - case FUNC_FLAVOR_EVENT: - { - EventHandler* handler = event_registry->Lookup(name); - assert(handler); - - if ( ! info->ignore_callbacks ) - GotEvent(name, time, handler, args); - - break; - } - - case FUNC_FLAVOR_FUNCTION: - case FUNC_FLAVOR_HOOK: - if ( ! info->ignore_callbacks ) - GotFunctionCall(name, time, id->ID_Val()->AsFunc(), args); - break; - - default: - reporter->InternalError("unserialized call for invalid function flavor"); - break; - } - - if ( info->ignore_callbacks ) - delete_vals(args); - } - else - delete_vals(args); - - delete [] name; - - return true; - } - -bool Serializer::UnserializeStateAccess(UnserialInfo* info) - { - SetErrorDescr("unserializing state access"); - - StateAccess* s = StateAccess::Unserialize(info); - - if ( ! 
s ) - return false; - - if ( info->print ) - { - ODesc d; - d.SetQuotes(true); - d.SetIncludeStats(true); - d.SetShort(); - s->Describe(&d); - fprintf(info->print, "State access: %s\n", d.Description()); - } - - if ( ! info->ignore_callbacks ) - GotStateAccess(s); - else - delete s; - - return true; - } - -bool Serializer::UnserializeTimer(UnserialInfo* info) - { - SetErrorDescr("unserializing timer"); - - Timer* t = Timer::Unserialize(info); - - if ( ! t ) - return false; - - if ( info->print ) - { - ODesc d; - d.SetQuotes(true); - d.SetIncludeStats(true); - d.SetShort(); - t->Describe(&d); - fprintf(info->print, "Timer: %s\n", d.Description()); - } - - if ( ! info->ignore_callbacks ) - GotTimer(t); - - return true; - } - -bool Serializer::UnserializeConnection(UnserialInfo* info) - { - SetErrorDescr("unserializing connection"); - - Connection* c = Connection::Unserialize(info); - - if ( ! c ) - return false; - - if ( info->print ) - { - ODesc d; - d.SetQuotes(true); - d.SetIncludeStats(true); - d.SetShort(); - c->Describe(&d); - fprintf(info->print, "Connection: %s", d.Description()); - } - - if ( info->install_conns ) - { - if ( c->IsPersistent() && c->Key() ) - persistence_serializer->Register(c); - Ref(c); - sessions->Insert(c); - } - else - // We finish the connection here because it's not part - // of the standard processing and most likely to be - // discarded pretty soon. - // Without the Done(), some cleanup may not take place. - c->Done(); - - if ( ! info->ignore_callbacks ) - GotConnection(c); - else - Unref(c); - - return true; - } - -bool Serializer::UnserializePacket(UnserialInfo* info) - { - SetErrorDescr("unserializing packet"); - - Packet* p = Packet::Unserialize(info); - - if ( ! p ) - return false; - - if ( info->print ) - { - ODesc d; - d.SetQuotes(true); - d.SetIncludeStats(true); - d.SetShort(); - p->Describe(&d); - fprintf(info->print, "Packet: %s", d.Description()); - } - - if ( ! info->ignore_callbacks ) - GotPacket(p); - else - delete p; - - return true; - } - -void Serializer::Error(const char* str) - { - char buffer[1024]; - safe_snprintf(buffer, sizeof(buffer), "%s%s%s", - error_descr ? error_descr : "", error_descr ? ": " : "", str); - ReportError(buffer); - } - -void Serializer::Warning(const char* str) - { - // We ignore these as there's no good place to report them. - } - -SerializationCache::SerializationCache(unsigned int arg_max_cache_size) - { - max_cache_size = arg_max_cache_size; - next_id = 1; - cache_stable.head = cache_stable.tail = 0; - cache_unstable.head = cache_unstable.tail = 0; - cache_stable.size = cache_unstable.size = 0; - } - -SerializationCache::~SerializationCache() - { - Clear(); - } - -SerializationCache::PermanentID -SerializationCache::Register(const SerialObj* obj, PermanentID pid, - bool new_cache_strategy) - { - if ( pid == NONE ) - pid = next_id++; - - PIDMap::iterator i = pid_map.find(pid); - assert(i == pid_map.end()); - - CacheList* cache = - (new_cache_strategy && obj->IsCacheStable()) ? - &cache_stable : &cache_unstable; - - CacheEntry* entry = new CacheEntry; - entry->obj.serial = obj; - entry->is_bro_obj = obj->IsBroObj(); - entry->pid = pid; - entry->tid = obj->GetTID()->Value(); - entry->time = SerialObj::GetTimeCounter(); - entry->prev = cache->tail; - entry->next = 0; - entry->cache = cache; - entry->stype = obj->GetSerialType(); - - if ( cache->tail ) - cache->tail->next = entry; - if ( ! cache->head ) - cache->head = entry; - - cache->tail = entry; - ++(cache->size); - - // This is a bit weird. 
If the TID is already contained in the map (i.e. - // we're re-registering), TIDMap::insert() will *not* override the old - // entry but set the bool to false and return it. - pair old = tid_map.insert(TIDMap::value_type(entry->tid, entry)); - if ( ! old.second ) - { - // Already existed. - old.first->second->tid = 0; // invalidate - old.first->second = entry; // replace - } - - pid_map.insert(PIDMap::value_type(pid, entry)); - - if ( entry->is_bro_obj ) - Ref(const_cast(entry->obj.bro)); - else - { - // Make sure it goes into unstable. - assert(! obj->IsCacheStable()); - - volatiles.push_back(entry); - } - - return entry->pid; - } - -void SerializationCache::UnlinkEntry(CacheEntry* e) - { - assert(e); - - // Remove from double-linked list. - if ( e == e->cache->head ) - { - e->cache->head = e->next; - if ( e->cache->head ) - e->cache->head->prev = 0; - } - else - e->prev->next = e->next; - - if ( e == e->cache->tail ) - { - e->cache->tail = e->prev; - if ( e->cache->tail ) - e->cache->tail->next = 0; - } - else - e->next->prev = e->prev; - - e->prev = e->next = 0; - } - -void SerializationCache::RemoveEntry(CacheEntry* e) - { - assert(e); - UnlinkEntry(e); - - if ( e->tid ) - tid_map.erase(e->tid); - - pid_map.erase(e->pid); - - if ( e->is_bro_obj ) - Unref(const_cast(e->obj.bro)); - - e->obj.serial = 0; // for debugging - --(e->cache->size); - delete e; - } - -void SerializationCache::MoveEntryToTail(CacheEntry* e) - { - assert(e); - UnlinkEntry(e); - e->prev = e->cache->tail; - e->next = 0; - - if ( e->cache->tail ) - e->cache->tail->next = e; - if ( ! e->cache->head ) - e->cache->head = e; - - e->cache->tail = e; - } - -void SerializationCache::Clear() - { - tid_map.clear(); - pid_map.clear(); - volatiles.clear(); - - while ( cache_stable.head ) - RemoveEntry(cache_stable.head); - - while ( cache_unstable.head ) - RemoveEntry(cache_unstable.head); - - assert(cache_stable.size == 0); - assert(cache_unstable.size == 0); - } - -void SerializationCache::End(bool new_cache_strategy) - { - // Remove objects not-derived from BroObj (they aren't ref'counted - // so it's not safe to keep them). - for ( VolatileList::iterator i = volatiles.begin(); - i != volatiles.end(); i++ ) - { - assert(*i); - RemoveEntry(*i); - } - - volatiles.clear(); - - if ( new_cache_strategy ) - { - while ( max_cache_size && cache_stable.head && - cache_stable.size > max_cache_size ) - RemoveEntry(cache_stable.head); - - while ( max_cache_size && cache_unstable.head && - cache_unstable.size > max_cache_size ) - RemoveEntry(cache_unstable.head); - } - - else - { - while ( max_cache_size && pid_map.size() > max_cache_size ) - RemoveEntry(cache_unstable.head); - } - } - -FileSerializer::FileSerializer(SerializationFormat* format) -: Serializer(format), cache(100) - { - file = 0; - fd = -1; - io = 0; - SetCache(&cache); - } - -FileSerializer::~FileSerializer() - { - if ( io ) - io->Flush(); - - delete [] file; - - if ( io ) - delete io; // destructor will call close() on fd - else if ( fd >= 0 ) - safe_close(fd); - } - -bool FileSerializer::Open(const char* file, bool pure) - { - if ( ! OpenFile(file, false) ) - return false; - - if ( pure ) - io->MakePure(); - - if ( ! PrepareForWriting() ) - return false; - - return true; - } - -bool FileSerializer::Close() - { - CloseFile(); - return true; - } - -bool FileSerializer::OpenFile(const char* arg_file, bool readonly, bool should_exist) - { - CloseFile(); - - cache.Clear(); - - file = copy_string(arg_file); - fd = open(file, readonly ? 
O_RDONLY : O_WRONLY | O_CREAT | O_TRUNC, 0600); - - if ( fd < 0 ) - { - if ( readonly && errno == ENOENT ) - { - // Only an error if we expect to exist. - if ( should_exist ) - { - Error(fmt("%s does not exist", file)); - return false; - } - - CloseFile(); - return true; - } - - Error(fmt("can't open file %s for %s: %s", - file, (readonly ? "reading" : "writing"), - strerror(errno))); - return false; - } - - io = new ChunkedIOFd(fd, "file"); - - return io != 0; - } - -void FileSerializer::CloseFile() - { - if ( io ) - io->Flush(); - - if ( fd >= 0 && ! io ) // destructor of io calls close() on fd - safe_close(fd); - fd = -1; - - delete [] file; - file = 0; - - delete io; - io = 0; - - cache.Clear(); - } - -bool FileSerializer::PrepareForWriting() - { - if ( ! io->IsPure() ) - { - // Write file header. - uint32 magic = htonl(MAGIC); - uint16 version = htons(DATA_FORMAT_VERSION); - uint32 time = htonl(uint32(::time(0))); - - if ( write(fd, &magic, sizeof(magic)) != sizeof(magic ) || - write(fd, &version, sizeof(version)) != sizeof(version) || - write(fd, &time, sizeof(time)) != sizeof(time)) - { - Error(fmt("can't write file header to %s: %s", - file, strerror(errno))); - return false; - } - } - - return true; - } - -bool FileSerializer::ReadHeader(UnserialInfo* info) - { - uint32 magic; - uint16 version; - uint32 time; - - if ( read(fd, &magic, sizeof(magic)) != sizeof(magic ) || - read(fd, &version, sizeof(version)) != sizeof(version) || - read(fd, &time, sizeof(time)) != sizeof(time) ) - { - Error(fmt("can't read file header from %s: %s", - file, strerror(errno))); - return false; - } - - version = ntohs(version); - time = ntohl(time); - - if ( info && info->print ) - { - time_t teatime = (time_t) time; - fprintf(stderr, "Date: %s", ctime(&teatime)); - } - - if ( magic != htonl(MAGIC) ) - { - Error(fmt("%s is not a bro state file", file)); - CloseFile(); - return false; - } - - if ( version != DATA_FORMAT_VERSION ) - { - Error(fmt("wrong data format, expected version %d but got version %d", DATA_FORMAT_VERSION, version)); - CloseFile(); - return false; - } - - return true; - } - -bool FileSerializer::Read(UnserialInfo* info, const char* file, bool header) - { - if ( ! OpenFile(file, true, info->print) ) - return false; - - // fprintf( stderr, "Reading %s\n", file ); - - if ( fd < 0 ) - // Not existent, but that's ok. - return true; - - if ( header && ! ReadHeader(info) ) - return false; - - int i; - while ( (i = Unserialize(info, true)) > 0 ) - ; - - CloseFile(); - - return i == 0; - } - -void FileSerializer::ReportError(const char* str) - { - reporter->Error("%s", str); - } - -void FileSerializer::GotID(ID* id, Val* val) - { - // Do nothing. - Unref(id); - } - -void FileSerializer::GotStateAccess(StateAccess* s) - { - delete s; - } - -void FileSerializer::GotEvent(const char* name, double time, - EventHandlerPtr event, val_list* args) - { - // Do nothing. - delete_vals(args); - } - -void FileSerializer::GotFunctionCall(const char* name, double time, - Func* func, val_list* args) - { - // Do nothing. - delete_vals(args); - } - -void FileSerializer::GotTimer(Timer* t) - { - // Do nothing. - delete t; - } - -void FileSerializer::GotConnection(Connection* c) - { - // Do nothing. - Unref(c); - } - -void FileSerializer::GotPacket(Packet* p) - { - // Do nothing. - delete p; - } - -EventPlayer::EventPlayer(const char* file) - : stream_time(), replay_time(), ne_time(), ne_handler(), ne_args() - { - if ( ! 
OpenFile(file, true) || fd < 0 ) - Error(fmt("event replayer: cannot open %s", file)); - - if ( ReadHeader() ) - iosource_mgr->Register(this); - } - -EventPlayer::~EventPlayer() - { - CloseFile(); - } - -void EventPlayer::GotEvent(const char* name, double time, - EventHandlerPtr event, val_list* args) - { - ne_time = time; - ne_handler = event; - ne_args = args; - } - -void EventPlayer::GotFunctionCall(const char* name, double time, - Func* func, val_list* args) - { - // We don't replay function calls. - } - -void EventPlayer::GetFds(iosource::FD_Set* read, iosource::FD_Set* write, - iosource::FD_Set* except) - { - read->Insert(fd); - } - -double EventPlayer::NextTimestamp(double* local_network_time) - { - if ( ne_time ) - return ne_time; - - if ( ! io ) - return -1; - - // Read next event if we don't have one waiting. - if ( ! ne_time ) - { - UnserialInfo info(this); - Unserialize(&info); - SetClosed(io->Eof()); - } - - if ( ! ne_time ) - return -1; - - if ( ! network_time ) - { - // Network time not initialized yet. - stream_time = replay_time = ne_time; - return ne_time; - } - - if ( ! stream_time ) - { - // Init base times. - stream_time = ne_time; - replay_time = network_time; - } - - // Scale time. - ne_time = ne_time - stream_time + network_time; - return ne_time; - } - -void EventPlayer::Process() - { - if ( ! (io && ne_time) ) - return; - - Event* event = new Event(ne_handler, ne_args); - mgr.Dispatch(event); - - ne_time = 0; - } diff --git a/src/Serializer.h b/src/Serializer.h deleted file mode 100644 index 3b863a5b6e..0000000000 --- a/src/Serializer.h +++ /dev/null @@ -1,363 +0,0 @@ -#ifndef SERIALIZER_H -#define SERIALIZER_H - -#include -#include -#include - -#include "ID.h" -#include "List.h" -#include "Expr.h" -#include "ChunkedIO.h" -#include "SerializationFormat.h" -#include "StateAccess.h" -#include "PriorityQueue.h" -#include "SerialInfo.h" -#include "IP.h" -#include "Timer.h" -#include "iosource/IOSource.h" -#include "Reporter.h" - -class SerializationCache; -class SerialInfo; - -class Connection; -class Timer; -class Packet; - -class Serializer { -public: - // Currently ID serialization is the only method which may suspend. - bool Serialize(SerialInfo* info, const ID& id); - bool Serialize(SerialInfo* info, const char* func, val_list* args); - bool Serialize(SerialInfo* info, const StateAccess& s); - bool Serialize(SerialInfo* info, const Connection& c); - bool Serialize(SerialInfo* info, const Timer& t); - bool Serialize(SerialInfo* info, const Packet& p); - - // Access to the current cache. - SerializationCache* Cache() { return current_cache; } - void SetCache(SerializationCache* cache) - { current_cache = cache; } - - // Input/output methods. - -#define DECLARE_READ(type) \ - bool Read(type* v, const char* tag) { return format->Read(v, tag); } - -#define DECLARE_WRITE(type) \ - bool Write(type v, const char* tag) \ - { return format->Write(v, tag); } - -#define DECLARE_IO(type) \ - DECLARE_READ(type) \ - DECLARE_WRITE(type) - - DECLARE_IO(int) - DECLARE_IO(uint16) - DECLARE_IO(uint32) - DECLARE_IO(int64) - DECLARE_IO(uint64) - DECLARE_IO(char) - DECLARE_IO(bool) - DECLARE_IO(double) - - bool Read(char** str, int* len, const char* tag) - { return format->Read(str, len, tag); } - bool Read(const char** str, int* len, const char* tag) - // This cast is ok. 
- { return format->Read(const_cast(str), len, tag); } - - bool Read(string* s, const char* tag); - bool Read(IPAddr* a, const char* tag) { return format->Read(a, tag); } - bool Read(IPPrefix* p, const char* tag) { return format->Read(p, tag); } - - bool Write(const char* s, const char* tag) - { return format->Write(s, tag); } - bool Write(const char* buf, int len, const char* tag) - { return format->Write(buf, len, tag); } - bool Write(const string& s, const char* tag) - { return format->Write(s.data(), s.size(), tag); } - bool Write(const IPAddr& a, const char* tag) { return format->Write(a, tag); } - bool Write(const IPPrefix& p, const char* tag) { return format->Write(p, tag); } - - bool WriteOpenTag(const char* tag) - { return format->WriteOpenTag(tag); } - bool WriteCloseTag(const char* tag) - { return format->WriteCloseTag(tag); } - - bool WriteSeparator() { return format->WriteSeparator(); } - - void Error(const char* msg); - void Warning(const char* msg); - - void SetErrorDescr(const char* descr) - { delete [] error_descr; error_descr = copy_string(descr); } - -protected: - // Format defaults to binary serialization. - explicit Serializer(SerializationFormat* format = 0); - virtual ~Serializer(); - - // Reads next object. - // If 'block' is true, wait until an object can be read. - // Returns 0 if no more object available, -1 on error. - int Unserialize(UnserialInfo* info, bool block = false); - - // Callback for error messages. - virtual void ReportError(const char* msg) = 0; - - // Callbacks for unserialized objects. - - // id points to ID in global scope, val is unserialized value. - virtual void GotID(ID* id, Val* val) = 0; - virtual void GotEvent(const char* name, double time, - EventHandlerPtr event, val_list* args) = 0; - virtual void GotFunctionCall(const char* name, double time, - Func* func, val_list* args) = 0; - virtual void GotStateAccess(StateAccess* s) = 0; - virtual void GotTimer(Timer* t) = 0; - virtual void GotConnection(Connection* c) = 0; - virtual void GotPacket(Packet* packet) = 0; - - // Magic to recognize state files. - static const uint32 MAGIC = 0x42525354; - - // This will be increased whenever there is an incompatible change - // in the data format. - static const uint32 DATA_FORMAT_VERSION = 26; - - ChunkedIO* io; - -private: - bool StartSerialization(SerialInfo* info, const char* descr, char tag); - bool EndSerialization(SerialInfo* info); - - bool UnserializeID(UnserialInfo* info); - bool UnserializeCall(UnserialInfo* info); - bool UnserializeStateAccess(UnserialInfo* info); - bool UnserializeTimer(UnserialInfo* info); - bool UnserializeConnection(UnserialInfo* info); - bool UnserializePacket(UnserialInfo* info); - - SerializationFormat* format; - SerializationCache* current_cache; - const char* error_descr; // used in error messages -}; - - - -// We maintain an LRU-cache for some of the objects which have already been -// serialized. For the cache, we need two types of IDs: TransientIDs (defined -// in SerialObj.cc) uniquely reference an object during the lifetime of a -// process. PermanentIDs uniquely reference an object within a serialization. - -class SerializationCache { -public: - typedef uint64 PermanentID; - static const PermanentID NONE = 0; - - // If max_cache_size is greater than zero, we'll remove old entries - // automatically if limit is reached (LRU expiration). 
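// A minimal sketch of the LRU policy described in the comment above, kept
// only for illustration: it uses std::list plus std::unordered_map (assumes
// <list> and <unordered_map> are available) instead of the hand-rolled
// CacheEntry lists this class actually maintains, and the names LRUSketch,
// Register and Touch below are illustrative, not part of the real interface.
class LRUSketch {
public:
	explicit LRUSketch(unsigned int arg_max_size) : max_size(arg_max_size) {}

	// Insert a new id; if the cache is bounded and full, evict the
	// least-recently-used entry first.
	void Register(uint64 pid)
		{
		if ( max_size && order.size() >= max_size )
			{
			index.erase(order.front());	// front == least recently used
			order.pop_front();
			}

		order.push_back(pid);
		index[pid] = --order.end();	// remember where the new entry lives
		}

	// A successful lookup moves the entry to the tail, marking it as most
	// recently used -- the same idea as MoveEntryToTail() declared further
	// down in this class.
	bool Touch(uint64 pid)
		{
		auto i = index.find(pid);
		if ( i == index.end() )
			return false;

		// splice() relinks the node in place, so iterators stay valid.
		order.splice(order.end(), order, i->second);
		return true;
		}

private:
	unsigned int max_size;
	std::list<uint64> order;	// LRU order, oldest entry at the front
	std::unordered_map<uint64, std::list<uint64>::iterator> index;
};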
- explicit SerializationCache(unsigned int max_cache_size = 0); - ~SerializationCache(); - - PermanentID Register(const SerialObj* obj, PermanentID pid, - bool new_cache_strategy); - - const SerialObj* Lookup(PermanentID pid) - { - PIDMap::const_iterator i = pid_map.find(pid); - if ( i == pid_map.end() ) - return 0; - - assert(i->second); - MoveEntryToTail(i->second); - return i->second->obj.serial; - } - - PermanentID Lookup(const TransientID& tid) - { - TIDMap::const_iterator i = tid_map.find(tid.Value()); - if ( i == tid_map.end() ) - return 0; - - uint64 modified = i->second->obj.serial->LastModified(); - if ( modified == SerialObj::ALWAYS || modified > i->second->time ) - return 0; - - assert(i->second); - MoveEntryToTail(i->second); - return i->second->pid; - } - - unsigned int GetMaxCacheSize() const { return max_cache_size; } - void SetMaxCacheSize(unsigned int size) { max_cache_size = size; } - - // These methods have to be called at the start/end of the - // serialization of an entity. The cache guarentees that objects - // registered after Begin() remain valid until End() is called. - // After End(), objects which are not derived from BroObj are - // discarded; others *may* remain valid. - void Begin(bool can_keep_in_cache) { End(can_keep_in_cache); } - void End(bool can_keep_in_cache); - - void Clear(); - -private: - - struct CacheList; - - struct CacheEntry { - union { - const SerialObj* serial; - const BroObj* bro; - } obj; - - bool is_bro_obj; - PermanentID pid; - TransientID::ID tid; - uint64 time; - struct CacheList* cache; - CacheEntry* prev; - CacheEntry* next; - - SerialType stype; // primarily for debugging - }; - - // We maintain two LRU-sorted lists, one for often-changing objects and - // one for only rarely changing objects; - struct CacheList { - CacheEntry* head; - CacheEntry* tail; - unsigned int size; - }; - - void RemoveEntry(CacheEntry* e); - void UnlinkEntry(CacheEntry* e); - void MoveEntryToTail(CacheEntry* e); - - unsigned int max_cache_size; - - typedef map PIDMap; - typedef map TIDMap; - - TIDMap tid_map; - PIDMap pid_map; - - CacheList cache_stable; - CacheList cache_unstable; - - // Objects in the cache which aren't derived from BroObj. These are - // always stored in the unstable cache. - typedef list VolatileList; - VolatileList volatiles; - - PermanentID next_id; -}; - -// A serializer for cloning objects. Objects can be serialized into -// the serializer and unserialized into new objects. An absolutely -// minimal implementation of Serializer! -class CloneSerializer : public Serializer { -public: - explicit CloneSerializer(SerializationFormat* format = 0) : Serializer(format) { } - ~CloneSerializer() override - { } - -protected: - void ReportError(const char* msg) override { reporter->Error("%s", msg); } - void GotID(ID* id, Val* val) override { } - void GotEvent(const char* name, double time, EventHandlerPtr event, val_list* args) override { } - void GotFunctionCall(const char* name, double time, - Func* func, val_list* args) override { } - void GotStateAccess(StateAccess* s) override { delete s; } - void GotTimer(Timer* t) override { } - void GotConnection(Connection* c) override { } - void GotPacket(Packet* packet) override { } -}; - -// Write values/events to file or fd. -class FileSerializer : public Serializer { -public: - explicit FileSerializer(SerializationFormat* format = 0); - ~FileSerializer() override; - - // Opens the file for serialization. - bool Open(const char* file, bool pure = false); - bool Close(); - - // Reads the file. 
- bool Read(UnserialInfo* info, const char* file, bool header = true); - -protected: - void ReportError(const char* msg) override; - void GotID(ID* id, Val* val) override; - void GotEvent(const char* name, double time, - EventHandlerPtr event, val_list* args) override; - void GotFunctionCall(const char* name, double time, - Func* func, val_list* args) override; - void GotStateAccess(StateAccess* s) override; - void GotTimer(Timer* t) override; - void GotConnection(Connection* c) override; - void GotPacket(Packet* packet) override; - - bool OpenFile(const char* file, bool readonly, bool should_exist = false); - void CloseFile(); - bool ReadFile(const char* file); - bool PrepareForWriting(); - bool ReadHeader(UnserialInfo* info = 0); - - SerializationCache cache; - const char* file; - int fd; -}; - -// Abstract interface class for external sources providing a stream of events. -class EventSource { -public: - virtual ~EventSource() { } - - // Returns time of the oldest event (0 if none available). - virtual double NextTimestamp(double* local_network_time) = 0; - - // Dispatches the oldest event and removes it. - virtual void DispatchNextEvent() = 0; - - // Returns true if there are more events to expect from this source. - virtual bool IsActive() = 0; -}; - -// Plays a file of events back. -class EventPlayer : public FileSerializer, public iosource::IOSource { -public: - explicit EventPlayer(const char* file); - ~EventPlayer() override; - - void GetFds(iosource::FD_Set* read, iosource::FD_Set* write, - iosource::FD_Set* except) override; - double NextTimestamp(double* local_network_time) override; - void Process() override; - const char* Tag() override { return "EventPlayer"; } - -protected: - void GotID(ID* id, Val* val) override {} - void GotEvent(const char* name, double time, - EventHandlerPtr event, val_list* args) override; - void GotFunctionCall(const char* name, double time, - Func* func, val_list* args) override; - - double stream_time; // time of first captured event - double replay_time; // network time of replay start - - // Next event waiting to be dispatched. - double ne_time; - EventHandlerPtr ne_handler; - val_list* ne_args; - -}; - -extern FileSerializer* event_serializer; -extern FileSerializer* state_serializer; - -#endif diff --git a/src/Sessions.cc b/src/Sessions.cc index edccb7e00c..e668815cfb 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -1,7 +1,7 @@ // See the file "COPYING" in the main distribution directory for copyright. 
-#include "bro-config.h" +#include "zeek-config.h" #include @@ -14,7 +14,6 @@ #include "NetVar.h" #include "Sessions.h" #include "Reporter.h" -#include "OSFinger.h" #include "analyzer/protocol/icmp/ICMP.h" #include "analyzer/protocol/udp/UDP.h" @@ -130,15 +129,6 @@ NetSessions::NetSessions() dump_this_packet = 0; num_packets_processed = 0; - if ( OS_version_found ) - { - SYN_OS_Fingerprinter = new OSFingerprint(SYN_FINGERPRINT_MODE); - if ( SYN_OS_Fingerprinter->Error() ) - exit(1); - } - else - SYN_OS_Fingerprinter = 0; - if ( pkt_profile_mode && pkt_profile_freq > 0 && pkt_profile_file ) pkt_profiler = new PacketProfiler(pkt_profile_mode, pkt_profile_freq, pkt_profile_file->AsFile()); @@ -155,7 +145,6 @@ NetSessions::~NetSessions() { delete ch; delete packet_filter; - delete SYN_OS_Fingerprinter; delete pkt_profiler; Unref(arp_analyzer); delete discarder; @@ -171,11 +160,7 @@ void NetSessions::NextPacket(double t, const Packet* pkt) SegmentProfiler(segment_logger, "dispatching-packet"); if ( raw_packet ) - { - val_list* vl = new val_list(); - vl->append(pkt->BuildPktHdrVal()); - mgr.QueueEvent(raw_packet, vl); - } + mgr.QueueEventFast(raw_packet, {pkt->BuildPktHdrVal()}); if ( pkt_profiler ) pkt_profiler->ProfilePkt(t, pkt->cap_len); @@ -415,11 +400,7 @@ void NetSessions::DoNextPacket(double t, const Packet* pkt, const IP_Hdr* ip_hdr { dump_this_packet = 1; if ( esp_packet ) - { - val_list* vl = new val_list(); - vl->append(ip_hdr->BuildPktHdrVal()); - mgr.QueueEvent(esp_packet, vl); - } + mgr.QueueEventFast(esp_packet, {ip_hdr->BuildPktHdrVal()}); // Can't do more since upper-layer payloads are going to be encrypted. return; @@ -439,11 +420,7 @@ void NetSessions::DoNextPacket(double t, const Packet* pkt, const IP_Hdr* ip_hdr } if ( mobile_ipv6_message ) - { - val_list* vl = new val_list(); - vl->append(ip_hdr->BuildPktHdrVal()); - mgr.QueueEvent(mobile_ipv6_message, vl); - } + mgr.QueueEvent(mobile_ipv6_message, {ip_hdr->BuildPktHdrVal()}); if ( ip_hdr->NextProto() != IPPROTO_NONE ) Weird("mobility_piggyback", pkt, encapsulation); @@ -999,24 +976,6 @@ FragReassembler* NetSessions::NextFragment(double t, const IP_Hdr* ip, return f; } -int NetSessions::Get_OS_From_SYN(struct os_type* retval, - uint16 tot, uint8 DF_flag, uint8 TTL, uint16 WSS, - uint8 ocnt, uint8* op, uint16 MSS, uint8 win_scale, - uint32 tstamp, /* uint8 TOS, */ uint32 quirks, - uint8 ECN) const - { - return SYN_OS_Fingerprinter ? - SYN_OS_Fingerprinter->FindMatch(retval, tot, DF_flag, TTL, - WSS, ocnt, op, MSS, win_scale, tstamp, - quirks, ECN) : 0; - } - -bool NetSessions::CompareWithPreviousOSMatch(const IPAddr& addr, int id) const - { - return SYN_OS_Fingerprinter ? - SYN_OS_Fingerprinter->CacheMatch(addr, id) : 0; - } - Connection* NetSessions::FindConnection(Val* v) { BroType* vt = v->Type(); @@ -1113,9 +1072,6 @@ void NetSessions::Remove(Connection* c) tcp_stats.StateLeft(to->state, tr->state); } - if ( c->IsPersistent() ) - persistence_serializer->Unregister(c); - c->Done(); if ( connection_state_remove ) @@ -1206,8 +1162,6 @@ void NetSessions::Insert(Connection* c) // Some clean-ups similar to those in Remove() (but invisible // to the script layer). 
old->CancelTimers(); - if ( old->IsPersistent() ) - persistence_serializer->Unregister(old); delete old->Key(); old->ClearKey(); Unref(old); @@ -1327,12 +1281,12 @@ Connection* NetSessions::NewConn(HashKey* k, double t, const ConnID* id, { conn->Event(new_connection, 0); - if ( external ) + if ( external && connection_external ) { - val_list* vl = new val_list(2); - vl->append(conn->BuildConnVal()); - vl->append(new StringVal(conn->GetTimerMgr()->GetTag().c_str())); - conn->ConnectionEvent(connection_external, 0, vl); + conn->ConnectionEventFast(connection_external, 0, { + conn->BuildConnVal(), + new StringVal(conn->GetTimerMgr()->GetTag().c_str()), + }); } } diff --git a/src/Sessions.h b/src/Sessions.h index b237428d25..617ab3e52a 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -17,7 +17,6 @@ class EncapsulationStack; class Connection; -class OSFingerprint; class ConnCompressor; struct ConnID; @@ -77,14 +76,6 @@ public: FragReassembler* NextFragment(double t, const IP_Hdr* ip, const u_char* pkt); - int Get_OS_From_SYN(struct os_type* retval, - uint16 tot, uint8 DF_flag, uint8 TTL, uint16 WSS, - uint8 ocnt, uint8* op, uint16 MSS, uint8 win_scale, - uint32 tstamp, /* uint8 TOS, */ uint32 quirks, - uint8 ECN) const; - - bool CompareWithPreviousOSMatch(const IPAddr& addr, int id) const; - // Looks up the connection referred to by the given Val, // which should be a conn_id record. Returns nil if there's // no such connection or the Val is ill-formed. @@ -180,7 +171,6 @@ public: analyzer::tcp::TCPStateStats tcp_stats; // keeps statistics on TCP states protected: - friend class RemoteSerializer; friend class ConnCompressor; friend class TimerMgrExpireTimer; friend class IPTunnelTimer; @@ -241,7 +231,6 @@ protected: analyzer::stepping_stone::SteppingStoneManager* stp_manager; Discarder* discarder; PacketFilter* packet_filter; - OSFingerprint* SYN_OS_Fingerprinter; int build_backdoor_analyzer; int dump_this_packet; // if true, current packet should be recorded uint64 num_packets_processed; diff --git a/src/SmithWaterman.cc b/src/SmithWaterman.cc index fba3abfc13..857e45bb9b 100644 --- a/src/SmithWaterman.cc +++ b/src/SmithWaterman.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. 
-#include "bro-config.h" +#include "zeek-config.h" #include #include diff --git a/src/StateAccess.cc b/src/StateAccess.cc deleted file mode 100644 index 874ed9c5c2..0000000000 --- a/src/StateAccess.cc +++ /dev/null @@ -1,1030 +0,0 @@ -#include "Val.h" -#include "StateAccess.h" -#include "Serializer.h" -#include "Event.h" -#include "NetVar.h" -#include "DebugLogger.h" -#include "RemoteSerializer.h" -#include "PersistenceSerializer.h" - -int StateAccess::replaying = 0; - -StateAccess::StateAccess(Opcode arg_opcode, - const MutableVal* arg_target, const Val* arg_op1, - const Val* arg_op2, const Val* arg_op3) - { - opcode = arg_opcode; - target.val = const_cast(arg_target); - target_type = TYPE_MVAL; - op1.val = const_cast(arg_op1); - op1_type = TYPE_VAL; - op2 = const_cast(arg_op2); - op3 = const_cast(arg_op3); - delete_op1_key = false; - - RefThem(); - } - -StateAccess::StateAccess(Opcode arg_opcode, - const ID* arg_target, const Val* arg_op1, - const Val* arg_op2, const Val* arg_op3) - { - opcode = arg_opcode; - target.id = const_cast(arg_target); - target_type = TYPE_ID; - op1.val = const_cast(arg_op1); - op1_type = TYPE_VAL; - op2 = const_cast(arg_op2); - op3 = const_cast(arg_op3); - delete_op1_key = false; - - RefThem(); - } - -StateAccess::StateAccess(Opcode arg_opcode, - const ID* arg_target, const HashKey* arg_op1, - const Val* arg_op2, const Val* arg_op3) - { - opcode = arg_opcode; - target.id = const_cast(arg_target); - target_type = TYPE_ID; - op1.key = new HashKey(arg_op1->Key(), arg_op1->Size(), arg_op1->Hash()); - op1_type = TYPE_KEY; - op2 = const_cast(arg_op2); - op3 = const_cast(arg_op3); - delete_op1_key = true; - - RefThem(); - } - -StateAccess::StateAccess(Opcode arg_opcode, - const MutableVal* arg_target, const HashKey* arg_op1, - const Val* arg_op2, const Val* arg_op3) - { - opcode = arg_opcode; - target.val = const_cast(arg_target); - target_type = TYPE_MVAL; - op1.key = new HashKey(arg_op1->Key(), arg_op1->Size(), arg_op1->Hash()); - op1_type = TYPE_KEY; - op2 = const_cast(arg_op2); - op3 = const_cast(arg_op3); - delete_op1_key = true; - - RefThem(); - } - -StateAccess::StateAccess(const StateAccess& sa) -: SerialObj() - { - opcode = sa.opcode; - target_type = sa.target_type; - op1_type = sa.op1_type; - delete_op1_key = false; - - if ( target_type == TYPE_ID ) - target.id = sa.target.id; - else - target.val = sa.target.val; - - if ( op1_type == TYPE_VAL ) - op1.val = sa.op1.val; - else - { - // We need to copy the key as the pointer may not be - // valid anymore later. - op1.key = new HashKey(sa.op1.key->Key(), sa.op1.key->Size(), - sa.op1.key->Hash()); - delete_op1_key = true; - } - - op2 = sa.op2; - op3 = sa.op3; - - RefThem(); - } - -StateAccess::~StateAccess() - { - if ( target_type == TYPE_ID ) - Unref(target.id); - else - Unref(target.val); - - if ( op1_type == TYPE_VAL ) - Unref(op1.val); - else if ( delete_op1_key ) - delete op1.key; - - Unref(op2); - Unref(op3); - } - -void StateAccess::RefThem() - { - if ( target_type == TYPE_ID ) - Ref(target.id); - else - Ref(target.val); - - if ( op1_type == TYPE_VAL && op1.val ) - Ref(op1.val); - - if ( op2 ) - Ref(op2); - if ( op3 ) - Ref(op3); - } - -bool StateAccess::CheckOld(const char* op, ID* id, Val* index, - Val* should, Val* is) - { - if ( ! remote_check_sync_consistency ) - return true; - - if ( ! should && ! is ) - return true; - - // 'should == index' means that 'is' should be non-nil. 
- if ( should == index && is ) - return true; - - if ( should && is ) - { - // There's no general comparison for non-atomic vals currently. - if ( ! (is_atomic_val(is) && is_atomic_val(should)) ) - return true; - - if ( same_atomic_val(should, is) ) - return true; - } - - Val* arg1; - Val* arg2; - Val* arg3; - - if ( index ) - { - ODesc d; - d.SetShort(); - index->Describe(&d); - arg1 = new StringVal(fmt("%s[%s]", id->Name(), d.Description())); - } - else - arg1 = new StringVal(id->Name()); - - if ( should ) - { - ODesc d; - d.SetShort(); - should->Describe(&d); - arg2 = new StringVal(d.Description()); - } - else - arg2 = new StringVal(""); - - if ( is ) - { - ODesc d; - d.SetShort(); - is->Describe(&d); - arg3 = new StringVal(d.Description()); - } - else - arg3 = new StringVal(""); - - val_list* args = new val_list; - args->append(new StringVal(op)); - args->append(arg1); - args->append(arg2); - args->append(arg3); - mgr.QueueEvent(remote_state_inconsistency, args); - - return false; - } - -bool StateAccess::CheckOldSet(const char* op, ID* id, Val* index, - bool should, bool is) - { - if ( ! remote_check_sync_consistency ) - return true; - - if ( should == is ) - return true; - - ODesc d; - d.SetShort(); - index->Describe(&d); - - Val* arg1 = new StringVal(fmt("%s[%s]", id->Name(), d.Description())); - Val* arg2 = new StringVal(should ? "set" : "not set"); - Val* arg3 = new StringVal(is ? "set" : "not set"); - - val_list* args = new val_list; - args->append(new StringVal(op)); - args->append(arg1); - args->append(arg2); - args->append(arg3); - mgr.QueueEvent(remote_state_inconsistency, args); - - return false; - } - -bool StateAccess::MergeTables(TableVal* dst, Val* src) - { - if ( src->Type()->Tag() != TYPE_TABLE ) - { - reporter->Error("type mismatch while merging tables"); - return false; - } - - if ( ! src->AsTableVal()->FindAttr(ATTR_MERGEABLE) ) - return false; - - DBG_LOG(DBG_STATE, "merging tables %s += %s", dst->UniqueID()->Name(), - src->AsTableVal()->UniqueID()->Name()); - - src->AsTableVal()->AddTo(dst, 0); - - // We need to make sure that the resulting table is accessible by - // the new name (while keeping the old as an alias). - dst->TransferUniqueID(src->AsMutableVal()); - - return true; - } - -static Val* GetInteger(bro_int_t n, TypeTag t) - { - if ( t == TYPE_INT ) - return val_mgr->GetInt(n); - - return val_mgr->GetCount(n); - } - -void StateAccess::Replay() - { - // For simplicity we assume that we only replay unserialized accesses. - assert(target_type == TYPE_ID && op1_type == TYPE_VAL); - - if ( ! target.id ) - return; - - Val* v = target.id->ID_Val(); - TypeTag t = v ? v->Type()->Tag() : TYPE_VOID; - - if ( opcode != OP_ASSIGN && ! v ) - { - // FIXME: I think this warrants an internal error, - // but let's check that first ... - // reporter->InternalError("replay id lacking a value"); - reporter->Error("replay id lacks a value"); - return; - } - - ++replaying; - - switch ( opcode ) { - case OP_ASSIGN: - assert(op1.val); - // There mustn't be a direct assignment to a unique ID. - assert(target.id->Name()[0] != '#'); - CheckOld("assign", target.id, 0, op2, v); - - if ( t == TYPE_TABLE && v && - v->AsTableVal()->FindAttr(ATTR_MERGEABLE) ) - if ( MergeTables(v->AsTableVal(), op1.val) ) - break; - - target.id->SetVal(op1.val->Ref()); - break; - - case OP_INCR: - if ( IsIntegral(t) ) - { - assert(op1.val && op2); - // We derive the amount as difference between old - // and new value. 
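// Put differently: a replayed increment applies the sender's delta to the
// local value rather than copying the sender's absolute value, so the
// local result still reflects increments performed on this side in the
// meantime. A self-contained sketch of that rule (replay_incr is a
// hypothetical helper written for illustration, not a function in this
// file):
static bro_int_t replay_incr(bro_int_t local_cur, bro_int_t remote_old,
                             bro_int_t remote_new)
	{
	bro_int_t amount = remote_new - remote_old;	// delta seen by the sender
	return local_cur + amount;	// e.g. 5 + (4 - 3) == 6, not 4
	}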
- bro_int_t amount = - op1.val->CoerceToInt() - op2->CoerceToInt(); - - target.id->SetVal(GetInteger(v->CoerceToInt() + amount, t), - OP_INCR); - } - break; - - case OP_ASSIGN_IDX: - assert(op1.val); - - if ( t == TYPE_TABLE ) - { - assert(op2); - - BroType* yt = v->Type()->AsTableType()->YieldType(); - - if ( yt && yt->Tag() == TYPE_TABLE ) - { - TableVal* tv = v->AsTableVal(); - Val* w = tv->Lookup(op1.val); - if ( w && w->AsTableVal()->FindAttr(ATTR_MERGEABLE) ) - if ( MergeTables(w->AsTableVal(), op2) ) - break; - } - - CheckOld("index assign", target.id, op1.val, op3, - v->AsTableVal()->Lookup(op1.val)); - - v->AsTableVal()->Assign(op1.val, op2 ? op2->Ref() : 0); - } - - else if ( t == TYPE_RECORD ) - { - const char* field = op1.val->AsString()->CheckString(); - int idx = v->Type()->AsRecordType()->FieldOffset(field); - - if ( idx >= 0 ) - { - BroType* ft = v->Type()->AsRecordType()->FieldType(field); - - if ( ft && ft->Tag() == TYPE_TABLE ) - { - RecordVal* rv = v->AsRecordVal(); - Val* w = rv->Lookup(idx); - if ( w && w->AsTableVal()->FindAttr(ATTR_MERGEABLE) ) - if ( MergeTables(w->AsTableVal(), op2) ) - break; - } - - CheckOld("index assign", target.id, op1.val, op3, - v->AsRecordVal()->Lookup(idx)); - v->AsRecordVal()->Assign(idx, op2 ? op2->Ref() : 0); - } - else - reporter->Error("access replay: unknown record field %s for assign", field); - } - - else if ( t == TYPE_VECTOR ) - { - assert(op2); - bro_uint_t index = op1.val->AsCount(); - - BroType* yt = v->Type()->AsVectorType()->YieldType(); - - if ( yt && yt->Tag() == TYPE_TABLE ) - { - VectorVal* vv = v->AsVectorVal(); - Val* w = vv->Lookup(index); - if ( w && w->AsTableVal()->FindAttr(ATTR_MERGEABLE) ) - if ( MergeTables(w->AsTableVal(), op2) ) - break; - } - - CheckOld("index assign", target.id, op1.val, op3, - v->AsVectorVal()->Lookup(index)); - v->AsVectorVal()->Assign(index, op2 ? op2->Ref() : 0); - } - - else - reporter->InternalError("unknown type in replaying index assign"); - - break; - - case OP_INCR_IDX: - { - assert(op1.val && op2 && op3); - - // We derive the amount as the difference between old - // and new value. 
- bro_int_t amount = op2->CoerceToInt() - op3->CoerceToInt(); - - if ( t == TYPE_TABLE ) - { - t = v->Type()->AsTableType()->YieldType()->Tag(); - Val* lookup_op1 = v->AsTableVal()->Lookup(op1.val); - int delta = lookup_op1->CoerceToInt() + amount; - Val* new_val = GetInteger(delta, t); - v->AsTableVal()->Assign(op1.val, new_val, OP_INCR ); - } - - else if ( t == TYPE_RECORD ) - { - const char* field = op1.val->AsString()->CheckString(); - int idx = v->Type()->AsRecordType()->FieldOffset(field); - if ( idx >= 0 ) - { - t = v->Type()->AsRecordType()->FieldType(idx)->Tag(); - Val* lookup_field = - v->AsRecordVal()->Lookup(idx); - bro_int_t delta = - lookup_field->CoerceToInt() + amount; - Val* new_val = GetInteger(delta, t); - v->AsRecordVal()->Assign(idx, new_val, OP_INCR); - } - else - reporter->Error("access replay: unknown record field %s for assign", field); - } - - else if ( t == TYPE_VECTOR ) - { - bro_uint_t index = op1.val->AsCount(); - t = v->Type()->AsVectorType()->YieldType()->Tag(); - Val* lookup_op1 = v->AsVectorVal()->Lookup(index); - int delta = lookup_op1->CoerceToInt() + amount; - Val* new_val = GetInteger(delta, t); - v->AsVectorVal()->Assign(index, new_val); - } - - else - reporter->InternalError("unknown type in replaying index increment"); - - break; - } - - case OP_ADD: - assert(op1.val); - if ( t == TYPE_TABLE ) - { - CheckOldSet("add", target.id, op1.val, op2 != 0, - v->AsTableVal()->Lookup(op1.val) != 0); - v->AsTableVal()->Assign(op1.val, 0); - } - break; - - case OP_DEL: - assert(op1.val); - if ( t == TYPE_TABLE ) - { - if ( v->Type()->AsTableType()->IsSet() ) - CheckOldSet("delete", target.id, op1.val, op2 != 0, - v->AsTableVal()->Lookup(op1.val) != 0); - else - CheckOld("delete", target.id, op1.val, op2, - v->AsTableVal()->Lookup(op1.val)); - - Unref(v->AsTableVal()->Delete(op1.val)); - } - break; - - case OP_EXPIRE: - assert(op1.val); - if ( t == TYPE_TABLE ) - { - // No old check for expire. It may have already - // been deleted by ourselves. Furthermore, we - // ignore the expire_func's return value. - TableVal* tv = v->AsTableVal(); - if ( tv->Lookup(op1.val, false) ) - { - // We want to propagate state updates which - // are performed in the expire_func. - StateAccess::ResumeReplay(); - - if ( remote_serializer ) - remote_serializer->ResumeStateUpdates(); - - tv->CallExpireFunc(op1.val->Ref()); - - if ( remote_serializer ) - remote_serializer->SuspendStateUpdates(); - - StateAccess::SuspendReplay(); - - Unref(tv->AsTableVal()->Delete(op1.val)); - } - } - - break; - - case OP_PRINT: - assert(op1.val); - reporter->InternalError("access replay for print not implemented"); - break; - - case OP_READ_IDX: - if ( t == TYPE_TABLE ) - { - assert(op1.val); - TableVal* tv = v->AsTableVal(); - - // Update the timestamp if we have a read_expire. - if ( tv->FindAttr(ATTR_EXPIRE_READ) ) - { - if ( ! 
tv->UpdateTimestamp(op1.val) && - remote_check_sync_consistency ) - { - ODesc d; - d.SetShort(); - op1.val->Describe(&d); - - val_list* args = new val_list; - args->append(new StringVal("read")); - args->append(new StringVal(fmt("%s[%s]", target.id->Name(), d.Description()))); - args->append(new StringVal("existent")); - args->append(new StringVal("not existent")); - mgr.QueueEvent(remote_state_inconsistency, args); - } - } - } - else - reporter->Error("read for non-table"); - break; - - default: - reporter->InternalError("access replay: unknown opcode for StateAccess"); - break; - } - - --replaying; - - if ( remote_state_access_performed ) - { - val_list* vl = new val_list; - vl->append(new StringVal(target.id->Name())); - vl->append(target.id->ID_Val()->Ref()); - mgr.QueueEvent(remote_state_access_performed, vl); - } - } - -ID* StateAccess::Target() const - { - return target_type == TYPE_ID ? target.id : target.val->UniqueID(); - } - -bool StateAccess::Serialize(SerialInfo* info) const - { - return SerialObj::Serialize(info); - } - -StateAccess* StateAccess::Unserialize(UnserialInfo* info) - { - StateAccess* sa = - (StateAccess*) SerialObj::Unserialize(info, SER_STATE_ACCESS); - return sa; - } - -IMPLEMENT_SERIAL(StateAccess, SER_STATE_ACCESS); - -bool StateAccess::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_STATE_ACCESS, SerialObj); - - if ( ! SERIALIZE(char(opcode)) ) - return false; - - const ID* id = - target_type == TYPE_ID ? target.id : target.val->UniqueID(); - - if ( ! SERIALIZE(id->Name()) ) - return false; - - if ( op1_type == TYPE_KEY ) - { - Val* index = - id->ID_Val()->AsTableVal()->RecoverIndex(this->op1.key); - - if ( ! index ) - return false; - if ( ! index->Serialize(info) ) - return false; - - Unref(index); - } - - else if ( ! op1.val->Serialize(info) ) - return false; - - // Don't send the "old" operand if we don't want consistency checks. - // Unfortunately, it depends on the opcode which operand that actually - // is. - - const Val* null = 0; - - if ( remote_check_sync_consistency ) - { - SERIALIZE_OPTIONAL(op2); - SERIALIZE_OPTIONAL(op3); - } - - else - { - switch ( opcode ) { - case OP_PRINT: - case OP_EXPIRE: - case OP_READ_IDX: - // No old. - SERIALIZE_OPTIONAL(null); - SERIALIZE_OPTIONAL(null); - break; - - case OP_INCR: - case OP_INCR_IDX: - // Always need old. - SERIALIZE_OPTIONAL(op2); - SERIALIZE_OPTIONAL(op3); - break; - - case OP_ASSIGN: - case OP_ADD: - case OP_DEL: - // Op2 is old. - SERIALIZE_OPTIONAL(null); - SERIALIZE_OPTIONAL(null); - break; - - case OP_ASSIGN_IDX: - // Op3 is old. - SERIALIZE_OPTIONAL(op2); - SERIALIZE_OPTIONAL(null); - break; - - default: - reporter->InternalError("StateAccess::DoSerialize: unknown opcode"); - } - } - - return true; - } - -bool StateAccess::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(SerialObj); - - char c; - if ( ! UNSERIALIZE(&c) ) - return false; - - opcode = Opcode(c); - - const char* name; - if ( ! UNSERIALIZE_STR(&name, 0) ) - return false; - - target_type = TYPE_ID; - target.id = global_scope()->Lookup(name); - - if ( target.id ) - // Otherwise, we'll delete it below. - delete [] name; - - op1_type = TYPE_VAL; - op1.val = Val::Unserialize(info); - if ( ! 
op1.val ) - return false; - - UNSERIALIZE_OPTIONAL(op2, Val::Unserialize(info)); - UNSERIALIZE_OPTIONAL(op3, Val::Unserialize(info)); - - if ( target.id ) - Ref(target.id); - else - { - // This may happen as long as we haven't agreed on the - // unique name for an ID during initial synchronization, or if - // the local peer has already deleted the ID. - DBG_LOG(DBG_STATE, "state access referenced unknown id %s", name); - - if ( info->install_uniques ) - { - target.id = new ID(name, SCOPE_GLOBAL, true); - Ref(target.id); - global_scope()->Insert(name, target.id); -#ifdef USE_PERFTOOLS_DEBUG - heap_checker->IgnoreObject(target.id); -#endif - } - - delete [] name; - } - - return true; - } - -void StateAccess::Describe(ODesc* d) const - { - const ID* id; - const char* id_str = ""; - const char* unique_str = ""; - - d->SetShort(); - - if ( target_type == TYPE_ID ) - { - id = target.id; - - if ( ! id ) - { - d->Add("(unknown id)"); - return; - } - - id_str = id->Name(); - - if ( id->ID_Val() && id->ID_Val()->IsMutableVal() && - id->Name()[0] != '#' ) - unique_str = fmt(" [id] (%s)", id->ID_Val()->AsMutableVal()->UniqueID()->Name()); - } - else - { - id = target.val->UniqueID(); - -#ifdef DEBUG - if ( target.val->GetID() ) - { - id_str = target.val->GetID()->Name(); - unique_str = fmt(" [val] (%s)", id->Name()); - } - else -#endif - id_str = id->Name(); - } - - const Val* op1 = op1_type == TYPE_VAL ? - this->op1.val : - id->ID_Val()->AsTableVal()->RecoverIndex(this->op1.key); - - switch ( opcode ) { - case OP_ASSIGN: - assert(op1); - d->Add(id_str); - d->Add(" = "); - op1->Describe(d); - if ( op2 ) - { - d->Add(" ("); - op2->Describe(d); - d->Add(")"); - } - d->Add(unique_str); - break; - - case OP_INCR: - assert(op1 && op2); - d->Add(id_str); - d->Add(" += "); - d->Add(op1->CoerceToInt() - op2->CoerceToInt()); - d->Add(unique_str); - break; - - case OP_ASSIGN_IDX: - assert(op1); - d->Add(id_str); - d->Add("["); - op1->Describe(d); - d->Add("]"); - d->Add(" = "); - if ( op2 ) - op2->Describe(d); - else - d->Add("(null)"); - if ( op3 ) - { - d->Add(" ("); - op3->Describe(d); - d->Add(")"); - } - d->Add(unique_str); - break; - - case OP_INCR_IDX: - assert(op1 && op2 && op3); - d->Add(id_str); - d->Add("["); - op1->Describe(d); - d->Add("]"); - d->Add(" += "); - d->Add(op2->CoerceToInt() - op3->CoerceToInt()); - d->Add(unique_str); - break; - - case OP_ADD: - assert(op1); - d->Add("add "); - d->Add(id_str); - d->Add("["); - op1->Describe(d); - d->Add("]"); - if ( op2 ) - { - d->Add(" ("); - op2->Describe(d); - d->Add(")"); - } - d->Add(unique_str); - break; - - case OP_DEL: - assert(op1); - d->Add("del "); - d->Add(id_str); - d->Add("["); - op1->Describe(d); - d->Add("]"); - if ( op2 ) - { - d->Add(" ("); - op2->Describe(d); - d->Add(")"); - } - d->Add(unique_str); - break; - - case OP_EXPIRE: - assert(op1); - d->Add("expire "); - d->Add(id_str); - d->Add("["); - op1->Describe(d); - d->Add("]"); - if ( op2 ) - { - d->Add(" ("); - op2->Describe(d); - d->Add(")"); - } - d->Add(unique_str); - break; - - case OP_PRINT: - assert(op1); - d->Add("print "); - d->Add(id_str); - op1->Describe(d); - d->Add(unique_str); - break; - - case OP_READ_IDX: - assert(op1); - d->Add("read "); - d->Add(id_str); - d->Add("["); - op1->Describe(d); - d->Add("]"); - break; - - default: - reporter->InternalError("unknown opcode for StateAccess"); - break; - } - - if ( op1_type != TYPE_VAL ) - Unref(const_cast(op1)); - } - -void StateAccess::Log(StateAccess* access) - { - bool synchronized = false; - bool persistent = false; - 
bool tracked = false; - - if ( access->target_type == TYPE_ID ) - { - if ( access->target.id->FindAttr(ATTR_SYNCHRONIZED) ) - synchronized = true; - - if ( access->target.id->FindAttr(ATTR_PERSISTENT) ) - persistent = true; - - if ( access->target.id->FindAttr(ATTR_TRACKED) ) - tracked = true; - } - else - { - if ( access->target.val->GetProperties() & MutableVal::SYNCHRONIZED ) - synchronized = true; - - if ( access->target.val->GetProperties() & MutableVal::PERSISTENT ) - persistent = true; - - if ( access->target.val->GetProperties() & MutableVal::TRACKED ) - tracked = true; - } - - if ( synchronized ) - { - if ( state_serializer ) - { - SerialInfo info(state_serializer); - state_serializer->Serialize(&info, *access); - } - - SerialInfo info(remote_serializer); - remote_serializer->SendAccess(&info, *access); - } - - if ( persistent && persistence_serializer->IsSerializationRunning() ) - persistence_serializer->LogAccess(*access); - - if ( tracked ) - notifiers.AccessPerformed(*access); - -#ifdef DEBUG - ODesc desc; - access->Describe(&desc); - DBG_LOG(DBG_STATE, "operation: %s%s [%s%s]", - desc.Description(), replaying > 0 ? " (replay)" : "", - persistent ? "P" : "", synchronized ? "S" : ""); -#endif - - delete access; - - } - -NotifierRegistry notifiers; - -void NotifierRegistry::Register(ID* id, NotifierRegistry::Notifier* notifier) - { - DBG_LOG(DBG_NOTIFIERS, "registering ID %s for notifier %s", - id->Name(), notifier->Name()); - - Attr* attr = new Attr(ATTR_TRACKED); - - if ( id->Attrs() ) - { - if ( ! id->Attrs()->FindAttr(ATTR_TRACKED) ) - id->Attrs()->AddAttr(attr); - } - else - { - attr_list* a = new attr_list; - a->append(attr); - id->SetAttrs(new Attributes(a, id->Type(), false)); - } - - Unref(attr); - - NotifierMap::iterator i = ids.find(id->Name()); - - if ( i != ids.end() ) - i->second->insert(notifier); - else - { - NotifierSet* s = new NotifierSet; - s->insert(notifier); - ids.insert(NotifierMap::value_type(id->Name(), s)); - } - - Ref(id); - } - -void NotifierRegistry::Register(Val* val, NotifierRegistry::Notifier* notifier) - { - if ( val->IsMutableVal() ) - Register(val->AsMutableVal()->UniqueID(), notifier); - } - -void NotifierRegistry::Unregister(ID* id, NotifierRegistry::Notifier* notifier) - { - DBG_LOG(DBG_NOTIFIERS, "unregistering ID %s for notifier %s", - id->Name(), notifier->Name()); - - NotifierMap::iterator i = ids.find(id->Name()); - - if ( i == ids.end() ) - return; - - Attr* attr = id->Attrs()->FindAttr(ATTR_TRACKED); - id->Attrs()->RemoveAttr(ATTR_TRACKED); - Unref(attr); - - NotifierSet* s = i->second; - s->erase(notifier); - - if ( s->size() == 0 ) - { - delete s; - ids.erase(i); - } - - Unref(id); - } - -void NotifierRegistry::Unregister(Val* val, NotifierRegistry::Notifier* notifier) - { - if ( val->IsMutableVal() ) - Unregister(val->AsMutableVal()->UniqueID(), notifier); - } - -void NotifierRegistry::AccessPerformed(const StateAccess& sa) - { - ID* id = sa.Target(); - - NotifierMap::iterator i = ids.find(id->Name()); - - if ( i == ids.end() ) - return; - - DBG_LOG(DBG_NOTIFIERS, "modification to tracked ID %s", id->Name()); - - NotifierSet* s = i->second; - - if ( id->IsInternalGlobal() ) - for ( NotifierSet::iterator j = s->begin(); j != s->end(); j++ ) - (*j)->Access(id->ID_Val(), sa); - else - for ( NotifierSet::iterator j = s->begin(); j != s->end(); j++ ) - (*j)->Access(id, sa); - } - -const char* NotifierRegistry::Notifier::Name() const - { - return fmt("%p", this); - } - diff --git a/src/StateAccess.h b/src/StateAccess.h deleted file mode 
100644 index 1e84430956..0000000000 --- a/src/StateAccess.h +++ /dev/null @@ -1,150 +0,0 @@ -// A class describing a state-modyfing access to a Value or an ID. - -#ifndef STATEACESSS_H -#define STATEACESSS_H - -#include -#include -#include - -#include "SerialObj.h" - -class Val; -class ID; -class MutableVal; -class HashKey; -class ODesc; -class Serializer; -class TableVal; - -enum Opcode { // Op1 Op2 Op3 (Vals) - OP_NONE, - OP_ASSIGN, // new old - OP_ASSIGN_IDX, // idx new old - OP_ADD, // idx old - OP_INCR, // idx new old - OP_INCR_IDX, // idx new old - OP_DEL, // idx old - OP_PRINT, // args - OP_EXPIRE, // idx - OP_READ_IDX, // idx -}; - -class StateAccess : public SerialObj { -public: - StateAccess(Opcode opcode, const ID* target, const Val* op1, - const Val* op2 = 0, const Val* op3 = 0); - StateAccess(Opcode opcode, const MutableVal* target, const Val* op1, - const Val* op2 = 0, const Val* op3 = 0); - - // For tables, the idx operand may be given as an index HashKey. - // This is for efficiency. While we need to reconstruct the index - // if we are actually going to serialize the access, we can at - // least skip it if we don't. - StateAccess(Opcode opcode, const ID* target, const HashKey* op1, - const Val* op2 = 0, const Val* op3 = 0); - StateAccess(Opcode opcode, const MutableVal* target, const HashKey* op1, - const Val* op2 = 0, const Val* op3 = 0); - - StateAccess(const StateAccess& sa); - - ~StateAccess() override; - - // Replays this access in the our environment. - void Replay(); - - // Returns target ID which may be an internal one for unbound vals. - ID* Target() const; - - void Describe(ODesc* d) const; - - bool Serialize(SerialInfo* info) const; - static StateAccess* Unserialize(UnserialInfo* info); - - // Main entry point when StateAcesses are performed. - // For every state-changing operation, this has to be called. - static void Log(StateAccess* access); - - // If we're going to make additional non-replaying accesses during a - // Replay(), we have to call these. - static void SuspendReplay() { --replaying; } - static void ResumeReplay() { ++replaying; } - -private: - StateAccess() { target.id = 0; op1.val = op2 = op3 = 0; } - void RefThem(); - - bool CheckOld(const char* op, ID* id, Val* index, Val* should, Val* is); - bool CheckOldSet(const char* op, ID* id, Val* index, bool should, bool is); - bool MergeTables(TableVal* dst, Val* src); - - DECLARE_SERIAL(StateAccess); - - Opcode opcode; - union { - ID* id; - MutableVal* val; - } target; - - union { - Val* val; - const HashKey* key; - } op1; - - Val* op2; - Val* op3; - - enum Type { TYPE_ID, TYPE_VAL, TYPE_MVAL, TYPE_KEY }; - Type target_type; - Type op1_type; - bool delete_op1_key; - - static int replaying; -}; - -// We provide a notifier framework to inform interested parties of -// modifications to selected global IDs/Vals. To get notified about a change, -// derive a class from Notifier and register the interesting IDs/Vals with -// the NotifierRegistry. -// -// Note: For containers (e.g., tables), notifications are only issued if the -// container itself is modified, *not* for changes to the values contained -// therein. - -class NotifierRegistry { -public: - class Notifier { - public: - virtual ~Notifier() { } - - // Called when a change is being performed. Note that when these - // methods are called, it is undefined whether the change has - // already been done or is just going to be performed soon. 
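// A minimal usage sketch of the framework described above, based only on
// the interface declared in this (now removed) header: derive from
// NotifierRegistry::Notifier, override both Access() methods, and register
// the IDs of interest with the global `notifiers` registry declared at the
// end of the file. MyNotifier and watch_global are illustrative names, not
// code that exists in the tree.
class MyNotifier : public NotifierRegistry::Notifier {
public:
	void Access(ID* id, const StateAccess& sa) override
		{
		// React to a modification of the tracked global, e.g.
		// re-evaluate whatever depends on its value.
		}

	void Access(Val* val, const StateAccess& sa) override
		{
		// Same, for accesses reported against the value itself.
		}

	const char* Name() const override
		{ return "my-notifier (sketch)"; }
};

// Hypothetical call site; `global_id` stands for an ID* obtained elsewhere
// (e.g. via a scope lookup).
void watch_global(ID* global_id, MyNotifier* n)
	{
	notifiers.Register(global_id, n);	// start receiving Access() calls
	// ... and once notifications are no longer needed:
	notifiers.Unregister(global_id, n);
	}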
- virtual void Access(ID* id, const StateAccess& sa) = 0; - virtual void Access(Val* val, const StateAccess& sa) = 0; - virtual const char* Name() const; // for debugging - }; - - NotifierRegistry() { } - ~NotifierRegistry() { } - - // Inform the given notifier if ID/Val changes. - void Register(ID* id, Notifier* notifier); - void Register(Val* val, Notifier* notifier); - - // Cancel notification for this ID/Val. - void Unregister(ID* id, Notifier* notifier); - void Unregister(Val* val, Notifier* notifier); - -private: - friend class StateAccess; - void AccessPerformed(const StateAccess& sa); - - typedef std::set NotifierSet; - typedef std::map NotifierMap; - NotifierMap ids; -}; - -extern NotifierRegistry notifiers; - -#endif diff --git a/src/Stats.cc b/src/Stats.cc index 780ffdc39b..9489f12f93 100644 --- a/src/Stats.cc +++ b/src/Stats.cc @@ -255,7 +255,7 @@ void ProfileLogger::Log() while ( (id = globals->NextEntry(c)) ) // We don't show/count internal globals as they are always // contained in some other global user-visible container. - if ( id->HasVal() && ! id->IsInternalGlobal() ) + if ( id->HasVal() ) { Val* v = id->ID_Val(); @@ -310,11 +310,11 @@ void ProfileLogger::Log() // (and for consistency we dispatch it *now*) if ( profiling_update ) { - val_list* vl = new val_list; Ref(file); - vl->append(new Val(file)); - vl->append(val_mgr->GetBool(expensive)); - mgr.Dispatch(new Event(profiling_update, vl)); + mgr.Dispatch(new Event(profiling_update, { + new Val(file), + val_mgr->GetBool(expensive), + })); } } @@ -369,12 +369,12 @@ void SampleLogger::SegmentProfile(const char* /* name */, const Location* /* loc */, double dtime, int dmem) { - val_list* vl = new val_list(2); - vl->append(load_samples->Ref()); - vl->append(new IntervalVal(dtime, Seconds)); - vl->append(val_mgr->GetInt(dmem)); - - mgr.QueueEvent(load_sample, vl); + if ( load_sample ) + mgr.QueueEventFast(load_sample, { + load_samples->Ref(), + new IntervalVal(dtime, Seconds), + val_mgr->GetInt(dmem) + }); } void SegmentProfiler::Init() diff --git a/src/Stmt.cc b/src/Stmt.cc index 7e7ba23a18..58bca4fc5b 100644 --- a/src/Stmt.cc +++ b/src/Stmt.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include "Expr.h" #include "Event.h" @@ -14,7 +14,6 @@ #include "Debug.h" #include "Traverse.h" #include "Trigger.h" -#include "RemoteSerializer.h" const char* stmt_name(BroStmtTag t) { @@ -118,47 +117,6 @@ void Stmt::AccessStats(ODesc* d) const } } -bool Stmt::Serialize(SerialInfo* info) const - { - return SerialObj::Serialize(info); - } - -Stmt* Stmt::Unserialize(UnserialInfo* info, BroStmtTag want) - { - Stmt* stmt = (Stmt*) SerialObj::Unserialize(info, SER_STMT); - - if ( want != STMT_ANY && stmt->tag != want ) - { - info->s->Error("wrong stmt type"); - Unref(stmt); - return 0; - } - - return stmt; - } - -bool Stmt::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_STMT, BroObj); - - return SERIALIZE(char(tag)) && SERIALIZE(last_access) - && SERIALIZE(access_count); - } - -bool Stmt::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BroObj); - - char c; - if ( ! 
UNSERIALIZE(&c) ) - return 0; - - tag = BroStmtTag(c); - - return UNSERIALIZE(&last_access) && UNSERIALIZE(&access_count); - } - - ExprListStmt::ExprListStmt(BroStmtTag t, ListExpr* arg_l) : Stmt(t) { @@ -208,19 +166,6 @@ void ExprListStmt::PrintVals(ODesc* d, val_list* vals, int offset) const describe_vals(vals, d, offset); } -bool ExprListStmt::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_EXPR_LIST_STMT, Stmt); - return l->Serialize(info); - } - -bool ExprListStmt::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Stmt); - l = (ListExpr*) Expr::Unserialize(info, EXPR_LIST); - return l != 0; - } - TraversalCode ExprListStmt::Traverse(TraversalCallback* cb) const { TraversalCode tc = cb->PreStmt(this); @@ -292,36 +237,20 @@ Val* PrintStmt::DoExec(val_list* vals, stmt_flow_type& /* flow */) const if ( print_hook ) { - val_list* vl = new val_list(2); ::Ref(f); - vl->append(new Val(f)); - vl->append(new StringVal(d.Len(), d.Description())); // Note, this doesn't do remote printing. - mgr.Dispatch(new Event(print_hook, vl), true); + mgr.Dispatch( + new Event( + print_hook, + {new Val(f), new StringVal(d.Len(), d.Description())}), + true); } - - if ( remote_serializer ) - remote_serializer->SendPrintHookEvent(f, d.Description(), d.Len()); } return 0; } -IMPLEMENT_SERIAL(PrintStmt, SER_PRINT_STMT); - -bool PrintStmt::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_PRINT_STMT, ExprListStmt); - return true; - } - -bool PrintStmt::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(ExprListStmt); - return true; - } - ExprStmt::ExprStmt(Expr* arg_e) : Stmt(STMT_EXPR) { e = arg_e; @@ -407,22 +336,6 @@ TraversalCode ExprStmt::Traverse(TraversalCallback* cb) const HANDLE_TC_STMT_POST(tc); } -IMPLEMENT_SERIAL(ExprStmt, SER_EXPR_STMT); - -bool ExprStmt::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_EXPR_STMT, Stmt); - SERIALIZE_OPTIONAL(e); - return true; - } - -bool ExprStmt::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Stmt); - UNSERIALIZE_OPTIONAL(e, Expr::Unserialize(info)); - return true; - } - IfStmt::IfStmt(Expr* test, Stmt* arg_s1, Stmt* arg_s2) : ExprStmt(STMT_IF, test) { s1 = arg_s1; @@ -510,25 +423,6 @@ TraversalCode IfStmt::Traverse(TraversalCallback* cb) const HANDLE_TC_STMT_POST(tc); } -IMPLEMENT_SERIAL(IfStmt, SER_IF_STMT); - -bool IfStmt::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_IF_STMT, ExprStmt); - return s1->Serialize(info) && s2->Serialize(info); - } - -bool IfStmt::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(ExprStmt); - s1 = Stmt::Unserialize(info); - if ( ! s1 ) - return false; - - s2 = Stmt::Unserialize(info); - return s2 != 0; - } - static BroStmtTag get_last_stmt_tag(const Stmt* stmt) { if ( ! stmt ) @@ -658,67 +552,6 @@ TraversalCode Case::Traverse(TraversalCallback* cb) const return TC_CONTINUE; } -bool Case::Serialize(SerialInfo* info) const - { - return SerialObj::Serialize(info); - } - -Case* Case::Unserialize(UnserialInfo* info) - { - return (Case*) SerialObj::Unserialize(info, SER_CASE); - } - -IMPLEMENT_SERIAL(Case, SER_CASE); - -bool Case::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_CASE, BroObj); - - if ( ! expr_cases->Serialize(info) ) - return false; - - id_list empty; - id_list* types = (type_cases ? type_cases : &empty); - - if ( ! SERIALIZE(types->length()) ) - return false; - - loop_over_list((*types), i) - { - if ( ! 
(*types)[i]->Serialize(info) ) - return false; - } - - return this->s->Serialize(info); - } - -bool Case::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BroObj); - - expr_cases = (ListExpr*) Expr::Unserialize(info, EXPR_LIST); - if ( ! expr_cases ) - return false; - - int len; - if ( ! UNSERIALIZE(&len) ) - return false; - - type_cases = new id_list; - - while ( len-- ) - { - ID* id = ID::Unserialize(info); - if ( ! id ) - return false; - - type_cases->append(id); - } - - this->s = Stmt::Unserialize(info); - return this->s != 0; - } - static void int_del_func(void* v) { delete (int*) v; @@ -1031,66 +864,6 @@ TraversalCode SwitchStmt::Traverse(TraversalCallback* cb) const HANDLE_TC_STMT_POST(tc); } -IMPLEMENT_SERIAL(SwitchStmt, SER_SWITCH_STMT); - -bool SwitchStmt::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_SWITCH_STMT, ExprStmt); - - if ( ! SERIALIZE(cases->length()) ) - return false; - - loop_over_list((*cases), i) - if ( ! (*cases)[i]->Serialize(info) ) - return false; - - if ( ! SERIALIZE(default_case_idx) ) - return false; - - return true; - } - -bool SwitchStmt::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(ExprStmt); - - Init(); - - int len; - if ( ! UNSERIALIZE(&len) ) - return false; - - while ( len-- ) - { - Case* c = Case::Unserialize(info); - if ( ! c ) - return false; - - cases->append(c); - } - - if ( ! UNSERIALIZE(&default_case_idx) ) - return false; - - loop_over_list(*cases, i) - { - const ListExpr* le = (*cases)[i]->ExprCases(); - - if ( ! le ) - continue; - - const expr_list& exprs = le->Exprs(); - - loop_over_list(exprs, j) - { - if ( ! AddCaseLabelValueMapping(exprs[j]->ExprVal(), i) ) - return false; - } - } - - return true; - } - AddStmt::AddStmt(Expr* arg_e) : ExprStmt(STMT_ADD, arg_e) { if ( ! 
e->CanAdd() ) @@ -1124,20 +897,6 @@ TraversalCode AddStmt::Traverse(TraversalCallback* cb) const HANDLE_TC_STMT_POST(tc); } -IMPLEMENT_SERIAL(AddStmt, SER_ADD_STMT); - -bool AddStmt::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_ADD_STMT, ExprStmt); - return true; - } - -bool AddStmt::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(ExprStmt); - return true; - } - DelStmt::DelStmt(Expr* arg_e) : ExprStmt(STMT_DELETE, arg_e) { if ( e->IsError() ) @@ -1173,20 +932,6 @@ TraversalCode DelStmt::Traverse(TraversalCallback* cb) const HANDLE_TC_STMT_POST(tc); } -IMPLEMENT_SERIAL(DelStmt, SER_DEL_STMT); - -bool DelStmt::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_DEL_STMT, ExprStmt); - return true; - } - -bool DelStmt::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(ExprStmt); - return true; - } - EventStmt::EventStmt(EventExpr* arg_e) : ExprStmt(STMT_EVENT, arg_e) { event_expr = arg_e; @@ -1198,7 +943,10 @@ Val* EventStmt::Exec(Frame* f, stmt_flow_type& flow) const val_list* args = eval_list(f, event_expr->Args()); if ( args ) - mgr.QueueEvent(event_expr->Handler(), args); + { + mgr.QueueEvent(event_expr->Handler(), std::move(*args)); + delete args; + } flow = FLOW_NEXT; @@ -1218,22 +966,6 @@ TraversalCode EventStmt::Traverse(TraversalCallback* cb) const HANDLE_TC_STMT_POST(tc); } -IMPLEMENT_SERIAL(EventStmt, SER_EVENT_STMT); - -bool EventStmt::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_EVENT_STMT, ExprStmt); - return event_expr->Serialize(info); - } - -bool EventStmt::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(ExprStmt); - - event_expr = (EventExpr*) Expr::Unserialize(info, EXPR_EVENT); - return event_expr != 0; - } - WhileStmt::WhileStmt(Expr* arg_loop_condition, Stmt* arg_body) : loop_condition(arg_loop_condition), body(arg_body) { @@ -1319,30 +1051,6 @@ Val* WhileStmt::Exec(Frame* f, stmt_flow_type& flow) const return rval; } -IMPLEMENT_SERIAL(WhileStmt, SER_WHILE_STMT); - -bool WhileStmt::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_WHILE_STMT, Stmt); - - if ( ! loop_condition->Serialize(info) ) - return false; - - return body->Serialize(info); - } - -bool WhileStmt::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Stmt); - loop_condition = Expr::Unserialize(info); - - if ( ! loop_condition ) - return false; - - body = Stmt::Unserialize(info); - return body != 0; - } - ForStmt::ForStmt(id_list* arg_loop_vars, Expr* loop_expr) : ExprStmt(STMT_FOR, loop_expr) { @@ -1607,47 +1315,6 @@ TraversalCode ForStmt::Traverse(TraversalCallback* cb) const HANDLE_TC_STMT_POST(tc); } -IMPLEMENT_SERIAL(ForStmt, SER_FOR_STMT); - -bool ForStmt::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_FOR_STMT, ExprStmt); - - if ( ! SERIALIZE(loop_vars->length()) ) - return false; - - loop_over_list((*loop_vars), i) - { - if ( ! (*loop_vars)[i]->Serialize(info) ) - return false; - } - - return body->Serialize(info); - } - -bool ForStmt::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(ExprStmt); - - int len; - if ( ! UNSERIALIZE(&len) ) - return false; - - loop_vars = new id_list; - - while ( len-- ) - { - ID* id = ID::Unserialize(info); - if ( ! 
id ) - return false; - - loop_vars->append(id); - } - - body = Stmt::Unserialize(info); - return body != 0; - } - Val* NextStmt::Exec(Frame* /* f */, stmt_flow_type& flow) const { RegisterAccess(); @@ -1675,20 +1342,6 @@ TraversalCode NextStmt::Traverse(TraversalCallback* cb) const HANDLE_TC_STMT_POST(tc); } -IMPLEMENT_SERIAL(NextStmt, SER_NEXT_STMT); - -bool NextStmt::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_NEXT_STMT, Stmt); - return true; - } - -bool NextStmt::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Stmt); - return true; - } - Val* BreakStmt::Exec(Frame* /* f */, stmt_flow_type& flow) const { RegisterAccess(); @@ -1716,20 +1369,6 @@ TraversalCode BreakStmt::Traverse(TraversalCallback* cb) const HANDLE_TC_STMT_POST(tc); } -IMPLEMENT_SERIAL(BreakStmt, SER_BREAK_STMT); - -bool BreakStmt::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_BREAK_STMT, Stmt); - return true; - } - -bool BreakStmt::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Stmt); - return true; - } - Val* FallthroughStmt::Exec(Frame* /* f */, stmt_flow_type& flow) const { RegisterAccess(); @@ -1757,20 +1396,6 @@ TraversalCode FallthroughStmt::Traverse(TraversalCallback* cb) const HANDLE_TC_STMT_POST(tc); } -IMPLEMENT_SERIAL(FallthroughStmt, SER_FALLTHROUGH_STMT); - -bool FallthroughStmt::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_FALLTHROUGH_STMT, Stmt); - return true; - } - -bool FallthroughStmt::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Stmt); - return true; - } - ReturnStmt::ReturnStmt(Expr* arg_e) : ExprStmt(STMT_RETURN, arg_e) { Scope* s = current_scope(); @@ -1838,20 +1463,6 @@ void ReturnStmt::Describe(ODesc* d) const DescribeDone(d); } -IMPLEMENT_SERIAL(ReturnStmt, SER_RETURN_STMT); - -bool ReturnStmt::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_RETURN_STMT, ExprStmt); - return true; - } - -bool ReturnStmt::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(ExprStmt); - return true; - } - StmtList::StmtList() : Stmt(STMT_LIST) { } @@ -1941,43 +1552,6 @@ TraversalCode StmtList::Traverse(TraversalCallback* cb) const HANDLE_TC_STMT_POST(tc); } -IMPLEMENT_SERIAL(StmtList, SER_STMT_LIST); - -bool StmtList::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_STMT_LIST, Stmt); - - if ( ! SERIALIZE(stmts.length()) ) - return false; - - loop_over_list(stmts, i) - if ( ! stmts[i]->Serialize(info) ) - return false; - - return true; - } - -bool StmtList::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Stmt); - - int len; - if ( ! UNSERIALIZE(&len) ) - return false; - - while ( len-- ) - { - Stmt* stmt = Stmt::Unserialize(info); - if ( ! stmt ) - return false; - - stmts.append(stmt); - } - - return true; - } - - Val* EventBodyList::Exec(Frame* f, stmt_flow_type& flow) const { RegisterAccess(); @@ -2036,20 +1610,6 @@ void EventBodyList::Describe(ODesc* d) const StmtList::Describe(d); } -IMPLEMENT_SERIAL(EventBodyList, SER_EVENT_BODY_LIST); - -bool EventBodyList::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_EVENT_BODY_LIST, StmtList); - return SERIALIZE(topmost); - } - -bool EventBodyList::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(StmtList); - return UNSERIALIZE(&topmost); - } - InitStmt::~InitStmt() { loop_over_list(*inits, i) @@ -2123,45 +1683,6 @@ TraversalCode InitStmt::Traverse(TraversalCallback* cb) const HANDLE_TC_STMT_POST(tc); } -IMPLEMENT_SERIAL(InitStmt, SER_INIT_STMT); - -bool InitStmt::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_INIT_STMT, Stmt); - - if ( ! 
SERIALIZE(inits->length()) ) - return false; - - loop_over_list((*inits), i) - { - if ( ! (*inits)[i]->Serialize(info) ) - return false; - } - - return true; - } - -bool InitStmt::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Stmt); - - int len; - if ( ! UNSERIALIZE(&len) ) - return false; - - inits = new id_list; - - while ( len-- ) - { - ID* id = ID::Unserialize(info); - if ( ! id ) - return false; - inits->append(id); - } - return true; - } - - Val* NullStmt::Exec(Frame* /* f */, stmt_flow_type& flow) const { RegisterAccess(); @@ -2191,20 +1712,6 @@ TraversalCode NullStmt::Traverse(TraversalCallback* cb) const HANDLE_TC_STMT_POST(tc); } -IMPLEMENT_SERIAL(NullStmt, SER_NULL_STMT); - -bool NullStmt::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_NULL_STMT, Stmt); - return true; - } - -bool NullStmt::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Stmt); - return true; - } - WhenStmt::WhenStmt(Expr* arg_cond, Stmt* arg_s1, Stmt* arg_s2, Expr* arg_timeout, bool arg_is_return) : Stmt(STMT_WHEN) @@ -2320,35 +1827,3 @@ TraversalCode WhenStmt::Traverse(TraversalCallback* cb) const HANDLE_TC_STMT_POST(tc); } -IMPLEMENT_SERIAL(WhenStmt, SER_WHEN_STMT); - -bool WhenStmt::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_WHEN_STMT, Stmt); - - if ( cond->Serialize(info) && s1->Serialize(info) ) - return false; - - SERIALIZE_OPTIONAL(s2); - SERIALIZE_OPTIONAL(timeout); - - return true; - } - -bool WhenStmt::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Stmt); - - cond = Expr::Unserialize(info); - if ( ! cond ) - return false; - - s1 = Stmt::Unserialize(info); - if ( ! s1 ) - return false; - - UNSERIALIZE_OPTIONAL(s2, Stmt::Unserialize(info)); - UNSERIALIZE_OPTIONAL(timeout, Expr::Unserialize(info)); - - return true; - } diff --git a/src/Stmt.h b/src/Stmt.h index a9bf7cddf8..7136ffe306 100644 --- a/src/Stmt.h +++ b/src/Stmt.h @@ -71,9 +71,6 @@ public: virtual unsigned int BPCount() const { return breakpoint_count; } - bool Serialize(SerialInfo* info) const; - static Stmt* Unserialize(UnserialInfo* info, BroStmtTag want = STMT_ANY); - virtual TraversalCode Traverse(TraversalCallback* cb) const = 0; protected: @@ -83,8 +80,6 @@ protected: void AddTag(ODesc* d) const; void DescribeDone(ODesc* d) const; - DECLARE_ABSTRACT_SERIAL(Stmt); - BroStmtTag tag; int breakpoint_count; // how many breakpoints on this statement @@ -111,8 +106,6 @@ protected: void Describe(ODesc* d) const override; void PrintVals(ODesc* d, val_list* vals, int offset) const; - DECLARE_ABSTRACT_SERIAL(ExprListStmt); - ListExpr* l; }; @@ -125,8 +118,6 @@ protected: PrintStmt() {} Val* DoExec(val_list* vals, stmt_flow_type& flow) const override; - - DECLARE_SERIAL(PrintStmt); }; class ExprStmt : public Stmt { @@ -151,8 +142,6 @@ protected: int IsPure() const override; - DECLARE_SERIAL(ExprStmt); - Expr* e; }; @@ -175,8 +164,6 @@ protected: Val* DoExec(Frame* f, Val* v, stmt_flow_type& flow) const override; int IsPure() const override; - DECLARE_SERIAL(IfStmt); - Stmt* s1; Stmt* s2; }; @@ -197,22 +184,20 @@ public: void Describe(ODesc* d) const override; - bool Serialize(SerialInfo* info) const; - static Case* Unserialize(UnserialInfo* info); - TraversalCode Traverse(TraversalCallback* cb) const; protected: friend class Stmt; Case() { expr_cases = 0; type_cases = 0; s = 0; } - DECLARE_SERIAL(Case); - ListExpr* expr_cases; id_list* type_cases; Stmt* s; }; +declare(PList,Case); +typedef PList(Case) case_list; + class SwitchStmt : public ExprStmt { public: SwitchStmt(Expr* index, case_list* 
cases); @@ -231,8 +216,6 @@ protected: Val* DoExec(Frame* f, Val* v, stmt_flow_type& flow) const override; int IsPure() const override; - DECLARE_SERIAL(SwitchStmt); - // Initialize composite hash and case label map. void Init(); @@ -271,8 +254,6 @@ public: protected: friend class Stmt; AddStmt() {} - - DECLARE_SERIAL(AddStmt); }; class DelStmt : public ExprStmt { @@ -287,8 +268,6 @@ public: protected: friend class Stmt; DelStmt() {} - - DECLARE_SERIAL(DelStmt); }; class EventStmt : public ExprStmt { @@ -303,8 +282,6 @@ protected: friend class Stmt; EventStmt() { event_expr = 0; } - DECLARE_SERIAL(EventStmt); - EventExpr* event_expr; }; @@ -328,8 +305,6 @@ protected: Val* Exec(Frame* f, stmt_flow_type& flow) const override; - DECLARE_SERIAL(WhileStmt); - Expr* loop_condition; Stmt* body; }; @@ -359,8 +334,6 @@ protected: Val* DoExec(Frame* f, Val* v, stmt_flow_type& flow) const override; - DECLARE_SERIAL(ForStmt); - id_list* loop_vars; Stmt* body; // Stores the value variable being used for a key value for loop. @@ -380,7 +353,6 @@ public: TraversalCode Traverse(TraversalCallback* cb) const override; protected: - DECLARE_SERIAL(NextStmt); }; class BreakStmt : public Stmt { @@ -395,7 +367,6 @@ public: TraversalCode Traverse(TraversalCallback* cb) const override; protected: - DECLARE_SERIAL(BreakStmt); }; class FallthroughStmt : public Stmt { @@ -410,7 +381,6 @@ public: TraversalCode Traverse(TraversalCallback* cb) const override; protected: - DECLARE_SERIAL(FallthroughStmt); }; class ReturnStmt : public ExprStmt { @@ -424,8 +394,6 @@ public: protected: friend class Stmt; ReturnStmt() {} - - DECLARE_SERIAL(ReturnStmt); }; class StmtList : public Stmt { @@ -445,8 +413,6 @@ public: protected: int IsPure() const override; - DECLARE_SERIAL(StmtList); - stmt_list stmts; }; @@ -464,9 +430,6 @@ public: // bool IsTopmost() { return topmost; } protected: - - DECLARE_SERIAL(EventBodyList); - bool topmost; }; @@ -493,8 +456,6 @@ protected: friend class Stmt; InitStmt() { inits = 0; } - DECLARE_SERIAL(InitStmt); - id_list* inits; }; @@ -508,9 +469,6 @@ public: void Describe(ODesc* d) const override; TraversalCode Traverse(TraversalCallback* cb) const override; - -protected: - DECLARE_SERIAL(NullStmt); }; class WhenStmt : public Stmt { @@ -534,8 +492,6 @@ public: protected: WhenStmt() { cond = 0; s1 = s2 = 0; timeout = 0; is_return = 0; } - DECLARE_SERIAL(WhenStmt); - Expr* cond; Stmt* s1; Stmt* s2; diff --git a/src/Tag.h b/src/Tag.h index efc3e359c2..78fe333e12 100644 --- a/src/Tag.h +++ b/src/Tag.h @@ -3,7 +3,7 @@ #ifndef TAG_H #define TAG_H -#include "bro-config.h" +#include "zeek-config.h" #include "util.h" #include "Type.h" diff --git a/src/Timer.cc b/src/Timer.cc index 101733028c..1138deec79 100644 --- a/src/Timer.cc +++ b/src/Timer.cc @@ -1,11 +1,10 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include "util.h" #include "Timer.h" #include "Desc.h" -#include "Serializer.h" #include "broker/Manager.h" // Names of timers in same order than in TimerType. 
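// (Hedged aside, not part of the patch.)  Stmt.h above now declares the Case
// pointer list explicitly via the macro-based PList container used throughout
// this code base; usage is unchanged from what SwitchStmt already does, e.g.:
static void describe_cases(case_list* cases, ODesc* d)
	{
	loop_over_list(*cases, i)
		(*cases)[i]->Describe(d);	// Case::Describe(), declared above
	}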
@@ -20,8 +19,6 @@ const char* TimerNames[] = { "FileAnalysisInactivityTimer", "FlowWeirdTimer", "FragTimer", - "IncrementalSendTimer", - "IncrementalWriteTimer", "InterconnTimer", "IPTunnelInactivityTimer", "NetbiosExpireTimer", @@ -55,41 +52,6 @@ void Timer::Describe(ODesc* d) const d->Add(Time()); } -bool Timer::Serialize(SerialInfo* info) const - { - return SerialObj::Serialize(info); - } - -Timer* Timer::Unserialize(UnserialInfo* info) - { - Timer* timer = (Timer*) SerialObj::Unserialize(info, SER_TIMER); - if ( ! timer ) - return 0; - - timer_mgr->Add(timer); - - return timer; - } - -bool Timer::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_TIMER, SerialObj); - char tmp = type; - return SERIALIZE(tmp) && SERIALIZE(time); - } - -bool Timer::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(SerialObj); - - char tmp; - if ( ! UNSERIALIZE(&tmp) ) - return false; - type = tmp; - - return UNSERIALIZE(&time); - } - unsigned int TimerMgr::current_timers[NUM_TIMER_TYPES]; TimerMgr::~TimerMgr() diff --git a/src/Timer.h b/src/Timer.h index 8d6de857a0..02ebb2773c 100644 --- a/src/Timer.h +++ b/src/Timer.h @@ -6,7 +6,6 @@ #include #include -#include "SerialObj.h" #include "PriorityQueue.h" extern "C" { @@ -25,8 +24,6 @@ enum TimerType { TIMER_FILE_ANALYSIS_INACTIVITY, TIMER_FLOW_WEIRD_EXPIRE, TIMER_FRAG, - TIMER_INCREMENTAL_SEND, - TIMER_INCREMENTAL_WRITE, TIMER_INTERCONN, TIMER_IP_TUNNEL_INACTIVITY, TIMER_NB_EXPIRE, @@ -51,10 +48,9 @@ const int NUM_TIMER_TYPES = int(TIMER_TIMERMGR_EXPIRE) + 1; extern const char* timer_type_to_string(TimerType type); -class Serializer; class ODesc; -class Timer : public SerialObj, public PQ_Element { +class Timer : public PQ_Element { public: Timer(double t, TimerType arg_type) : PQ_Element(t) { type = (char) arg_type; } @@ -69,14 +65,9 @@ public: void Describe(ODesc* d) const; - bool Serialize(SerialInfo* info) const; - static Timer* Unserialize(UnserialInfo* info); - protected: Timer() {} - DECLARE_ABSTRACT_SERIAL(Timer); - unsigned int type:8; }; diff --git a/src/Trigger.cc b/src/Trigger.cc index 213707b6b8..ae6483e3f5 100644 --- a/src/Trigger.cc +++ b/src/Trigger.cc @@ -33,7 +33,7 @@ TraversalCode TriggerTraversalCallback::PreExpr(const Expr* expr) trigger->Register(e->Id()); Val* v = e->Id()->ID_Val(); - if ( v && v->IsMutableVal() ) + if ( v && v->Modifiable() ) trigger->Register(v); break; }; @@ -382,38 +382,35 @@ void Trigger::Timeout() void Trigger::Register(ID* id) { assert(! disabled); - notifiers.Register(id, this); + notifier::registry.Register(id, this); Ref(id); - ids.insert(id); + objs.push_back({id, id}); } void Trigger::Register(Val* val) { + if ( ! val->Modifiable() ) + return; + assert(! 
disabled); - notifiers.Register(val, this); + notifier::registry.Register(val->Modifiable(), this); Ref(val); - vals.insert(val); + objs.emplace_back(val, val->Modifiable()); } void Trigger::UnregisterAll() { - loop_over_list(ids, i) + DBG_LOG(DBG_NOTIFIERS, "%s: unregistering all", Name()); + + for ( const auto& o : objs ) { - notifiers.Unregister(ids[i], this); - Unref(ids[i]); + notifier::registry.Unregister(o.second, this); + Unref(o.first); } - ids.clear(); - - loop_over_list(vals, j) - { - notifiers.Unregister(vals[j], this); - Unref(vals[j]); - } - - vals.clear(); + objs.clear(); } void Trigger::Attach(Trigger *trigger) diff --git a/src/Trigger.h b/src/Trigger.h index 0f7889d19a..2e0c91865f 100644 --- a/src/Trigger.h +++ b/src/Trigger.h @@ -4,7 +4,7 @@ #include #include -#include "StateAccess.h" +#include "Notifier.h" #include "Traverse.h" // Triggers are the heart of "when" statements: expressions that when @@ -13,7 +13,7 @@ class TriggerTimer; class TriggerTraversalCallback; -class Trigger : public NotifierRegistry::Notifier, public BroObj { +class Trigger : public BroObj, public notifier::Receiver { public: // Don't access Trigger objects; they take care of themselves after // instantiation. Note that if the condition is already true, the @@ -61,12 +61,10 @@ public: { d->Add(""); } // Overidden from Notifier. We queue the trigger and evaluate it // later to avoid race conditions. - void Access(ID* id, const StateAccess& sa) override - { QueueTrigger(this); } - void Access(Val* val, const StateAccess& sa) override + void Modified(notifier::Modifiable* m) override { QueueTrigger(this); } - const char* Name() const override; + const char* Name() const; static void QueueTrigger(Trigger* trigger); @@ -104,8 +102,7 @@ private: bool delayed; // true if a function call is currently being delayed bool disabled; - val_list vals; - id_list ids; + std::vector> objs; typedef map ValCache; ValCache cache; diff --git a/src/TunnelEncapsulation.h b/src/TunnelEncapsulation.h index 27729e56b7..5e83d91691 100644 --- a/src/TunnelEncapsulation.h +++ b/src/TunnelEncapsulation.h @@ -3,7 +3,7 @@ #ifndef TUNNELS_H #define TUNNELS_H -#include "bro-config.h" +#include "zeek-config.h" #include "NetVar.h" #include "IPAddr.h" #include "Val.h" diff --git a/src/Type.cc b/src/Type.cc index 77a5ac6d16..60461e026f 100644 --- a/src/Type.cc +++ b/src/Type.cc @@ -1,15 +1,14 @@ // See the file "COPYING" in the main distribution directory for copyright. 
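// (Hedged sketch, not part of the patch.)  The Trigger changes above show the
// shape of the new notifier API: a receiver derives from notifier::Receiver,
// subscribes through the global notifier::registry, and gets one unified
// Modified() callback instead of the old per-StateAccess Access() hooks.  As
// in Trigger, this assumes IDs (and modifiable Vals) can be handed to the
// registry as notifier::Modifiable objects.
#include "Notifier.h"
#include "ID.h"
#include <vector>

class WatchIDs : public notifier::Receiver {
public:
	void Watch(ID* id)
		{
		notifier::registry.Register(id, this);	// same call Trigger::Register(ID*) uses
		Ref(id);
		ids.push_back(id);
		}

	void Modified(notifier::Modifiable* m) override
		{ changed = true; }	// react however is appropriate; Trigger re-queues itself

	void UnwatchAll()
		{
		for ( auto* id : ids )
			{
			notifier::registry.Unregister(id, this);
			Unref(id);
			}

		ids.clear();
		}

	bool changed = false;

private:
	std::vector<ID*> ids;
};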
-#include "bro-config.h" +#include "zeek-config.h" #include "Type.h" #include "Attr.h" #include "Expr.h" #include "Scope.h" -#include "Serializer.h" #include "Reporter.h" -#include "broxygen/Manager.h" -#include "broxygen/utils.h" +#include "zeekygen/Manager.h" +#include "zeekygen/utils.h" #include #include @@ -122,27 +121,30 @@ BroType::BroType(TypeTag t, bool arg_base_type) } -BroType* BroType::Clone() const +BroType* BroType::ShallowClone() { - SerializationFormat* form = new BinarySerializationFormat(); - form->StartWrite(); - CloneSerializer ss(form); - SerialInfo sinfo(&ss); - sinfo.cache = false; + switch ( tag ) { + case TYPE_VOID: + case TYPE_BOOL: + case TYPE_INT: + case TYPE_COUNT: + case TYPE_COUNTER: + case TYPE_DOUBLE: + case TYPE_TIME: + case TYPE_INTERVAL: + case TYPE_STRING: + case TYPE_PATTERN: + case TYPE_TIMER: + case TYPE_PORT: + case TYPE_ADDR: + case TYPE_SUBNET: + case TYPE_ANY: + return new BroType(tag, base_type); - this->Serialize(&sinfo); - char* data; - uint32 len = form->EndWrite(&data); - form->StartRead(data, len); - - UnserialInfo uinfo(&ss); - uinfo.cache = false; - - BroType* rval = this->Unserialize(&uinfo, false); - assert(rval != this); - - free(data); - return rval; + default: + reporter->InternalError("cloning illegal base BroType"); + } + return nullptr; } int BroType::MatchesIndex(ListExpr*& index) const @@ -190,7 +192,7 @@ void BroType::Describe(ODesc* d) const void BroType::DescribeReST(ODesc* d, bool roles_only) const { - d->Add(fmt(":bro:type:`%s`", type_name(Tag()))); + d->Add(fmt(":zeek:type:`%s`", type_name(Tag()))); } void BroType::SetError() @@ -203,124 +205,6 @@ unsigned int BroType::MemoryAllocation() const return padded_sizeof(*this); } -bool BroType::Serialize(SerialInfo* info) const - { - // We always send full types (see below). - if ( ! SERIALIZE(true) ) - return false; - - bool ret = SerialObj::Serialize(info); - return ret; - } - -BroType* BroType::Unserialize(UnserialInfo* info, bool use_existing) - { - // To avoid external Broccoli clients needing to always send full type - // objects, we allow them to give us only the name of a type. To - // differentiate between the two cases, we exchange a flag first. - bool full_type = true;; - if ( ! UNSERIALIZE(&full_type) ) - return 0; - - if ( ! full_type ) - { - const char* name; - if ( ! UNSERIALIZE_STR(&name, 0) ) - return 0; - - ID* id = global_scope()->Lookup(name); - if ( ! id ) - { - info->s->Error(fmt("unknown type %s", name)); - return 0; - } - - BroType* t = id->AsType(); - if ( ! t ) - { - info->s->Error(fmt("%s is not a type", name)); - return 0; - } - - return t->Ref(); - } - - BroType* t = (BroType*) SerialObj::Unserialize(info, SER_BRO_TYPE); - - if ( ! t || ! use_existing ) - return t; - - if ( ! t->name.empty() ) - { - // Avoid creating a new type if it's known by name. - // Also avoids loss of base type name alias (from condition below). - ID* id = global_scope()->Lookup(t->name.c_str()); - BroType* t2 = id ? id->AsType() : 0; - - if ( t2 ) - { - Unref(t); - return t2->Ref(); - } - } - - if ( t->base_type ) - { - BroType* t2 = ::base_type(TypeTag(t->tag)); - Unref(t); - assert(t2); - return t2; - } - - assert(t); - return t; - } - -IMPLEMENT_SERIAL(BroType, SER_BRO_TYPE) - -bool BroType::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_BRO_TYPE, BroObj); - - info->s->WriteOpenTag("Type"); - - if ( ! (SERIALIZE(char(tag)) && SERIALIZE(char(internal_tag))) ) - return false; - - if ( ! 
(SERIALIZE(is_network_order) && SERIALIZE(base_type)) ) - return false; - - SERIALIZE_STR(name.c_str(), name.size()); - - info->s->WriteCloseTag("Type"); - - return true; - } - -bool BroType::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BroObj); - - char c1, c2; - if ( ! (UNSERIALIZE(&c1) && UNSERIALIZE(&c2) ) ) - return 0; - - tag = (TypeTag) c1; - internal_tag = (InternalTypeTag) c2; - - if ( ! (UNSERIALIZE(&is_network_order) && UNSERIALIZE(&base_type)) ) - return 0; - - const char* n; - if ( ! UNSERIALIZE_STR(&n, 0) ) - return false; - - name = n; - delete [] n; - - return true; - } - TypeList::~TypeList() { loop_over_list(types, i) @@ -383,47 +267,6 @@ void TypeList::Describe(ODesc* d) const } } -IMPLEMENT_SERIAL(TypeList, SER_TYPE_LIST); - -bool TypeList::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_TYPE_LIST, BroType); - - SERIALIZE_OPTIONAL(pure_type); - - if ( ! SERIALIZE(types.length()) ) - return false; - - loop_over_list(types, j) - { - if ( ! types[j]->Serialize(info) ) - return false; - } - - return true; - } - -bool TypeList::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BroType); - - UNSERIALIZE_OPTIONAL(pure_type, BroType::Unserialize(info)); - - int len; - if ( ! UNSERIALIZE(&len) ) - return false; - - while ( len-- ) - { - BroType* t = BroType::Unserialize(info); - if ( ! t ) - return false; - - types.append(t); - } - return true; - } - IndexType::~IndexType() { Unref(indices); @@ -478,7 +321,7 @@ void IndexType::Describe(ODesc* d) const void IndexType::DescribeReST(ODesc* d, bool roles_only) const { - d->Add(":bro:type:`"); + d->Add(":zeek:type:`"); if ( IsSet() ) d->Add("set"); @@ -497,7 +340,7 @@ void IndexType::DescribeReST(ODesc* d, bool roles_only) const if ( ! t->GetName().empty() ) { - d->Add(":bro:type:`"); + d->Add(":zeek:type:`"); d->Add(t->GetName()); d->Add("`"); } @@ -513,7 +356,7 @@ void IndexType::DescribeReST(ODesc* d, bool roles_only) const if ( ! yield_type->GetName().empty() ) { - d->Add(":bro:type:`"); + d->Add(":zeek:type:`"); d->Add(yield_type->GetName()); d->Add("`"); } @@ -530,25 +373,6 @@ bool IndexType::IsSubNetIndex() const return false; } -IMPLEMENT_SERIAL(IndexType, SER_INDEX_TYPE); - -bool IndexType::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_INDEX_TYPE, BroType); - - SERIALIZE_OPTIONAL(yield_type); - return indices->Serialize(info); - } - -bool IndexType::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BroType); - - UNSERIALIZE_OPTIONAL(yield_type, BroType::Unserialize(info)); - indices = (TypeList*) BroType::Unserialize(info); - return indices != 0; - } - TableType::TableType(TypeList* ind, BroType* yield) : IndexType(TYPE_TABLE, ind, yield) { @@ -577,6 +401,16 @@ TableType::TableType(TypeList* ind, BroType* yield) } } +TableType* TableType::ShallowClone() + { + if ( indices ) + indices->Ref(); + if ( yield_type ) + yield_type->Ref(); + + return new TableType(indices, yield_type); + } + bool TableType::IsUnspecifiedTable() const { // Unspecified types have an empty list of indices. 
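// (Hedged illustration, not part of the patch.)  ShallowClone() deliberately
// re-uses component types rather than copying them -- TableType above only
// Ref()'s its indices and yield -- so a clone and its original share those
// sub-types.  Assuming the usual TypeList::Append() helper:
static void shallow_clone_shares_subtypes()
	{
	TypeList* idx = new TypeList();
	idx->Append(base_type(TYPE_COUNT));

	TableType* tt = new TableType(idx, base_type(TYPE_STRING));
	TableType* copy = tt->ShallowClone();

	assert(copy != tt);				// a distinct TableType object...
	assert(copy->Indices() == tt->Indices());	// ...sharing the same index list
	assert(copy->YieldType() == tt->YieldType());	// ...and the same yield type
	}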
@@ -650,18 +484,17 @@ SetType::SetType(TypeList* ind, ListExpr* arg_elements) : TableType(ind, 0) } } -IMPLEMENT_SERIAL(TableType, SER_TABLE_TYPE); - -bool TableType::DoSerialize(SerialInfo* info) const +SetType* SetType::ShallowClone() { - DO_SERIALIZE(SER_TABLE_TYPE, IndexType); - return true; - } + // constructor only consumes indices when elements + // is set + if ( elements && indices ) + { + elements->Ref(); + indices->Ref(); + } -bool TableType::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(IndexType); - return true; + return new SetType(indices, elements); } SetType::~SetType() @@ -669,24 +502,6 @@ SetType::~SetType() Unref(elements); } -IMPLEMENT_SERIAL(SetType, SER_SET_TYPE); - -bool SetType::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_SET_TYPE, TableType); - - SERIALIZE_OPTIONAL(elements); - return true; - } - -bool SetType::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(TableType); - - UNSERIALIZE_OPTIONAL(elements, (ListExpr*) Expr::Unserialize(info, EXPR_LIST)); - return true; - } - FuncType::FuncType(RecordType* arg_args, BroType* arg_yield, function_flavor arg_flavor) : BroType(TYPE_FUNC) { @@ -716,6 +531,16 @@ FuncType::FuncType(RecordType* arg_args, BroType* arg_yield, function_flavor arg } } +FuncType* FuncType::ShallowClone() + { + auto f = new FuncType(); + f->args = args->Ref()->AsRecordType(); + f->arg_types = arg_types->Ref()->AsTypeList(); + f->yield = yield->Ref(); + f->flavor = flavor; + return f; + } + string FuncType::FlavorString() const { switch ( flavor ) { @@ -800,7 +625,7 @@ void FuncType::Describe(ODesc* d) const void FuncType::DescribeReST(ODesc* d, bool roles_only) const { - d->Add(":bro:type:`"); + d->Add(":zeek:type:`"); d->Add(FlavorString()); d->Add("`"); d->Add(" ("); @@ -813,7 +638,7 @@ void FuncType::DescribeReST(ODesc* d, bool roles_only) const if ( ! yield->GetName().empty() ) { - d->Add(":bro:type:`"); + d->Add(":zeek:type:`"); d->Add(yield->GetName()); d->Add("`"); } @@ -822,80 +647,6 @@ void FuncType::DescribeReST(ODesc* d, bool roles_only) const } } -IMPLEMENT_SERIAL(FuncType, SER_FUNC_TYPE); - -bool FuncType::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_FUNC_TYPE, BroType); - - assert(args); - assert(arg_types); - - SERIALIZE_OPTIONAL(yield); - - int ser_flavor = 0; - - switch ( flavor ) { - - case FUNC_FLAVOR_FUNCTION: - ser_flavor = 0; - break; - - case FUNC_FLAVOR_EVENT: - ser_flavor = 1; - break; - - case FUNC_FLAVOR_HOOK: - ser_flavor = 2; - break; - - default: - reporter->InternalError("Invalid function flavor serialization"); - break; - } - - return args->Serialize(info) && - arg_types->Serialize(info) && - SERIALIZE(ser_flavor); - } - -bool FuncType::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BroType); - - UNSERIALIZE_OPTIONAL(yield, BroType::Unserialize(info)); - - args = (RecordType*) BroType::Unserialize(info); - if ( ! args ) - return false; - - arg_types = (TypeList*) BroType::Unserialize(info); - if ( ! arg_types ) - return false; - - int ser_flavor = 0; - - if ( ! 
UNSERIALIZE(&ser_flavor) ) - return false; - - switch ( ser_flavor ) { - case 0: - flavor = FUNC_FLAVOR_FUNCTION; - break; - case 1: - flavor = FUNC_FLAVOR_EVENT; - break; - case 2: - flavor = FUNC_FLAVOR_HOOK; - break; - default: - reporter->InternalError("Invalid function flavor unserialization"); - break; - } - - return true; - } - TypeDecl::TypeDecl(BroType* t, const char* i, attr_list* arg_attrs, bool in_record) { type = t; @@ -921,35 +672,6 @@ TypeDecl::~TypeDecl() delete [] id; } -bool TypeDecl::Serialize(SerialInfo* info) const - { - assert(type); - assert(id); - - SERIALIZE_OPTIONAL(attrs); - - if ( ! (type->Serialize(info) && SERIALIZE(id)) ) - return false; - - return true; - } - -TypeDecl* TypeDecl::Unserialize(UnserialInfo* info) - { - TypeDecl* t = new TypeDecl(0, 0, 0); - - UNSERIALIZE_OPTIONAL_STATIC(t->attrs, Attributes::Unserialize(info), t); - t->type = BroType::Unserialize(info); - - if ( ! (t->type && UNSERIALIZE_STR(&t->id, 0)) ) - { - delete t; - return 0; - } - - return t; - } - void TypeDecl::DescribeReST(ODesc* d, bool roles_only) const { d->Add(id); @@ -957,7 +679,7 @@ void TypeDecl::DescribeReST(ODesc* d, bool roles_only) const if ( ! type->GetName().empty() ) { - d->Add(":bro:type:`"); + d->Add(":zeek:type:`"); d->Add(type->GetName()); d->Add("`"); } @@ -977,6 +699,16 @@ RecordType::RecordType(type_decl_list* arg_types) : BroType(TYPE_RECORD) num_fields = types ? types->length() : 0; } +// in this case the clone is actually not so shallow, since +// it gets modified by everyone. +RecordType* RecordType::ShallowClone() + { + auto pass = new type_decl_list(); + loop_over_list(*types, i) + pass->append(new TypeDecl(*(*types)[i])); + return new RecordType(pass); + } + RecordType::~RecordType() { if ( types ) @@ -1073,7 +805,7 @@ void RecordType::Describe(ODesc* d) const void RecordType::DescribeReST(ODesc* d, bool roles_only) const { d->PushType(this); - d->Add(":bro:type:`record`"); + d->Add(":zeek:type:`record`"); if ( num_fields == 0 ) return; @@ -1197,8 +929,8 @@ void RecordType::DescribeFieldsReST(ODesc* d, bool func_args) const if ( func_args ) continue; - using broxygen::IdentifierInfo; - IdentifierInfo* doc = broxygen_mgr->GetIdentifierInfo(GetName()); + using zeekygen::IdentifierInfo; + IdentifierInfo* doc = zeekygen_mgr->GetIdentifierInfo(GetName()); if ( ! doc ) { @@ -1217,7 +949,7 @@ void RecordType::DescribeFieldsReST(ODesc* d, bool func_args) const field_from_script != type_from_script ) { d->PushIndent(); - d->Add(broxygen::redef_indication(field_from_script).c_str()); + d->Add(zeekygen::redef_indication(field_from_script).c_str()); d->PopIndent(); } @@ -1237,7 +969,7 @@ void RecordType::DescribeFieldsReST(ODesc* d, bool func_args) const { string s = cmnts[i]; - if ( broxygen::prettify_params(s) ) + if ( zeekygen::prettify_params(s) ) d->NL(); d->Add(s.c_str()); @@ -1253,65 +985,31 @@ void RecordType::DescribeFieldsReST(ODesc* d, bool func_args) const d->PopIndentNoNL(); } -IMPLEMENT_SERIAL(RecordType, SER_RECORD_TYPE) - -bool RecordType::DoSerialize(SerialInfo* info) const +string RecordType::GetFieldDeprecationWarning(int field, bool has_check) const { - DO_SERIALIZE(SER_RECORD_TYPE, BroType); - - if ( ! SERIALIZE(num_fields) ) - return false; - - if ( types ) + const TypeDecl* decl = FieldDecl(field); + if ( decl) { - if ( ! (SERIALIZE(true) && SERIALIZE(types->length())) ) - return false; - - loop_over_list(*types, i) + string result; + if ( const Attr* deprecation = decl->FindAttr(ATTR_DEPRECATED) ) { - if ( ! 
(*types)[i]->Serialize(info) ) - return false; + ConstExpr* expr = static_cast(deprecation->AttrExpr()); + if ( expr ) + { + StringVal* text = expr->Value()->AsStringVal(); + result = text->CheckString(); + } } + + if ( result.empty() ) + return fmt("deprecated (%s%s$%s)", GetName().c_str(), has_check ? "?" : "", + FieldName(field)); + else + return fmt("deprecated (%s%s$%s): %s", GetName().c_str(), has_check ? "?" : "", + FieldName(field), result.c_str()); } - else if ( ! SERIALIZE(false) ) - return false; - - return true; - } - -bool RecordType::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BroType); - - if ( ! UNSERIALIZE(&num_fields) ) - return false; - - bool has_it; - if ( ! UNSERIALIZE(&has_it) ) - return false; - - if ( has_it ) - { - int len; - if ( ! UNSERIALIZE(&len) ) - return false; - - types = new type_decl_list(len); - - while ( len-- ) - { - TypeDecl* t = TypeDecl::Unserialize(info); - if ( ! t ) - return false; - - types->append(t); - } - } - else - types = 0; - - return true; + return ""; } SubNetType::SubNetType() : BroType(TYPE_SUBNET) @@ -1326,20 +1024,6 @@ void SubNetType::Describe(ODesc* d) const d->Add(int(Tag())); } -IMPLEMENT_SERIAL(SubNetType, SER_SUBNET_TYPE); - -bool SubNetType::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_SUBNET_TYPE, BroType); - return true; - } - -bool SubNetType::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BroType); - return true; - } - FileType::FileType(BroType* yield_type) : BroType(TYPE_FILE) { @@ -1370,24 +1054,6 @@ void FileType::Describe(ODesc* d) const } } -IMPLEMENT_SERIAL(FileType, SER_FILE_TYPE); - -bool FileType::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_FILE_TYPE, BroType); - - assert(yield); - return yield->Serialize(info); - } - -bool FileType::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BroType); - - yield = BroType::Unserialize(info); - return yield != 0; - } - OpaqueType::OpaqueType(const string& arg_name) : BroType(TYPE_OPAQUE) { name = arg_name; @@ -1405,29 +1071,7 @@ void OpaqueType::Describe(ODesc* d) const void OpaqueType::DescribeReST(ODesc* d, bool roles_only) const { - d->Add(fmt(":bro:type:`%s` of %s", type_name(Tag()), name.c_str())); - } - -IMPLEMENT_SERIAL(OpaqueType, SER_OPAQUE_TYPE); - -bool OpaqueType::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_OPAQUE_TYPE, BroType); - return SERIALIZE_STR(name.c_str(), name.size()); - } - -bool OpaqueType::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BroType); - - const char* n; - if ( ! UNSERIALIZE_STR(&n, 0) ) - return false; - - name = n; - delete [] n; - - return true; + d->Add(fmt(":zeek:type:`%s` of %s", type_name(Tag()), name.c_str())); } EnumType::EnumType(const string& name) @@ -1437,18 +1081,26 @@ EnumType::EnumType(const string& name) SetName(name); } -EnumType::EnumType(EnumType* e) +EnumType::EnumType(const EnumType* e) : BroType(TYPE_ENUM) { counter = e->counter; SetName(e->GetName()); - for ( NameMap::iterator it = e->names.begin(); it != e->names.end(); ++it ) + for ( auto it = e->names.begin(); it != e->names.end(); ++it ) names[it->first] = it->second; vals = e->vals; } +EnumType* EnumType::ShallowClone() + { + if ( counter == 0 ) + return new EnumType(GetName()); + + return new EnumType(this); + } + EnumType::~EnumType() { for ( auto& kv : vals ) @@ -1458,7 +1110,7 @@ EnumType::~EnumType() // Note, we use reporter->Error() here (not Error()) to include the current script // location in the error message, rather than the one where the type was // originally defined. 
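// (Hedged sketch, not part of the patch.)  Caller-side view of the two record
// helpers added above: has_check picks between the "Rec$field" and
// "Rec?$field" spellings, and the &deprecated attribute's message string, if
// any, is appended after a colon.  A printf-style reporter->Warning() is
// assumed here.
static void warn_if_field_deprecated(const RecordType* rt, int field, bool has_check)
	{
	if ( ! rt->IsFieldDeprecated(field) )
		return;

	string msg = rt->GetFieldDeprecationWarning(field, has_check);
	reporter->Warning("%s", msg.c_str());
	}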
-void EnumType::AddName(const string& module_name, const char* name, bool is_export, bool deprecated) +void EnumType::AddName(const string& module_name, const char* name, bool is_export, Expr* deprecation) { /* implicit, auto-increment */ if ( counter < 0) @@ -1467,11 +1119,11 @@ void EnumType::AddName(const string& module_name, const char* name, bool is_expo SetError(); return; } - CheckAndAddName(module_name, name, counter, is_export, deprecated); + CheckAndAddName(module_name, name, counter, is_export, deprecation); counter++; } -void EnumType::AddName(const string& module_name, const char* name, bro_int_t val, bool is_export, bool deprecated) +void EnumType::AddName(const string& module_name, const char* name, bro_int_t val, bool is_export, Expr* deprecation) { /* explicit value specified */ if ( counter > 0 ) @@ -1481,11 +1133,11 @@ void EnumType::AddName(const string& module_name, const char* name, bro_int_t va return; } counter = -1; - CheckAndAddName(module_name, name, val, is_export, deprecated); + CheckAndAddName(module_name, name, val, is_export, deprecation); } void EnumType::CheckAndAddName(const string& module_name, const char* name, - bro_int_t val, bool is_export, bool deprecated) + bro_int_t val, bool is_export, Expr* deprecation) { if ( Lookup(val) ) { @@ -1502,15 +1154,15 @@ void EnumType::CheckAndAddName(const string& module_name, const char* name, id->SetType(this->Ref()); id->SetEnumConst(); - if ( deprecated ) - id->MakeDeprecated(); + if ( deprecation ) + id->MakeDeprecated(deprecation); - broxygen_mgr->Identifier(id); + zeekygen_mgr->Identifier(id); } else { // We allow double-definitions if matching exactly. This is so that - // we can define an enum both in a *.bif and *.bro for avoiding + // we can define an enum both in a *.bif and *.zeek for avoiding // cyclic dependencies. string fullname = make_full_var_name(module_name.c_str(), name); if ( id->Name() != fullname @@ -1597,7 +1249,7 @@ EnumVal* EnumType::GetVal(bro_int_t i) void EnumType::DescribeReST(ODesc* d, bool roles_only) const { - d->Add(":bro:type:`enum`"); + d->Add(":zeek:type:`enum`"); // Create temporary, reverse name map so that enums can be documented // in ascending order of their actual integral value instead of by name. @@ -1614,12 +1266,12 @@ void EnumType::DescribeReST(ODesc* d, bool roles_only) const d->PushIndent(); if ( roles_only ) - d->Add(fmt(":bro:enum:`%s`", it->second.c_str())); + d->Add(fmt(":zeek:enum:`%s`", it->second.c_str())); else - d->Add(fmt(".. bro:enum:: %s %s", it->second.c_str(), GetName().c_str())); + d->Add(fmt(".. zeek:enum:: %s %s", it->second.c_str(), GetName().c_str())); - using broxygen::IdentifierInfo; - IdentifierInfo* doc = broxygen_mgr->GetIdentifierInfo(it->second); + using zeekygen::IdentifierInfo; + IdentifierInfo* doc = zeekygen_mgr->GetIdentifierInfo(it->second); if ( ! 
doc ) { @@ -1634,7 +1286,7 @@ void EnumType::DescribeReST(ODesc* d, bool roles_only) const if ( doc->GetDeclaringScript() ) enum_from_script = doc->GetDeclaringScript()->Name(); - IdentifierInfo* type_doc = broxygen_mgr->GetIdentifierInfo(GetName()); + IdentifierInfo* type_doc = zeekygen_mgr->GetIdentifierInfo(GetName()); if ( type_doc && type_doc->GetDeclaringScript() ) type_from_script = type_doc->GetDeclaringScript()->Name(); @@ -1644,7 +1296,7 @@ void EnumType::DescribeReST(ODesc* d, bool roles_only) const { d->NL(); d->PushIndent(); - d->Add(broxygen::redef_indication(enum_from_script).c_str()); + d->Add(zeekygen::redef_indication(enum_from_script).c_str()); d->PopIndent(); } @@ -1672,64 +1324,16 @@ void EnumType::DescribeReST(ODesc* d, bool roles_only) const } } -IMPLEMENT_SERIAL(EnumType, SER_ENUM_TYPE); - -bool EnumType::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_ENUM_TYPE, BroType); - - if ( ! (SERIALIZE(counter) && SERIALIZE((unsigned int) names.size()) && - // Dummy boolean for backwards compatibility. - SERIALIZE(false)) ) - return false; - - for ( NameMap::const_iterator iter = names.begin(); - iter != names.end(); ++iter ) - { - if ( ! SERIALIZE(iter->first) || ! SERIALIZE(iter->second) ) - return false; - } - - return true; - } - -bool EnumType::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BroType); - - unsigned int len; - bool dummy; - if ( ! UNSERIALIZE(&counter) || - ! UNSERIALIZE(&len) || - // Dummy boolean for backwards compatibility. - ! UNSERIALIZE(&dummy) ) - return false; - - while ( len-- ) - { - const char* name; - bro_int_t val; - if ( ! (UNSERIALIZE_STR(&name, 0) && UNSERIALIZE(&val)) ) - return false; - - names[name] = val; - delete [] name; // names[name] converts to std::string - // note: the 'vals' map gets populated lazily, which works fine and - // also happens to avoid a leak due to circular reference between the - // types and vals (there's a special case for unserializing a known - // type that will unserialze and then immediately want to unref the - // type if we already have it, except that won't delete it as intended - // if we've already created circular references to it here). - } - - return true; - } - VectorType::VectorType(BroType* element_type) : BroType(TYPE_VECTOR), yield_type(element_type) { } +VectorType* VectorType::ShallowClone() + { + return new VectorType(yield_type); + } + VectorType::~VectorType() { Unref(yield_type); @@ -1773,10 +1377,12 @@ int VectorType::MatchesIndex(ListExpr*& index) const { expr_list& el = index->Exprs(); - if ( el.length() != 1 ) + if ( el.length() != 1 && el.length() != 2) return DOES_NOT_MATCH_INDEX; - if ( el[0]->Type()->Tag() == TYPE_VECTOR ) + if ( el.length() == 2 ) + return MATCHES_INDEX_VECTOR; + else if ( el[0]->Type()->Tag() == TYPE_VECTOR ) return (IsIntegral(el[0]->Type()->YieldType()->Tag()) || IsBool(el[0]->Type()->YieldType()->Tag())) ? 
MATCHES_INDEX_VECTOR : DOES_NOT_MATCH_INDEX; @@ -1791,21 +1397,6 @@ bool VectorType::IsUnspecifiedVector() const return yield_type->Tag() == TYPE_VOID; } -IMPLEMENT_SERIAL(VectorType, SER_VECTOR_TYPE); - -bool VectorType::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_VECTOR_TYPE, BroType); - return yield_type->Serialize(info); - } - -bool VectorType::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BroType); - yield_type = BroType::Unserialize(info); - return yield_type != 0; - } - void VectorType::Describe(ODesc* d) const { if ( d->IsReadable() ) @@ -1818,12 +1409,12 @@ void VectorType::Describe(ODesc* d) const void VectorType::DescribeReST(ODesc* d, bool roles_only) const { - d->Add(fmt(":bro:type:`%s` of ", type_name(Tag()))); + d->Add(fmt(":zeek:type:`%s` of ", type_name(Tag()))); if ( yield_type->GetName().empty() ) yield_type->DescribeReST(d, roles_only); else - d->Add(fmt(":bro:type:`%s`", yield_type->GetName().c_str())); + d->Add(fmt(":zeek:type:`%s`", yield_type->GetName().c_str())); } BroType* base_type_no_ref(TypeTag tag) @@ -2123,6 +1714,10 @@ int is_assignable(BroType* t) return 0; } +#define CHECK_TYPE(t) \ + if ( t1 == t || t2 == t ) \ + return t; + TypeTag max_type(TypeTag t1, TypeTag t2) { if ( t1 == TYPE_INTERVAL || t1 == TYPE_TIME ) @@ -2132,10 +1727,6 @@ TypeTag max_type(TypeTag t1, TypeTag t2) if ( BothArithmetic(t1, t2) ) { -#define CHECK_TYPE(t) \ - if ( t1 == t || t2 == t ) \ - return t; - CHECK_TYPE(TYPE_DOUBLE); CHECK_TYPE(TYPE_INT); CHECK_TYPE(TYPE_COUNT); @@ -2266,7 +1857,7 @@ BroType* merge_types(const BroType* t1, const BroType* t2) if ( rt1->NumFields() != rt2->NumFields() ) return 0; - type_decl_list* tdl3 = new type_decl_list; + type_decl_list* tdl3 = new type_decl_list(rt1->NumFields()); for ( int i = 0; i < rt1->NumFields(); ++i ) { diff --git a/src/Type.h b/src/Type.h index bc13997461..19fad4b2ce 100644 --- a/src/Type.h +++ b/src/Type.h @@ -72,7 +72,6 @@ class SubNetType; class FuncType; class ListExpr; class EnumType; -class Serializer; class VectorType; class TypeType; class OpaqueType; @@ -87,7 +86,15 @@ public: explicit BroType(TypeTag tag, bool base_type = false); ~BroType() override { } - BroType* Clone() const; + // Performs a shallow clone operation of the Bro type. + // This especially means that especially for tables the types + // are not recursively cloned; altering one type will in this case + // alter one of them. + // The main use for this is alias tracking. + // Clone operations will mostly be implemented in the derived classes; + // in addition cloning will be limited to classes that can be reached by + // the script-level. + virtual BroType* ShallowClone(); TypeTag Tag() const { return tag; } InternalTypeTag InternalType() const { return internal_tag; } @@ -108,7 +115,7 @@ public: // this type is a table[string] of port, then returns the "port" // type. Returns nil if this is not an index type. 
virtual BroType* YieldType(); - const BroType* YieldType() const + virtual const BroType* YieldType() const { return ((BroType*) this)->YieldType(); } // Returns true if this type is a record and contains the @@ -256,9 +263,6 @@ public: virtual unsigned MemoryAllocation() const; - bool Serialize(SerialInfo* info) const; - static BroType* Unserialize(UnserialInfo* info, bool use_existing = true); - void SetName(const string& arg_name) { name = arg_name; } string GetName() const { return name; } @@ -275,8 +279,6 @@ protected: void SetError(); - DECLARE_SERIAL(BroType) - private: TypeTag tag; InternalTypeTag internal_tag; @@ -325,8 +327,6 @@ public: } protected: - DECLARE_SERIAL(TypeList) - BroType* pure_type; type_list types; }; @@ -338,7 +338,7 @@ public: TypeList* Indices() const { return indices; } const type_list* IndexTypes() const { return indices->Types(); } BroType* YieldType() override; - const BroType* YieldType() const; + const BroType* YieldType() const override; void Describe(ODesc* d) const override; void DescribeReST(ODesc* d, bool roles_only = false) const override; @@ -356,8 +356,6 @@ protected: } ~IndexType() override; - DECLARE_SERIAL(IndexType) - TypeList* indices; BroType* yield_type; }; @@ -366,6 +364,8 @@ class TableType : public IndexType { public: TableType(TypeList* ind, BroType* yield); + TableType* ShallowClone() override; + // Returns true if this table type is "unspecified", which is // what one gets using an empty "set()" or "table()" constructor. bool IsUnspecifiedTable() const; @@ -374,8 +374,6 @@ protected: TableType() {} TypeList* ExpandRecordIndex(RecordType* rt) const; - - DECLARE_SERIAL(TableType) }; class SetType : public TableType { @@ -383,25 +381,26 @@ public: SetType(TypeList* ind, ListExpr* arg_elements); ~SetType() override; + SetType* ShallowClone() override; + ListExpr* SetElements() const { return elements; } protected: SetType() {} ListExpr* elements; - - DECLARE_SERIAL(SetType) }; class FuncType : public BroType { public: FuncType(RecordType* args, BroType* yield, function_flavor f); + FuncType* ShallowClone() override; ~FuncType() override; RecordType* Args() const { return args; } BroType* YieldType() override; - const BroType* YieldType() const; + const BroType* YieldType() const override; void SetYieldType(BroType* arg_yield) { yield = arg_yield; } function_flavor Flavor() const { return flavor; } string FlavorString() const; @@ -419,9 +418,7 @@ public: void DescribeReST(ODesc* d, bool roles_only = false) const override; protected: - FuncType() { args = 0; arg_types = 0; yield = 0; flavor = FUNC_FLAVOR_FUNCTION; } - DECLARE_SERIAL(FuncType) - + FuncType() : BroType(TYPE_FUNC) { args = 0; arg_types = 0; yield = 0; flavor = FUNC_FLAVOR_FUNCTION; } RecordType* args; TypeList* arg_types; BroType* yield; @@ -431,6 +428,7 @@ protected: class TypeType : public BroType { public: explicit TypeType(BroType* t) : BroType(TYPE_TYPE) { type = t->Ref(); } + TypeType* ShallowClone() override { return new TypeType(type); } ~TypeType() override { Unref(type); } BroType* Type() { return type; } @@ -450,9 +448,6 @@ public: const Attr* FindAttr(attr_tag a) const { return attrs ? 
attrs->FindAttr(a) : 0; } - bool Serialize(SerialInfo* info) const; - static TypeDecl* Unserialize(UnserialInfo* info); - virtual void DescribeReST(ODesc* d, bool roles_only = false) const; BroType* type; @@ -460,9 +455,13 @@ public: const char* id; }; +declare(PList,TypeDecl); +typedef PList(TypeDecl) type_decl_list; + class RecordType : public BroType { public: explicit RecordType(type_decl_list* types); + RecordType* ShallowClone() override; ~RecordType() override; @@ -495,11 +494,17 @@ public: void DescribeFields(ODesc* d) const; void DescribeFieldsReST(ODesc* d, bool func_args) const; + bool IsFieldDeprecated(int field) const + { + const TypeDecl* decl = FieldDecl(field); + return decl && decl->FindAttr(ATTR_DEPRECATED) != 0; + } + + string GetFieldDeprecationWarning(int field, bool has_check) const; + protected: RecordType() { types = 0; } - DECLARE_SERIAL(RecordType) - int num_fields; type_decl_list* types; }; @@ -508,13 +513,12 @@ class SubNetType : public BroType { public: SubNetType(); void Describe(ODesc* d) const override; -protected: - DECLARE_SERIAL(SubNetType) }; class FileType : public BroType { public: explicit FileType(BroType* yield_type); + FileType* ShallowClone() override { return new FileType(yield->Ref()); } ~FileType() override; BroType* YieldType() override; @@ -524,14 +528,13 @@ public: protected: FileType() { yield = 0; } - DECLARE_SERIAL(FileType) - BroType* yield; }; class OpaqueType : public BroType { public: explicit OpaqueType(const string& name); + OpaqueType* ShallowClone() override { return new OpaqueType(name); } ~OpaqueType() override { }; const string& Name() const { return name; } @@ -542,8 +545,6 @@ public: protected: OpaqueType() { } - DECLARE_SERIAL(OpaqueType) - string name; }; @@ -551,18 +552,19 @@ class EnumType : public BroType { public: typedef std::list > enum_name_list; - explicit EnumType(EnumType* e); + explicit EnumType(const EnumType* e); explicit EnumType(const string& arg_name); + EnumType* ShallowClone() override; ~EnumType() override; // The value of this name is next internal counter value, starting // with zero. The internal counter is incremented. - void AddName(const string& module_name, const char* name, bool is_export, bool deprecated); + void AddName(const string& module_name, const char* name, bool is_export, Expr* deprecation = nullptr); // The value of this name is set to val. Once a value has been // explicitly assigned using this method, no further names can be // added that aren't likewise explicitly initalized. - void AddName(const string& module_name, const char* name, bro_int_t val, bool is_export, bool deprecated); + void AddName(const string& module_name, const char* name, bro_int_t val, bool is_export, Expr* deprecation = nullptr); // -1 indicates not found. 
bro_int_t Lookup(const string& module_name, const char* name) const; @@ -579,14 +581,12 @@ public: protected: EnumType() { counter = 0; } - DECLARE_SERIAL(EnumType) - void AddNameInternal(const string& module_name, const char* name, bro_int_t val, bool is_export); void CheckAndAddName(const string& module_name, const char* name, bro_int_t val, bool is_export, - bool deprecated); + Expr* deprecation = nullptr); typedef std::map NameMap; NameMap names; @@ -606,9 +606,10 @@ protected: class VectorType : public BroType { public: explicit VectorType(BroType* t); + VectorType* ShallowClone() override; ~VectorType() override; BroType* YieldType() override; - const BroType* YieldType() const; + const BroType* YieldType() const override; int MatchesIndex(ListExpr*& index) const override; @@ -622,8 +623,6 @@ public: protected: VectorType() { yield_type = 0; } - DECLARE_SERIAL(VectorType) - BroType* yield_type; }; @@ -636,6 +635,7 @@ extern OpaqueType* topk_type; extern OpaqueType* bloomfilter_type; extern OpaqueType* x509_opaque_type; extern OpaqueType* ocsp_resp_opaque_type; +extern OpaqueType* paraglob_type; // Returns the Bro basic (non-parameterized) type with the given type. // The reference count of the type is not increased. @@ -704,10 +704,6 @@ bool is_atomic_type(const BroType* t); // True if the given type tag corresponds to a function type. #define IsFunc(t) (t == TYPE_FUNC) -// True if the given type tag corresponds to mutable type. -#define IsMutable(t) \ - (t == TYPE_RECORD || t == TYPE_TABLE || t == TYPE_VECTOR) - // True if the given type type is a vector. #define IsVector(t) (t == TYPE_VECTOR) diff --git a/src/Val.cc b/src/Val.cc index b55a9090d3..017516acd8 100644 --- a/src/Val.cc +++ b/src/Val.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include #include @@ -20,8 +20,6 @@ #include "Scope.h" #include "NetVar.h" #include "Expr.h" -#include "Serializer.h" -#include "RemoteSerializer.h" #include "PrefixTable.h" #include "Conn.h" #include "Reporter.h" @@ -72,246 +70,68 @@ Val::~Val() #endif } -Val* Val::Clone() const +Val* Val::Clone() { - SerializationFormat* form = new BinarySerializationFormat(); - form->StartWrite(); - - CloneSerializer ss(form); - SerialInfo sinfo(&ss); - sinfo.cache = false; - sinfo.include_locations = false; - - if ( ! this->Serialize(&sinfo) ) - return 0; - - char* data; - uint32 len = form->EndWrite(&data); - form->StartRead(data, len); - - UnserialInfo uinfo(&ss); - uinfo.cache = false; - Val* clone = Unserialize(&uinfo, type); - - free(data); - return clone; - } - -bool Val::Serialize(SerialInfo* info) const - { - return SerialObj::Serialize(info); - } - -Val* Val::Unserialize(UnserialInfo* info, TypeTag type, const BroType* exact_type) - { - Val* v = (Val*) SerialObj::Unserialize(info, SER_VAL); - if ( ! v ) - return 0; - - if ( type != TYPE_ANY && (v->Type()->Tag() != type - || (exact_type && ! same_type(exact_type, v->Type()))) ) - { - info->s->Error("type mismatch for value"); - Unref(v); - return 0; - } - - // For MutableVals, we may get a value which, by considering the - // globally unique ID, we already know. To keep references correct, - // we have to bind to the local version. (FIXME: This is not the - // nicest solution. Ideally, DoUnserialize() should be able to pass - // us an alternative ptr to the correct object.) 
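// (Hedged aside, not part of the patch.)  The rebinding logic removed here was
// only needed because values used to round-trip through the binary serializer;
// with MutableVal and its unique IDs gone, deep copies go through the
// Clone(CloneState*) / DoClone() pair added just below.  Caller-side that is
// simply:
static Val* deep_copy(Val* v)
	{
	// Clone() builds its own CloneState; the state->clones cache shown below
	// preserves aliasing, so a value reached twice during the walk is cloned
	// once and handed back Ref()'d the second time.
	return v->Clone();
	}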
- if ( v->IsMutableVal() ) - { - MutableVal* mv = v->AsMutableVal(); - if ( mv->HasUniqueID() ) - { - ID* current = - global_scope()->Lookup(mv->UniqueID()->Name()); - - if ( current && current != mv->UniqueID() ) - { - DBG_LOG(DBG_STATE, "binding to already existing ID %s\n", current->Name()); - assert(current->ID_Val()); - - // Need to unset the ID here. Otherwise, - // when the SerializationCache destroys - // the value, the global name will disappear. - mv->SetID(0); - Unref(v); - return current->ID_Val()->Ref(); - } - } - } - - // An enum may be bound to a different internal number remotely than we - // do for the same identifier. Check if this is the case, and, if yes, - // rebind to our value. - if ( v->Type()->Tag() == TYPE_ENUM ) - { - int rv = v->AsEnum(); - EnumType* rt = v->Type()->AsEnumType(); - - const char* name = rt->Lookup(rv); - if ( name ) - { - // See if we know the enum locally. - ID* local = global_scope()->Lookup(name); - if ( local && local->IsEnumConst() ) - { - EnumType* lt = local->Type()->AsEnumType(); - int lv = lt->Lookup(local->ModuleName(), - local->Name()); - - // Compare. - if ( rv != lv ) - { - // Different, so let's bind the val - // to the local type. - v->val.int_val = lv; - Unref(rt); - v->type = lt; - ::Ref(lt); - } - } - } - - } - + Val::CloneState state; + auto v = Clone(&state); return v; } -IMPLEMENT_SERIAL(Val, SER_VAL); - -bool Val::DoSerialize(SerialInfo* info) const +Val* Val::Clone(CloneState* state) { - DO_SERIALIZE(SER_VAL, BroObj); + auto i = state->clones.find(this); - if ( ! type->Serialize(info) ) - return false; + if ( i != state->clones.end() ) + return i->second->Ref(); + auto c = DoClone(state); + + if ( ! c ) + reporter->RuntimeError(GetLocationInfo(), "cannot clone value"); + + return c; + } + +Val* Val::DoClone(CloneState* state) + { switch ( type->InternalType() ) { - case TYPE_INTERNAL_VOID: - info->s->Error("type is void"); - return false; - case TYPE_INTERNAL_INT: - return SERIALIZE(val.int_val); - case TYPE_INTERNAL_UNSIGNED: - return SERIALIZE(val.uint_val); - case TYPE_INTERNAL_DOUBLE: - return SERIALIZE(val.double_val); - - case TYPE_INTERNAL_STRING: - return SERIALIZE_STR((const char*) val.string_val->Bytes(), - val.string_val->Len()); - - case TYPE_INTERNAL_ADDR: - return SERIALIZE(*val.addr_val); - - case TYPE_INTERNAL_SUBNET: - return SERIALIZE(*val.subnet_val); + // Immutable. + return Ref(); case TYPE_INTERNAL_OTHER: - // Derived classes are responsible for this. - // Exception: Functions and files. There aren't any derived - // classes. + // Derived classes are responsible for this. Exception: + // Functions and files. There aren't any derived classes. if ( type->Tag() == TYPE_FUNC ) - if ( ! AsFunc()->Serialize(info) ) - return false; + // Immutable. + return Ref(); if ( type->Tag() == TYPE_FILE ) - if ( ! AsFile()->Serialize(info) ) - return false; - return true; + { + // I think we can just ref the file here - it is unclear what else + // to do. In the case of cached files, I think this is equivalent + // to what happened before - serialization + unserialization just + // have you the same pointer that you already had. In the case of + // non-cached files, the behavior now is different; in the past, + // serialize + unserialize gave you a new file object because the + // old one was not in the list anymore. This object was + // automatically opened. This does not happen anymore - instead you + // get the non-cached pointer back which is brought back into the + // cache when written too. 
+ return Ref(); + } - case TYPE_INTERNAL_ERROR: - info->s->Error("type is error"); - return false; + // Fall-through. default: - info->s->Error("type is out of range"); - return false; + reporter->InternalError("cloning illegal base type"); } - return false; - } - -bool Val::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BroObj); - - if ( type ) - Unref(type); - - if ( ! (type = BroType::Unserialize(info)) ) - return false; - - switch ( type->InternalType() ) { - case TYPE_INTERNAL_VOID: - info->s->Error("type is void"); - return false; - - case TYPE_INTERNAL_INT: - return UNSERIALIZE(&val.int_val); - - case TYPE_INTERNAL_UNSIGNED: - return UNSERIALIZE(&val.uint_val); - - case TYPE_INTERNAL_DOUBLE: - return UNSERIALIZE(&val.double_val); - - case TYPE_INTERNAL_STRING: - const char* str; - int len; - if ( ! UNSERIALIZE_STR(&str, &len) ) - return false; - - val.string_val = new BroString((u_char*) str, len, 1); - delete [] str; - return true; - - case TYPE_INTERNAL_ADDR: - { - val.addr_val = new IPAddr(); - return UNSERIALIZE(val.addr_val); - } - - case TYPE_INTERNAL_SUBNET: - { - val.subnet_val = new IPPrefix(); - return UNSERIALIZE(val.subnet_val); - } - - case TYPE_INTERNAL_OTHER: - // Derived classes are responsible for this. - // Exception: Functions and files. There aren't any derived - // classes. - if ( type->Tag() == TYPE_FUNC ) - { - val.func_val = Func::Unserialize(info); - return val.func_val != 0; - } - else if ( type->Tag() == TYPE_FILE ) - { - val.file_val = BroFile::Unserialize(info); - return val.file_val != 0; - } - return true; - - case TYPE_INTERNAL_ERROR: - info->s->Error("type is error"); - return false; - - default: - info->s->Error("type out of range"); - return false; - } - - return false; - } + reporter->InternalError("cannot be reached"); + return nullptr; + } int Val::IsZero() const { @@ -531,181 +351,33 @@ void Val::ValDescribeReST(ODesc* d) const } } -MutableVal::~MutableVal() + +bool Val::WouldOverflow(const BroType* from_type, const BroType* to_type, const Val* val) { - for ( list::iterator i = aliases.begin(); i != aliases.end(); ++i ) + if ( !to_type || !from_type ) + return true; + else if ( same_type(to_type, from_type) ) + return false; + + if ( to_type->InternalType() == TYPE_INTERNAL_DOUBLE ) + return false; + else if ( to_type->InternalType() == TYPE_INTERNAL_UNSIGNED ) { - if ( global_scope() ) - global_scope()->Remove((*i)->Name()); - (*i)->ClearVal(); // just to make sure. - Unref((*i)); + if ( from_type->InternalType() == TYPE_INTERNAL_DOUBLE ) + return (val->InternalDouble() < 0.0 || val->InternalDouble() > static_cast(UINT64_MAX)); + else if ( from_type->InternalType() == TYPE_INTERNAL_INT ) + return (val->InternalInt() < 0); + } + else if ( to_type->InternalType() == TYPE_INTERNAL_INT ) + { + if ( from_type->InternalType() == TYPE_INTERNAL_DOUBLE ) + return (val->InternalDouble() < static_cast(INT64_MIN) || + val->InternalDouble() > static_cast(INT64_MAX)); + else if ( from_type->InternalType() == TYPE_INTERNAL_UNSIGNED ) + return (val->InternalUnsigned() > INT64_MAX); } - if ( id ) - { - if ( global_scope() ) - global_scope()->Remove(id->Name()); - id->ClearVal(); // just to make sure. - Unref(id); - } - } - -bool MutableVal::AddProperties(Properties arg_props) - { - if ( (props | arg_props) == props ) - // No change. - return false; - - props |= arg_props; - - if ( ! id ) - Bind(); - - return true; - } - - -bool MutableVal::RemoveProperties(Properties arg_props) - { - if ( (props & ~arg_props) == props ) - // No change. 
- return false; - - props &= ~arg_props; - - return true; - } - -ID* MutableVal::Bind() const - { - static bool initialized = false; - - assert(!id); - - static unsigned int id_counter = 0; - static const int MAX_NAME_SIZE = 128; - static char name[MAX_NAME_SIZE]; - static char* end_of_static_str = 0; - - if ( ! initialized ) - { - // Get local IP. - char host[MAXHOSTNAMELEN]; - strcpy(host, "localhost"); - gethostname(host, MAXHOSTNAMELEN); - host[MAXHOSTNAMELEN-1] = '\0'; -#if 0 - // We ignore errors. - struct hostent* ent = gethostbyname(host); - - uint32 ip; - if ( ent && ent->h_addr_list[0] ) - ip = *(uint32*) ent->h_addr_list[0]; - else - ip = htonl(0x7f000001); // 127.0.0.1 - - safe_snprintf(name, MAX_NAME_SIZE, "#%s#%d#", - IPAddr(IPv4, &ip, IPAddr::Network)->AsString().c_str(), - getpid()); -#else - safe_snprintf(name, MAX_NAME_SIZE, "#%s#%d#", host, getpid()); -#endif - - end_of_static_str = name + strlen(name); - - initialized = true; - } - - safe_snprintf(end_of_static_str, MAX_NAME_SIZE - (end_of_static_str - name), - "%u", ++id_counter); - name[MAX_NAME_SIZE-1] = '\0'; - -// DBG_LOG(DBG_STATE, "new unique ID %s", name); - - id = new ID(name, SCOPE_GLOBAL, true); - id->SetType(const_cast(this)->Type()->Ref()); - - global_scope()->Insert(name, id); - - id->SetVal(const_cast(this), OP_NONE, true); - - return id; - } - -void MutableVal::TransferUniqueID(MutableVal* mv) - { - const char* new_name = mv->UniqueID()->Name(); - - if ( ! id ) - Bind(); - - DBG_LOG(DBG_STATE, "transfering ID (new %s, old/alias %s)", new_name, id->Name()); - - // Keep old name as alias. - aliases.push_back(id); - - id = new ID(new_name, SCOPE_GLOBAL, true); - id->SetType(const_cast(this)->Type()->Ref()); - global_scope()->Insert(new_name, id); - id->SetVal(const_cast(this), OP_NONE, true); - - Unref(mv->id); - mv->id = 0; - } - -IMPLEMENT_SERIAL(MutableVal, SER_MUTABLE_VAL); - -bool MutableVal::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_MUTABLE_VAL, Val); - - if ( ! SERIALIZE(props) ) - return false; - - // Don't use ID::Serialize here, that would loop. All we - // need is the name, anyway. - const char* name = id ? id->Name() : ""; - if ( ! SERIALIZE(name) ) - return false; - - return true; - } - -bool MutableVal::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Val); - - if ( ! UNSERIALIZE(&props) ) - return false; - - id = 0; - - const char* name; - if ( ! UNSERIALIZE_STR(&name, 0) ) - return false; - - if ( *name ) - { - id = new ID(name, SCOPE_GLOBAL, true); - id->SetVal(this, OP_NONE, true); - - ID* current = global_scope()->Lookup(name); - if ( ! current ) - { - global_scope()->Insert(name, id); - DBG_LOG(DBG_STATE, "installed formerly unknown ID %s", id->Name()); - } - else - { - DBG_LOG(DBG_STATE, "got already known ID %s", current->Name()); - // This means that we already know the value and - // that in fact we should bind to the local value. - // Val::Unserialize() will take care of this. 
- } - } - - delete [] name; - return true; + return false; } IntervalVal::IntervalVal(double quantity, double units) : @@ -750,20 +422,6 @@ void IntervalVal::ValDescribe(ODesc* d) const DO_UNIT(Microseconds, "usec") } -IMPLEMENT_SERIAL(IntervalVal, SER_INTERVAL_VAL); - -bool IntervalVal::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_INTERVAL_VAL, Val); - return true; - } - -bool IntervalVal::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Val); - return true; - } - PortVal* PortManager::Get(uint32 port_num) const { return val_mgr->GetPort(port_num); @@ -862,18 +520,10 @@ void PortVal::ValDescribe(ODesc* d) const d->Add("/unknown"); } -IMPLEMENT_SERIAL(PortVal, SER_PORT_VAL); - -bool PortVal::DoSerialize(SerialInfo* info) const +Val* PortVal::DoClone(CloneState* state) { - DO_SERIALIZE(SER_PORT_VAL, Val); - return true; - } - -bool PortVal::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Val); - return true; + // Immutable. + return Ref(); } AddrVal::AddrVal(const char* text) : Val(TYPE_ADDR) @@ -920,18 +570,10 @@ Val* AddrVal::SizeVal() const return val_mgr->GetCount(128); } -IMPLEMENT_SERIAL(AddrVal, SER_ADDR_VAL); - -bool AddrVal::DoSerialize(SerialInfo* info) const +Val* AddrVal::DoClone(CloneState* state) { - DO_SERIALIZE(SER_ADDR_VAL, Val); - return true; - } - -bool AddrVal::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Val); - return true; + // Immutable. + return Ref(); } SubNetVal::SubNetVal(const char* text) : Val(TYPE_SUBNET) @@ -1044,18 +686,10 @@ bool SubNetVal::Contains(const IPAddr& addr) const return val.subnet_val->Contains(a); } -IMPLEMENT_SERIAL(SubNetVal, SER_SUBNET_VAL); - -bool SubNetVal::DoSerialize(SerialInfo* info) const +Val* SubNetVal::DoClone(CloneState* state) { - DO_SERIALIZE(SER_SUBNET_VAL, Val); - return true; - } - -bool SubNetVal::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Val); - return true; + // Immutable. + return Ref(); } StringVal::StringVal(BroString* s) : Val(TYPE_STRING) @@ -1100,18 +734,14 @@ unsigned int StringVal::MemoryAllocation() const return padded_sizeof(*this) + val.string_val->MemoryAllocation(); } -IMPLEMENT_SERIAL(StringVal, SER_STRING_VAL); - -bool StringVal::DoSerialize(SerialInfo* info) const +Val* StringVal::DoClone(CloneState* state) { - DO_SERIALIZE(SER_STRING_VAL, Val); - return true; - } - -bool StringVal::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Val); - return true; + // We could likely treat this type as immutable and return a reference + // instead of creating a new copy, but we first need to be careful and + // audit whether anything internal actually does mutate it. 
+ return state->NewClone(this, new StringVal( + new BroString((u_char*) val.string_val->Bytes(), + val.string_val->Len(), 1))); } PatternVal::PatternVal(RE_Matcher* re) : Val(base_type(TYPE_PATTERN)) @@ -1162,20 +792,15 @@ unsigned int PatternVal::MemoryAllocation() const return padded_sizeof(*this) + val.re_val->MemoryAllocation(); } -IMPLEMENT_SERIAL(PatternVal, SER_PATTERN_VAL); - -bool PatternVal::DoSerialize(SerialInfo* info) const +Val* PatternVal::DoClone(CloneState* state) { - DO_SERIALIZE(SER_PATTERN_VAL, Val); - return AsPattern()->Serialize(info); - } - -bool PatternVal::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Val); - - val.re_val = RE_Matcher::Unserialize(info); - return val.re_val != 0; + // We could likely treat this type as immutable and return a reference + // instead of creating a new copy, but we first need to be careful and + // audit whether anything internal actually does mutate it. + auto re = new RE_Matcher(val.re_val->PatternText(), + val.re_val->AnywherePatternText()); + re->Compile(); + return state->NewClone(this, new PatternVal(re)); } ListVal::ListVal(TypeTag t) @@ -1260,50 +885,16 @@ void ListVal::Describe(ODesc* d) const } } -IMPLEMENT_SERIAL(ListVal, SER_LIST_VAL); - -bool ListVal::DoSerialize(SerialInfo* info) const +Val* ListVal::DoClone(CloneState* state) { - DO_SERIALIZE(SER_LIST_VAL, Val); - - if ( ! (SERIALIZE(char(tag)) && SERIALIZE(vals.length())) ) - return false; + auto lv = new ListVal(tag); + lv->vals.resize(vals.length()); + state->NewClone(this, lv); loop_over_list(vals, i) - { - if ( ! vals[i]->Serialize(info) ) - return false; - } + lv->Append(vals[i]->Clone(state)); - return true; - } - -bool ListVal::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Val); - - char t; - int len; - - if ( ! (UNSERIALIZE(&t) && UNSERIALIZE(&len)) ) - return false; - - tag = TypeTag(t); - - while ( len-- ) - { - Val* v = Val::Unserialize(info, TYPE_ANY); - if ( ! v ) - return false; - - vals.append(v); - } - - // Our dtor will do Unref(type) in addition to Val's dtor. - if ( type ) - type->Ref(); - - return true; + return lv; } unsigned int ListVal::MemoryAllocation() const @@ -1343,7 +934,7 @@ static void table_entry_val_delete_func(void* val) delete tv; } -TableVal::TableVal(TableType* t, Attributes* a) : MutableVal(t) +TableVal::TableVal(TableType* t, Attributes* a) : Val(t) { Init(t); SetAttrs(a); @@ -1462,7 +1053,7 @@ void TableVal::CheckExpireAttr(attr_tag at) } } -int TableVal::Assign(Val* index, Val* new_val, Opcode op) +int TableVal::Assign(Val* index, Val* new_val) { HashKey* k = ComputeHash(index); if ( ! k ) @@ -1472,34 +1063,16 @@ int TableVal::Assign(Val* index, Val* new_val, Opcode op) return 0; } - return Assign(index, k, new_val, op); + return Assign(index, k, new_val); } -int TableVal::Assign(Val* index, HashKey* k, Val* new_val, Opcode op) +int TableVal::Assign(Val* index, HashKey* k, Val* new_val) { int is_set = table_type->IsSet(); if ( (is_set && new_val) || (! is_set && ! new_val) ) InternalWarning("bad set/table in TableVal::Assign"); - BroType* yt = Type()->AsTableType()->YieldType(); - - if ( yt && yt->Tag() == TYPE_TABLE && - new_val->AsTableVal()->FindAttr(ATTR_MERGEABLE) ) - { - // Join two mergeable sets. 
- Val* old = Lookup(index, false); - if ( old && old->AsTableVal()->FindAttr(ATTR_MERGEABLE) ) - { - if ( LoggingAccess() && op != OP_NONE ) - StateAccess::Log(new StateAccess(OP_ASSIGN_IDX, - this, index, new_val, old)); - new_val->AsTableVal()->AddTo(old->AsTableVal(), 0, false); - Unref(new_val); - return 1; - } - } - TableEntryVal* new_entry_val = new TableEntryVal(new_val); HashKey k_copy(k->Key(), k->Size(), k->Hash()); TableEntryVal* old_entry_val = AsNonConstTable()->Insert(k, new_entry_val); @@ -1522,64 +1095,6 @@ int TableVal::Assign(Val* index, HashKey* k, Val* new_val, Opcode op) subnets->Insert(index, new_entry_val); } - if ( LoggingAccess() && op != OP_NONE ) - { - Val* rec_index = 0; - if ( ! index ) - index = rec_index = RecoverIndex(&k_copy); - - if ( new_val ) - { - // A table. - if ( new_val->IsMutableVal() ) - new_val->AsMutableVal()->AddProperties(GetProperties()); - - bool unref_old_val = false; - Val* old_val = old_entry_val ? - old_entry_val->Value() : 0; - if ( op == OP_INCR && ! old_val ) - // If it's an increment, somebody has already - // checked that the index is there. If it's - // not, that can only be due to using the - // default. - { - old_val = Default(index); - unref_old_val = true; - } - - assert(op != OP_INCR || old_val); - - StateAccess::Log( - new StateAccess( - op == OP_INCR ? - OP_INCR_IDX : OP_ASSIGN_IDX, - this, index, new_val, old_val)); - - if ( unref_old_val ) - Unref(old_val); - } - - else - { - // A set. - if ( old_entry_val && remote_check_sync_consistency ) - { - Val* has_old_val = val_mgr->GetInt(1); - StateAccess::Log( - new StateAccess(OP_ADD, this, index, - has_old_val)); - Unref(has_old_val); - } - else - StateAccess::Log( - new StateAccess(OP_ADD, this, - index, 0, 0)); - } - - if ( rec_index ) - Unref(rec_index); - } - // Keep old expiration time if necessary. if ( old_entry_val && attrs && attrs->FindAttr(ATTR_EXPIRE_CREATE) ) new_entry_val->SetExpireAccess(old_entry_val->ExpireAccessTime()); @@ -1633,15 +1148,13 @@ int TableVal::AddTo(Val* val, int is_first_init, bool propagate_ops) const if ( type->IsSet() ) { - if ( ! t->Assign(v->Value(), k, 0, - propagate_ops ? OP_ASSIGN : OP_NONE) ) + if ( ! t->Assign(v->Value(), k, 0) ) return 0; } else { v->Ref(); - if ( ! t->Assign(0, k, v->Value(), - propagate_ops ? OP_ASSIGN : OP_NONE) ) + if ( ! t->Assign(0, k, v->Value()) ) return 0; } } @@ -1861,29 +1374,30 @@ Val* TableVal::Default(Val* index) return def_attr->AttrExpr()->IsConst() ? def_val->Ref() : def_val->Clone(); const Func* f = def_val->AsFunc(); - val_list* vl = new val_list(); + val_list vl; if ( index->Type()->Tag() == TYPE_LIST ) { const val_list* vl0 = index->AsListVal()->Vals(); + vl = val_list(vl0->length()); loop_over_list(*vl0, i) - vl->append((*vl0)[i]->Ref()); + vl.append((*vl0)[i]->Ref()); } else - vl->append(index->Ref()); + { + vl = val_list{index->Ref()}; + } Val* result = 0; try { - result = f->Call(vl); + result = f->Call(&vl); } catch ( InterpreterException& e ) { /* Already reported. */ } - delete vl; - if ( ! result ) { Error("no value returned from &default function"); @@ -1909,11 +1423,7 @@ Val* TableVal::Lookup(Val* index, bool use_default_val) if ( v ) { if ( attrs && attrs->FindAttr(ATTR_EXPIRE_READ) ) - { v->SetExpireAccess(network_time); - if ( LoggingAccess() && ExpirationEnabled() ) - ReadOperation(index, v); - } return v->Value() ? 
v->Value() : this; } @@ -1940,11 +1450,7 @@ Val* TableVal::Lookup(Val* index, bool use_default_val) if ( v ) { if ( attrs && attrs->FindAttr(ATTR_EXPIRE_READ) ) - { v->SetExpireAccess(network_time); - if ( LoggingAccess() && ExpirationEnabled() ) - ReadOperation(index, v); - } return v->Value() ? v->Value() : this; } @@ -1998,11 +1504,7 @@ TableVal* TableVal::LookupSubnetValues(const SubNetVal* search) if ( entry ) { if ( attrs && attrs->FindAttr(ATTR_EXPIRE_READ) ) - { entry->SetExpireAccess(network_time); - if ( LoggingAccess() && ExpirationEnabled() ) - ReadOperation(s, entry); - } } Unref(s); // assign does not consume index @@ -2032,8 +1534,6 @@ bool TableVal::UpdateTimestamp(Val* index) return false; v->SetExpireAccess(network_time); - if ( LoggingAccess() && attrs->FindAttr(ATTR_EXPIRE_READ) ) - ReadOperation(index, v); return true; } @@ -2052,30 +1552,6 @@ Val* TableVal::Delete(const Val* index) if ( subnets && ! subnets->Remove(index) ) reporter->InternalWarning("index not in prefix table"); - if ( LoggingAccess() ) - { - if ( v ) - { - if ( v->Value() && remote_check_sync_consistency ) - // A table. - StateAccess::Log( - new StateAccess(OP_DEL, this, - index, v->Value())); - else - { - // A set. - Val* has_old_val = val_mgr->GetInt(1); - StateAccess::Log( - new StateAccess(OP_DEL, this, index, - has_old_val)); - Unref(has_old_val); - } - } - else - StateAccess::Log( - new StateAccess(OP_DEL, this, index, 0)); - } - delete k; delete v; @@ -2098,9 +1574,6 @@ Val* TableVal::Delete(const HashKey* k) delete v; - if ( LoggingAccess() ) - StateAccess::Log(new StateAccess(OP_DEL, this, k)); - Modified(); return va; } @@ -2268,7 +1741,7 @@ int TableVal::ExpandCompoundAndInit(val_list* vl, int k, Val* new_val) return 1; } -int TableVal::CheckAndAssign(Val* index, Val* new_val, Opcode op) +int TableVal::CheckAndAssign(Val* index, Val* new_val) { Val* v = 0; if ( subnets ) @@ -2280,7 +1753,7 @@ int TableVal::CheckAndAssign(Val* index, Val* new_val, Opcode op) if ( v ) index->Warn("multiple initializations for index"); - return Assign(index, new_val, op); + return Assign(index, new_val); } void TableVal::InitTimer(double delay) @@ -2312,6 +1785,7 @@ void TableVal::DoExpire(double t) HashKey* k = 0; TableEntryVal* v = 0; TableEntryVal* v_saved = 0; + bool modified = false; for ( int i = 0; i < table_incremental_step && (v = tbl->NextEntry(k, expire_cookie)); ++i ) @@ -2319,7 +1793,7 @@ void TableVal::DoExpire(double t) if ( v->ExpireAccessTime() == 0 ) { // This happens when we insert val while network_time - // hasn't been initialized yet (e.g. in bro_init()), and + // hasn't been initialized yet (e.g. in zeek_init()), and // also when bro_start_network_time hasn't been initialized // (e.g. before first packet). The expire_access_time is // correct, so we just need to wait. @@ -2364,19 +1838,18 @@ void TableVal::DoExpire(double t) Unref(index); } - if ( LoggingAccess() ) - StateAccess::Log( - new StateAccess(OP_EXPIRE, this, k)); - tbl->RemoveEntry(k); Unref(v->Value()); delete v; - Modified(); + modified = true; } delete k; } + if ( modified ) + Modified(); + if ( ! v ) { expire_cookie = 0; @@ -2423,21 +1896,6 @@ double TableVal::CallExpireFunc(Val* idx) return 0; } - val_list* vl = new val_list; - vl->append(Ref()); - - // Flatten lists of a single element. 
- if ( idx->Type()->Tag() == TYPE_LIST && - idx->AsListVal()->Length() == 1 ) - { - Val* old = idx; - idx = idx->AsListVal()->Index(0); - idx->Ref(); - Unref(old); - } - - vl->append(idx); - double secs = 0; try @@ -2447,19 +1905,31 @@ double TableVal::CallExpireFunc(Val* idx) if ( ! vf ) { // Will have been reported already. - delete_vals(vl); + Unref(idx); return 0; } if ( vf->Type()->Tag() != TYPE_FUNC ) { - Unref(vf); - delete_vals(vl); vf->Error("not a function"); + Unref(vf); + Unref(idx); return 0; } - Val* vs = vf->AsFunc()->Call(vl); + + // Flatten lists of a single element. + if ( idx->Type()->Tag() == TYPE_LIST && + idx->AsListVal()->Length() == 1 ) + { + Val* old = idx; + idx = idx->AsListVal()->Index(0); + idx->Ref(); + Unref(old); + } + + val_list vl{Ref(), idx}; + Val* vs = vf->AsFunc()->Call(&vl); if ( vs ) { @@ -2468,7 +1938,6 @@ double TableVal::CallExpireFunc(Val* idx) } Unref(vf); - delete vl; } catch ( InterpreterException& e ) @@ -2478,296 +1947,54 @@ double TableVal::CallExpireFunc(Val* idx) return secs; } -void TableVal::ReadOperation(Val* index, TableEntryVal* v) +Val* TableVal::DoClone(CloneState* state) { - double timeout = GetExpireTime(); + auto tv = new TableVal(table_type); + state->NewClone(this, tv); - if ( timeout < 0 ) - // Skip in case of unset/invalid expiration value. If it's an - // error, it has been reported already. - return; + const PDict(TableEntryVal)* tbl = AsTable(); + IterCookie* cookie = tbl->InitForIteration(); - // In theory we need to only propagate one update per &read_expire - // interval to prevent peers from expiring intervals. To account for - // practical issues such as latency, we send one update every half - // &read_expire. - if ( network_time - v->LastReadUpdate() > timeout / 2 ) + HashKey* key; + TableEntryVal* val; + while ( (val = tbl->NextEntry(key, cookie)) ) { - StateAccess::Log(new StateAccess(OP_READ_IDX, this, index)); - v->SetLastReadUpdate(network_time); - } - } - -IMPLEMENT_SERIAL(TableVal, SER_TABLE_VAL); - -// This is getting rather complex due to the ability to suspend even within -// deeply-nested values. -bool TableVal::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE_WITH_SUSPEND(SER_TABLE_VAL, MutableVal); - - // The current state of the serialization. - struct State { - IterCookie* c; - TableEntryVal* v; // current value - bool did_index; // already wrote the val's index - }* state = 0; - - PDict(TableEntryVal)* tbl = - const_cast(this)->AsNonConstTable(); - - if ( info->cont.NewInstance() ) - { - // For simplicity, we disable suspension for the objects - // serialized here. (In fact we know that *currently* - // they won't even try). - DisableSuspend suspend(info); - - state = new State; - state->c = tbl->InitForIteration(); - tbl->MakeRobustCookie(state->c); - state->v = 0; - state->did_index = false; - info->s->WriteOpenTag(table_type->IsSet() ? "set" : "table"); - - SERIALIZE_OPTIONAL(attrs); - SERIALIZE_OPTIONAL(expire_time); - SERIALIZE_OPTIONAL(expire_func); - - // Make sure nobody kills us in between. - const_cast(this)->Ref(); - } - - else if ( info->cont.ChildSuspended() ) - state = (State*) info->cont.RestoreState(); - - else if ( info->cont.Resuming() ) - { - info->cont.Resume(); - state = (State*) info->cont.RestoreState(); - } - else - reporter->InternalError("unknown continuation state"); - - HashKey* k = 0; - int count = 0; - - assert((!info->cont.ChildSuspended()) || state->v); - - while ( true ) - { - if ( ! state->v ) - { - state->v = tbl->NextEntry(k, state->c); - if ( ! 
state->c ) - { - // No next one. - if ( ! SERIALIZE(false) ) - { - delete k; - return false; - } - - break; - } - - // There's a value coming. - if ( ! SERIALIZE(true) ) - { - delete k; - return false; - } - - if ( state->v->Value() ) - state->v->Ref(); - - state->did_index = false; - } - - // Serialize index. - if ( k && ! state->did_index ) - { - // Indices are rather small, so we disable suspension - // here again. - DisableSuspend suspend(info); - info->s->WriteOpenTag("key"); - ListVal* index = table_hash->RecoverVals(k)->AsListVal(); - delete k; - - if ( ! index->Serialize(info) ) - return false; - - Unref(index); - info->s->WriteCloseTag("key"); - - state->did_index = true; - - // Start serializing data. - if ( ! type->IsSet() ) - info->s->WriteOpenTag("value"); - } - - if ( ! type->IsSet() ) - { - info->cont.SaveState(state); - info->cont.SaveContext(); - bool result = state->v->val->Serialize(info); - info->cont.RestoreContext(); - - if ( ! result ) - return false; - - if ( info->cont.ChildSuspended() ) - return true; - } - - double eat = state->v->ExpireAccessTime(); - - if ( ! (SERIALIZE(state->v->last_access_time) && - SERIALIZE(eat)) ) - return false; - - info->s->WriteCloseTag("value"); - - if ( state->v->Value() ) - state->v->Unref(); - state->v = 0; // Next value. - - // Suspend if we've done enough for now (which means we - // have serialized more than table_incremental_step entries - // in a row; if an entry has suspended itself in between, - // we start counting from 0). - if ( info->may_suspend && ++count > table_incremental_step) - { - info->cont.SaveState(state); - info->cont.Suspend(); - reporter->Info("TableVals serialization suspended right in the middle."); - return true; - } - } - - info->s->WriteCloseTag(table_type->IsSet() ? "set" : "table"); - delete state; - - Unref(const_cast(this)); - return true; - } - -bool TableVal::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(MutableVal); - - Init((TableType*) type); - - UNSERIALIZE_OPTIONAL(attrs, Attributes::Unserialize(info)); - UNSERIALIZE_OPTIONAL(expire_time, Expr::Unserialize(info)); - UNSERIALIZE_OPTIONAL(expire_func, Expr::Unserialize(info)); - - while ( true ) - { - // Anymore? - bool next; - if ( ! UNSERIALIZE(&next) ) - return false; - - if ( ! next ) - break; - - // Unserialize index. - ListVal* index = - (ListVal*) Val::Unserialize(info, table_type->Indices()); - if ( ! index ) - return false; - - // Unserialize data. - Val* entry; - if ( ! table_type->IsSet() ) - { - entry = Val::Unserialize(info, type->YieldType()); - if ( ! entry ) - return false; - } - else - entry = 0; - - TableEntryVal* entry_val = new TableEntryVal(entry); - - double eat; - - if ( ! UNSERIALIZE(&entry_val->last_access_time) || - ! UNSERIALIZE(&eat) ) - { - entry_val->Unref(); - delete entry_val; - return false; - } - - entry_val->SetExpireAccess(eat); - - HashKey* key = ComputeHash(index); - TableEntryVal* old_entry_val = - AsNonConstTable()->Insert(key, entry_val); - assert(! old_entry_val); - - delete key; + TableEntryVal* nval = val->Clone(state); + tv->AsNonConstTable()->Insert(key, nval); if ( subnets ) - subnets->Insert(index, entry_val); + { + Val* idx = RecoverIndex(key); + tv->subnets->Insert(idx, nval); + Unref(idx); + } - Unref(index); + delete key; } - // If necessary, activate the expire timer. 
if ( attrs ) { - CheckExpireAttr(ATTR_EXPIRE_READ); - CheckExpireAttr(ATTR_EXPIRE_WRITE); - CheckExpireAttr(ATTR_EXPIRE_CREATE); + ::Ref(attrs); + tv->attrs = attrs; } - return true; - } + if ( expire_time ) + { + tv->expire_time = expire_time->Ref(); -bool TableVal::AddProperties(Properties arg_props) - { - if ( ! MutableVal::AddProperties(arg_props) ) - return false; + // As network_time is not necessarily initialized yet, we set + // a timer which fires immediately. + timer = new TableValTimer(this, 1); + timer_mgr->Add(timer); + } - if ( Type()->IsSet() || ! RecursiveProps(arg_props) ) - return true; + if ( expire_func ) + tv->expire_func = expire_func->Ref(); - // For a large table, this could get expensive. So, let's hope - // that nobody creates such a table *before* making it persistent - // (for example by inserting it into another table). - TableEntryVal* v; - PDict(TableEntryVal)* tbl = val.table_val; - IterCookie* c = tbl->InitForIteration(); - while ( (v = tbl->NextEntry(c)) ) - if ( v->Value()->IsMutableVal() ) - v->Value()->AsMutableVal()->AddProperties(RecursiveProps(arg_props)); + if ( def_val ) + tv->def_val = def_val->Ref(); - return true; - } - -bool TableVal::RemoveProperties(Properties arg_props) - { - if ( ! MutableVal::RemoveProperties(arg_props) ) - return false; - - if ( Type()->IsSet() || ! RecursiveProps(arg_props) ) - return true; - - // For a large table, this could get expensive. So, let's hope - // that nobody creates such a table *before* making it persistent - // (for example by inserting it into another table). - TableEntryVal* v; - PDict(TableEntryVal)* tbl = val.table_val; - IterCookie* c = tbl->InitForIteration(); - while ( (v = tbl->NextEntry(c)) ) - if ( v->Value()->IsMutableVal() ) - v->Value()->AsMutableVal()->RemoveProperties(RecursiveProps(arg_props)); - - return true; + return tv; } unsigned int TableVal::MemoryAllocation() const @@ -2791,21 +2018,29 @@ unsigned int TableVal::MemoryAllocation() const vector RecordVal::parse_time_records; -RecordVal::RecordVal(RecordType* t) : MutableVal(t) +RecordVal::RecordVal(RecordType* t, bool init_fields) : Val(t) { origin = 0; - record_type = t; - int n = record_type->NumFields(); + int n = t->NumFields(); val_list* vl = val.val_list_val = new val_list(n); + if ( is_parsing ) + { + parse_time_records.emplace_back(this); + Ref(); + } + + if ( ! init_fields ) + return; + // Initialize to default values from RecordType (which are nil // by default). for ( int i = 0; i < n; ++i ) { - Attributes* a = record_type->FieldDecl(i)->attrs; + Attributes* a = t->FieldDecl(i)->attrs; Attr* def_attr = a ? a->FindAttr(ATTR_DEFAULT) : 0; Val* def = def_attr ? def_attr->AttrExpr()->Eval(0) : 0; - BroType* type = record_type->FieldDecl(i)->type; + BroType* type = t->FieldDecl(i)->type; if ( def && type->Tag() == TYPE_RECORD && def->Type()->Tag() == TYPE_RECORD && @@ -2836,12 +2071,6 @@ RecordVal::RecordVal(RecordType* t) : MutableVal(t) vl->append(def ? def->Ref() : 0); Unref(def); - - if ( is_parsing ) - { - parse_time_records.emplace_back(this); - Ref(); - } } } @@ -2850,44 +2079,9 @@ RecordVal::~RecordVal() delete_vals(AsNonConstRecord()); } -void RecordVal::Assign(int field, Val* new_val, Opcode op) +void RecordVal::Assign(int field, Val* new_val) { - if ( new_val && Lookup(field) && - record_type->FieldType(field)->Tag() == TYPE_TABLE && - new_val->AsTableVal()->FindAttr(ATTR_MERGEABLE) ) - { - // Join two mergeable sets. 
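// --- Illustrative sketch (not part of this patch) --------------------------
// TableVal::DoClone() above deep-copies the entries via
// TableEntryVal::Clone(), while attributes, &default and the expire function
// are only Ref()'d, i.e. shared with the original; a cloned table with an
// expiration attribute arms its own timer. Roughly (orig, idx and v are
// hypothetical placeholders):
//
//   TableVal* copy = orig->Clone()->AsTableVal();
//   copy->Assign(idx, v);     // visible in copy only, not in orig
// ----------------------------------------------------------------------------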
- Val* old = Lookup(field); - if ( old->AsTableVal()->FindAttr(ATTR_MERGEABLE) ) - { - if ( LoggingAccess() && op != OP_NONE ) - { - StringVal* index = new StringVal(Type()->AsRecordType()->FieldName(field)); - StateAccess::Log(new StateAccess(OP_ASSIGN_IDX, this, index, new_val, old)); - Unref(index); - } - - new_val->AsTableVal()->AddTo(old->AsTableVal(), 0, false); - Unref(new_val); - return; - } - } - Val* old_val = AsNonConstRecord()->replace(field, new_val); - - if ( LoggingAccess() && op != OP_NONE ) - { - if ( new_val && new_val->IsMutableVal() ) - new_val->AsMutableVal()->AddProperties(GetProperties()); - - StringVal* index = new StringVal(Type()->AsRecordType()->FieldName(field)); - StateAccess::Log( - new StateAccess( - op == OP_INCR ? OP_INCR_IDX : OP_ASSIGN_IDX, - this, index, new_val, old_val)); - Unref(index); // The logging may keep a cached copy. - } - Unref(old_val); Modified(); } @@ -2904,7 +2098,7 @@ Val* RecordVal::LookupWithDefault(int field) const if ( val ) return val->Ref(); - return record_type->FieldDefault(field); + return Type()->AsRecordType()->FieldDefault(field); } void RecordVal::ResizeParseTimeRecords() @@ -2912,7 +2106,7 @@ void RecordVal::ResizeParseTimeRecords() for ( auto& rv : parse_time_records ) { auto vs = rv->val.val_list_val; - auto rt = rv->record_type; + auto rt = rv->Type()->AsRecordType(); auto current_length = vs->length(); auto required_length = rt->NumFields(); @@ -2932,7 +2126,7 @@ void RecordVal::ResizeParseTimeRecords() Val* RecordVal::Lookup(const char* field, bool with_default) const { - int idx = record_type->FieldOffset(field); + int idx = Type()->AsRecordType()->FieldOffset(field); if ( idx < 0 ) reporter->InternalError("missing record field: %s", field); @@ -3017,6 +2211,7 @@ void RecordVal::Describe(ODesc* d) const { const val_list* vl = AsRecord(); int n = vl->length(); + auto record_type = Type()->AsRecordType(); if ( d->IsBinary() || d->IsPortable() ) { @@ -3053,6 +2248,7 @@ void RecordVal::DescribeReST(ODesc* d) const { const val_list* vl = AsRecord(); int n = vl->length(); + auto record_type = Type()->AsRecordType(); d->Add("{"); d->PushIndent(); @@ -3077,94 +2273,24 @@ void RecordVal::DescribeReST(ODesc* d) const d->Add("}"); } -IMPLEMENT_SERIAL(RecordVal, SER_RECORD_VAL); - -bool RecordVal::DoSerialize(SerialInfo* info) const +Val* RecordVal::DoClone(CloneState* state) { - DO_SERIALIZE(SER_RECORD_VAL, MutableVal); - - // We could use the type name as a tag here. - info->s->WriteOpenTag("record"); - - // We don't need to serialize record_type as it's simply the - // casted table_type. - // FIXME: What about origin? - - if ( ! SERIALIZE(val.val_list_val->length()) ) - return false; + // We set origin to 0 here. Origin only seems to be used for exactly one + // purpose - to find the connection record that is associated with a + // record. As we cannot guarantee that it will ber zeroed out at the + // approproate time (as it seems to be guaranteed for the original record) + // we don't touch it. + auto rv = new RecordVal(Type()->AsRecordType(), false); + rv->origin = nullptr; + state->NewClone(this, rv); loop_over_list(*val.val_list_val, i) { - info->s->WriteOpenTag(record_type->FieldName(i)); - Val* v = (*val.val_list_val)[i]; - SERIALIZE_OPTIONAL(v); - info->s->WriteCloseTag(record_type->FieldName(i)); + Val* v = (*val.val_list_val)[i] ? 
(*val.val_list_val)[i]->Clone(state) : nullptr; + rv->val.val_list_val->append(v); } - info->s->WriteCloseTag("record"); - - return true; - } - -bool RecordVal::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(MutableVal); - - record_type = (RecordType*) type; - origin = 0; - - int len; - if ( ! UNSERIALIZE(&len) ) - { - val.val_list_val = new val_list; - return false; - } - - val.val_list_val = new val_list(len); - - for ( int i = 0; i < len; ++i ) - { - Val* v; - UNSERIALIZE_OPTIONAL(v, Val::Unserialize(info)); - AsNonConstRecord()->append(v); // correct for v==0, too. - } - - return true; - } - -bool RecordVal::AddProperties(Properties arg_props) - { - if ( ! MutableVal::AddProperties(arg_props) ) - return false; - - if ( ! RecursiveProps(arg_props) ) - return true; - - loop_over_list(*val.val_list_val, i) - { - Val* v = (*val.val_list_val)[i]; - if ( v && v->IsMutableVal() ) - v->AsMutableVal()->AddProperties(RecursiveProps(arg_props)); - } - return true; - } - - -bool RecordVal::RemoveProperties(Properties arg_props) - { - if ( ! MutableVal::RemoveProperties(arg_props) ) - return false; - - if ( ! RecursiveProps(arg_props) ) - return true; - - loop_over_list(*val.val_list_val, i) - { - Val* v = (*val.val_list_val)[i]; - if ( v && v->IsMutableVal() ) - v->AsMutableVal()->RemoveProperties(RecursiveProps(arg_props)); - } - return true; + return rv; } unsigned int RecordVal::MemoryAllocation() const @@ -3193,21 +2319,13 @@ void EnumVal::ValDescribe(ODesc* d) const d->Add(ename); } -IMPLEMENT_SERIAL(EnumVal, SER_ENUM_VAL); - -bool EnumVal::DoSerialize(SerialInfo* info) const +Val* EnumVal::DoClone(CloneState* state) { - DO_SERIALIZE(SER_ENUM_VAL, Val); - return true; + // Immutable. + return Ref(); } -bool EnumVal::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Val); - return true; - } - -VectorVal::VectorVal(VectorType* t) : MutableVal(t) +VectorVal::VectorVal(VectorType* t) : Val(t) { vector_type = t->Ref()->AsVectorType(); val.vector_val = new vector(); @@ -3223,7 +2341,7 @@ VectorVal::~VectorVal() delete val.vector_val; } -bool VectorVal::Assign(unsigned int index, Val* element, Opcode op) +bool VectorVal::Assign(unsigned int index, Val* element) { if ( element && ! same_type(element->Type(), vector_type->YieldType(), 0) ) @@ -3232,30 +2350,6 @@ bool VectorVal::Assign(unsigned int index, Val* element, Opcode op) return false; } - BroType* yt = Type()->AsVectorType()->YieldType(); - - if ( yt && yt->Tag() == TYPE_TABLE && - element->AsTableVal()->FindAttr(ATTR_MERGEABLE) ) - { - // Join two mergeable sets. - Val* old = Lookup(index); - if ( old && old->AsTableVal()->FindAttr(ATTR_MERGEABLE) ) - { - if ( LoggingAccess() && op != OP_NONE ) - { - Val* ival = val_mgr->GetCount(index); - StateAccess::Log(new StateAccess(OP_ASSIGN_IDX, - this, ival, element, - (*val.vector_val)[index])); - Unref(ival); - } - - element->AsTableVal()->AddTo(old->AsTableVal(), 0, false); - Unref(element); - return true; - } - } - Val* val_at_index = 0; if ( index < val.vector_val->size() ) @@ -3263,19 +2357,6 @@ bool VectorVal::Assign(unsigned int index, Val* element, Opcode op) else val.vector_val->resize(index + 1); - if ( LoggingAccess() && op != OP_NONE ) - { - if ( element->IsMutableVal() ) - element->AsMutableVal()->AddProperties(GetProperties()); - - Val* ival = val_mgr->GetCount(index); - - StateAccess::Log(new StateAccess(op == OP_INCR ? 
- OP_INCR_IDX : OP_ASSIGN_IDX, - this, ival, element, val_at_index)); - Unref(ival); - } - Unref(val_at_index); // Note: we do *not* Ref() the element, if any, at this point. @@ -3299,6 +2380,45 @@ bool VectorVal::AssignRepeat(unsigned int index, unsigned int how_many, return true; } +bool VectorVal::Insert(unsigned int index, Val* element) + { + if ( element && + ! same_type(element->Type(), vector_type->YieldType(), 0) ) + { + Unref(element); + return false; + } + + vector::iterator it; + + if ( index < val.vector_val->size() ) + it = std::next(val.vector_val->begin(), index); + else + it = val.vector_val->end(); + + // Note: we do *not* Ref() the element, if any, at this point. + // AssignExpr::Eval() already does this; other callers must remember + // to do it similarly. + val.vector_val->insert(it, element); + + Modified(); + return true; + } + +bool VectorVal::Remove(unsigned int index) + { + if ( index >= val.vector_val->size() ) + return false; + + Val* val_at_index = (*val.vector_val)[index]; + auto it = std::next(val.vector_val->begin(), index); + val.vector_val->erase(it); + Unref(val_at_index); + + Modified(); + return true; + } + int VectorVal::AddTo(Val* val, int /* is_first_init */) const { if ( val->Type()->Tag() != TYPE_VECTOR ) @@ -3348,79 +2468,19 @@ unsigned int VectorVal::ResizeAtLeast(unsigned int new_num_elements) return Resize(new_num_elements); } -bool VectorVal::AddProperties(Properties arg_props) +Val* VectorVal::DoClone(CloneState* state) { - if ( ! MutableVal::AddProperties(arg_props) ) - return false; - - if ( ! RecursiveProps(arg_props) ) - return true; - - for ( unsigned int i = 0; i < val.vector_val->size(); ++i ) - if ( (*val.vector_val)[i]->IsMutableVal() ) - (*val.vector_val)[i]->AsMutableVal()->AddProperties(RecursiveProps(arg_props)); - - return true; - } - -bool VectorVal::RemoveProperties(Properties arg_props) - { - if ( ! MutableVal::RemoveProperties(arg_props) ) - return false; - - if ( ! RecursiveProps(arg_props) ) - return true; - - for ( unsigned int i = 0; i < val.vector_val->size(); ++i ) - if ( (*val.vector_val)[i]->IsMutableVal() ) - (*val.vector_val)[i]->AsMutableVal()->RemoveProperties(RecursiveProps(arg_props)); - - return true; - } - -IMPLEMENT_SERIAL(VectorVal, SER_VECTOR_VAL); - -bool VectorVal::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_VECTOR_VAL, MutableVal); - - info->s->WriteOpenTag("vector"); - - if ( ! SERIALIZE(unsigned(val.vector_val->size())) ) - return false; + auto vv = new VectorVal(vector_type); + vv->val.vector_val->reserve(val.vector_val->size()); + state->NewClone(this, vv); for ( unsigned int i = 0; i < val.vector_val->size(); ++i ) { - info->s->WriteOpenTag("value"); - Val* v = (*val.vector_val)[i]; - SERIALIZE_OPTIONAL(v); - info->s->WriteCloseTag("value"); + auto v = (*val.vector_val)[i]->Clone(state); + vv->val.vector_val->push_back(v); } - info->s->WriteCloseTag("vector"); - - return true; - } - -bool VectorVal::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(MutableVal); - - val.vector_val = new vector; - vector_type = type->Ref()->AsVectorType(); - - int len; - if ( ! 
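// --- Illustrative sketch (not part of this patch) --------------------------
// Usage of the new VectorVal::Insert()/Remove() above. Like Assign(),
// Insert() does not Ref() the element, so Ref() it unless it was just
// created; Insert() appends when the index is past the end, and Remove()
// returns false for an out-of-range index. (`vt` is a hypothetical
// VectorType* whose yield type is string.)
//
//   VectorVal* vv = new VectorVal(vt);
//   vv->Assign(0, new StringVal("a"));
//   vv->Insert(0, new StringVal("b"));   // now [ "b", "a" ]
//   vv->Remove(1);                       // now [ "b" ]
//   Unref(vv);
// ----------------------------------------------------------------------------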
UNSERIALIZE(&len) ) - return false; - - for ( int i = 0; i < len; ++i ) - { - Val* v; - UNSERIALIZE_OPTIONAL(v, Val::Unserialize(info, TYPE_ANY)); // accept any type - Assign(i, v); - } - - return true; + return vv; } void VectorVal::ValDescribe(ODesc* d) const @@ -3442,29 +2502,7 @@ void VectorVal::ValDescribe(ODesc* d) const d->Add("]"); } -OpaqueVal::OpaqueVal(OpaqueType* t) : Val(t) - { - } - -OpaqueVal::~OpaqueVal() - { - } - -IMPLEMENT_SERIAL(OpaqueVal, SER_OPAQUE_VAL); - -bool OpaqueVal::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_OPAQUE_VAL, Val); - return true; - } - -bool OpaqueVal::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Val); - return true; - } - -Val* check_and_promote(Val* v, const BroType* t, int is_init) +Val* check_and_promote(Val* v, const BroType* t, int is_init, const Location* expr_location) { if ( ! v ) return 0; @@ -3488,7 +2526,7 @@ Val* check_and_promote(Val* v, const BroType* t, int is_init) if ( same_type(t, vt, is_init) ) return v; - t->Error("type clash", v); + t->Error("type clash", v, 0, expr_location); Unref(v); return 0; } @@ -3497,9 +2535,9 @@ Val* check_and_promote(Val* v, const BroType* t, int is_init) (! IsArithmetic(v_tag) || t_tag != TYPE_TIME || ! v->IsZero()) ) { if ( t_tag == TYPE_LIST || v_tag == TYPE_LIST ) - t->Error("list mixed with scalar", v); + t->Error("list mixed with scalar", v, 0, expr_location); else - t->Error("arithmetic mixed with non-arithmetic", v); + t->Error("arithmetic mixed with non-arithmetic", v, 0, expr_location); Unref(v); return 0; } @@ -3507,12 +2545,12 @@ Val* check_and_promote(Val* v, const BroType* t, int is_init) if ( v_tag == t_tag ) return v; - if ( t_tag != TYPE_TIME ) + if ( t_tag != TYPE_TIME && ! BothArithmetic(t_tag, v_tag) ) { TypeTag mt = max_type(t_tag, v_tag); if ( mt != t_tag ) { - t->Error("over-promotion of arithmetic value", v); + t->Error("over-promotion of arithmetic value", v, 0, expr_location); Unref(v); return 0; } @@ -3529,7 +2567,13 @@ Val* check_and_promote(Val* v, const BroType* t, int is_init) Val* promoted_v; switch ( it ) { case TYPE_INTERNAL_INT: - if ( t_tag == TYPE_INT ) + if ( ( vit == TYPE_INTERNAL_UNSIGNED || vit == TYPE_INTERNAL_DOUBLE ) && Val::WouldOverflow(vt, t, v) ) + { + t->Error("overflow promoting from unsigned/double to signed arithmetic value", v, 0, expr_location); + Unref(v); + return 0; + } + else if ( t_tag == TYPE_INT ) promoted_v = val_mgr->GetInt(v->CoerceToInt()); else if ( t_tag == TYPE_BOOL ) promoted_v = val_mgr->GetBool(v->CoerceToInt()); @@ -3543,7 +2587,13 @@ Val* check_and_promote(Val* v, const BroType* t, int is_init) break; case TYPE_INTERNAL_UNSIGNED: - if ( t_tag == TYPE_COUNT || t_tag == TYPE_COUNTER ) + if ( ( vit == TYPE_INTERNAL_DOUBLE || vit == TYPE_INTERNAL_INT) && Val::WouldOverflow(vt, t, v) ) + { + t->Error("overflow promoting from signed/double to unsigned arithmetic value", v, 0, expr_location); + Unref(v); + return 0; + } + else if ( t_tag == TYPE_COUNT || t_tag == TYPE_COUNTER ) promoted_v = val_mgr->GetCount(v->CoerceToUnsigned()); else // port { diff --git a/src/Val.h b/src/Val.h index 63e790848d..43523df26c 100644 --- a/src/Val.h +++ b/src/Val.h @@ -3,11 +3,10 @@ #ifndef val_h #define val_h -// BRO values. 
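// --- Illustrative sketch (not part of this patch) --------------------------
// What the Val::WouldOverflow() guard used by check_and_promote() in Val.cc
// above rejects, using constructors/helpers that appear elsewhere in this
// patch (argument order is from-type, to-type, value):
//
//   Val* d = new Val(1.0e20, TYPE_DOUBLE);
//   Val::WouldOverflow(d->Type(), base_type(TYPE_INT), d);    // true: > INT64_MAX
//
//   Val* i = val_mgr->GetInt(-1);
//   Val::WouldOverflow(i->Type(), base_type(TYPE_COUNT), i);  // true: negative
//
//   Val* c = val_mgr->GetCount(42);
//   Val::WouldOverflow(c->Type(), base_type(TYPE_INT), c);    // false: fits
// ----------------------------------------------------------------------------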
- #include #include #include +#include #include "net_util.h" #include "Type.h" @@ -18,8 +17,9 @@ #include "Timer.h" #include "ID.h" #include "Scope.h" -#include "StateAccess.h" +#include "Notifier.h" #include "IPAddr.h" +#include "DebugLogger.h" // We have four different port name spaces: TCP, UDP, ICMP, and UNKNOWN. // We distinguish between them based on the bits specified in the *_PORT_MASK @@ -36,7 +36,6 @@ class Func; class BroFile; class RE_Matcher; class PrefixTable; -class SerialInfo; class PortVal; class AddrVal; @@ -49,7 +48,7 @@ class RecordVal; class ListVal; class StringVal; class EnumVal; -class MutableVal; +class OpaqueVal; class StateAccess; @@ -87,7 +86,7 @@ typedef union { class Val : public BroObj { public: - BRO_DEPRECATED("use val_mgr->GetBool, GetFalse/GetTrue, GetInt, or GetCount instead") + ZEEK_DEPRECATED("use val_mgr->GetBool, GetFalse/GetTrue, GetInt, or GetCount instead") Val(bool b, TypeTag t) { val.int_val = b; @@ -97,7 +96,7 @@ public: #endif } - BRO_DEPRECATED("use val_mgr->GetBool, GetFalse/GetTrue, GetInt, or GetCount instead") + ZEEK_DEPRECATED("use val_mgr->GetBool, GetFalse/GetTrue, GetInt, or GetCount instead") Val(int32 i, TypeTag t) { val.int_val = bro_int_t(i); @@ -107,7 +106,7 @@ public: #endif } - BRO_DEPRECATED("use val_mgr->GetBool, GetFalse/GetTrue, GetInt, or GetCount instead") + ZEEK_DEPRECATED("use val_mgr->GetBool, GetFalse/GetTrue, GetInt, or GetCount instead") Val(uint32 u, TypeTag t) { val.uint_val = bro_uint_t(u); @@ -117,7 +116,7 @@ public: #endif } - BRO_DEPRECATED("use val_mgr->GetBool, GetFalse/GetTrue, GetInt, or GetCount instead") + ZEEK_DEPRECATED("use val_mgr->GetBool, GetFalse/GetTrue, GetInt, or GetCount instead") Val(int64 i, TypeTag t) { val.int_val = i; @@ -127,7 +126,7 @@ public: #endif } - BRO_DEPRECATED("use val_mgr->GetBool, GetFalse/GetTrue, GetInt, or GetCount instead") + ZEEK_DEPRECATED("use val_mgr->GetBool, GetFalse/GetTrue, GetInt, or GetCount instead") Val(uint64 u, TypeTag t) { val.uint_val = u; @@ -172,7 +171,7 @@ public: ~Val() override; Val* Ref() { ::Ref(this); return this; } - virtual Val* Clone() const; + Val* Clone(); int IsZero() const; int IsOne() const; @@ -304,6 +303,7 @@ public: CONVERTER(TYPE_STRING, StringVal*, AsStringVal) CONVERTER(TYPE_VECTOR, VectorVal*, AsVectorVal) CONVERTER(TYPE_ENUM, EnumVal*, AsEnumVal) + CONVERTER(TYPE_OPAQUE, OpaqueVal*, AsOpaqueVal) #define CONST_CONVERTER(tag, ctype, name) \ const ctype name() const \ @@ -321,36 +321,14 @@ public: CONST_CONVERTER(TYPE_LIST, ListVal*, AsListVal) CONST_CONVERTER(TYPE_STRING, StringVal*, AsStringVal) CONST_CONVERTER(TYPE_VECTOR, VectorVal*, AsVectorVal) - - bool IsMutableVal() const - { - return IsMutable(type->Tag()); - } - - const MutableVal* AsMutableVal() const - { - if ( ! IsMutableVal() ) - BadTag("Val::AsMutableVal", type_name(type->Tag())); - return (MutableVal*) this; - } - - MutableVal* AsMutableVal() - { - if ( ! 
IsMutableVal() ) - BadTag("Val::AsMutableVal", type_name(type->Tag())); - return (MutableVal*) this; - } + CONST_CONVERTER(TYPE_OPAQUE, OpaqueVal*, AsOpaqueVal) void Describe(ODesc* d) const override; virtual void DescribeReST(ODesc* d) const; - bool Serialize(SerialInfo* info) const; - static Val* Unserialize(UnserialInfo* info, TypeTag type = TYPE_ANY) - { return Unserialize(info, type, 0); } - static Val* Unserialize(UnserialInfo* info, const BroType* exact_type) - { return Unserialize(info, exact_type->Tag(), exact_type); } - - DECLARE_SERIAL(Val); + // To be overridden by mutable derived class to enable change + // notification. + virtual notifier::Modifiable* Modifiable() { return 0; } #ifdef DEBUG // For debugging, we keep a reference to the global ID to which a @@ -367,10 +345,16 @@ public: } #endif + static bool WouldOverflow(const BroType* from_type, const BroType* to_type, const Val* val); + protected: friend class EnumType; + friend class ListVal; + friend class RecordVal; + friend class VectorVal; friend class ValManager; + friend class TableEntryVal; virtual void ValDescribe(ODesc* d) const; virtual void ValDescribeReST(ODesc* d) const; @@ -415,9 +399,22 @@ protected: ACCESSOR(TYPE_TABLE, PDict(TableEntryVal)*, table_val, AsNonConstTable) ACCESSOR(TYPE_RECORD, val_list*, val_list_val, AsNonConstRecord) - // Just an internal helper. - static Val* Unserialize(UnserialInfo* info, TypeTag type, - const BroType* exact_type); + // For internal use by the Val::Clone() methods. + struct CloneState { + // Caches a cloned value for later reuse during the same + // cloning operation. For recursive types, call this *before* + // descending down. + Val* NewClone(Val *src, Val* dst) + { + clones.insert(std::make_pair(src, dst)); + return dst; + } + + std::unordered_map clones; + }; + + Val* Clone(CloneState* state); + virtual Val* DoClone(CloneState* state); BroValUnion val; BroType* type; @@ -432,15 +429,15 @@ protected: class PortManager { public: // Port number given in host order. - BRO_DEPRECATED("use val_mgr->GetPort() instead") + ZEEK_DEPRECATED("use val_mgr->GetPort() instead") PortVal* Get(uint32 port_num, TransportProto port_type) const; // Host-order port number already masked with port space protocol mask. - BRO_DEPRECATED("use val_mgr->GetPort() instead") + ZEEK_DEPRECATED("use val_mgr->GetPort() instead") PortVal* Get(uint32 port_num) const; // Returns a masked port number - BRO_DEPRECATED("use PortVal::Mask() instead") + ZEEK_DEPRECATED("use PortVal::Mask() instead") uint32 Mask(uint32 port_num, TransportProto port_type) const; }; @@ -501,82 +498,6 @@ private: extern ValManager* val_mgr; -class MutableVal : public Val { -public: - // Each MutableVal gets a globally unique ID that can be used to - // reference it no matter if it's directly bound to any user-visible - // ID. This ID is inserted into the global namespace. - ID* UniqueID() const { return id ? id : Bind(); } - - // Returns true if we've already generated a unique ID. - bool HasUniqueID() const { return id; } - - // Transfers the unique ID of the given value to this value. We keep our - // old ID as an alias. - void TransferUniqueID(MutableVal* mv); - - // MutableVals can have properties (let's refrain from calling them - // attributes!). Most properties are recursive. If a derived object - // can contain MutableVals itself, the object has to override - // {Add,Remove}Properties(). RecursiveProp(state) masks out all non- - // recursive properties. 
If this is non-null, an overriden method must - // call itself with RecursiveProp(state) as argument for all contained - // values. (In any case, don't forget to call the parent's method.) - typedef char Properties; - - static const int PERSISTENT = 0x01; - static const int SYNCHRONIZED = 0x02; - - // Tracked by NotifierRegistry, not recursive. - static const int TRACKED = 0x04; - - int RecursiveProps(int prop) const { return prop & ~TRACKED; } - - Properties GetProperties() const { return props; } - virtual bool AddProperties(Properties state); - virtual bool RemoveProperties(Properties state); - - // Whether StateAccess:LogAccess needs to be called. - bool LoggingAccess() const - { -#ifndef DEBUG - return props & (SYNCHRONIZED|PERSISTENT|TRACKED); -#else - return debug_logger.IsVerbose() || - (props & (SYNCHRONIZED|PERSISTENT|TRACKED)); -#endif - } - - uint64 LastModified() const override { return last_modified; } - - // Mark value as changed. - void Modified() - { - last_modified = IncreaseTimeCounter(); - } - -protected: - explicit MutableVal(BroType* t) : Val(t) - { props = 0; id = 0; last_modified = SerialObj::ALWAYS; } - MutableVal() { props = 0; id = 0; last_modified = SerialObj::ALWAYS; } - ~MutableVal() override; - - friend class ID; - friend class Val; - - void SetID(ID* arg_id) { Unref(id); id = arg_id; } - - DECLARE_SERIAL(MutableVal); - -private: - ID* Bind() const; - - mutable ID* id; - list aliases; - Properties props; - uint64 last_modified; -}; - #define Microseconds 1e-6 #define Milliseconds 1e-3 #define Seconds 1.0 @@ -592,19 +513,17 @@ protected: IntervalVal() {} void ValDescribe(ODesc* d) const override; - - DECLARE_SERIAL(IntervalVal); }; class PortVal : public Val { public: // Port number given in host order. - BRO_DEPRECATED("use val_mgr->GetPort() instead") + ZEEK_DEPRECATED("use val_mgr->GetPort() instead") PortVal(uint32 p, TransportProto port_type); // Host-order port number already masked with port space protocol mask. 
- BRO_DEPRECATED("use val_mgr->GetPort() instead") + ZEEK_DEPRECATED("use val_mgr->GetPort() instead") explicit PortVal(uint32 p); Val* SizeVal() const override { return val_mgr->GetInt(val.uint_val); } @@ -639,8 +558,7 @@ protected: PortVal(uint32 p, bool unused); void ValDescribe(ODesc* d) const override; - - DECLARE_SERIAL(PortVal); + Val* DoClone(CloneState* state) override; }; class AddrVal : public Val { @@ -664,7 +582,7 @@ protected: explicit AddrVal(TypeTag t) : Val(t) { } explicit AddrVal(BroType* t) : Val(t) { } - DECLARE_SERIAL(AddrVal); + Val* DoClone(CloneState* state) override; }; class SubNetVal : public Val { @@ -692,8 +610,7 @@ protected: SubNetVal() {} void ValDescribe(ODesc* d) const override; - - DECLARE_SERIAL(SubNetVal); + Val* DoClone(CloneState* state) override; }; class StringVal : public Val { @@ -724,8 +641,7 @@ protected: StringVal() {} void ValDescribe(ODesc* d) const override; - - DECLARE_SERIAL(StringVal); + Val* DoClone(CloneState* state) override; }; class PatternVal : public Val { @@ -744,8 +660,7 @@ protected: PatternVal() {} void ValDescribe(ODesc* d) const override; - - DECLARE_SERIAL(PatternVal); + Val* DoClone(CloneState* state) override; }; // ListVals are mainly used to index tables that have more than one @@ -789,7 +704,7 @@ protected: friend class Val; ListVal() {} - DECLARE_SERIAL(ListVal); + Val* DoClone(CloneState* state) override; val_list vals; TypeTag tag; @@ -803,9 +718,18 @@ public: { val = v; last_access_time = network_time; - expire_access_time = last_read_update = + expire_access_time = int(network_time - bro_start_network_time); } + + TableEntryVal* Clone(Val::CloneState* state) + { + auto rval = new TableEntryVal(val ? val->Clone(state) : nullptr); + rval->last_access_time = last_access_time; + rval->expire_access_time = expire_access_time; + return rval; + } + ~TableEntryVal() { } Val* Value() { return val; } @@ -818,24 +742,16 @@ public: void SetExpireAccess(double time) { expire_access_time = int(time - bro_start_network_time); } - // Returns/sets time of when we propagated the last OP_READ_IDX - // for this item. - double LastReadUpdate() const - { return bro_start_network_time + last_read_update; } - void SetLastReadUpdate(double time) - { last_read_update = int(time - bro_start_network_time); } - protected: friend class TableVal; Val* val; double last_access_time; - // The next two entries store seconds since Bro's start. We use - // ints here to save a few bytes, as we do not need a high resolution - // for these anyway. + // The next entry stores seconds since Bro's start. We use ints here + // to save a few bytes, as we do not need a high resolution for these + // anyway. int expire_access_time; - int last_read_update; }; class TableValTimer : public Timer { @@ -852,7 +768,7 @@ protected: }; class CompositeHash; -class TableVal : public MutableVal { +class TableVal : public Val, public notifier::Modifiable { public: explicit TableVal(TableType* t, Attributes* attrs = 0); ~TableVal() override; @@ -862,8 +778,8 @@ public: // version takes a HashKey and Unref()'s it when done. If we're a // set, new_val has to be nil. If we aren't a set, index may be nil // in the second version. 
- int Assign(Val* index, Val* new_val, Opcode op = OP_ASSIGN); - int Assign(Val* index, HashKey* k, Val* new_val, Opcode op = OP_ASSIGN); + int Assign(Val* index, Val* new_val); + int Assign(Val* index, HashKey* k, Val* new_val); Val* SizeVal() const override { return val_mgr->GetCount(Size()); } @@ -965,19 +881,17 @@ public: HashKey* ComputeHash(const Val* index) const { return table_hash->ComputeHash(index, 1); } + notifier::Modifiable* Modifiable() override { return this; } + protected: friend class Val; - friend class StateAccess; TableVal() {} void Init(TableType* t); void CheckExpireAttr(attr_tag at); int ExpandCompoundAndInit(val_list* vl, int k, Val* new_val); - int CheckAndAssign(Val* index, Val* new_val, Opcode op = OP_ASSIGN); - - bool AddProperties(Properties arg_state) override; - bool RemoveProperties(Properties arg_state) override; + int CheckAndAssign(Val* index, Val* new_val); // Calculates default value for index. Returns 0 if none. Val* Default(Val* index); @@ -994,10 +908,7 @@ protected: // takes ownership of the reference. double CallExpireFunc(Val *idx); - // Propagates a read operation if necessary. - void ReadOperation(Val* index, TableEntryVal *v); - - DECLARE_SERIAL(TableVal); + Val* DoClone(CloneState* state) override; TableType* table_type; CompositeHash* table_hash; @@ -1010,15 +921,15 @@ protected: Val* def_val; }; -class RecordVal : public MutableVal { +class RecordVal : public Val, public notifier::Modifiable { public: - explicit RecordVal(RecordType* t); + explicit RecordVal(RecordType* t, bool init_fields = true); ~RecordVal() override; Val* SizeVal() const override - { return val_mgr->GetCount(record_type->NumFields()); } + { return val_mgr->GetCount(Type()->AsRecordType()->NumFields()); } - void Assign(int field, Val* new_val, Opcode op = OP_ASSIGN); + void Assign(int field, Val* new_val); Val* Lookup(int field) const; // Does not Ref() value. Val* LookupWithDefault(int field) const; // Does Ref() value. @@ -1057,6 +968,8 @@ public: unsigned int MemoryAllocation() const override; void DescribeReST(ODesc* d) const override; + notifier::Modifiable* Modifiable() override { return this; } + // Extend the underlying arrays of record instances created during // parsing to match the number of fields in the record type (they may // mismatch as a result of parse-time record type redefinitions. @@ -1066,10 +979,7 @@ protected: friend class Val; RecordVal() {} - bool AddProperties(Properties arg_state) override; - bool RemoveProperties(Properties arg_state) override; - - DECLARE_SERIAL(RecordVal); + Val* DoClone(CloneState* state) override; RecordType* record_type; BroObj* origin; @@ -1080,7 +990,7 @@ protected: class EnumVal : public Val { public: - BRO_DEPRECATED("use t->GetVal(i) instead") + ZEEK_DEPRECATED("use t->GetVal(i) instead") EnumVal(int i, EnumType* t) : Val(t) { val.int_val = i; @@ -1100,12 +1010,11 @@ protected: EnumVal() {} void ValDescribe(ODesc* d) const override; - - DECLARE_SERIAL(EnumVal); + Val* DoClone(CloneState* state) override; }; -class VectorVal : public MutableVal { +class VectorVal : public Val, public notifier::Modifiable { public: explicit VectorVal(VectorType* t); ~VectorVal() override; @@ -1119,11 +1028,11 @@ public: // Note: does NOT Ref() the element! Remember to do so unless // the element was just created and thus has refcount 1. 
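// --- Illustrative sketch (not part of this patch) --------------------------
// Pattern followed by the mutable types above (TableVal, RecordVal,
// VectorVal): derive from notifier::Modifiable, expose it through
// Val::Modifiable(), and call Modified() after each state change so that
// registered notifiers (e.g. "when" conditions) are told. MyMutableVal is
// hypothetical:
//
//   class MyMutableVal : public Val, public notifier::Modifiable {
//   public:
//       notifier::Modifiable* Modifiable() override { return this; }
//
//       void Update(Val* v)
//           {
//           // ... mutate internal state ...
//           Modified();
//           }
//   };
// ----------------------------------------------------------------------------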
// - bool Assign(unsigned int index, Val* element, Opcode op = OP_ASSIGN); - bool Assign(Val* index, Val* element, Opcode op = OP_ASSIGN) + bool Assign(unsigned int index, Val* element); + bool Assign(Val* index, Val* element) { return Assign(index->AsListVal()->Index(0)->CoerceToUnsigned(), - element, op); + element); } // Assigns the value to how_many locations starting at index. @@ -1153,40 +1062,30 @@ public: // Won't shrink size. unsigned int ResizeAtLeast(unsigned int new_num_elements); + notifier::Modifiable* Modifiable() override { return this; } + + // Insert an element at a specific position into the underlying vector. + bool Insert(unsigned int index, Val* element); + + // Removes an element at a specific position. + bool Remove(unsigned int index); + protected: friend class Val; VectorVal() { } - bool AddProperties(Properties arg_state) override; - bool RemoveProperties(Properties arg_state) override; void ValDescribe(ODesc* d) const override; - - DECLARE_SERIAL(VectorVal); + Val* DoClone(CloneState* state) override; VectorType* vector_type; }; -// Base class for values with types that are managed completely internally, -// with no further script-level operators provided (other than bif -// functions). See OpaqueVal.h for derived classes. -class OpaqueVal : public Val { -public: - explicit OpaqueVal(OpaqueType* t); - ~OpaqueVal() override; - -protected: - friend class Val; - OpaqueVal() { } - - DECLARE_SERIAL(OpaqueVal); -}; - // Checks the given value for consistency with the given type. If an // exact match, returns it. If promotable, returns the promoted version, // Unref()'ing the original. If not a match, generates an error message // and returns nil, also Unref()'ing v. If is_init is true, then // the checking is done in the context of an initialization. -extern Val* check_and_promote(Val* v, const BroType* t, int is_init); +extern Val* check_and_promote(Val* v, const BroType* t, int is_init, const Location* expr_location = nullptr); // Given a pointer to where a Val's core (i.e., its BRO value) resides, // returns a corresponding newly-created or Ref()'d Val. ptr must already diff --git a/src/Var.cc b/src/Var.cc index 8534fdd910..74cfee291f 100644 --- a/src/Var.cc +++ b/src/Var.cc @@ -1,13 +1,11 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include "Var.h" #include "Func.h" #include "Stmt.h" #include "Scope.h" -#include "Serializer.h" -#include "RemoteSerializer.h" #include "EventRegistry.h" #include "Traverse.h" @@ -142,26 +140,6 @@ static void make_var(ID* id, BroType* t, init_class c, Expr* init, } } - if ( id->FindAttr(ATTR_PERSISTENT) || id->FindAttr(ATTR_SYNCHRONIZED) ) - { - if ( dt == VAR_CONST ) - { - id->Error("&persistent/synchronized with constant"); - return; - } - else if ( dt == VAR_OPTION ) - { - id->Error("&persistent/synchronized with option"); - return; - } - - if ( ! id->IsGlobal() ) - { - id->Error("&persistant/synchronized with non-global"); - return; - } - } - if ( do_init ) { if ( c == INIT_NONE && dt == VAR_REDEF && t->IsTable() && @@ -295,7 +273,7 @@ void add_type(ID* id, BroType* t, attr_list* attr) tnew = t; else // Clone the type to preserve type name aliasing. - tnew = t->Clone(); + tnew = t->ShallowClone(); BroType::AddAlias(new_type_name, tnew); @@ -325,8 +303,7 @@ static void transfer_arg_defaults(RecordType* args, RecordType* recv) if ( ! 
recv_i->attrs ) { - attr_list* a = new attr_list(); - a->append(def); + attr_list* a = new attr_list{def}; recv_i->attrs = new Attributes(a, recv_i->type, true); } @@ -335,16 +312,21 @@ static void transfer_arg_defaults(RecordType* args, RecordType* recv) } } -static bool has_attr(const attr_list* al, attr_tag tag) +static Attr* find_attr(const attr_list* al, attr_tag tag) { if ( ! al ) - return false; + return nullptr; for ( int i = 0; i < al->length(); ++i ) if ( (*al)[i]->Tag() == tag ) - return true; + return (*al)[i]; - return false; + return nullptr; + } + +static bool has_attr(const attr_list* al, attr_tag tag) + { + return find_attr(al, tag) != nullptr; } void begin_func(ID* id, const char* module_name, function_flavor flavor, @@ -421,8 +403,8 @@ void begin_func(ID* id, const char* module_name, function_flavor flavor, arg_id->SetType(arg_i->type->Ref()); } - if ( has_attr(attrs, ATTR_DEPRECATED) ) - id->MakeDeprecated(); + if ( Attr* depr_attr = find_attr(attrs, ATTR_DEPRECATED) ) + id->MakeDeprecated(depr_attr->AttrExpr()); } class OuterIDBindingFinder : public TraversalCallback { diff --git a/src/analyzer/Analyzer.cc b/src/analyzer/Analyzer.cc index 818dd917e8..9977d44e70 100644 --- a/src/analyzer/Analyzer.cc +++ b/src/analyzer/Analyzer.cc @@ -662,20 +662,26 @@ void Analyzer::ProtocolConfirmation(Tag arg_tag) if ( protocol_confirmed ) return; + protocol_confirmed = true; + + if ( ! protocol_confirmation ) + return; + EnumVal* tval = arg_tag ? arg_tag.AsEnumVal() : tag.AsEnumVal(); Ref(tval); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(tval); - vl->append(val_mgr->GetCount(id)); - mgr.QueueEvent(protocol_confirmation, vl); - - protocol_confirmed = true; + mgr.QueueEventFast(protocol_confirmation, { + BuildConnVal(), + tval, + val_mgr->GetCount(id), + }); } void Analyzer::ProtocolViolation(const char* reason, const char* data, int len) { + if ( ! protocol_violation ) + return; + StringVal* r; if ( data && len ) @@ -692,12 +698,12 @@ void Analyzer::ProtocolViolation(const char* reason, const char* data, int len) EnumVal* tval = tag.AsEnumVal(); Ref(tval); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(tval); - vl->append(val_mgr->GetCount(id)); - vl->append(r); - mgr.QueueEvent(protocol_violation, vl); + mgr.QueueEventFast(protocol_violation, { + BuildConnVal(), + tval, + val_mgr->GetCount(id), + r, + }); } void Analyzer::AddTimer(analyzer_timer_func timer, double t, @@ -782,6 +788,16 @@ void Analyzer::ConnectionEvent(EventHandlerPtr f, val_list* vl) conn->ConnectionEvent(f, this, vl); } +void Analyzer::ConnectionEvent(EventHandlerPtr f, val_list vl) + { + conn->ConnectionEvent(f, this, std::move(vl)); + } + +void Analyzer::ConnectionEventFast(EventHandlerPtr f, val_list vl) + { + conn->ConnectionEventFast(f, this, std::move(vl)); + } + void Analyzer::Weird(const char* name, const char* addl) { conn->Weird(name, addl); diff --git a/src/analyzer/Analyzer.h b/src/analyzer/Analyzer.h index a13df7e21e..141d420a82 100644 --- a/src/analyzer/Analyzer.h +++ b/src/analyzer/Analyzer.h @@ -541,6 +541,18 @@ public: */ void ConnectionEvent(EventHandlerPtr f, val_list* vl); + /** + * Convenience function that forwards directly to + * Connection::ConnectionEvent(). + */ + void ConnectionEvent(EventHandlerPtr f, val_list vl); + + /** + * Convenience function that forwards directly to + * Connection::ConnectionEventFast(). 
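Taken together, the Analyzer changes above establish the idiom that the remainder of this patch applies to every protocol analyzer: return before building any arguments when no script handles the event, then queue through the new by-value ConnectionEventFast()/QueueEventFast() overloads with a brace-initialized val_list. A minimal sketch under those assumptions (MyAnalyzer, EmitThing, and the my_thing_event handler are placeholders):

    // Old idiom, as removed throughout this patch:
    //   val_list* vl = new val_list;
    //   vl->append(BuildConnVal());
    //   vl->append(val_mgr->GetCount(n));
    //   ConnectionEvent(my_thing_event, vl);

    void MyAnalyzer::EmitThing(uint64 n)
    	{
    	if ( ! my_thing_event )
    		return;  // skip BuildConnVal() and friends entirely when unhandled

    	ConnectionEventFast(my_thing_event, {
    		BuildConnVal(),
    		val_mgr->GetCount(n),
    		});
    	}

The early handler check is why the hunks that follow hoist "if ( ! <event> ) return;" above the argument construction, and why values that were already built before the check get Unref()'d in the new else branches.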
+ */ + void ConnectionEventFast(EventHandlerPtr f, val_list vl); + /** * Convenience function that forwards directly to the corresponding * Connection::Weird(). diff --git a/src/analyzer/CMakeLists.txt b/src/analyzer/CMakeLists.txt index 20b53d7ca8..4dc2830737 100644 --- a/src/analyzer/CMakeLists.txt +++ b/src/analyzer/CMakeLists.txt @@ -1,5 +1,5 @@ -include(BroSubdir) +include(ZeekSubdir) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} diff --git a/src/analyzer/Component.h b/src/analyzer/Component.h index c52bf05fc6..74224e4ba4 100644 --- a/src/analyzer/Component.h +++ b/src/analyzer/Component.h @@ -7,7 +7,7 @@ #include "plugin/Component.h" #include "plugin/TaggedComponent.h" -#include "../bro-config.h" +#include "../zeek-config.h" #include "../util.h" class Connection; diff --git a/src/analyzer/Manager.cc b/src/analyzer/Manager.cc index 1546f846e5..c7e156b41e 100644 --- a/src/analyzer/Manager.cc +++ b/src/analyzer/Manager.cc @@ -113,7 +113,7 @@ void Manager::InitPostScript() void Manager::DumpDebug() { #ifdef DEBUG - DBG_LOG(DBG_ANALYZER, "Available analyzers after bro_init():"); + DBG_LOG(DBG_ANALYZER, "Available analyzers after zeek_init():"); list all_analyzers = GetComponents(); for ( list::const_iterator i = all_analyzers.begin(); i != all_analyzers.end(); ++i ) DBG_LOG(DBG_ANALYZER, " %s (%s)", (*i)->Name().c_str(), diff --git a/src/analyzer/Manager.h b/src/analyzer/Manager.h index 7f58a45cbf..8f6d982394 100644 --- a/src/analyzer/Manager.h +++ b/src/analyzer/Manager.h @@ -78,10 +78,10 @@ public: /** * Dumps out the state of all registered analyzers to the \c analyzer - * debug stream. Should be called only after any \c bro_init events + * debug stream. Should be called only after any \c zeek_init events * have executed to ensure that any of their changes are applied. */ - void DumpDebug(); // Called after bro_init() events. + void DumpDebug(); // Called after zeek_init() events. /** * Enables an analyzer type. Only enabled analyzers will be diff --git a/src/analyzer/Tag.h b/src/analyzer/Tag.h index 926196c747..92aff38189 100644 --- a/src/analyzer/Tag.h +++ b/src/analyzer/Tag.h @@ -3,7 +3,7 @@ #ifndef ANALYZER_TAG_H #define ANALYZER_TAG_H -#include "bro-config.h" +#include "zeek-config.h" #include "util.h" #include "../Tag.h" #include "plugin/TaggedComponent.h" diff --git a/src/analyzer/protocol/CMakeLists.txt b/src/analyzer/protocol/CMakeLists.txt index 882ba23da9..8ebded627b 100644 --- a/src/analyzer/protocol/CMakeLists.txt +++ b/src/analyzer/protocol/CMakeLists.txt @@ -36,9 +36,9 @@ add_subdirectory(rdp) add_subdirectory(rfb) add_subdirectory(rpc) add_subdirectory(sip) -add_subdirectory(snmp) add_subdirectory(smb) add_subdirectory(smtp) +add_subdirectory(snmp) add_subdirectory(socks) add_subdirectory(ssh) add_subdirectory(ssl) diff --git a/src/analyzer/protocol/arp/ARP.cc b/src/analyzer/protocol/arp/ARP.cc index 83166bd149..d3a4ab688f 100644 --- a/src/analyzer/protocol/arp/ARP.cc +++ b/src/analyzer/protocol/arp/ARP.cc @@ -190,13 +190,13 @@ void ARP_Analyzer::BadARP(const struct arp_pkthdr* hdr, const char* msg) if ( ! 
bad_arp ) return; - val_list* vl = new val_list; - vl->append(ConstructAddrVal(ar_spa(hdr))); - vl->append(EthAddrToStr((const u_char*) ar_sha(hdr))); - vl->append(ConstructAddrVal(ar_tpa(hdr))); - vl->append(EthAddrToStr((const u_char*) ar_tha(hdr))); - vl->append(new StringVal(msg)); - mgr.QueueEvent(bad_arp, vl); + mgr.QueueEventFast(bad_arp, { + ConstructAddrVal(ar_spa(hdr)), + EthAddrToStr((const u_char*) ar_sha(hdr)), + ConstructAddrVal(ar_tpa(hdr)), + EthAddrToStr((const u_char*) ar_tha(hdr)), + new StringVal(msg), + }); } void ARP_Analyzer::Corrupted(const char* msg) @@ -212,18 +212,14 @@ void ARP_Analyzer::RREvent(EventHandlerPtr e, if ( ! e ) return; - // init the val_list - val_list* vl = new val_list; - - // prepare the event arguments - vl->append(EthAddrToStr(src)); - vl->append(EthAddrToStr(dst)); - vl->append(ConstructAddrVal(spa)); - vl->append(EthAddrToStr((const u_char*) sha)); - vl->append(ConstructAddrVal(tpa)); - vl->append(EthAddrToStr((const u_char*) tha)); - - mgr.QueueEvent(e, vl); + mgr.QueueEventFast(e, { + EthAddrToStr(src), + EthAddrToStr(dst), + ConstructAddrVal(spa), + EthAddrToStr((const u_char*) sha), + ConstructAddrVal(tpa), + EthAddrToStr((const u_char*) tha), + }); } AddrVal* ARP_Analyzer::ConstructAddrVal(const void* addr) diff --git a/src/analyzer/protocol/arp/ARP.h b/src/analyzer/protocol/arp/ARP.h index 86ea14d694..34c944724a 100644 --- a/src/analyzer/protocol/arp/ARP.h +++ b/src/analyzer/protocol/arp/ARP.h @@ -3,7 +3,7 @@ #ifndef ANALYZER_PROTOCOL_ARP_ARP_H #define ANALYZER_PROTOCOL_ARP_ARP_H -#include "bro-config.h" +#include "zeek-config.h" #include #include #include diff --git a/src/analyzer/protocol/arp/CMakeLists.txt b/src/analyzer/protocol/arp/CMakeLists.txt index eec6755a18..0b911b1979 100644 --- a/src/analyzer/protocol/arp/CMakeLists.txt +++ b/src/analyzer/protocol/arp/CMakeLists.txt @@ -4,12 +4,12 @@ # it's also parsing a protocol just like them. The current structure # is merely a left-over from when this code was written. -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro ARP) -bro_plugin_cc(ARP.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek ARP) +zeek_plugin_cc(ARP.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/arp/Plugin.cc b/src/analyzer/protocol/arp/Plugin.cc index d0297d5f78..0ba8648b30 100644 --- a/src/analyzer/protocol/arp/Plugin.cc +++ b/src/analyzer/protocol/arp/Plugin.cc @@ -4,14 +4,14 @@ #include "plugin/Plugin.h" namespace plugin { -namespace Bro_ARP { +namespace Zeek_ARP { class Plugin : public plugin::Plugin { public: plugin::Configuration Configure() { plugin::Configuration config; - config.name = "Bro::ARP"; + config.name = "Zeek::ARP"; config.description = "ARP Parsing"; return config; } diff --git a/src/analyzer/protocol/arp/events.bif b/src/analyzer/protocol/arp/events.bif index efee33d7f4..f8c6394455 100644 --- a/src/analyzer/protocol/arp/events.bif +++ b/src/analyzer/protocol/arp/events.bif @@ -15,7 +15,7 @@ ## ## THA: The target hardware address. ## -## .. bro:see:: arp_reply bad_arp +## .. zeek:see:: arp_reply bad_arp event arp_request%(mac_src: string, mac_dst: string, SPA: addr, SHA: string, TPA: addr, THA: string%); @@ -36,11 +36,11 @@ event arp_request%(mac_src: string, mac_dst: string, SPA: addr, SHA: string, ## ## THA: The target hardware address. ## -## .. bro:see:: arp_request bad_arp +## .. 
zeek:see:: arp_request bad_arp event arp_reply%(mac_src: string, mac_dst: string, SPA: addr, SHA: string, TPA: addr, THA: string%); -## Generated for ARP packets that Bro cannot interpret. Examples are packets +## Generated for ARP packets that Zeek cannot interpret. Examples are packets ## with non-standard hardware address formats or hardware addresses that do not ## match the originator of the packet. ## @@ -54,10 +54,10 @@ event arp_reply%(mac_src: string, mac_dst: string, SPA: addr, SHA: string, ## ## explanation: A short description of why the ARP packet is considered "bad". ## -## .. bro:see:: arp_reply arp_request +## .. zeek:see:: arp_reply arp_request ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event bad_arp%(SPA: addr, SHA: string, TPA: addr, THA: string, explanation: string%); diff --git a/src/analyzer/protocol/ayiya/CMakeLists.txt b/src/analyzer/protocol/ayiya/CMakeLists.txt index 50113b72d7..480d0bdfeb 100644 --- a/src/analyzer/protocol/ayiya/CMakeLists.txt +++ b/src/analyzer/protocol/ayiya/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro AYIYA) -bro_plugin_cc(AYIYA.cc Plugin.cc) -bro_plugin_pac(ayiya.pac ayiya-protocol.pac ayiya-analyzer.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek AYIYA) +zeek_plugin_cc(AYIYA.cc Plugin.cc) +zeek_plugin_pac(ayiya.pac ayiya-protocol.pac ayiya-analyzer.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/ayiya/Plugin.cc b/src/analyzer/protocol/ayiya/Plugin.cc index 7b660722e4..2b4b8ee7d9 100644 --- a/src/analyzer/protocol/ayiya/Plugin.cc +++ b/src/analyzer/protocol/ayiya/Plugin.cc @@ -6,7 +6,7 @@ #include "AYIYA.h" namespace plugin { -namespace Bro_AYIYA { +namespace Zeek_AYIYA { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("AYIYA", ::analyzer::ayiya::AYIYA_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::AYIYA"; + config.name = "Zeek::AYIYA"; config.description = "AYIYA Analyzer"; return config; } diff --git a/src/analyzer/protocol/backdoor/BackDoor.cc b/src/analyzer/protocol/backdoor/BackDoor.cc index ecfb660b94..2e8d47d1d0 100644 --- a/src/analyzer/protocol/backdoor/BackDoor.cc +++ b/src/analyzer/protocol/backdoor/BackDoor.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include "BackDoor.h" #include "Event.h" @@ -246,13 +246,15 @@ void BackDoorEndpoint::RloginSignatureFound(int len) rlogin_checking_done = 1; - val_list* vl = new val_list; - vl->append(endp->TCP()->BuildConnVal()); - vl->append(val_mgr->GetBool(endp->IsOrig())); - vl->append(val_mgr->GetCount(rlogin_num_null)); - vl->append(val_mgr->GetCount(len)); + if ( ! 
rlogin_signature_found ) + return; - endp->TCP()->ConnectionEvent(rlogin_signature_found, vl); + endp->TCP()->ConnectionEventFast(rlogin_signature_found, { + endp->TCP()->BuildConnVal(), + val_mgr->GetBool(endp->IsOrig()), + val_mgr->GetCount(rlogin_num_null), + val_mgr->GetCount(len), + }); } void BackDoorEndpoint::CheckForTelnet(uint64 /* seq */, int len, const u_char* data) @@ -338,12 +340,14 @@ void BackDoorEndpoint::CheckForTelnet(uint64 /* seq */, int len, const u_char* d void BackDoorEndpoint::TelnetSignatureFound(int len) { - val_list* vl = new val_list; - vl->append(endp->TCP()->BuildConnVal()); - vl->append(val_mgr->GetBool(endp->IsOrig())); - vl->append(val_mgr->GetCount(len)); + if ( ! telnet_signature_found ) + return; - endp->TCP()->ConnectionEvent(telnet_signature_found, vl); + endp->TCP()->ConnectionEventFast(telnet_signature_found, { + endp->TCP()->BuildConnVal(), + val_mgr->GetBool(endp->IsOrig()), + val_mgr->GetCount(len), + }); } void BackDoorEndpoint::CheckForSSH(uint64 seq, int len, const u_char* data) @@ -643,13 +647,15 @@ void BackDoorEndpoint::CheckForHTTPProxy(uint64 /* seq */, int len, void BackDoorEndpoint::SignatureFound(EventHandlerPtr e, int do_orig) { - val_list* vl = new val_list; - vl->append(endp->TCP()->BuildConnVal()); + if ( ! e ) + return; if ( do_orig ) - vl->append(val_mgr->GetBool(endp->IsOrig())); + endp->TCP()->ConnectionEventFast(e, + {endp->TCP()->BuildConnVal(), val_mgr->GetBool(endp->IsOrig())}); - endp->TCP()->ConnectionEvent(e, vl); + else + endp->TCP()->ConnectionEventFast(e, {endp->TCP()->BuildConnVal()}); } @@ -776,20 +782,22 @@ void BackDoor_Analyzer::StatTimer(double t, int is_expire) void BackDoor_Analyzer::StatEvent() { - val_list* vl = new val_list; - vl->append(TCP()->BuildConnVal()); - vl->append(orig_endp->BuildStats()); - vl->append(resp_endp->BuildStats()); + if ( ! backdoor_stats ) + return; - TCP()->ConnectionEvent(backdoor_stats, vl); + TCP()->ConnectionEventFast(backdoor_stats, { + TCP()->BuildConnVal(), + orig_endp->BuildStats(), + resp_endp->BuildStats(), + }); } void BackDoor_Analyzer::RemoveEvent() { - val_list* vl = new val_list; - vl->append(TCP()->BuildConnVal()); + if ( ! 
backdoor_remove_conn ) + return; - TCP()->ConnectionEvent(backdoor_remove_conn, vl); + TCP()->ConnectionEventFast(backdoor_remove_conn, {TCP()->BuildConnVal()}); } BackDoorTimer::BackDoorTimer(double t, BackDoor_Analyzer* a) diff --git a/src/analyzer/protocol/backdoor/CMakeLists.txt b/src/analyzer/protocol/backdoor/CMakeLists.txt index 5df04769f6..66511d3d99 100644 --- a/src/analyzer/protocol/backdoor/CMakeLists.txt +++ b/src/analyzer/protocol/backdoor/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro BackDoor) -bro_plugin_cc(BackDoor.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek BackDoor) +zeek_plugin_cc(BackDoor.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/backdoor/Plugin.cc b/src/analyzer/protocol/backdoor/Plugin.cc index 111ba70709..aeec615c50 100644 --- a/src/analyzer/protocol/backdoor/Plugin.cc +++ b/src/analyzer/protocol/backdoor/Plugin.cc @@ -6,7 +6,7 @@ #include "BackDoor.h" namespace plugin { -namespace Bro_BackDoor { +namespace Zeek_BackDoor { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("BackDoor", ::analyzer::backdoor::BackDoor_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::BackDoor"; + config.name = "Zeek::BackDoor"; config.description = "Backdoor Analyzer deprecated"; return config; } diff --git a/src/analyzer/protocol/bittorrent/BitTorrent.cc b/src/analyzer/protocol/bittorrent/BitTorrent.cc index 652d3d120c..c57d694c6e 100644 --- a/src/analyzer/protocol/bittorrent/BitTorrent.cc +++ b/src/analyzer/protocol/bittorrent/BitTorrent.cc @@ -120,10 +120,10 @@ void BitTorrent_Analyzer::DeliverWeird(const char* msg, bool orig) { if ( bittorrent_peer_weird ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(msg)); - ConnectionEvent(bittorrent_peer_weird, vl); + ConnectionEventFast(bittorrent_peer_weird, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(msg), + }); } } diff --git a/src/analyzer/protocol/bittorrent/BitTorrentTracker.cc b/src/analyzer/protocol/bittorrent/BitTorrentTracker.cc index 54cac790fb..a1a40e8d56 100644 --- a/src/analyzer/protocol/bittorrent/BitTorrentTracker.cc +++ b/src/analyzer/protocol/bittorrent/BitTorrentTracker.cc @@ -247,11 +247,11 @@ void BitTorrentTracker_Analyzer::DeliverWeird(const char* msg, bool orig) { if ( bt_tracker_weird ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(msg)); - ConnectionEvent(bt_tracker_weird, vl); + ConnectionEventFast(bt_tracker_weird, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(msg), + }); } } @@ -346,19 +346,17 @@ void BitTorrentTracker_Analyzer::RequestGet(char* uri) void BitTorrentTracker_Analyzer::EmitRequest(void) { - val_list* vl; - ProtocolConfirmation(); - vl = new val_list; - vl->append(BuildConnVal()); - vl->append(req_val_uri); - vl->append(req_val_headers); + if ( bt_tracker_request ) + ConnectionEventFast(bt_tracker_request, { + BuildConnVal(), + req_val_uri, + req_val_headers, + }); req_val_uri = 0; req_val_headers = 0; - - ConnectionEvent(bt_tracker_request, vl); } bool BitTorrentTracker_Analyzer::ParseResponse(char* line) @@ -404,11 +402,12 @@ bool BitTorrentTracker_Analyzer::ParseResponse(char* line) { if ( 
res_status != 200 ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetCount(res_status)); - vl->append(res_val_headers); - ConnectionEvent(bt_tracker_response_not_ok, vl); + if ( bt_tracker_response_not_ok ) + ConnectionEventFast(bt_tracker_response_not_ok, { + BuildConnVal(), + val_mgr->GetCount(res_status), + res_val_headers, + }); res_val_headers = 0; res_buf_pos = res_buf + res_buf_len; res_state = BTT_RES_DONE; @@ -790,16 +789,16 @@ void BitTorrentTracker_Analyzer::EmitResponse(void) { ProtocolConfirmation(); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetCount(res_status)); - vl->append(res_val_headers); - vl->append(res_val_peers); - vl->append(res_val_benc); + if ( bt_tracker_response ) + ConnectionEventFast(bt_tracker_response, { + BuildConnVal(), + val_mgr->GetCount(res_status), + res_val_headers, + res_val_peers, + res_val_benc, + }); res_val_headers = 0; res_val_peers = 0; res_val_benc = 0; - - ConnectionEvent(bt_tracker_response, vl); } diff --git a/src/analyzer/protocol/bittorrent/CMakeLists.txt b/src/analyzer/protocol/bittorrent/CMakeLists.txt index 630ea03498..ca7c9b9e36 100644 --- a/src/analyzer/protocol/bittorrent/CMakeLists.txt +++ b/src/analyzer/protocol/bittorrent/CMakeLists.txt @@ -1,10 +1,10 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro BitTorrent) -bro_plugin_cc(BitTorrent.cc BitTorrentTracker.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_pac(bittorrent.pac bittorrent-analyzer.pac bittorrent-protocol.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek BitTorrent) +zeek_plugin_cc(BitTorrent.cc BitTorrentTracker.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_pac(bittorrent.pac bittorrent-analyzer.pac bittorrent-protocol.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/bittorrent/Plugin.cc b/src/analyzer/protocol/bittorrent/Plugin.cc index b663dde25d..14f778ac9f 100644 --- a/src/analyzer/protocol/bittorrent/Plugin.cc +++ b/src/analyzer/protocol/bittorrent/Plugin.cc @@ -7,7 +7,7 @@ #include "BitTorrentTracker.h" namespace plugin { -namespace Bro_BitTorrent { +namespace Zeek_BitTorrent { class Plugin : public plugin::Plugin { public: @@ -17,7 +17,7 @@ public: AddComponent(new ::analyzer::Component("BitTorrentTracker", ::analyzer::bittorrent::BitTorrentTracker_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::BitTorrent"; + config.name = "Zeek::BitTorrent"; config.description = "BitTorrent Analyzer"; return config; } diff --git a/src/analyzer/protocol/bittorrent/events.bif b/src/analyzer/protocol/bittorrent/events.bif index 8c4ddc146f..d86b497437 100644 --- a/src/analyzer/protocol/bittorrent/events.bif +++ b/src/analyzer/protocol/bittorrent/events.bif @@ -3,7 +3,7 @@ ## See `Wikipedia `__ for ## more information about the BitTorrent protocol. ## -## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke +## .. zeek:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_have bittorrent_peer_interested bittorrent_peer_keep_alive ## bittorrent_peer_not_interested bittorrent_peer_piece bittorrent_peer_port ## bittorrent_peer_request bittorrent_peer_unchoke bittorrent_peer_unknown @@ -16,7 +16,7 @@ event bittorrent_peer_handshake%(c: connection, is_orig: bool, ## See `Wikipedia `__ for ## more information about the BitTorrent protocol. ## -## .. 
bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke +## .. zeek:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested ## bittorrent_peer_not_interested bittorrent_peer_piece bittorrent_peer_port ## bittorrent_peer_request bittorrent_peer_unchoke bittorrent_peer_unknown @@ -28,7 +28,7 @@ event bittorrent_peer_keep_alive%(c: connection, is_orig: bool%); ## See `Wikipedia `__ for ## more information about the BitTorrent protocol. ## -## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel +## .. zeek:see:: bittorrent_peer_bitfield bittorrent_peer_cancel ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested ## bittorrent_peer_keep_alive bittorrent_peer_not_interested bittorrent_peer_piece ## bittorrent_peer_port bittorrent_peer_request bittorrent_peer_unchoke @@ -40,7 +40,7 @@ event bittorrent_peer_choke%(c: connection, is_orig: bool%); ## See `Wikipedia `__ for ## more information about the BitTorrent protocol. ## -## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke +## .. zeek:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested ## bittorrent_peer_keep_alive bittorrent_peer_not_interested bittorrent_peer_piece ## bittorrent_peer_port bittorrent_peer_request @@ -52,7 +52,7 @@ event bittorrent_peer_unchoke%(c: connection, is_orig: bool%); ## See `Wikipedia `__ for ## more information about the BitTorrent protocol. ## -## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke +## .. zeek:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_keep_alive ## bittorrent_peer_not_interested bittorrent_peer_piece bittorrent_peer_port ## bittorrent_peer_request bittorrent_peer_unchoke bittorrent_peer_unknown @@ -64,7 +64,7 @@ event bittorrent_peer_interested%(c: connection, is_orig: bool%); ## See `Wikipedia `__ for ## more information about the BitTorrent protocol. ## -## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke +## .. zeek:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested ## bittorrent_peer_keep_alive bittorrent_peer_piece bittorrent_peer_port ## bittorrent_peer_request bittorrent_peer_unchoke bittorrent_peer_unknown @@ -76,7 +76,7 @@ event bittorrent_peer_not_interested%(c: connection, is_orig: bool%); ## See `Wikipedia `__ for ## more information about the BitTorrent protocol. ## -## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke +## .. zeek:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_interested bittorrent_peer_keep_alive ## bittorrent_peer_not_interested bittorrent_peer_piece bittorrent_peer_port ## bittorrent_peer_request bittorrent_peer_unchoke bittorrent_peer_unknown @@ -88,7 +88,7 @@ event bittorrent_peer_have%(c: connection, is_orig: bool, piece_index: count%); ## See `Wikipedia `__ for ## more information about the BitTorrent protocol. ## -## .. bro:see:: bittorrent_peer_cancel bittorrent_peer_choke bittorrent_peer_handshake +## .. 
zeek:see:: bittorrent_peer_cancel bittorrent_peer_choke bittorrent_peer_handshake ## bittorrent_peer_have bittorrent_peer_interested bittorrent_peer_keep_alive ## bittorrent_peer_not_interested bittorrent_peer_piece bittorrent_peer_port ## bittorrent_peer_request bittorrent_peer_unchoke bittorrent_peer_unknown @@ -100,7 +100,7 @@ event bittorrent_peer_bitfield%(c: connection, is_orig: bool, bitfield: string%) ## See `Wikipedia `__ for ## more information about the BitTorrent protocol. ## -## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke +## .. zeek:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested ## bittorrent_peer_keep_alive bittorrent_peer_not_interested bittorrent_peer_piece ## bittorrent_peer_port bittorrent_peer_unchoke bittorrent_peer_unknown @@ -113,7 +113,7 @@ event bittorrent_peer_request%(c: connection, is_orig: bool, index: count, ## See `Wikipedia `__ for ## more information about the BitTorrent protocol. ## -## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke +## .. zeek:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested ## bittorrent_peer_keep_alive bittorrent_peer_not_interested bittorrent_peer_port ## bittorrent_peer_request bittorrent_peer_unchoke bittorrent_peer_unknown @@ -126,7 +126,7 @@ event bittorrent_peer_piece%(c: connection, is_orig: bool, index: count, ## See `Wikipedia `__ for ## more information about the BitTorrent protocol. ## -## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_choke +## .. zeek:see:: bittorrent_peer_bitfield bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested ## bittorrent_peer_keep_alive bittorrent_peer_not_interested bittorrent_peer_piece ## bittorrent_peer_port bittorrent_peer_request bittorrent_peer_unchoke @@ -139,7 +139,7 @@ event bittorrent_peer_cancel%(c: connection, is_orig: bool, index: count, ## See `Wikipedia `__ for ## more information about the BitTorrent protocol. ## -## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke +## .. zeek:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested ## bittorrent_peer_keep_alive bittorrent_peer_not_interested bittorrent_peer_piece ## bittorrent_peer_request bittorrent_peer_unchoke bittorrent_peer_unknown @@ -151,7 +151,7 @@ event bittorrent_peer_port%(c: connection, is_orig: bool, listen_port: port%); ## See `Wikipedia `__ for ## more information about the BitTorrent protocol. ## -## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke +## .. zeek:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested ## bittorrent_peer_keep_alive bittorrent_peer_not_interested bittorrent_peer_piece ## bittorrent_peer_port bittorrent_peer_request bittorrent_peer_unchoke @@ -164,7 +164,7 @@ event bittorrent_peer_unknown%(c: connection, is_orig: bool, message_id: count, ## See `Wikipedia `__ for ## more information about the BitTorrent protocol. ## -## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke +## .. 
zeek:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested ## bittorrent_peer_keep_alive bittorrent_peer_not_interested bittorrent_peer_piece ## bittorrent_peer_port bittorrent_peer_request bittorrent_peer_unchoke @@ -176,7 +176,7 @@ event bittorrent_peer_weird%(c: connection, is_orig: bool, msg: string%); ## See `Wikipedia `__ for ## more information about the BitTorrent protocol. ## -## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke +## .. zeek:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested ## bittorrent_peer_keep_alive bittorrent_peer_not_interested bittorrent_peer_piece ## bittorrent_peer_port bittorrent_peer_request bittorrent_peer_unchoke @@ -189,7 +189,7 @@ event bt_tracker_request%(c: connection, uri: string, ## See `Wikipedia `__ for ## more information about the BitTorrent protocol. ## -## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke +## .. zeek:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested ## bittorrent_peer_keep_alive bittorrent_peer_not_interested bittorrent_peer_piece ## bittorrent_peer_port bittorrent_peer_request bittorrent_peer_unchoke @@ -204,7 +204,7 @@ event bt_tracker_response%(c: connection, status: count, ## See `Wikipedia `__ for ## more information about the BitTorrent protocol. ## -## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke +## .. zeek:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested ## bittorrent_peer_keep_alive bittorrent_peer_not_interested bittorrent_peer_piece ## bittorrent_peer_port bittorrent_peer_request bittorrent_peer_unchoke @@ -217,7 +217,7 @@ event bt_tracker_response_not_ok%(c: connection, status: count, ## See `Wikipedia `__ for ## more information about the BitTorrent protocol. ## -## .. bro:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke +## .. 
zeek:see:: bittorrent_peer_bitfield bittorrent_peer_cancel bittorrent_peer_choke ## bittorrent_peer_handshake bittorrent_peer_have bittorrent_peer_interested ## bittorrent_peer_keep_alive bittorrent_peer_not_interested bittorrent_peer_piece ## bittorrent_peer_port bittorrent_peer_request bittorrent_peer_unchoke diff --git a/src/analyzer/protocol/conn-size/CMakeLists.txt b/src/analyzer/protocol/conn-size/CMakeLists.txt index f89a6e5b88..30b1bedab3 100644 --- a/src/analyzer/protocol/conn-size/CMakeLists.txt +++ b/src/analyzer/protocol/conn-size/CMakeLists.txt @@ -1,10 +1,10 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro ConnSize) -bro_plugin_cc(ConnSize.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_bif(functions.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek ConnSize) +zeek_plugin_cc(ConnSize.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_bif(functions.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/conn-size/ConnSize.cc b/src/analyzer/protocol/conn-size/ConnSize.cc index 52d81e3111..1b18335e7f 100644 --- a/src/analyzer/protocol/conn-size/ConnSize.cc +++ b/src/analyzer/protocol/conn-size/ConnSize.cc @@ -47,11 +47,11 @@ void ConnSize_Analyzer::ThresholdEvent(EventHandlerPtr f, uint64 threshold, bool if ( ! f ) return; - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetCount(threshold)); - vl->append(val_mgr->GetBool(is_orig)); - ConnectionEvent(f, vl); + ConnectionEventFast(f, { + BuildConnVal(), + val_mgr->GetCount(threshold), + val_mgr->GetBool(is_orig), + }); } void ConnSize_Analyzer::CheckSizes(bool is_orig) diff --git a/src/analyzer/protocol/conn-size/Plugin.cc b/src/analyzer/protocol/conn-size/Plugin.cc index d373ce5d4a..ce1b600da2 100644 --- a/src/analyzer/protocol/conn-size/Plugin.cc +++ b/src/analyzer/protocol/conn-size/Plugin.cc @@ -6,7 +6,7 @@ #include "ConnSize.h" namespace plugin { -namespace Bro_ConnSize { +namespace Zeek_ConnSize { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("ConnSize", ::analyzer::conn_size::ConnSize_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::ConnSize"; + config.name = "Zeek::ConnSize"; config.description = "Connection size analyzer"; return config; } diff --git a/src/analyzer/protocol/conn-size/events.bif b/src/analyzer/protocol/conn-size/events.bif index 38b263db57..9b1007ec3b 100644 --- a/src/analyzer/protocol/conn-size/events.bif +++ b/src/analyzer/protocol/conn-size/events.bif @@ -8,7 +8,7 @@ ## ## is_orig: true if the threshold was crossed by the originator of the connection ## -## .. bro:see:: set_current_conn_packets_threshold set_current_conn_bytes_threshold conn_packets_threshold_crossed +## .. zeek:see:: set_current_conn_packets_threshold set_current_conn_bytes_threshold conn_packets_threshold_crossed ## get_current_conn_bytes_threshold get_current_conn_packets_threshold event conn_bytes_threshold_crossed%(c: connection, threshold: count, is_orig: bool%); @@ -22,6 +22,6 @@ event conn_bytes_threshold_crossed%(c: connection, threshold: count, is_orig: bo ## ## is_orig: true if the threshold was crossed by the originator of the connection ## -## .. bro:see:: set_current_conn_packets_threshold set_current_conn_bytes_threshold conn_bytes_threshold_crossed +## .. 
zeek:see:: set_current_conn_packets_threshold set_current_conn_bytes_threshold conn_bytes_threshold_crossed ## get_current_conn_bytes_threshold get_current_conn_packets_threshold event conn_packets_threshold_crossed%(c: connection, threshold: count, is_orig: bool%); diff --git a/src/analyzer/protocol/conn-size/functions.bif b/src/analyzer/protocol/conn-size/functions.bif index d4ad045da7..9dc91bb722 100644 --- a/src/analyzer/protocol/conn-size/functions.bif +++ b/src/analyzer/protocol/conn-size/functions.bif @@ -26,7 +26,7 @@ static analyzer::Analyzer* GetConnsizeAnalyzer(Val* cid) ## ## is_orig: If true, threshold is set for bytes from originator, otherwhise for bytes from responder. ## -## .. bro:see:: set_current_conn_packets_threshold conn_bytes_threshold_crossed conn_packets_threshold_crossed +## .. zeek:see:: set_current_conn_packets_threshold conn_bytes_threshold_crossed conn_packets_threshold_crossed ## get_current_conn_bytes_threshold get_current_conn_packets_threshold function set_current_conn_bytes_threshold%(cid: conn_id, threshold: count, is_orig: bool%): bool %{ @@ -49,7 +49,7 @@ function set_current_conn_bytes_threshold%(cid: conn_id, threshold: count, is_or ## ## is_orig: If true, threshold is set for packets from originator, otherwhise for packets from responder. ## -## .. bro:see:: set_current_conn_bytes_threshold conn_bytes_threshold_crossed conn_packets_threshold_crossed +## .. zeek:see:: set_current_conn_bytes_threshold conn_bytes_threshold_crossed conn_packets_threshold_crossed ## get_current_conn_bytes_threshold get_current_conn_packets_threshold function set_current_conn_packets_threshold%(cid: conn_id, threshold: count, is_orig: bool%): bool %{ @@ -70,7 +70,7 @@ function set_current_conn_packets_threshold%(cid: conn_id, threshold: count, is_ ## ## Returns: 0 if no threshold is set or the threshold in bytes ## -## .. bro:see:: set_current_conn_packets_threshold conn_bytes_threshold_crossed conn_packets_threshold_crossed +## .. zeek:see:: set_current_conn_packets_threshold conn_bytes_threshold_crossed conn_packets_threshold_crossed ## get_current_conn_packets_threshold function get_current_conn_bytes_threshold%(cid: conn_id, is_orig: bool%): count %{ @@ -89,7 +89,7 @@ function get_current_conn_bytes_threshold%(cid: conn_id, is_orig: bool%): count ## ## Returns: 0 if no threshold is set or the threshold in packets ## -## .. bro:see:: set_current_conn_packets_threshold conn_bytes_threshold_crossed conn_packets_threshold_crossed +## .. 
zeek:see:: set_current_conn_packets_threshold conn_bytes_threshold_crossed conn_packets_threshold_crossed ## get_current_conn_bytes_threshold function get_current_conn_packets_threshold%(cid: conn_id, is_orig: bool%): count %{ diff --git a/src/analyzer/protocol/dce-rpc/CMakeLists.txt b/src/analyzer/protocol/dce-rpc/CMakeLists.txt index 959e6ac87c..286f7fd0b2 100644 --- a/src/analyzer/protocol/dce-rpc/CMakeLists.txt +++ b/src/analyzer/protocol/dce-rpc/CMakeLists.txt @@ -1,12 +1,12 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro DCE_RPC) -bro_plugin_cc(DCE_RPC.cc Plugin.cc) -bro_plugin_bif(consts.bif types.bif events.bif) -bro_plugin_pac( +zeek_plugin_begin(Zeek DCE_RPC) +zeek_plugin_cc(DCE_RPC.cc Plugin.cc) +zeek_plugin_bif(consts.bif types.bif events.bif) +zeek_plugin_pac( dce_rpc.pac dce_rpc-protocol.pac dce_rpc-analyzer.pac @@ -14,5 +14,5 @@ bro_plugin_pac( endpoint-atsvc.pac endpoint-epmapper.pac ) -bro_plugin_end() +zeek_plugin_end() diff --git a/src/analyzer/protocol/dce-rpc/DCE_RPC.cc b/src/analyzer/protocol/dce-rpc/DCE_RPC.cc index f7a96fbb6e..0f401d75de 100644 --- a/src/analyzer/protocol/dce-rpc/DCE_RPC.cc +++ b/src/analyzer/protocol/dce-rpc/DCE_RPC.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include #include diff --git a/src/analyzer/protocol/dce-rpc/Plugin.cc b/src/analyzer/protocol/dce-rpc/Plugin.cc index c4d250921d..d821cbea2b 100644 --- a/src/analyzer/protocol/dce-rpc/Plugin.cc +++ b/src/analyzer/protocol/dce-rpc/Plugin.cc @@ -6,7 +6,7 @@ #include "DCE_RPC.h" namespace plugin { -namespace Bro_DCE_RPC { +namespace Zeek_DCE_RPC { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("DCE_RPC", ::analyzer::dce_rpc::DCE_RPC_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::DCE_RPC"; + config.name = "Zeek::DCE_RPC"; config.description = "DCE-RPC analyzer"; return config; } diff --git a/src/analyzer/protocol/dce-rpc/events.bif b/src/analyzer/protocol/dce-rpc/events.bif index 1e4a4e0d51..1f2b61255c 100644 --- a/src/analyzer/protocol/dce-rpc/events.bif +++ b/src/analyzer/protocol/dce-rpc/events.bif @@ -12,7 +12,7 @@ ## ## ptype: Enum representation of the prodecure type of the message. ## -## .. bro:see:: dce_rpc_bind dce_rpc_bind_ack dce_rpc_request dce_rpc_response +## .. zeek:see:: dce_rpc_bind dce_rpc_bind_ack dce_rpc_request dce_rpc_response event dce_rpc_message%(c: connection, is_orig: bool, fid: count, ptype_id: count, ptype: DCE_RPC::PType%); ## Generated for every :abbr:`DCE-RPC (Distributed Computing Environment/Remote Procedure Calls)` bind request message. @@ -33,7 +33,7 @@ event dce_rpc_message%(c: connection, is_orig: bool, fid: count, ptype_id: count ## ## ver_minor: The minor version of the endpoint being requested. ## -## .. bro:see:: dce_rpc_message dce_rpc_bind_ack dce_rpc_request dce_rpc_response +## .. zeek:see:: dce_rpc_message dce_rpc_bind_ack dce_rpc_request dce_rpc_response event dce_rpc_bind%(c: connection, fid: count, ctx_id: count, uuid: string, ver_major: count, ver_minor: count%); ## Generated for every :abbr:`DCE-RPC (Distributed Computing Environment/Remote Procedure Calls)` alter context request message. 
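The build and plugin hunks in this stretch are a mechanical rename: include(BroPlugin) and the bro_plugin_*() macros become include(ZeekPlugin) and zeek_plugin_*() in each CMakeLists.txt, and every analyzer's Plugin.cc moves from the Bro_* namespace and "Bro::*" component name to Zeek_*/"Zeek::*". A condensed sketch of the post-rename Plugin.cc shape, with Foo standing in for any analyzer (the class/instance layout mirrors the existing plugins; no new API is involved):

    #include "plugin/Plugin.h"
    #include "Foo.h"  // hypothetical analyzer header

    namespace plugin {
    namespace Zeek_Foo {                      // was: namespace Bro_Foo

    class Plugin : public plugin::Plugin {
    public:
    	plugin::Configuration Configure()
    		{
    		AddComponent(new ::analyzer::Component("Foo",
    		             ::analyzer::foo::Foo_Analyzer::Instantiate));

    		plugin::Configuration config;
    		config.name = "Zeek::Foo";        // was: "Bro::Foo"
    		config.description = "Foo analyzer";
    		return config;
    		}
    } plugin;

    }
    }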
@@ -54,7 +54,7 @@ event dce_rpc_bind%(c: connection, fid: count, ctx_id: count, uuid: string, ver_ ## ## ver_minor: The minor version of the endpoint being requested. ## -## .. bro:see:: dce_rpc_message dce_rpc_bind dce_rpc_bind_ack dce_rpc_request dce_rpc_response dce_rpc_alter_context_resp +## .. zeek:see:: dce_rpc_message dce_rpc_bind dce_rpc_bind_ack dce_rpc_request dce_rpc_response dce_rpc_alter_context_resp event dce_rpc_alter_context%(c: connection, fid: count, ctx_id: count, uuid: string, ver_major: count, ver_minor: count%); ## Generated for every :abbr:`DCE-RPC (Distributed Computing Environment/Remote Procedure Calls)` bind request ack message. @@ -67,7 +67,7 @@ event dce_rpc_alter_context%(c: connection, fid: count, ctx_id: count, uuid: str ## ## sec_addr: Secondary address for the ack. ## -## .. bro:see:: dce_rpc_message dce_rpc_bind dce_rpc_request dce_rpc_response +## .. zeek:see:: dce_rpc_message dce_rpc_bind dce_rpc_request dce_rpc_response event dce_rpc_bind_ack%(c: connection, fid: count, sec_addr: string%); ## Generated for every :abbr:`DCE-RPC (Distributed Computing Environment/Remote Procedure Calls)` alter context response message. @@ -78,7 +78,7 @@ event dce_rpc_bind_ack%(c: connection, fid: count, sec_addr: string%); ## message. Zero will be used if the :abbr:`DCE-RPC (Distributed Computing Environment/Remote Procedure Calls)` was ## not transported over a pipe. ## -## .. bro:see:: dce_rpc_message dce_rpc_bind dce_rpc_bind_ack dce_rpc_request dce_rpc_response dce_rpc_alter_context +## .. zeek:see:: dce_rpc_message dce_rpc_bind dce_rpc_bind_ack dce_rpc_request dce_rpc_response dce_rpc_alter_context event dce_rpc_alter_context_resp%(c: connection, fid: count%); ## Generated for every :abbr:`DCE-RPC (Distributed Computing Environment/Remote Procedure Calls)` request message. @@ -95,7 +95,7 @@ event dce_rpc_alter_context_resp%(c: connection, fid: count%); ## ## stub_len: Length of the data for the request. ## -## .. bro:see:: dce_rpc_message dce_rpc_bind dce_rpc_bind_ack dce_rpc_response +## .. zeek:see:: dce_rpc_message dce_rpc_bind dce_rpc_bind_ack dce_rpc_response event dce_rpc_request%(c: connection, fid: count, ctx_id: count, opnum: count, stub_len: count%); ## Generated for every :abbr:`DCE-RPC (Distributed Computing Environment/Remote Procedure Calls)` response message. @@ -112,5 +112,5 @@ event dce_rpc_request%(c: connection, fid: count, ctx_id: count, opnum: count, s ## ## stub_len: Length of the data for the response. ## -## .. bro:see:: dce_rpc_message dce_rpc_bind dce_rpc_bind_ack dce_rpc_request +## .. 
zeek:see:: dce_rpc_message dce_rpc_bind dce_rpc_bind_ack dce_rpc_request event dce_rpc_response%(c: connection, fid: count, ctx_id: count, opnum: count, stub_len: count%); diff --git a/src/analyzer/protocol/dhcp/CMakeLists.txt b/src/analyzer/protocol/dhcp/CMakeLists.txt index 6077adfeb6..8fa784b4be 100644 --- a/src/analyzer/protocol/dhcp/CMakeLists.txt +++ b/src/analyzer/protocol/dhcp/CMakeLists.txt @@ -1,11 +1,11 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro DHCP) -bro_plugin_cc(DHCP.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_bif(types.bif) -bro_plugin_pac(dhcp.pac dhcp-protocol.pac dhcp-analyzer.pac dhcp-options.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek DHCP) +zeek_plugin_cc(DHCP.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_bif(types.bif) +zeek_plugin_pac(dhcp.pac dhcp-protocol.pac dhcp-analyzer.pac dhcp-options.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/dhcp/Plugin.cc b/src/analyzer/protocol/dhcp/Plugin.cc index eecf6f9170..62318604c4 100644 --- a/src/analyzer/protocol/dhcp/Plugin.cc +++ b/src/analyzer/protocol/dhcp/Plugin.cc @@ -6,7 +6,7 @@ #include "DHCP.h" namespace plugin { -namespace Bro_DHCP { +namespace Zeek_DHCP { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("DHCP", ::analyzer::dhcp::DHCP_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::DHCP"; + config.name = "Zeek::DHCP"; config.description = "DHCP analyzer"; return config; } diff --git a/src/analyzer/protocol/dhcp/dhcp-options.pac b/src/analyzer/protocol/dhcp/dhcp-options.pac index 75236b311c..f1b45fbc6f 100644 --- a/src/analyzer/protocol/dhcp/dhcp-options.pac +++ b/src/analyzer/protocol/dhcp/dhcp-options.pac @@ -21,6 +21,29 @@ refine typeattr Option += &let { }; +############################## +# TIME OFFSET OPTION +############################## +let TIME_OFFSET_OPTION = 2; + +# Parse the option +refine casetype OptionValue += { + TIME_OFFSET_OPTION -> time_offset : int32; +}; + +refine flow DHCP_Flow += { + function process_time_offset_option(v: OptionValue): bool + %{ + ${context.flow}->options->Assign(25, val_mgr->GetInt(${v.time_offset})); + return true; + %} +}; + +refine typeattr Option += &let { + proc_timeoffset_option = $context.flow.process_time_offset_option(info.value) &if(code==TIME_OFFSET_OPTION); +}; + + ############################## # ROUTER OPTION ############################## @@ -55,6 +78,74 @@ refine typeattr Option += &let { }; +############################## +# TIME SERVER OPTION +############################## +let TIME_SERVER_OPTION = 4; + +# Parse the option +refine casetype OptionValue += { + TIME_SERVER_OPTION -> timeserver_list : uint32[length/4]; +}; + +refine flow DHCP_Flow += { + function process_timeserver_option(v: OptionValue): bool + %{ + VectorVal* timeserver_list = new VectorVal(BifType::Vector::DHCP::Addrs); + int num_servers = ${v.timeserver_list}->size(); + vector* rlist = ${v.timeserver_list}; + + for ( int i = 0; i < num_servers; ++i ) + { + uint32 raddr = (*rlist)[i]; + timeserver_list->Assign(i, new AddrVal(htonl(raddr))); + } + + ${context.flow}->options->Assign(26, timeserver_list); + + return true; + %} +}; + +refine typeattr Option += &let { + proc_timeserver_option = $context.flow.process_timeserver_option(info.value) &if(code==TIME_SERVER_OPTION); +}; + + +############################## +# NAME SERVER OPTION 
+############################## +let NAME_SERVER_OPTION = 5; + +# Parse the option +refine casetype OptionValue += { + NAME_SERVER_OPTION -> nameserver_list : uint32[length/4]; +}; + +refine flow DHCP_Flow += { + function process_nameserver_option(v: OptionValue): bool + %{ + VectorVal* nameserver_list = new VectorVal(BifType::Vector::DHCP::Addrs); + int num_servers = ${v.nameserver_list}->size(); + vector* rlist = ${v.nameserver_list}; + + for ( int i = 0; i < num_servers; ++i ) + { + uint32 raddr = (*rlist)[i]; + nameserver_list->Assign(i, new AddrVal(htonl(raddr))); + } + + ${context.flow}->options->Assign(27, nameserver_list); + + return true; + %} +}; + +refine typeattr Option += &let { + proc_nameserver_option = $context.flow.process_nameserver_option(info.value) &if(code==NAME_SERVER_OPTION); +}; + + ############################## # DNS SERVER OPTION ############################## @@ -194,6 +285,39 @@ refine typeattr Option += &let { }; +############################## +# NTP SERVER OPTION +############################## +let NTP_SERVER_OPTION = 42; + +# Parse the option +refine casetype OptionValue += { + NTP_SERVER_OPTION -> ntpserver_list : uint32[length/4]; +}; + +refine flow DHCP_Flow += { + function process_ntpserver_option(v: OptionValue): bool + %{ + VectorVal* ntpserver_list = new VectorVal(BifType::Vector::DHCP::Addrs); + int num_servers = ${v.ntpserver_list}->size(); + vector* rlist = ${v.ntpserver_list}; + + for ( int i = 0; i < num_servers; ++i ) + { + uint32 raddr = (*rlist)[i]; + ntpserver_list->Assign(i, new AddrVal(htonl(raddr))); + } + + ${context.flow}->options->Assign(28, ntpserver_list); + + return true; + %} +}; + +refine typeattr Option += &let { + proc_ntpserver_option = $context.flow.process_ntpserver_option(info.value) &if(code==NTP_SERVER_OPTION); +}; + ############################## # VENDOR SPECIFIC OPTION ############################## diff --git a/src/analyzer/protocol/dnp3/CMakeLists.txt b/src/analyzer/protocol/dnp3/CMakeLists.txt index b1c0f0b760..aaa7581319 100644 --- a/src/analyzer/protocol/dnp3/CMakeLists.txt +++ b/src/analyzer/protocol/dnp3/CMakeLists.txt @@ -1,10 +1,10 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro DNP3) -bro_plugin_cc(DNP3.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_pac(dnp3.pac dnp3-analyzer.pac dnp3-protocol.pac dnp3-objects.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek DNP3) +zeek_plugin_cc(DNP3.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_pac(dnp3.pac dnp3-analyzer.pac dnp3-protocol.pac dnp3-objects.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/dnp3/Plugin.cc b/src/analyzer/protocol/dnp3/Plugin.cc index 6a64138ce7..8543360b6a 100644 --- a/src/analyzer/protocol/dnp3/Plugin.cc +++ b/src/analyzer/protocol/dnp3/Plugin.cc @@ -6,7 +6,7 @@ #include "DNP3.h" namespace plugin { -namespace Bro_DNP3 { +namespace Zeek_DNP3 { class Plugin : public plugin::Plugin { public: @@ -16,7 +16,7 @@ public: AddComponent(new ::analyzer::Component("DNP3_UDP", ::analyzer::dnp3::DNP3_UDP_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::DNP3"; + config.name = "Zeek::DNP3"; config.description = "DNP3 UDP/TCP analyzers"; return config; } diff --git a/src/analyzer/protocol/dns/CMakeLists.txt b/src/analyzer/protocol/dns/CMakeLists.txt index c63b2dc690..76c3129eba 100644 --- a/src/analyzer/protocol/dns/CMakeLists.txt +++ b/src/analyzer/protocol/dns/CMakeLists.txt @@ -1,9 +1,9 @@ 
-include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro DNS) -bro_plugin_cc(DNS.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek DNS) +zeek_plugin_cc(DNS.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/dns/DNS.cc b/src/analyzer/protocol/dns/DNS.cc index 944ce92731..51a8d1cec3 100644 --- a/src/analyzer/protocol/dns/DNS.cc +++ b/src/analyzer/protocol/dns/DNS.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include #include @@ -46,13 +46,12 @@ int DNS_Interpreter::ParseMessage(const u_char* data, int len, int is_query) if ( dns_message ) { - val_list* vl = new val_list(); - vl->append(analyzer->BuildConnVal()); - vl->append(val_mgr->GetBool(is_query)); - vl->append(msg.BuildHdrVal()); - vl->append(val_mgr->GetCount(len)); - - analyzer->ConnectionEvent(dns_message, vl); + analyzer->ConnectionEventFast(dns_message, { + analyzer->BuildConnVal(), + val_mgr->GetBool(is_query), + msg.BuildHdrVal(), + val_mgr->GetCount(len), + }); } // There is a great deal of non-DNS traffic that runs on port 53. @@ -133,11 +132,11 @@ int DNS_Interpreter::ParseMessage(const u_char* data, int len, int is_query) int DNS_Interpreter::EndMessage(DNS_MsgInfo* msg) { - val_list* vl = new val_list; - - vl->append(analyzer->BuildConnVal()); - vl->append(msg->BuildHdrVal()); - analyzer->ConnectionEvent(dns_end, vl); + if ( dns_end ) + analyzer->ConnectionEventFast(dns_end, { + analyzer->BuildConnVal(), + msg->BuildHdrVal(), + }); return 1; } @@ -282,6 +281,10 @@ int DNS_Interpreter::ParseAnswer(DNS_MsgInfo* msg, status = ParseRR_TXT(msg, data, len, rdlength, msg_start); break; + case TYPE_SPF: + status = ParseRR_SPF(msg, data, len, rdlength, msg_start); + break; + case TYPE_CAA: status = ParseRR_CAA(msg, data, len, rdlength, msg_start); break; @@ -336,11 +339,11 @@ int DNS_Interpreter::ParseAnswer(DNS_MsgInfo* msg, if ( dns_unknown_reply && ! msg->skip_event ) { - val_list* vl = new val_list; - vl->append(analyzer->BuildConnVal()); - vl->append(msg->BuildHdrVal()); - vl->append(msg->BuildAnswerVal()); - analyzer->ConnectionEvent(dns_unknown_reply, vl); + analyzer->ConnectionEventFast(dns_unknown_reply, { + analyzer->BuildConnVal(), + msg->BuildHdrVal(), + msg->BuildAnswerVal(), + }); } analyzer->Weird("DNS_RR_unknown_type", fmt("%d", msg->atype)); @@ -551,14 +554,12 @@ int DNS_Interpreter::ParseRR_Name(DNS_MsgInfo* msg, if ( reply_event && ! msg->skip_event ) { - val_list* vl = new val_list; - - vl->append(analyzer->BuildConnVal()); - vl->append(msg->BuildHdrVal()); - vl->append(msg->BuildAnswerVal()); - vl->append(new StringVal(new BroString(name, name_end - name, 1))); - - analyzer->ConnectionEvent(reply_event, vl); + analyzer->ConnectionEventFast(reply_event, { + analyzer->BuildConnVal(), + msg->BuildHdrVal(), + msg->BuildAnswerVal(), + new StringVal(new BroString(name, name_end - name, 1)), + }); } return 1; @@ -598,14 +599,7 @@ int DNS_Interpreter::ParseRR_SOA(DNS_MsgInfo* msg, if ( dns_SOA_reply && ! 
msg->skip_event ) { - val_list* vl = new val_list; - - vl->append(analyzer->BuildConnVal()); - vl->append(msg->BuildHdrVal()); - vl->append(msg->BuildAnswerVal()); - RecordVal* r = new RecordVal(dns_soa); - r->Assign(0, new StringVal(new BroString(mname, mname_end - mname, 1))); r->Assign(1, new StringVal(new BroString(rname, rname_end - rname, 1))); r->Assign(2, val_mgr->GetCount(serial)); @@ -614,9 +608,12 @@ int DNS_Interpreter::ParseRR_SOA(DNS_MsgInfo* msg, r->Assign(5, new IntervalVal(double(expire), Seconds)); r->Assign(6, new IntervalVal(double(minimum), Seconds)); - vl->append(r); - - analyzer->ConnectionEvent(dns_SOA_reply, vl); + analyzer->ConnectionEventFast(dns_SOA_reply, { + analyzer->BuildConnVal(), + msg->BuildHdrVal(), + msg->BuildAnswerVal(), + r + }); } return 1; @@ -642,15 +639,13 @@ int DNS_Interpreter::ParseRR_MX(DNS_MsgInfo* msg, if ( dns_MX_reply && ! msg->skip_event ) { - val_list* vl = new val_list; - - vl->append(analyzer->BuildConnVal()); - vl->append(msg->BuildHdrVal()); - vl->append(msg->BuildAnswerVal()); - vl->append(new StringVal(new BroString(name, name_end - name, 1))); - vl->append(val_mgr->GetCount(preference)); - - analyzer->ConnectionEvent(dns_MX_reply, vl); + analyzer->ConnectionEventFast(dns_MX_reply, { + analyzer->BuildConnVal(), + msg->BuildHdrVal(), + msg->BuildAnswerVal(), + new StringVal(new BroString(name, name_end - name, 1)), + val_mgr->GetCount(preference), + }); } return 1; @@ -687,16 +682,15 @@ int DNS_Interpreter::ParseRR_SRV(DNS_MsgInfo* msg, if ( dns_SRV_reply && ! msg->skip_event ) { - val_list* vl = new val_list; - vl->append(analyzer->BuildConnVal()); - vl->append(msg->BuildHdrVal()); - vl->append(msg->BuildAnswerVal()); - vl->append(new StringVal(new BroString(name, name_end - name, 1))); - vl->append(val_mgr->GetCount(priority)); - vl->append(val_mgr->GetCount(weight)); - vl->append(val_mgr->GetCount(port)); - - analyzer->ConnectionEvent(dns_SRV_reply, vl); + analyzer->ConnectionEventFast(dns_SRV_reply, { + analyzer->BuildConnVal(), + msg->BuildHdrVal(), + msg->BuildAnswerVal(), + new StringVal(new BroString(name, name_end - name, 1)), + val_mgr->GetCount(priority), + val_mgr->GetCount(weight), + val_mgr->GetCount(port), + }); } return 1; @@ -711,12 +705,11 @@ int DNS_Interpreter::ParseRR_EDNS(DNS_MsgInfo* msg, if ( dns_EDNS_addl && ! 
msg->skip_event ) { - val_list* vl = new val_list; - - vl->append(analyzer->BuildConnVal()); - vl->append(msg->BuildHdrVal()); - vl->append(msg->BuildEDNS_Val()); - analyzer->ConnectionEvent(dns_EDNS_addl, vl); + analyzer->ConnectionEventFast(dns_EDNS_addl, { + analyzer->BuildConnVal(), + msg->BuildHdrVal(), + msg->BuildEDNS_Val(), + }); } // Currently EDNS supports the movement of type:data pairs @@ -778,24 +771,24 @@ int DNS_Interpreter::ParseRR_TSIG(DNS_MsgInfo* msg, unsigned int rr_error = ExtractShort(data, len); ExtractOctets(data, len, 0); // Other Data - msg->tsig = new TSIG_DATA; + if ( dns_TSIG_addl ) + { + TSIG_DATA tsig; + tsig.alg_name = + new BroString(alg_name, alg_name_end - alg_name, 1); + tsig.sig = request_MAC; + tsig.time_s = sign_time_sec; + tsig.time_ms = sign_time_msec; + tsig.fudge = fudge; + tsig.orig_id = orig_id; + tsig.rr_error = rr_error; - msg->tsig->alg_name = - new BroString(alg_name, alg_name_end - alg_name, 1); - msg->tsig->sig = request_MAC; - msg->tsig->time_s = sign_time_sec; - msg->tsig->time_ms = sign_time_msec; - msg->tsig->fudge = fudge; - msg->tsig->orig_id = orig_id; - msg->tsig->rr_error = rr_error; - - val_list* vl = new val_list; - - vl->append(analyzer->BuildConnVal()); - vl->append(msg->BuildHdrVal()); - vl->append(msg->BuildTSIG_Val()); - - analyzer->ConnectionEvent(dns_TSIG_addl, vl); + analyzer->ConnectionEventFast(dns_TSIG_addl, { + analyzer->BuildConnVal(), + msg->BuildHdrVal(), + msg->BuildTSIG_Val(&tsig), + }); + } return 1; } @@ -878,25 +871,26 @@ int DNS_Interpreter::ParseRR_RRSIG(DNS_MsgInfo* msg, break; } - RRSIG_DATA rrsig; - rrsig.type_covered = type_covered; - rrsig.algorithm = algo; - rrsig.labels = lab; - rrsig.orig_ttl = orig_ttl; - rrsig.sig_exp = sign_exp; - rrsig.sig_incep = sign_incp; - rrsig.key_tag = key_tag; - rrsig.signer_name = new BroString(name, name_end - name, 1); - rrsig.signature = sign; + if ( dns_RRSIG ) + { + RRSIG_DATA rrsig; + rrsig.type_covered = type_covered; + rrsig.algorithm = algo; + rrsig.labels = lab; + rrsig.orig_ttl = orig_ttl; + rrsig.sig_exp = sign_exp; + rrsig.sig_incep = sign_incp; + rrsig.key_tag = key_tag; + rrsig.signer_name = new BroString(name, name_end - name, 1); + rrsig.signature = sign; - val_list* vl = new val_list; - - vl->append(analyzer->BuildConnVal()); - vl->append(msg->BuildHdrVal()); - vl->append(msg->BuildAnswerVal()); - vl->append(msg->BuildRRSIG_Val(&rrsig)); - - analyzer->ConnectionEvent(dns_RRSIG, vl); + analyzer->ConnectionEventFast(dns_RRSIG, { + analyzer->BuildConnVal(), + msg->BuildHdrVal(), + msg->BuildAnswerVal(), + msg->BuildRRSIG_Val(&rrsig), + }); + } return 1; } @@ -977,20 +971,21 @@ int DNS_Interpreter::ParseRR_DNSKEY(DNS_MsgInfo* msg, break; } - DNSKEY_DATA dnskey; - dnskey.dflags = dflags; - dnskey.dalgorithm = dalgorithm; - dnskey.dprotocol = dprotocol; - dnskey.public_key = key; + if ( dns_DNSKEY ) + { + DNSKEY_DATA dnskey; + dnskey.dflags = dflags; + dnskey.dalgorithm = dalgorithm; + dnskey.dprotocol = dprotocol; + dnskey.public_key = key; - val_list* vl = new val_list; - - vl->append(analyzer->BuildConnVal()); - vl->append(msg->BuildHdrVal()); - vl->append(msg->BuildAnswerVal()); - vl->append(msg->BuildDNSKEY_Val(&dnskey)); - - analyzer->ConnectionEvent(dns_DNSKEY, vl); + analyzer->ConnectionEventFast(dns_DNSKEY, { + analyzer->BuildConnVal(), + msg->BuildHdrVal(), + msg->BuildAnswerVal(), + msg->BuildDNSKEY_Val(&dnskey), + }); + } return 1; } @@ -1035,15 +1030,16 @@ int DNS_Interpreter::ParseRR_NSEC(DNS_MsgInfo* msg, typebitmaps_len = typebitmaps_len - (2 
+ bmlen); } - val_list* vl = new val_list; - - vl->append(analyzer->BuildConnVal()); - vl->append(msg->BuildHdrVal()); - vl->append(msg->BuildAnswerVal()); - vl->append(new StringVal(new BroString(name, name_end - name, 1))); - vl->append(char_strings); - - analyzer->ConnectionEvent(dns_NSEC, vl); + if ( dns_NSEC ) + analyzer->ConnectionEventFast(dns_NSEC, { + analyzer->BuildConnVal(), + msg->BuildHdrVal(), + msg->BuildAnswerVal(), + new StringVal(new BroString(name, name_end - name, 1)), + char_strings, + }); + else + Unref(char_strings); return 1; } @@ -1111,24 +1107,25 @@ int DNS_Interpreter::ParseRR_NSEC3(DNS_MsgInfo* msg, typebitmaps_len = typebitmaps_len - (2 + bmlen); } - NSEC3_DATA nsec3; - nsec3.nsec_flags = nsec_flags; - nsec3.nsec_hash_algo = hash_algo; - nsec3.nsec_iter = iter; - nsec3.nsec_salt_len = salt_len; - nsec3.nsec_salt = salt_val; - nsec3.nsec_hlen = hash_len; - nsec3.nsec_hash = hash_val; - nsec3.bitmaps = char_strings; + if ( dns_NSEC3 ) + { + NSEC3_DATA nsec3; + nsec3.nsec_flags = nsec_flags; + nsec3.nsec_hash_algo = hash_algo; + nsec3.nsec_iter = iter; + nsec3.nsec_salt_len = salt_len; + nsec3.nsec_salt = salt_val; + nsec3.nsec_hlen = hash_len; + nsec3.nsec_hash = hash_val; + nsec3.bitmaps = char_strings; - val_list* vl = new val_list; - - vl->append(analyzer->BuildConnVal()); - vl->append(msg->BuildHdrVal()); - vl->append(msg->BuildAnswerVal()); - vl->append(msg->BuildNSEC3_Val(&nsec3)); - - analyzer->ConnectionEvent(dns_NSEC3, vl); + analyzer->ConnectionEventFast(dns_NSEC3, { + analyzer->BuildConnVal(), + msg->BuildHdrVal(), + msg->BuildAnswerVal(), + msg->BuildNSEC3_Val(&nsec3), + }); + } return 1; } @@ -1172,20 +1169,21 @@ int DNS_Interpreter::ParseRR_DS(DNS_MsgInfo* msg, break; } - DS_DATA ds; - ds.key_tag = ds_key_tag; - ds.algorithm = ds_algo; - ds.digest_type = ds_dtype; - ds.digest_val = ds_digest; + if ( dns_DS ) + { + DS_DATA ds; + ds.key_tag = ds_key_tag; + ds.algorithm = ds_algo; + ds.digest_type = ds_dtype; + ds.digest_val = ds_digest; - val_list* vl = new val_list; - - vl->append(analyzer->BuildConnVal()); - vl->append(msg->BuildHdrVal()); - vl->append(msg->BuildAnswerVal()); - vl->append(msg->BuildDS_Val(&ds)); - - analyzer->ConnectionEvent(dns_DS, vl); + analyzer->ConnectionEventFast(dns_DS, { + analyzer->BuildConnVal(), + msg->BuildHdrVal(), + msg->BuildAnswerVal(), + msg->BuildDS_Val(&ds), + }); + } return 1; } @@ -1203,14 +1201,12 @@ int DNS_Interpreter::ParseRR_A(DNS_MsgInfo* msg, if ( dns_A_reply && ! msg->skip_event ) { - val_list* vl = new val_list; - - vl->append(analyzer->BuildConnVal()); - vl->append(msg->BuildHdrVal()); - vl->append(msg->BuildAnswerVal()); - vl->append(new AddrVal(htonl(addr))); - - analyzer->ConnectionEvent(dns_A_reply, vl); + analyzer->ConnectionEventFast(dns_A_reply, { + analyzer->BuildConnVal(), + msg->BuildHdrVal(), + msg->BuildAnswerVal(), + new AddrVal(htonl(addr)), + }); } return 1; @@ -1242,13 +1238,12 @@ int DNS_Interpreter::ParseRR_AAAA(DNS_MsgInfo* msg, event = dns_A6_reply; if ( event && ! 
msg->skip_event ) { - val_list* vl = new val_list; - - vl->append(analyzer->BuildConnVal()); - vl->append(msg->BuildHdrVal()); - vl->append(msg->BuildAnswerVal()); - vl->append(new AddrVal(addr)); - analyzer->ConnectionEvent(event, vl); + analyzer->ConnectionEventFast(event, { + analyzer->BuildConnVal(), + msg->BuildHdrVal(), + msg->BuildAnswerVal(), + new AddrVal(addr), + }); } return 1; @@ -1317,14 +1312,45 @@ int DNS_Interpreter::ParseRR_TXT(DNS_MsgInfo* msg, while ( (char_string = extract_char_string(analyzer, data, len, rdlength)) ) char_strings->Assign(char_strings->Size(), char_string); - val_list* vl = new val_list; + if ( dns_TXT_reply ) + analyzer->ConnectionEventFast(dns_TXT_reply, { + analyzer->BuildConnVal(), + msg->BuildHdrVal(), + msg->BuildAnswerVal(), + char_strings, + }); + else + Unref(char_strings); - vl->append(analyzer->BuildConnVal()); - vl->append(msg->BuildHdrVal()); - vl->append(msg->BuildAnswerVal()); - vl->append(char_strings); + return rdlength == 0; + } - analyzer->ConnectionEvent(dns_TXT_reply, vl); +int DNS_Interpreter::ParseRR_SPF(DNS_MsgInfo* msg, + const u_char*& data, int& len, int rdlength, + const u_char* msg_start) + { + if ( ! dns_SPF_reply || msg->skip_event ) + { + data += rdlength; + len -= rdlength; + return 1; + } + + VectorVal* char_strings = new VectorVal(string_vec); + StringVal* char_string; + + while ( (char_string = extract_char_string(analyzer, data, len, rdlength)) ) + char_strings->Assign(char_strings->Size(), char_string); + + if ( dns_SPF_reply ) + analyzer->ConnectionEventFast(dns_SPF_reply, { + analyzer->BuildConnVal(), + msg->BuildHdrVal(), + msg->BuildAnswerVal(), + char_strings, + }); + else + Unref(char_strings); return rdlength == 0; } @@ -1359,16 +1385,20 @@ int DNS_Interpreter::ParseRR_CAA(DNS_MsgInfo* msg, data += value->Len(); rdlength -= value->Len(); - val_list* vl = new val_list; - - vl->append(analyzer->BuildConnVal()); - vl->append(msg->BuildHdrVal()); - vl->append(msg->BuildAnswerVal()); - vl->append(val_mgr->GetCount(flags)); - vl->append(new StringVal(tag)); - vl->append(new StringVal(value)); - - analyzer->ConnectionEvent(dns_CAA_reply, vl); + if ( dns_CAA_reply ) + analyzer->ConnectionEventFast(dns_CAA_reply, { + analyzer->BuildConnVal(), + msg->BuildHdrVal(), + msg->BuildAnswerVal(), + val_mgr->GetCount(flags), + new StringVal(tag), + new StringVal(value), + }); + else + { + delete tag; + delete value; + } return rdlength == 0; } @@ -1382,14 +1412,14 @@ void DNS_Interpreter::SendReplyOrRejectEvent(DNS_MsgInfo* msg, RR_Type qtype = RR_Type(ExtractShort(data, len)); int qclass = ExtractShort(data, len); - val_list* vl = new val_list; - vl->append(analyzer->BuildConnVal()); - vl->append(msg->BuildHdrVal()); - vl->append(new StringVal(question_name)); - vl->append(val_mgr->GetCount(qtype)); - vl->append(val_mgr->GetCount(qclass)); - - analyzer->ConnectionEvent(event, vl); + if ( event ) + analyzer->ConnectionEventFast(event, { + analyzer->BuildConnVal(), + msg->BuildHdrVal(), + new StringVal(question_name), + val_mgr->GetCount(qtype), + val_mgr->GetCount(qclass), + }); } @@ -1423,7 +1453,6 @@ DNS_MsgInfo::DNS_MsgInfo(DNS_RawMsgHdr* hdr, int arg_is_query) answer_type = DNS_QUESTION; skip_event = 0; - tsig = 0; } DNS_MsgInfo::~DNS_MsgInfo() @@ -1502,7 +1531,7 @@ Val* DNS_MsgInfo::BuildEDNS_Val() return r; } -Val* DNS_MsgInfo::BuildTSIG_Val() +Val* DNS_MsgInfo::BuildTSIG_Val(struct TSIG_DATA* tsig) { RecordVal* r = new RecordVal(dns_tsig_additional); double rtime = tsig->time_s + tsig->time_ms / 1000.0; @@ -1519,9 
+1548,6 @@ Val* DNS_MsgInfo::BuildTSIG_Val() r->Assign(7, val_mgr->GetCount(tsig->rr_error)); r->Assign(8, val_mgr->GetCount(is_query)); - delete tsig; - tsig = 0; - return r; } @@ -1737,10 +1763,11 @@ void DNS_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, { if ( ! interp->ParseMessage(data, len, 1) && non_dns_request ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(new StringVal(len, (const char*) data)); - ConnectionEvent(non_dns_request, vl); + if ( non_dns_request ) + ConnectionEventFast(non_dns_request, { + BuildConnVal(), + new StringVal(len, (const char*) data), + }); } } diff --git a/src/analyzer/protocol/dns/DNS.h b/src/analyzer/protocol/dns/DNS.h index f095fe96fa..8a82768ce0 100644 --- a/src/analyzer/protocol/dns/DNS.h +++ b/src/analyzer/protocol/dns/DNS.h @@ -63,6 +63,8 @@ typedef enum { TYPE_DNSKEY = 48, ///< DNS Key record (RFC 4034) TYPE_DS = 43, ///< Delegation signer (RFC 4034) TYPE_NSEC3 = 50, + // Obsoleted + TYPE_SPF = 99, ///< Alternative: storing SPF data in TXT records, using the same format (RFC 4408). Support for it was discontinued in RFC 7208 // The following are only valid in queries. TYPE_AXFR = 252, TYPE_ALL = 255, @@ -182,7 +184,7 @@ public: Val* BuildHdrVal(); Val* BuildAnswerVal(); Val* BuildEDNS_Val(); - Val* BuildTSIG_Val(); + Val* BuildTSIG_Val(struct TSIG_DATA*); Val* BuildRRSIG_Val(struct RRSIG_DATA*); Val* BuildDNSKEY_Val(struct DNSKEY_DATA*); Val* BuildNSEC3_Val(struct NSEC3_DATA*); @@ -214,10 +216,6 @@ public: ///< identical answer, there may be problems // uint32* addr; ///< cache value to pass back results ///< for forward lookups - - // More values for spesific DNS types. - //struct EDNS_ADDITIONAL* edns; - struct TSIG_DATA* tsig; }; @@ -286,6 +284,9 @@ protected: int ParseRR_TXT(DNS_MsgInfo* msg, const u_char*& data, int& len, int rdlength, const u_char* msg_start); + int ParseRR_SPF(DNS_MsgInfo* msg, + const u_char*& data, int& len, int rdlength, + const u_char* msg_start); int ParseRR_CAA(DNS_MsgInfo* msg, const u_char*& data, int& len, int rdlength, const u_char* msg_start); diff --git a/src/analyzer/protocol/dns/Plugin.cc b/src/analyzer/protocol/dns/Plugin.cc index 1cba094c54..3ceef34ea1 100644 --- a/src/analyzer/protocol/dns/Plugin.cc +++ b/src/analyzer/protocol/dns/Plugin.cc @@ -6,7 +6,7 @@ #include "DNS.h" namespace plugin { -namespace Bro_DNS { +namespace Zeek_DNS { class Plugin : public plugin::Plugin { public: @@ -16,7 +16,7 @@ public: AddComponent(new ::analyzer::Component("Contents_DNS", 0)); plugin::Configuration config; - config.name = "Bro::DNS"; + config.name = "Zeek::DNS"; config.description = "DNS analyzer"; return config; } diff --git a/src/analyzer/protocol/dns/events.bif b/src/analyzer/protocol/dns/events.bif index 6fe741d4d9..7ddbd0c7b3 100644 --- a/src/analyzer/protocol/dns/events.bif +++ b/src/analyzer/protocol/dns/events.bif @@ -1,7 +1,7 @@ ## Generated for all DNS messages. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## information about the DNS protocol. Zeek analyzes both UDP and TCP DNS ## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the @@ -13,9 +13,9 @@ ## ## len: The length of the message's raw representation (i.e., the DNS payload). ## -## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl +## .. 
zeek:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply -## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end +## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_SPF_reply dns_WKS_reply dns_end ## dns_full_request dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name ## dns_mapping_unverified dns_mapping_valid dns_query_reply dns_rejected ## dns_request non_dns_request dns_max_queries dns_session_timeout dns_skip_addl @@ -26,7 +26,7 @@ event dns_message%(c: connection, is_orig: bool, msg: dns_msg, len: count%); ## is raised once for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## information about the DNS protocol. Zeek analyzes both UDP and TCP DNS ## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the @@ -40,9 +40,9 @@ event dns_message%(c: connection, is_orig: bool, msg: dns_msg, len: count%); ## ## qclass: The queried resource record class. ## -## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl +## .. zeek:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply -## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end +## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_SPF_reply dns_WKS_reply dns_end ## dns_full_request dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name ## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply ## dns_rejected non_dns_request dns_max_queries dns_session_timeout dns_skip_addl @@ -55,7 +55,7 @@ event dns_request%(c: connection, msg: dns_msg, query: string, qtype: count, qcl ## the reply; there's no stateful correlation with the query. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## information about the DNS protocol. Zeek analyzes both UDP and TCP DNS ## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the @@ -69,9 +69,9 @@ event dns_request%(c: connection, msg: dns_msg, query: string, qtype: count, qcl ## ## qclass: The queried resource record class. ## -## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl +## .. zeek:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply -## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end +## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_SPF_reply dns_WKS_reply dns_end ## dns_full_request dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name ## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply ## dns_request non_dns_request dns_max_queries dns_session_timeout dns_skip_addl @@ -81,7 +81,7 @@ event dns_rejected%(c: connection, msg: dns_msg, query: string, qtype: count, qc ## Generated for each entry in the Question section of a DNS reply. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## information about the DNS protocol. Zeek analyzes both UDP and TCP DNS ## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the @@ -95,9 +95,9 @@ event dns_rejected%(c: connection, msg: dns_msg, query: string, qtype: count, qc ## ## qclass: The queried resource record class. ## -## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl +## .. 
zeek:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply -## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end +## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_SPF_reply dns_WKS_reply dns_end ## dns_full_request dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name ## dns_mapping_unverified dns_mapping_valid dns_message dns_rejected ## dns_request non_dns_request dns_max_queries dns_session_timeout dns_skip_addl @@ -109,7 +109,7 @@ event dns_query_reply%(c: connection, msg: dns_msg, query: string, ## individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## information about the DNS protocol. Zeek analyzes both UDP and TCP DNS ## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the @@ -121,9 +121,9 @@ event dns_query_reply%(c: connection, msg: dns_msg, query: string, ## ## a: The address returned by the reply. ## -## .. bro:see:: dns_AAAA_reply dns_A6_reply dns_CNAME_reply dns_EDNS_addl dns_HINFO_reply +## .. zeek:see:: dns_AAAA_reply dns_A6_reply dns_CNAME_reply dns_EDNS_addl dns_HINFO_reply ## dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply dns_SRV_reply -## dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end dns_full_request +## dns_TSIG_addl dns_TXT_reply dns_SPF_reply dns_WKS_reply dns_end dns_full_request ## dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name ## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply ## dns_rejected dns_request non_dns_request dns_max_queries dns_session_timeout @@ -134,7 +134,7 @@ event dns_A_reply%(c: connection, msg: dns_msg, ans: dns_answer, a: addr%); ## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## information about the DNS protocol. Zeek analyzes both UDP and TCP DNS ## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the @@ -146,9 +146,9 @@ event dns_A_reply%(c: connection, msg: dns_msg, ans: dns_answer, a: addr%); ## ## a: The address returned by the reply. ## -## .. bro:see:: dns_A_reply dns_A6_reply dns_CNAME_reply dns_EDNS_addl dns_HINFO_reply dns_MX_reply +## .. zeek:see:: dns_A_reply dns_A6_reply dns_CNAME_reply dns_EDNS_addl dns_HINFO_reply dns_MX_reply ## dns_NS_reply dns_PTR_reply dns_SOA_reply dns_SRV_reply dns_TSIG_addl -## dns_TXT_reply dns_WKS_reply dns_end dns_full_request dns_mapping_altered +## dns_TXT_reply dns_SPF_reply dns_WKS_reply dns_end dns_full_request dns_mapping_altered ## dns_mapping_lost_name dns_mapping_new_name dns_mapping_unverified ## dns_mapping_valid dns_message dns_query_reply dns_rejected dns_request ## non_dns_request dns_max_queries dns_session_timeout dns_skip_addl @@ -159,7 +159,7 @@ event dns_AAAA_reply%(c: connection, msg: dns_msg, ans: dns_answer, a: addr%); ## individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## information about the DNS protocol. Zeek analyzes both UDP and TCP DNS ## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the @@ -171,9 +171,9 @@ event dns_AAAA_reply%(c: connection, msg: dns_msg, ans: dns_answer, a: addr%); ## ## a: The address returned by the reply. ## -## .. 
bro:see:: dns_A_reply dns_AAAA_reply dns_CNAME_reply dns_EDNS_addl dns_HINFO_reply dns_MX_reply +## .. zeek:see:: dns_A_reply dns_AAAA_reply dns_CNAME_reply dns_EDNS_addl dns_HINFO_reply dns_MX_reply ## dns_NS_reply dns_PTR_reply dns_SOA_reply dns_SRV_reply dns_TSIG_addl -## dns_TXT_reply dns_WKS_reply dns_end dns_full_request dns_mapping_altered +## dns_TXT_reply dns_SPF_reply dns_WKS_reply dns_end dns_full_request dns_mapping_altered ## dns_mapping_lost_name dns_mapping_new_name dns_mapping_unverified ## dns_mapping_valid dns_message dns_query_reply dns_rejected dns_request ## non_dns_request dns_max_queries dns_session_timeout dns_skip_addl @@ -184,7 +184,7 @@ event dns_A6_reply%(c: connection, msg: dns_msg, ans: dns_answer, a: addr%); ## individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## information about the DNS protocol. Zeek analyzes both UDP and TCP DNS ## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the @@ -196,9 +196,9 @@ event dns_A6_reply%(c: connection, msg: dns_msg, ans: dns_answer, a: addr%); ## ## name: The name returned by the reply. ## -## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl +## .. zeek:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_MX_reply dns_PTR_reply dns_SOA_reply dns_SRV_reply -## dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end dns_full_request +## dns_TSIG_addl dns_TXT_reply dns_SPF_reply dns_WKS_reply dns_end dns_full_request ## dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name ## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply ## dns_rejected dns_request non_dns_request dns_max_queries dns_session_timeout @@ -209,7 +209,7 @@ event dns_NS_reply%(c: connection, msg: dns_msg, ans: dns_answer, name: string%) ## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## information about the DNS protocol. Zeek analyzes both UDP and TCP DNS ## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the @@ -221,9 +221,9 @@ event dns_NS_reply%(c: connection, msg: dns_msg, ans: dns_answer, name: string%) ## ## name: The name returned by the reply. ## -## .. bro:see:: dns_AAAA_reply dns_A_reply dns_EDNS_addl dns_HINFO_reply dns_MX_reply +## .. zeek:see:: dns_AAAA_reply dns_A_reply dns_EDNS_addl dns_HINFO_reply dns_MX_reply ## dns_NS_reply dns_PTR_reply dns_SOA_reply dns_SRV_reply dns_TSIG_addl -## dns_TXT_reply dns_WKS_reply dns_end dns_full_request dns_mapping_altered +## dns_TXT_reply dns_SPF_reply dns_WKS_reply dns_end dns_full_request dns_mapping_altered ## dns_mapping_lost_name dns_mapping_new_name dns_mapping_unverified ## dns_mapping_valid dns_message dns_query_reply dns_rejected dns_request ## non_dns_request dns_max_queries dns_session_timeout dns_skip_addl @@ -234,7 +234,7 @@ event dns_CNAME_reply%(c: connection, msg: dns_msg, ans: dns_answer, name: strin ## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## information about the DNS protocol. Zeek analyzes both UDP and TCP DNS ## sessions. 
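
The analyzer hunks above all apply the same conversion: instead of heap-allocating a val_list, appending each argument, and passing the list to ConnectionEvent(), the code first checks that the event handler exists and then calls ConnectionEventFast() with a brace-enclosed list of Vals; when no handler is defined, any Vals already built are released (the Unref()/delete branches). A minimal sketch of that shape, outside any particular analyzer: the Example_Analyzer class, the example_event handler, and the include paths are illustrative assumptions, while the calls used (BuildConnVal, val_mgr->GetCount, ConnectionEventFast, Unref) are the ones appearing in these hunks.

#include "analyzer/Analyzer.h"  // assumed in-tree include path
#include "Val.h"                // assumed in-tree include path

// Sketch of the post-conversion event pattern; Example_Analyzer and
// example_event are placeholder names, not part of this change.
void Example_Analyzer::Report(uint32 code, StringVal* detail)
	{
	if ( ! example_event )
		{
		// No handler registered: the Vals built for the event are never
		// handed off, so release them here (mirrors the Unref()/delete
		// branches in the hunks above).
		Unref(detail);
		return;
		}

	// Arguments are built inline; ownership passes to the event engine.
	ConnectionEventFast(example_event, {
		BuildConnVal(),
		val_mgr->GetCount(code),
		detail,
		});
	}

Where the argument list differs per branch, as in the FTP_Analyzer::DeliverStream hunk further below, the change instead builds a val_list by value and hands it to ConnectionEvent() with std::move().
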
## ## c: The connection, which may be UDP or TCP depending on the type of the @@ -246,9 +246,9 @@ event dns_CNAME_reply%(c: connection, msg: dns_msg, ans: dns_answer, name: strin ## ## name: The name returned by the reply. ## -## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl +## .. zeek:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_SOA_reply dns_SRV_reply -## dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end dns_full_request +## dns_TSIG_addl dns_TXT_reply dns_SPF_reply dns_WKS_reply dns_end dns_full_request ## dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name ## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply ## dns_rejected dns_request non_dns_request dns_max_queries dns_session_timeout @@ -259,7 +259,7 @@ event dns_PTR_reply%(c: connection, msg: dns_msg, ans: dns_answer, name: string% ## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## information about the DNS protocol. Zeek analyzes both UDP and TCP DNS ## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the @@ -271,9 +271,9 @@ event dns_PTR_reply%(c: connection, msg: dns_msg, ans: dns_answer, name: string% ## ## soa: The parsed SOA value. ## -## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl +## .. zeek:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SRV_reply -## dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end dns_full_request +## dns_TSIG_addl dns_TXT_reply dns_SPF_reply dns_WKS_reply dns_end dns_full_request ## dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name ## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply ## dns_rejected dns_request non_dns_request dns_max_queries dns_session_timeout @@ -284,7 +284,7 @@ event dns_SOA_reply%(c: connection, msg: dns_msg, ans: dns_answer, soa: dns_soa% ## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## information about the DNS protocol. Zeek analyzes both UDP and TCP DNS ## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the @@ -294,9 +294,9 @@ event dns_SOA_reply%(c: connection, msg: dns_msg, ans: dns_answer, soa: dns_soa% ## ## ans: The type-independent part of the parsed answer record. ## -## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl +## .. zeek:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply -## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_end dns_full_request +## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_SPF_reply dns_end dns_full_request ## dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name ## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply ## dns_rejected dns_request non_dns_request dns_max_queries dns_session_timeout @@ -307,7 +307,7 @@ event dns_WKS_reply%(c: connection, msg: dns_msg, ans: dns_answer%); ## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## information about the DNS protocol. 
Zeek analyzes both UDP and TCP DNS ## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the @@ -317,9 +317,9 @@ event dns_WKS_reply%(c: connection, msg: dns_msg, ans: dns_answer%); ## ## ans: The type-independent part of the parsed answer record. ## -## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl dns_MX_reply +## .. zeek:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl dns_MX_reply ## dns_NS_reply dns_PTR_reply dns_SOA_reply dns_SRV_reply dns_TSIG_addl -## dns_TXT_reply dns_WKS_reply dns_end dns_full_request dns_mapping_altered +## dns_TXT_reply dns_SPF_reply dns_WKS_reply dns_end dns_full_request dns_mapping_altered ## dns_mapping_lost_name dns_mapping_new_name dns_mapping_unverified ## dns_mapping_valid dns_message dns_query_reply dns_rejected dns_request ## non_dns_request dns_max_queries dns_session_timeout dns_skip_addl @@ -330,7 +330,7 @@ event dns_HINFO_reply%(c: connection, msg: dns_msg, ans: dns_answer%); ## individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## information about the DNS protocol. Zeek analyzes both UDP and TCP DNS ## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the @@ -344,9 +344,9 @@ event dns_HINFO_reply%(c: connection, msg: dns_msg, ans: dns_answer%); ## ## preference: The preference for *name* specified by the reply. ## -## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl +## .. zeek:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_NS_reply dns_PTR_reply dns_SOA_reply dns_SRV_reply -## dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end dns_full_request +## dns_TSIG_addl dns_TXT_reply dns_SPF_reply dns_WKS_reply dns_end dns_full_request ## dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name ## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply ## dns_rejected dns_request non_dns_request dns_max_queries dns_session_timeout @@ -357,7 +357,7 @@ event dns_MX_reply%(c: connection, msg: dns_msg, ans: dns_answer, name: string, ## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## information about the DNS protocol. Zeek analyzes both UDP and TCP DNS ## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the @@ -369,7 +369,7 @@ event dns_MX_reply%(c: connection, msg: dns_msg, ans: dns_answer, name: string, ## ## strs: The textual information returned by the reply. ## -## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl +## .. zeek:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply ## dns_SRV_reply dns_TSIG_addl dns_WKS_reply dns_end dns_full_request ## dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name @@ -378,6 +378,31 @@ event dns_MX_reply%(c: connection, msg: dns_msg, ans: dns_answer, name: string, ## dns_skip_addl dns_skip_all_addl dns_skip_all_auth dns_skip_auth event dns_TXT_reply%(c: connection, msg: dns_msg, ans: dns_answer, strs: string_vec%); +## Generated for DNS replies of type *SPF*. For replies with multiple answers, +## an individual event of the corresponding type is raised for each. +## +## See `Wikipedia `__ for more +## information about the DNS protocol. 
Zeek analyzes both UDP and TCP DNS +## sessions. +## +## c: The connection, which may be UDP or TCP depending on the type of the +## transport-layer session being analyzed. +## +## msg: The parsed DNS message header. +## +## ans: The type-independent part of the parsed answer record. +## +## strs: The textual information returned by the reply. +## +## .. zeek:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl +## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply +## dns_SRV_reply dns_TSIG_addl dns_WKS_reply dns_end dns_full_request +## dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name +## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply +## dns_rejected dns_request non_dns_request dns_max_queries dns_session_timeout +## dns_skip_addl dns_skip_all_addl dns_skip_all_auth dns_skip_auth +event dns_SPF_reply%(c: connection, msg: dns_msg, ans: dns_answer, strs: string_vec%); + ## Generated for DNS replies of type *CAA* (Certification Authority Authorization). ## For replies with multiple answers, an individual event of the corresponding type ## is raised for each. @@ -401,7 +426,7 @@ event dns_CAA_reply%(c: connection, msg: dns_msg, ans: dns_answer, flags: count, ## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## information about the DNS protocol. Zeek analyzes both UDP and TCP DNS ## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the @@ -423,9 +448,9 @@ event dns_CAA_reply%(c: connection, msg: dns_msg, ans: dns_answer, flags: count, ## p: Port of the SRV response -- the TCP or UDP port on which the ## service is to be found. ## -## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl +## .. zeek:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply -## dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end dns_full_request +## dns_TSIG_addl dns_TXT_reply dns_SPF_reply dns_WKS_reply dns_end dns_full_request ## dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name ## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply ## dns_rejected dns_request non_dns_request dns_max_queries dns_session_timeout @@ -433,7 +458,7 @@ event dns_CAA_reply%(c: connection, msg: dns_msg, ans: dns_answer, flags: count, event dns_SRV_reply%(c: connection, msg: dns_msg, ans: dns_answer, target: string, priority: count, weight: count, p: count%); ## Generated on DNS reply resource records when the type of record is not one -## that Bro knows how to parse and generate another more specific event. +## that Zeek knows how to parse and generate another more specific event. ## ## c: The connection, which may be UDP or TCP depending on the type of the ## transport-layer session being analyzed. @@ -442,16 +467,16 @@ event dns_SRV_reply%(c: connection, msg: dns_msg, ans: dns_answer, target: strin ## ## ans: The type-independent part of the parsed answer record. ## -## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl +## .. 
zeek:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply -## dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_SRV_reply dns_end +## dns_TSIG_addl dns_TXT_reply dns_SPF_reply dns_WKS_reply dns_SRV_reply dns_end event dns_unknown_reply%(c: connection, msg: dns_msg, ans: dns_answer%); ## Generated for DNS replies of type *EDNS*. For replies with multiple answers, ## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## information about the DNS protocol. Zeek analyzes both UDP and TCP DNS ## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the @@ -461,9 +486,9 @@ event dns_unknown_reply%(c: connection, msg: dns_msg, ans: dns_answer%); ## ## ans: The parsed EDNS reply. ## -## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_HINFO_reply dns_MX_reply +## .. zeek:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_HINFO_reply dns_MX_reply ## dns_NS_reply dns_PTR_reply dns_SOA_reply dns_SRV_reply dns_TSIG_addl -## dns_TXT_reply dns_WKS_reply dns_end dns_full_request dns_mapping_altered +## dns_TXT_reply dns_SPF_reply dns_WKS_reply dns_end dns_full_request dns_mapping_altered ## dns_mapping_lost_name dns_mapping_new_name dns_mapping_unverified ## dns_mapping_valid dns_message dns_query_reply dns_rejected dns_request ## non_dns_request dns_max_queries dns_session_timeout dns_skip_addl @@ -474,7 +499,7 @@ event dns_EDNS_addl%(c: connection, msg: dns_msg, ans: dns_edns_additional%); ## an individual event of the corresponding type is raised for each. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## information about the DNS protocol. Zeek analyzes both UDP and TCP DNS ## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the @@ -484,9 +509,9 @@ event dns_EDNS_addl%(c: connection, msg: dns_msg, ans: dns_edns_additional%); ## ## ans: The parsed TSIG reply. ## -## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl +## .. zeek:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply -## dns_SRV_reply dns_TXT_reply dns_WKS_reply dns_end dns_full_request +## dns_SRV_reply dns_TXT_reply dns_SPF_reply dns_WKS_reply dns_end dns_full_request ## dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name ## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply ## dns_rejected dns_request non_dns_request dns_max_queries dns_session_timeout @@ -565,7 +590,7 @@ event dns_DS%(c: connection, msg: dns_msg, ans: dns_answer, ds: dns_ds_rr%); ## all resource records have been passed on. ## ## See `Wikipedia `__ for more -## information about the DNS protocol. Bro analyzes both UDP and TCP DNS +## information about the DNS protocol. Zeek analyzes both UDP and TCP DNS ## sessions. ## ## c: The connection, which may be UDP or TCP depending on the type of the @@ -573,9 +598,9 @@ event dns_DS%(c: connection, msg: dns_msg, ans: dns_answer, ds: dns_ds_rr%); ## ## msg: The parsed DNS message header. ## -## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl +## .. 
zeek:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl ## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply -## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_full_request +## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_SPF_reply dns_WKS_reply dns_full_request ## dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name ## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply ## dns_rejected dns_request non_dns_request dns_max_queries dns_session_timeout @@ -590,6 +615,6 @@ event dns_full_request%(%); ## msg: The raw DNS payload. ## -## .. note:: This event is deprecated and superseded by Bro's dynamic protocol +## .. note:: This event is deprecated and superseded by Zeek's dynamic protocol ## detection framework. event non_dns_request%(c: connection, msg: string%); diff --git a/src/analyzer/protocol/file/CMakeLists.txt b/src/analyzer/protocol/file/CMakeLists.txt index 978c28c9c4..5c11356991 100644 --- a/src/analyzer/protocol/file/CMakeLists.txt +++ b/src/analyzer/protocol/file/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro File) -bro_plugin_cc(File.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek File) +zeek_plugin_cc(File.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/file/File.cc b/src/analyzer/protocol/file/File.cc index b7e00c7fa4..62fd36c0da 100644 --- a/src/analyzer/protocol/file/File.cc +++ b/src/analyzer/protocol/file/File.cc @@ -77,10 +77,12 @@ void File_Analyzer::Identify() &matches); string match = matches.empty() ? "" : *(matches.begin()->second.begin()); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(new StringVal(buffer_len, buffer)); - vl->append(new StringVal("")); - vl->append(new StringVal(match)); - ConnectionEvent(file_transferred, vl); + + if ( file_transferred ) + ConnectionEventFast(file_transferred, { + BuildConnVal(), + new StringVal(buffer_len, buffer), + new StringVal(""), + new StringVal(match), + }); } diff --git a/src/analyzer/protocol/file/Plugin.cc b/src/analyzer/protocol/file/Plugin.cc index 499736ebd8..36586fb6a9 100644 --- a/src/analyzer/protocol/file/Plugin.cc +++ b/src/analyzer/protocol/file/Plugin.cc @@ -6,7 +6,7 @@ #include "./File.h" namespace plugin { -namespace Bro_File { +namespace Zeek_File { class Plugin : public plugin::Plugin { public: @@ -16,7 +16,7 @@ public: AddComponent(new ::analyzer::Component("IRC_Data", ::analyzer::file::IRC_Data::Instantiate)); plugin::Configuration config; - config.name = "Bro::File"; + config.name = "Zeek::File"; config.description = "Generic file analyzer"; return config; } diff --git a/src/analyzer/protocol/finger/CMakeLists.txt b/src/analyzer/protocol/finger/CMakeLists.txt index 52dd3816f9..e89f268a8a 100644 --- a/src/analyzer/protocol/finger/CMakeLists.txt +++ b/src/analyzer/protocol/finger/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro Finger) -bro_plugin_cc(Finger.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek Finger) +zeek_plugin_cc(Finger.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/finger/Finger.cc b/src/analyzer/protocol/finger/Finger.cc index 6729c34448..127ab048e1 100644 --- 
a/src/analyzer/protocol/finger/Finger.cc +++ b/src/analyzer/protocol/finger/Finger.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include @@ -66,14 +66,15 @@ void Finger_Analyzer::DeliverStream(int length, const u_char* data, bool is_orig else host = at + 1; - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(long_cnt)); - vl->append(new StringVal(at - line, line)); - vl->append(new StringVal(end_of_line - host, host)); - if ( finger_request ) - ConnectionEvent(finger_request, vl); + { + ConnectionEventFast(finger_request, { + BuildConnVal(), + val_mgr->GetBool(long_cnt), + new StringVal(at - line, line), + new StringVal(end_of_line - host, host), + }); + } Conn()->Match(Rule::FINGER, (const u_char *) line, end_of_line - line, true, true, 1, true); @@ -86,10 +87,9 @@ void Finger_Analyzer::DeliverStream(int length, const u_char* data, bool is_orig if ( ! finger_reply ) return; - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(new StringVal(end_of_line - line, line)); - - ConnectionEvent(finger_reply, vl); + ConnectionEventFast(finger_reply, { + BuildConnVal(), + new StringVal(end_of_line - line, line), + }); } } diff --git a/src/analyzer/protocol/finger/Plugin.cc b/src/analyzer/protocol/finger/Plugin.cc index 7dbaaf702d..b6fafd3b4c 100644 --- a/src/analyzer/protocol/finger/Plugin.cc +++ b/src/analyzer/protocol/finger/Plugin.cc @@ -5,7 +5,7 @@ #include "Finger.h" namespace plugin { -namespace Bro_Finger { +namespace Zeek_Finger { class Plugin : public plugin::Plugin { public: @@ -14,7 +14,7 @@ public: AddComponent(new ::analyzer::Component("Finger", ::analyzer::finger::Finger_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::Finger"; + config.name = "Zeek::Finger"; config.description = "Finger analyzer"; return config; } diff --git a/src/analyzer/protocol/finger/events.bif b/src/analyzer/protocol/finger/events.bif index e495263b12..bc5180b1eb 100644 --- a/src/analyzer/protocol/finger/events.bif +++ b/src/analyzer/protocol/finger/events.bif @@ -11,11 +11,11 @@ ## ## hostname: The request's host name. ## -## .. bro:see:: finger_reply +## .. zeek:see:: finger_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event finger_request%(c: connection, full: bool, username: string, hostname: string%); @@ -28,11 +28,11 @@ event finger_request%(c: connection, full: bool, username: string, hostname: str ## ## reply_line: The reply as returned by the server ## -## .. bro:see:: finger_request +## .. zeek:see:: finger_request ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. 
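
Returning to the DNS changes above: the excerpt adds TYPE_SPF to the RR type enum and declares ParseRR_SPF alongside ParseRR_TXT, but the hunk that routes the new type into the interpreter's per-answer dispatch is not part of this excerpt. The following is only a sketch of the presumed wiring; the switch shape and the rr_type/status names are assumptions, while the parser signatures match the declarations shown in DNS.h.

// Presumed dispatch wiring (not shown in this excerpt): the per-answer
// switch gains a TYPE_SPF case that reuses the TXT-style parser.
// rr_type and status are placeholders for the interpreter's local state.
switch ( rr_type )
	{
	// ... existing cases unchanged ...

	case TYPE_TXT:
		status = ParseRR_TXT(msg, data, len, rdlength, msg_start);
		break;

	case TYPE_SPF:
		status = ParseRR_SPF(msg, data, len, rdlength, msg_start);
		break;

	// ... remaining cases, including the unknown-type fallback, unchanged ...
	}
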
event finger_reply%(c: connection, reply_line: string%); diff --git a/src/analyzer/protocol/ftp/CMakeLists.txt b/src/analyzer/protocol/ftp/CMakeLists.txt index ab657f9260..ff6d372295 100644 --- a/src/analyzer/protocol/ftp/CMakeLists.txt +++ b/src/analyzer/protocol/ftp/CMakeLists.txt @@ -1,10 +1,10 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro FTP) -bro_plugin_cc(FTP.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_bif(functions.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek FTP) +zeek_plugin_cc(FTP.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_bif(functions.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/ftp/FTP.cc b/src/analyzer/protocol/ftp/FTP.cc index f28dadf670..a6f41a6b66 100644 --- a/src/analyzer/protocol/ftp/FTP.cc +++ b/src/analyzer/protocol/ftp/FTP.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include @@ -73,8 +73,7 @@ void FTP_Analyzer::DeliverStream(int length, const u_char* data, bool orig) // Could emit "ftp empty request/reply" weird, but maybe not worth it. return; - val_list* vl = new val_list; - vl->append(BuildConnVal()); + val_list vl; EventHandlerPtr f; if ( orig ) @@ -95,8 +94,11 @@ void FTP_Analyzer::DeliverStream(int length, const u_char* data, bool orig) else cmd_str = (new StringVal(cmd_len, cmd))->ToUpper(); - vl->append(cmd_str); - vl->append(new StringVal(end_of_line - line, line)); + vl = val_list{ + BuildConnVal(), + cmd_str, + new StringVal(end_of_line - line, line), + }; f = ftp_request; ProtocolConfirmation(); @@ -171,14 +173,17 @@ void FTP_Analyzer::DeliverStream(int length, const u_char* data, bool orig) } } - vl->append(val_mgr->GetCount(reply_code)); - vl->append(new StringVal(end_of_line - line, line)); - vl->append(val_mgr->GetBool(cont_resp)); + vl = val_list{ + BuildConnVal(), + val_mgr->GetCount(reply_code), + new StringVal(end_of_line - line, line), + val_mgr->GetBool(cont_resp), + }; f = ftp_reply; } - ConnectionEvent(f, vl); + ConnectionEvent(f, std::move(vl)); ForwardStream(length, data, orig); } diff --git a/src/analyzer/protocol/ftp/Plugin.cc b/src/analyzer/protocol/ftp/Plugin.cc index 80e5bf4381..ae70d2f705 100644 --- a/src/analyzer/protocol/ftp/Plugin.cc +++ b/src/analyzer/protocol/ftp/Plugin.cc @@ -6,7 +6,7 @@ #include "FTP.h" namespace plugin { -namespace Bro_FTP { +namespace Zeek_FTP { class Plugin : public plugin::Plugin { public: @@ -16,7 +16,7 @@ public: AddComponent(new ::analyzer::Component("FTP_ADAT", 0)); plugin::Configuration config; - config.name = "Bro::FTP"; + config.name = "Zeek::FTP"; config.description = "FTP analyzer"; return config; } diff --git a/src/analyzer/protocol/ftp/events.bif b/src/analyzer/protocol/ftp/events.bif index 16faa417d3..6cc2317936 100644 --- a/src/analyzer/protocol/ftp/events.bif +++ b/src/analyzer/protocol/ftp/events.bif @@ -9,7 +9,7 @@ ## ## arg: The arguments going with the command. ## -## .. bro:see:: ftp_reply fmt_ftp_port parse_eftp_port +## .. zeek:see:: ftp_reply fmt_ftp_port parse_eftp_port ## parse_ftp_epsv parse_ftp_pasv parse_ftp_port event ftp_request%(c: connection, command: string, arg: string%); @@ -29,7 +29,7 @@ event ftp_request%(c: connection, command: string, arg: string%); ## to reassemble the pieces before processing the response any ## further. ## -## .. bro:see:: ftp_request fmt_ftp_port parse_eftp_port +## .. 
zeek:see:: ftp_request fmt_ftp_port parse_eftp_port ## parse_ftp_epsv parse_ftp_pasv parse_ftp_port event ftp_reply%(c: connection, code: count, msg: string, cont_resp: bool%); diff --git a/src/analyzer/protocol/ftp/functions.bif b/src/analyzer/protocol/ftp/functions.bif index 20c26b7c57..ad9c89fadb 100644 --- a/src/analyzer/protocol/ftp/functions.bif +++ b/src/analyzer/protocol/ftp/functions.bif @@ -117,20 +117,20 @@ static Val* parse_eftp(const char* line) %%} ## Converts a string representation of the FTP PORT command to an -## :bro:type:`ftp_port`. +## :zeek:type:`ftp_port`. ## ## s: The string of the FTP PORT command, e.g., ``"10,0,0,1,4,31"``. ## ## Returns: The FTP PORT, e.g., ``[h=10.0.0.1, p=1055/tcp, valid=T]``. ## -## .. bro:see:: parse_eftp_port parse_ftp_pasv parse_ftp_epsv fmt_ftp_port +## .. zeek:see:: parse_eftp_port parse_ftp_pasv parse_ftp_epsv fmt_ftp_port function parse_ftp_port%(s: string%): ftp_port %{ return parse_port(s->CheckString()); %} ## Converts a string representation of the FTP EPRT command (see :rfc:`2428`) -## to an :bro:type:`ftp_port`. The format is +## to an :zeek:type:`ftp_port`. The format is ## ``"EPRT"``, ## where ```` is a delimiter in the ASCII range 33-126 (usually ``|``). ## @@ -138,19 +138,19 @@ function parse_ftp_port%(s: string%): ftp_port ## ## Returns: The FTP PORT, e.g., ``[h=10.0.0.1, p=1055/tcp, valid=T]``. ## -## .. bro:see:: parse_ftp_port parse_ftp_pasv parse_ftp_epsv fmt_ftp_port +## .. zeek:see:: parse_ftp_port parse_ftp_pasv parse_ftp_epsv fmt_ftp_port function parse_eftp_port%(s: string%): ftp_port %{ return parse_eftp(s->CheckString()); %} -## Converts the result of the FTP PASV command to an :bro:type:`ftp_port`. +## Converts the result of the FTP PASV command to an :zeek:type:`ftp_port`. ## ## str: The string containing the result of the FTP PASV command. ## ## Returns: The FTP PORT, e.g., ``[h=10.0.0.1, p=1055/tcp, valid=T]``. ## -## .. bro:see:: parse_ftp_port parse_eftp_port parse_ftp_epsv fmt_ftp_port +## .. zeek:see:: parse_ftp_port parse_eftp_port parse_ftp_epsv fmt_ftp_port function parse_ftp_pasv%(str: string%): ftp_port %{ const char* s = str->CheckString(); @@ -170,14 +170,14 @@ function parse_ftp_pasv%(str: string%): ftp_port %} ## Converts the result of the FTP EPSV command (see :rfc:`2428`) to an -## :bro:type:`ftp_port`. The format is ``" ()"``, +## :zeek:type:`ftp_port`. The format is ``" ()"``, ## where ```` is a delimiter in the ASCII range 33-126 (usually ``|``). ## ## str: The string containing the result of the FTP EPSV command. ## ## Returns: The FTP PORT, e.g., ``[h=10.0.0.1, p=1055/tcp, valid=T]``. ## -## .. bro:see:: parse_ftp_port parse_eftp_port parse_ftp_pasv fmt_ftp_port +## .. zeek:see:: parse_ftp_port parse_eftp_port parse_ftp_pasv fmt_ftp_port function parse_ftp_epsv%(str: string%): ftp_port %{ const char* s = str->CheckString(); @@ -196,7 +196,7 @@ function parse_ftp_epsv%(str: string%): ftp_port ## ## Returns: The FTP PORT string. ## -## .. bro:see:: parse_ftp_port parse_eftp_port parse_ftp_pasv parse_ftp_epsv +## .. 
zeek:see:: parse_ftp_port parse_eftp_port parse_ftp_pasv parse_ftp_epsv function fmt_ftp_port%(a: addr, p: port%): string %{ const uint32* addr; diff --git a/src/analyzer/protocol/gnutella/CMakeLists.txt b/src/analyzer/protocol/gnutella/CMakeLists.txt index ee5415b924..d463ac6af7 100644 --- a/src/analyzer/protocol/gnutella/CMakeLists.txt +++ b/src/analyzer/protocol/gnutella/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro Gnutella) -bro_plugin_cc(Gnutella.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek Gnutella) +zeek_plugin_cc(Gnutella.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/gnutella/Gnutella.cc b/src/analyzer/protocol/gnutella/Gnutella.cc index e7c11b40bb..7cc6285c8c 100644 --- a/src/analyzer/protocol/gnutella/Gnutella.cc +++ b/src/analyzer/protocol/gnutella/Gnutella.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include @@ -58,16 +58,10 @@ void Gnutella_Analyzer::Done() if ( ! sent_establish && (gnutella_establish || gnutella_not_establish) ) { - val_list* vl = new val_list; - - vl->append(BuildConnVal()); - if ( Established() && gnutella_establish ) - ConnectionEvent(gnutella_establish, vl); + ConnectionEventFast(gnutella_establish, {BuildConnVal()}); else if ( ! Established () && gnutella_not_establish ) - ConnectionEvent(gnutella_not_establish, vl); - else - delete_vals(vl); + ConnectionEventFast(gnutella_not_establish, {BuildConnVal()}); } if ( gnutella_partial_binary_msg ) @@ -78,14 +72,12 @@ void Gnutella_Analyzer::Done() { if ( ! p->msg_sent && p->msg_pos ) { - val_list* vl = new val_list; - - vl->append(BuildConnVal()); - vl->append(new StringVal(p->msg)); - vl->append(val_mgr->GetBool((i == 0))); - vl->append(val_mgr->GetCount(p->msg_pos)); - - ConnectionEvent(gnutella_partial_binary_msg, vl); + ConnectionEventFast(gnutella_partial_binary_msg, { + BuildConnVal(), + new StringVal(p->msg), + val_mgr->GetBool((i == 0)), + val_mgr->GetCount(p->msg_pos), + }); } else if ( ! 
p->msg_sent && p->payload_left ) @@ -129,10 +121,7 @@ int Gnutella_Analyzer::IsHTTP(string header) if ( gnutella_http_notify ) { - val_list* vl = new val_list; - - vl->append(BuildConnVal()); - ConnectionEvent(gnutella_http_notify, vl); + ConnectionEventFast(gnutella_http_notify, {BuildConnVal()}); } analyzer::Analyzer* a = analyzer_mgr->InstantiateAnalyzer("HTTP", Conn()); @@ -192,13 +181,11 @@ void Gnutella_Analyzer::DeliverLines(int len, const u_char* data, bool orig) { if ( gnutella_text_msg ) { - val_list* vl = new val_list; - - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(ms->headers.data())); - - ConnectionEvent(gnutella_text_msg, vl); + ConnectionEventFast(gnutella_text_msg, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(ms->headers.data()), + }); } ms->headers = ""; @@ -206,12 +193,9 @@ void Gnutella_Analyzer::DeliverLines(int len, const u_char* data, bool orig) if ( Established () && gnutella_establish ) { - val_list* vl = new val_list; - sent_establish = 1; - vl->append(BuildConnVal()); - ConnectionEvent(gnutella_establish, vl); + ConnectionEventFast(gnutella_establish, {BuildConnVal()}); } } } @@ -237,21 +221,18 @@ void Gnutella_Analyzer::SendEvents(GnutellaMsgState* p, bool is_orig) if ( gnutella_binary_msg ) { - val_list* vl = new val_list; - - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(is_orig)); - vl->append(val_mgr->GetCount(p->msg_type)); - vl->append(val_mgr->GetCount(p->msg_ttl)); - vl->append(val_mgr->GetCount(p->msg_hops)); - vl->append(val_mgr->GetCount(p->msg_len)); - vl->append(new StringVal(p->payload)); - vl->append(val_mgr->GetCount(p->payload_len)); - vl->append(val_mgr->GetBool( - (p->payload_len < min(p->msg_len, (unsigned int)GNUTELLA_MAX_PAYLOAD)))); - vl->append(val_mgr->GetBool((p->payload_left == 0))); - - ConnectionEvent(gnutella_binary_msg, vl); + ConnectionEventFast(gnutella_binary_msg, { + BuildConnVal(), + val_mgr->GetBool(is_orig), + val_mgr->GetCount(p->msg_type), + val_mgr->GetCount(p->msg_ttl), + val_mgr->GetCount(p->msg_hops), + val_mgr->GetCount(p->msg_len), + new StringVal(p->payload), + val_mgr->GetCount(p->payload_len), + val_mgr->GetBool((p->payload_len < min(p->msg_len, (unsigned int)GNUTELLA_MAX_PAYLOAD))), + val_mgr->GetBool((p->payload_left == 0)), + }); } } diff --git a/src/analyzer/protocol/gnutella/Plugin.cc b/src/analyzer/protocol/gnutella/Plugin.cc index afd0ff491e..b6a560ec58 100644 --- a/src/analyzer/protocol/gnutella/Plugin.cc +++ b/src/analyzer/protocol/gnutella/Plugin.cc @@ -6,7 +6,7 @@ #include "Gnutella.h" namespace plugin { -namespace Bro_Gnutella { +namespace Zeek_Gnutella { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("Gnutella", ::analyzer::gnutella::Gnutella_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::Gnutella"; + config.name = "Zeek::Gnutella"; config.description = "Gnutella analyzer"; return config; } diff --git a/src/analyzer/protocol/gnutella/events.bif b/src/analyzer/protocol/gnutella/events.bif index 9384f34e88..4168646543 100644 --- a/src/analyzer/protocol/gnutella/events.bif +++ b/src/analyzer/protocol/gnutella/events.bif @@ -3,13 +3,13 @@ ## See `Wikipedia `__ for more ## information about the Gnutella protocol. ## -## .. bro:see:: gnutella_binary_msg gnutella_establish gnutella_http_notify +## .. 
zeek:see:: gnutella_binary_msg gnutella_establish gnutella_http_notify ## gnutella_not_establish gnutella_partial_binary_msg gnutella_signature_found ## ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event gnutella_text_msg%(c: connection, orig: bool, headers: string%); @@ -18,12 +18,12 @@ event gnutella_text_msg%(c: connection, orig: bool, headers: string%); ## See `Wikipedia `__ for more ## information about the Gnutella protocol. ## -## .. bro:see:: gnutella_establish gnutella_http_notify gnutella_not_establish +## .. zeek:see:: gnutella_establish gnutella_http_notify gnutella_not_establish ## gnutella_partial_binary_msg gnutella_signature_found gnutella_text_msg ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event gnutella_binary_msg%(c: connection, orig: bool, msg_type: count, ttl: count, hops: count, msg_len: count, @@ -35,12 +35,12 @@ event gnutella_binary_msg%(c: connection, orig: bool, msg_type: count, ## See `Wikipedia `__ for more ## information about the Gnutella protocol. ## -## .. bro:see:: gnutella_binary_msg gnutella_establish gnutella_http_notify +## .. zeek:see:: gnutella_binary_msg gnutella_establish gnutella_http_notify ## gnutella_not_establish gnutella_signature_found gnutella_text_msg ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event gnutella_partial_binary_msg%(c: connection, orig: bool, msg: string, len: count%); @@ -50,12 +50,12 @@ event gnutella_partial_binary_msg%(c: connection, orig: bool, ## See `Wikipedia `__ for more ## information about the Gnutella protocol. ## -## .. bro:see:: gnutella_binary_msg gnutella_http_notify gnutella_not_establish +## .. zeek:see:: gnutella_binary_msg gnutella_http_notify gnutella_not_establish ## gnutella_partial_binary_msg gnutella_signature_found gnutella_text_msg ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event gnutella_establish%(c: connection%); @@ -64,12 +64,12 @@ event gnutella_establish%(c: connection%); ## See `Wikipedia `__ for more ## information about the Gnutella protocol. ## -## .. 
bro:see:: gnutella_binary_msg gnutella_establish gnutella_http_notify +## .. zeek:see:: gnutella_binary_msg gnutella_establish gnutella_http_notify ## gnutella_partial_binary_msg gnutella_signature_found gnutella_text_msg ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event gnutella_not_establish%(c: connection%); @@ -78,11 +78,11 @@ event gnutella_not_establish%(c: connection%); ## See `Wikipedia `__ for more ## information about the Gnutella protocol. ## -## .. bro:see:: gnutella_binary_msg gnutella_establish gnutella_not_establish +## .. zeek:see:: gnutella_binary_msg gnutella_establish gnutella_not_establish ## gnutella_partial_binary_msg gnutella_signature_found gnutella_text_msg ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event gnutella_http_notify%(c: connection%); diff --git a/src/analyzer/protocol/gssapi/CMakeLists.txt b/src/analyzer/protocol/gssapi/CMakeLists.txt index d826d36bf7..74ae705313 100644 --- a/src/analyzer/protocol/gssapi/CMakeLists.txt +++ b/src/analyzer/protocol/gssapi/CMakeLists.txt @@ -1,16 +1,16 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro GSSAPI) -bro_plugin_cc(GSSAPI.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_pac( +zeek_plugin_begin(Zeek GSSAPI) +zeek_plugin_cc(GSSAPI.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_pac( gssapi.pac gssapi-protocol.pac gssapi-analyzer.pac ../asn1/asn1.pac ) -bro_plugin_end() +zeek_plugin_end() diff --git a/src/analyzer/protocol/gssapi/Plugin.cc b/src/analyzer/protocol/gssapi/Plugin.cc index 3765d9b79d..c0cd7fe11c 100644 --- a/src/analyzer/protocol/gssapi/Plugin.cc +++ b/src/analyzer/protocol/gssapi/Plugin.cc @@ -5,7 +5,7 @@ #include "GSSAPI.h" namespace plugin { -namespace Bro_GSSAPI { +namespace Zeek_GSSAPI { class Plugin : public plugin::Plugin { public: @@ -14,7 +14,7 @@ public: AddComponent(new ::analyzer::Component("GSSAPI", ::analyzer::gssapi::GSSAPI_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::GSSAPI"; + config.name = "Zeek::GSSAPI"; config.description = "GSSAPI analyzer"; return config; } diff --git a/src/analyzer/protocol/gtpv1/CMakeLists.txt b/src/analyzer/protocol/gtpv1/CMakeLists.txt index b45f32e883..61856cf1f1 100644 --- a/src/analyzer/protocol/gtpv1/CMakeLists.txt +++ b/src/analyzer/protocol/gtpv1/CMakeLists.txt @@ -1,10 +1,10 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro GTPv1) -bro_plugin_cc(GTPv1.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_pac(gtpv1.pac gtpv1-protocol.pac gtpv1-analyzer.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek GTPv1) +zeek_plugin_cc(GTPv1.cc Plugin.cc) 
+zeek_plugin_bif(events.bif) +zeek_plugin_pac(gtpv1.pac gtpv1-protocol.pac gtpv1-analyzer.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/gtpv1/Plugin.cc b/src/analyzer/protocol/gtpv1/Plugin.cc index 846c78d18f..4b7929a747 100644 --- a/src/analyzer/protocol/gtpv1/Plugin.cc +++ b/src/analyzer/protocol/gtpv1/Plugin.cc @@ -6,7 +6,7 @@ #include "GTPv1.h" namespace plugin { -namespace Bro_GTPv1 { +namespace Zeek_GTPv1 { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("GTPv1", ::analyzer::gtpv1::GTPv1_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::GTPv1"; + config.name = "Zeek::GTPv1"; config.description = "GTPv1 analyzer"; return config; } diff --git a/src/analyzer/protocol/http/CMakeLists.txt b/src/analyzer/protocol/http/CMakeLists.txt index 663d343eb3..1b173e6949 100644 --- a/src/analyzer/protocol/http/CMakeLists.txt +++ b/src/analyzer/protocol/http/CMakeLists.txt @@ -1,10 +1,10 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro HTTP) -bro_plugin_cc(HTTP.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_bif(functions.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek HTTP) +zeek_plugin_cc(HTTP.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_bif(functions.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/http/HTTP.cc b/src/analyzer/protocol/http/HTTP.cc index 4706286914..291990119a 100644 --- a/src/analyzer/protocol/http/HTTP.cc +++ b/src/analyzer/protocol/http/HTTP.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include #include @@ -646,11 +646,11 @@ void HTTP_Message::Done(const int interrupted, const char* detail) if ( http_message_done ) { - val_list* vl = new val_list; - vl->append(analyzer->BuildConnVal()); - vl->append(val_mgr->GetBool(is_orig)); - vl->append(BuildMessageStat(interrupted, detail)); - GetAnalyzer()->ConnectionEvent(http_message_done, vl); + GetAnalyzer()->ConnectionEventFast(http_message_done, { + analyzer->BuildConnVal(), + val_mgr->GetBool(is_orig), + BuildMessageStat(interrupted, detail), + }); } MyHTTP_Analyzer()->HTTP_MessageDone(is_orig, this); @@ -679,10 +679,10 @@ void HTTP_Message::BeginEntity(mime::MIME_Entity* entity) if ( http_begin_entity ) { - val_list* vl = new val_list(); - vl->append(analyzer->BuildConnVal()); - vl->append(val_mgr->GetBool(is_orig)); - analyzer->ConnectionEvent(http_begin_entity, vl); + analyzer->ConnectionEventFast(http_begin_entity, { + analyzer->BuildConnVal(), + val_mgr->GetBool(is_orig), + }); } } @@ -696,10 +696,10 @@ void HTTP_Message::EndEntity(mime::MIME_Entity* entity) if ( http_end_entity ) { - val_list* vl = new val_list(); - vl->append(analyzer->BuildConnVal()); - vl->append(val_mgr->GetBool(is_orig)); - analyzer->ConnectionEvent(http_end_entity, vl); + analyzer->ConnectionEventFast(http_end_entity, { + analyzer->BuildConnVal(), + val_mgr->GetBool(is_orig), + }); } current_entity = (HTTP_Entity*) entity->Parent(); @@ -737,11 +737,11 @@ void HTTP_Message::SubmitAllHeaders(mime::MIME_HeaderList& hlist) { if ( http_all_headers ) { - val_list* vl = new val_list(); - vl->append(analyzer->BuildConnVal()); - vl->append(val_mgr->GetBool(is_orig)); - vl->append(BuildHeaderTable(hlist)); - analyzer->ConnectionEvent(http_all_headers, vl); + analyzer->ConnectionEventFast(http_all_headers, { + analyzer->BuildConnVal(), + 
val_mgr->GetBool(is_orig), + BuildHeaderTable(hlist), + }); } if ( http_content_type ) @@ -751,12 +751,12 @@ void HTTP_Message::SubmitAllHeaders(mime::MIME_HeaderList& hlist) ty->Ref(); subty->Ref(); - val_list* vl = new val_list(); - vl->append(analyzer->BuildConnVal()); - vl->append(val_mgr->GetBool(is_orig)); - vl->append(ty); - vl->append(subty); - analyzer->ConnectionEvent(http_content_type, vl); + analyzer->ConnectionEventFast(http_content_type, { + analyzer->BuildConnVal(), + val_mgr->GetBool(is_orig), + ty, + subty, + }); } } @@ -1182,12 +1182,8 @@ void HTTP_Analyzer::GenStats() r->Assign(2, new Val(request_version, TYPE_DOUBLE)); r->Assign(3, new Val(reply_version, TYPE_DOUBLE)); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(r); - // DEBUG_MSG("%.6f http_stats\n", network_time); - ConnectionEvent(http_stats, vl); + ConnectionEventFast(http_stats, {BuildConnVal(), r}); } } @@ -1384,13 +1380,12 @@ void HTTP_Analyzer::HTTP_Event(const char* category, StringVal* detail) { if ( http_event ) { - val_list* vl = new val_list(); - vl->append(BuildConnVal()); - vl->append(new StringVal(category)); - vl->append(detail); - // DEBUG_MSG("%.6f http_event\n", network_time); - ConnectionEvent(http_event, vl); + ConnectionEventFast(http_event, { + BuildConnVal(), + new StringVal(category), + detail, + }); } else delete detail; @@ -1426,17 +1421,16 @@ void HTTP_Analyzer::HTTP_Request() if ( http_request ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - Ref(request_method); - vl->append(request_method); - vl->append(TruncateURI(request_URI->AsStringVal())); - vl->append(TruncateURI(unescaped_URI->AsStringVal())); - vl->append(new StringVal(fmt("%.1f", request_version))); // DEBUG_MSG("%.6f http_request\n", network_time); - ConnectionEvent(http_request, vl); + ConnectionEventFast(http_request, { + BuildConnVal(), + request_method, + TruncateURI(request_URI->AsStringVal()), + TruncateURI(unescaped_URI->AsStringVal()), + new StringVal(fmt("%.1f", request_version)), + }); } } @@ -1444,15 +1438,14 @@ void HTTP_Analyzer::HTTP_Reply() { if ( http_reply ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(new StringVal(fmt("%.1f", reply_version))); - vl->append(val_mgr->GetCount(reply_code)); - if ( reply_reason_phrase ) - vl->append(reply_reason_phrase->Ref()); - else - vl->append(new StringVal("")); - ConnectionEvent(http_reply, vl); + ConnectionEventFast(http_reply, { + BuildConnVal(), + new StringVal(fmt("%.1f", reply_version)), + val_mgr->GetCount(reply_code), + reply_reason_phrase ? 
+ reply_reason_phrase->Ref() : + new StringVal(""), + }); } else { @@ -1524,10 +1517,10 @@ void HTTP_Analyzer::ReplyMade(const int interrupted, const char* msg) if ( http_connection_upgrade ) { - val_list* vl = new val_list(); - vl->append(BuildConnVal()); - vl->append(new StringVal(upgrade_protocol)); - ConnectionEvent(http_connection_upgrade, vl); + ConnectionEventFast(http_connection_upgrade, { + BuildConnVal(), + new StringVal(upgrade_protocol), + }); } } @@ -1697,14 +1690,15 @@ void HTTP_Analyzer::HTTP_Header(int is_orig, mime::MIME_Header* h) Conn()->Match(rule, (const u_char*) hd_value.data, hd_value.length, is_orig, false, true, false); - val_list* vl = new val_list(); - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(is_orig)); - vl->append(mime::new_string_val(h->get_name())->ToUpper()); - vl->append(mime::new_string_val(h->get_value())); if ( DEBUG_http ) DEBUG_MSG("%.6f http_header\n", network_time); - ConnectionEvent(http_header, vl); + + ConnectionEventFast(http_header, { + BuildConnVal(), + val_mgr->GetBool(is_orig), + mime::new_string_val(h->get_name())->ToUpper(), + mime::new_string_val(h->get_value()), + }); } } @@ -1833,12 +1827,12 @@ void HTTP_Analyzer::HTTP_EntityData(int is_orig, BroString* entity_data) { if ( http_entity_data ) { - val_list* vl = new val_list(); - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(is_orig)); - vl->append(val_mgr->GetCount(entity_data->Len())); - vl->append(new StringVal(entity_data)); - ConnectionEvent(http_entity_data, vl); + ConnectionEventFast(http_entity_data, { + BuildConnVal(), + val_mgr->GetBool(is_orig), + val_mgr->GetCount(entity_data->Len()), + new StringVal(entity_data), + }); } else delete entity_data; diff --git a/src/analyzer/protocol/http/Plugin.cc b/src/analyzer/protocol/http/Plugin.cc index f88866f66f..f2b7402415 100644 --- a/src/analyzer/protocol/http/Plugin.cc +++ b/src/analyzer/protocol/http/Plugin.cc @@ -6,7 +6,7 @@ #include "HTTP.h" namespace plugin { -namespace Bro_HTTP { +namespace Zeek_HTTP { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("HTTP", ::analyzer::http::HTTP_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::HTTP"; + config.name = "Zeek::HTTP"; config.description = "HTTP analyzer"; return config; } diff --git a/src/analyzer/protocol/http/events.bif b/src/analyzer/protocol/http/events.bif index ab005ba8d6..60b0880a43 100644 --- a/src/analyzer/protocol/http/events.bif +++ b/src/analyzer/protocol/http/events.bif @@ -1,8 +1,8 @@ -## Generated for HTTP requests. Bro supports persistent and pipelined HTTP +## Generated for HTTP requests. Zeek supports persistent and pipelined HTTP ## sessions and raises corresponding events as it parses client/server ## dialogues. This event is generated as soon as a request's initial line has -## been parsed, and before any :bro:id:`http_header` events are raised. +## been parsed, and before any :zeek:id:`http_header` events are raised. ## ## See `Wikipedia `__ ## for more information about the HTTP protocol. @@ -17,15 +17,15 @@ ## ## version: The version number specified in the request (e.g., ``1.1``). ## -## .. bro:see:: http_all_headers http_begin_entity http_content_type http_end_entity +## .. 
zeek:see:: http_all_headers http_begin_entity http_content_type http_end_entity ## http_entity_data http_event http_header http_message_done http_reply http_stats ## truncate_http_URI http_connection_upgrade event http_request%(c: connection, method: string, original_URI: string, unescaped_URI: string, version: string%); -## Generated for HTTP replies. Bro supports persistent and pipelined HTTP +## Generated for HTTP replies. Zeek supports persistent and pipelined HTTP ## sessions and raises corresponding events as it parses client/server ## dialogues. This event is generated as soon as a reply's initial line has -## been parsed, and before any :bro:id:`http_header` events are raised. +## been parsed, and before any :zeek:id:`http_header` events are raised. ## ## See `Wikipedia `__ ## for more information about the HTTP protocol. @@ -38,12 +38,12 @@ event http_request%(c: connection, method: string, original_URI: string, unescap ## ## reason: The textual description returned by the server along with *code*. ## -## .. bro:see:: http_all_headers http_begin_entity http_content_type http_end_entity +## .. zeek:see:: http_all_headers http_begin_entity http_content_type http_end_entity ## http_entity_data http_event http_header http_message_done http_request ## http_stats http_connection_upgrade event http_reply%(c: connection, version: string, code: count, reason: string%); -## Generated for HTTP headers. Bro supports persistent and pipelined HTTP +## Generated for HTTP headers. Zeek supports persistent and pipelined HTTP ## sessions and raises corresponding events as it parses client/server ## dialogues. ## @@ -58,7 +58,7 @@ event http_reply%(c: connection, version: string, code: count, reason: string%); ## ## value: The value of the header. ## -## .. bro:see:: http_all_headers http_begin_entity http_content_type http_end_entity +## .. zeek:see:: http_all_headers http_begin_entity http_content_type http_end_entity ## http_entity_data http_event http_message_done http_reply http_request ## http_stats http_connection_upgrade ## @@ -67,7 +67,7 @@ event http_reply%(c: connection, version: string, code: count, reason: string%); event http_header%(c: connection, is_orig: bool, name: string, value: string%); ## Generated for HTTP headers, passing on all headers of an HTTP message at -## once. Bro supports persistent and pipelined HTTP sessions and raises +## once. Zeek supports persistent and pipelined HTTP sessions and raises ## corresponding events as it parses client/server dialogues. ## ## See `Wikipedia `__ @@ -81,7 +81,7 @@ event http_header%(c: connection, is_orig: bool, name: string, value: string%); ## The table is indexed by the position of the header (1 for the first, ## 2 for the second, etc.). ## -## .. bro:see:: http_begin_entity http_content_type http_end_entity http_entity_data +## .. zeek:see:: http_begin_entity http_content_type http_end_entity http_entity_data ## http_event http_header http_message_done http_reply http_request http_stats ## http_connection_upgrade ## @@ -92,7 +92,7 @@ event http_all_headers%(c: connection, is_orig: bool, hlist: mime_header_list%); ## Generated when starting to parse an HTTP body entity. This event is generated ## at least once for each non-empty (client or server) HTTP body; and ## potentially more than once if the body contains further nested MIME -## entities. Bro raises this event just before it starts parsing each entity's +## entities. Zeek raises this event just before it starts parsing each entity's ## content. 
## ## See `Wikipedia `__ @@ -103,7 +103,7 @@ event http_all_headers%(c: connection, is_orig: bool, hlist: mime_header_list%); ## is_orig: True if the entity was sent by the originator of the TCP ## connection. ## -## .. bro:see:: http_all_headers http_content_type http_end_entity http_entity_data +## .. zeek:see:: http_all_headers http_content_type http_end_entity http_entity_data ## http_event http_header http_message_done http_reply http_request http_stats ## mime_begin_entity http_connection_upgrade event http_begin_entity%(c: connection, is_orig: bool%); @@ -111,7 +111,7 @@ event http_begin_entity%(c: connection, is_orig: bool%); ## Generated when finishing parsing an HTTP body entity. This event is generated ## at least once for each non-empty (client or server) HTTP body; and ## potentially more than once if the body contains further nested MIME -## entities. Bro raises this event at the point when it has finished parsing an +## entities. Zeek raises this event at the point when it has finished parsing an ## entity's content. ## ## See `Wikipedia `__ @@ -122,7 +122,7 @@ event http_begin_entity%(c: connection, is_orig: bool%); ## is_orig: True if the entity was sent by the originator of the TCP ## connection. ## -## .. bro:see:: http_all_headers http_begin_entity http_content_type http_entity_data +## .. zeek:see:: http_all_headers http_begin_entity http_content_type http_entity_data ## http_event http_header http_message_done http_reply http_request ## http_stats mime_end_entity http_connection_upgrade event http_end_entity%(c: connection, is_orig: bool%); @@ -134,7 +134,7 @@ event http_end_entity%(c: connection, is_orig: bool%); ## A common idiom for using this event is to first *reassemble* the data ## at the scripting layer by concatenating it to a successively growing ## string; and only perform further content analysis once the corresponding -## :bro:id:`http_end_entity` event has been raised. Note, however, that doing so +## :zeek:id:`http_end_entity` event has been raised. Note, however, that doing so ## can be quite expensive for HTTP tranders. At the very least, one should ## impose an upper size limit on how much data is being buffered. ## @@ -150,7 +150,7 @@ event http_end_entity%(c: connection, is_orig: bool%); ## ## data: One chunk of raw entity data. ## -## .. bro:see:: http_all_headers http_begin_entity http_content_type http_end_entity +## .. zeek:see:: http_all_headers http_begin_entity http_content_type http_end_entity ## http_event http_header http_message_done http_reply http_request http_stats ## mime_entity_data http_entity_data_delivery_size skip_http_data ## http_connection_upgrade @@ -173,7 +173,7 @@ event http_entity_data%(c: connection, is_orig: bool, length: count, data: strin ## ## subty: The subtype. ## -## .. bro:see:: http_all_headers http_begin_entity http_end_entity http_entity_data +## .. zeek:see:: http_all_headers http_begin_entity http_end_entity http_entity_data ## http_event http_header http_message_done http_reply http_request http_stats ## http_connection_upgrade ## @@ -181,7 +181,7 @@ event http_entity_data%(c: connection, is_orig: bool, length: count, data: strin ## entities. event http_content_type%(c: connection, is_orig: bool, ty: string, subty: string%); -## Generated once at the end of parsing an HTTP message. Bro supports persistent +## Generated once at the end of parsing an HTTP message. Zeek supports persistent ## and pipelined HTTP sessions and raises corresponding events as it parses ## client/server dialogues. 
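# Editor's note: a minimal sketch of the buffering idiom that the
# http_entity_data documentation above describes (reassemble entity data at
# the script layer, analyze it once http_end_entity fires, and cap how much
# is buffered). This is illustrative only; the identifiers "entity_buf" and
# "max_entity_buffer" are hypothetical and not part of the shipped scripts.
global entity_buf: table[conn_id, bool] of string &default="";
const max_entity_buffer = 1048576;  # upper bound on buffered bytes per direction

event http_entity_data(c: connection, is_orig: bool, length: count, data: string)
	{
	# Concatenate chunks per connection and direction, enforcing the size
	# limit the documentation advises.
	if ( |entity_buf[c$id, is_orig]| + length <= max_entity_buffer )
		entity_buf[c$id, is_orig] = entity_buf[c$id, is_orig] + data;
	}

event http_end_entity(c: connection, is_orig: bool)
	{
	if ( [c$id, is_orig] in entity_buf )
		{
		# ... perform further content analysis on the reassembled entity here ...
		delete entity_buf[c$id, is_orig];
		}
	}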
A "message" is one top-level HTTP entity, such as a ## complete request or reply. Each message can have further nested sub-entities @@ -199,7 +199,7 @@ event http_content_type%(c: connection, is_orig: bool, ty: string, subty: string ## ## stat: Further meta information about the message. ## -## .. bro:see:: http_all_headers http_begin_entity http_content_type http_end_entity +## .. zeek:see:: http_all_headers http_begin_entity http_content_type http_end_entity ## http_entity_data http_event http_header http_reply http_request http_stats ## http_connection_upgrade event http_message_done%(c: connection, is_orig: bool, stat: http_message_stat%); @@ -216,7 +216,7 @@ event http_message_done%(c: connection, is_orig: bool, stat: http_message_stat%) ## ## detail: Further more detailed description of the error. ## -## .. bro:see:: http_all_headers http_begin_entity http_content_type http_end_entity +## .. zeek:see:: http_all_headers http_begin_entity http_content_type http_end_entity ## http_entity_data http_header http_message_done http_reply http_request ## http_stats mime_event http_connection_upgrade event http_event%(c: connection, event_type: string, detail: string%); @@ -230,7 +230,7 @@ event http_event%(c: connection, event_type: string, detail: string%); ## stats: Statistics summarizing HTTP-level properties of the finished ## connection. ## -## .. bro:see:: http_all_headers http_begin_entity http_content_type http_end_entity +## .. zeek:see:: http_all_headers http_begin_entity http_content_type http_end_entity ## http_entity_data http_event http_header http_message_done http_reply ## http_request http_connection_upgrade event http_stats%(c: connection, stats: http_stats_rec%); @@ -243,7 +243,7 @@ event http_stats%(c: connection, stats: http_stats_rec%); ## ## protocol: The protocol to which the connection is switching. ## -## .. bro:see:: http_all_headers http_begin_entity http_content_type http_end_entity +## .. zeek:see:: http_all_headers http_begin_entity http_content_type http_end_entity ## http_entity_data http_event http_header http_message_done http_reply ## http_request event http_connection_upgrade%(c: connection, protocol: string%); diff --git a/src/analyzer/protocol/http/functions.bif b/src/analyzer/protocol/http/functions.bif index 6ef6fecb81..ff4f0015b7 100644 --- a/src/analyzer/protocol/http/functions.bif +++ b/src/analyzer/protocol/http/functions.bif @@ -9,7 +9,7 @@ ## ## is_orig: If true, the client data is skipped, and the server data otherwise. ## -## .. bro:see:: skip_smtp_data +## .. 
zeek:see:: skip_smtp_data function skip_http_entity_data%(c: connection, is_orig: bool%): any %{ analyzer::ID id = mgr.CurrentAnalyzer(); diff --git a/src/analyzer/protocol/icmp/CMakeLists.txt b/src/analyzer/protocol/icmp/CMakeLists.txt index 7b8bd9c7fe..875b3597ec 100644 --- a/src/analyzer/protocol/icmp/CMakeLists.txt +++ b/src/analyzer/protocol/icmp/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro ICMP) -bro_plugin_cc(ICMP.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek ICMP) +zeek_plugin_cc(ICMP.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/icmp/ICMP.cc b/src/analyzer/protocol/icmp/ICMP.cc index 1832b324b2..3c65a2a831 100644 --- a/src/analyzer/protocol/icmp/ICMP.cc +++ b/src/analyzer/protocol/icmp/ICMP.cc @@ -2,7 +2,7 @@ #include -#include "bro-config.h" +#include "zeek-config.h" #include "Net.h" #include "NetVar.h" @@ -199,20 +199,21 @@ void ICMP_Analyzer::ICMP_Sent(const struct icmp* icmpp, int len, int caplen, { if ( icmp_sent ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, icmpv6, ip_hdr)); - ConnectionEvent(icmp_sent, vl); + ConnectionEventFast(icmp_sent, { + BuildConnVal(), + BuildICMPVal(icmpp, len, icmpv6, ip_hdr), + }); } if ( icmp_sent_payload ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, icmpv6, ip_hdr)); BroString* payload = new BroString(data, min(len, caplen), 0); - vl->append(new StringVal(payload)); - ConnectionEvent(icmp_sent_payload, vl); + + ConnectionEventFast(icmp_sent_payload, { + BuildConnVal(), + BuildICMPVal(icmpp, len, icmpv6, ip_hdr), + new StringVal(payload), + }); } } @@ -511,14 +512,13 @@ void ICMP_Analyzer::Echo(double t, const struct icmp* icmpp, int len, BroString* payload = new BroString(data, caplen, 0); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, ip_hdr->NextProto() != IPPROTO_ICMP, ip_hdr)); - vl->append(val_mgr->GetCount(iid)); - vl->append(val_mgr->GetCount(iseq)); - vl->append(new StringVal(payload)); - - ConnectionEvent(f, vl); + ConnectionEventFast(f, { + BuildConnVal(), + BuildICMPVal(icmpp, len, ip_hdr->NextProto() != IPPROTO_ICMP, ip_hdr), + val_mgr->GetCount(iid), + val_mgr->GetCount(iseq), + new StringVal(payload), + }); } @@ -526,6 +526,10 @@ void ICMP_Analyzer::RouterAdvert(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr) { EventHandlerPtr f = icmp_router_advertisement; + + if ( ! 
f ) + return; + uint32 reachable = 0, retrans = 0; if ( caplen >= (int)sizeof(reachable) ) @@ -534,24 +538,23 @@ void ICMP_Analyzer::RouterAdvert(double t, const struct icmp* icmpp, int len, if ( caplen >= (int)sizeof(reachable) + (int)sizeof(retrans) ) memcpy(&retrans, data + sizeof(reachable), sizeof(retrans)); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); - vl->append(val_mgr->GetCount(icmpp->icmp_num_addrs)); // Cur Hop Limit - vl->append(val_mgr->GetBool(icmpp->icmp_wpa & 0x80)); // Managed - vl->append(val_mgr->GetBool(icmpp->icmp_wpa & 0x40)); // Other - vl->append(val_mgr->GetBool(icmpp->icmp_wpa & 0x20)); // Home Agent - vl->append(val_mgr->GetCount((icmpp->icmp_wpa & 0x18)>>3)); // Pref - vl->append(val_mgr->GetBool(icmpp->icmp_wpa & 0x04)); // Proxy - vl->append(val_mgr->GetCount(icmpp->icmp_wpa & 0x02)); // Reserved - vl->append(new IntervalVal((double)ntohs(icmpp->icmp_lifetime), Seconds)); - vl->append(new IntervalVal((double)ntohl(reachable), Milliseconds)); - vl->append(new IntervalVal((double)ntohl(retrans), Milliseconds)); - int opt_offset = sizeof(reachable) + sizeof(retrans); - vl->append(BuildNDOptionsVal(caplen - opt_offset, data + opt_offset)); - ConnectionEvent(f, vl); + ConnectionEventFast(f, { + BuildConnVal(), + BuildICMPVal(icmpp, len, 1, ip_hdr), + val_mgr->GetCount(icmpp->icmp_num_addrs), // Cur Hop Limit + val_mgr->GetBool(icmpp->icmp_wpa & 0x80), // Managed + val_mgr->GetBool(icmpp->icmp_wpa & 0x40), // Other + val_mgr->GetBool(icmpp->icmp_wpa & 0x20), // Home Agent + val_mgr->GetCount((icmpp->icmp_wpa & 0x18)>>3), // Pref + val_mgr->GetBool(icmpp->icmp_wpa & 0x04), // Proxy + val_mgr->GetCount(icmpp->icmp_wpa & 0x02), // Reserved + new IntervalVal((double)ntohs(icmpp->icmp_lifetime), Seconds), + new IntervalVal((double)ntohl(reachable), Milliseconds), + new IntervalVal((double)ntohl(retrans), Milliseconds), + BuildNDOptionsVal(caplen - opt_offset, data + opt_offset), + }); } @@ -559,23 +562,26 @@ void ICMP_Analyzer::NeighborAdvert(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr) { EventHandlerPtr f = icmp_neighbor_advertisement; + + if ( ! f ) + return; + IPAddr tgtaddr; if ( caplen >= (int)sizeof(in6_addr) ) tgtaddr = IPAddr(*((const in6_addr*)data)); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); - vl->append(val_mgr->GetBool(icmpp->icmp_num_addrs & 0x80)); // Router - vl->append(val_mgr->GetBool(icmpp->icmp_num_addrs & 0x40)); // Solicited - vl->append(val_mgr->GetBool(icmpp->icmp_num_addrs & 0x20)); // Override - vl->append(new AddrVal(tgtaddr)); - int opt_offset = sizeof(in6_addr); - vl->append(BuildNDOptionsVal(caplen - opt_offset, data + opt_offset)); - ConnectionEvent(f, vl); + ConnectionEventFast(f, { + BuildConnVal(), + BuildICMPVal(icmpp, len, 1, ip_hdr), + val_mgr->GetBool(icmpp->icmp_num_addrs & 0x80), // Router + val_mgr->GetBool(icmpp->icmp_num_addrs & 0x40), // Solicited + val_mgr->GetBool(icmpp->icmp_num_addrs & 0x20), // Override + new AddrVal(tgtaddr), + BuildNDOptionsVal(caplen - opt_offset, data + opt_offset), + }); } @@ -583,20 +589,23 @@ void ICMP_Analyzer::NeighborSolicit(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr) { EventHandlerPtr f = icmp_neighbor_solicitation; + + if ( ! 
f ) + return; + IPAddr tgtaddr; if ( caplen >= (int)sizeof(in6_addr) ) tgtaddr = IPAddr(*((const in6_addr*)data)); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); - vl->append(new AddrVal(tgtaddr)); - int opt_offset = sizeof(in6_addr); - vl->append(BuildNDOptionsVal(caplen - opt_offset, data + opt_offset)); - ConnectionEvent(f, vl); + ConnectionEventFast(f, { + BuildConnVal(), + BuildICMPVal(icmpp, len, 1, ip_hdr), + new AddrVal(tgtaddr), + BuildNDOptionsVal(caplen - opt_offset, data + opt_offset), + }); } @@ -604,6 +613,10 @@ void ICMP_Analyzer::Redirect(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr) { EventHandlerPtr f = icmp_redirect; + + if ( ! f ) + return; + IPAddr tgtaddr, dstaddr; if ( caplen >= (int)sizeof(in6_addr) ) @@ -612,16 +625,15 @@ void ICMP_Analyzer::Redirect(double t, const struct icmp* icmpp, int len, if ( caplen >= 2 * (int)sizeof(in6_addr) ) dstaddr = IPAddr(*((const in6_addr*)(data + sizeof(in6_addr)))); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); - vl->append(new AddrVal(tgtaddr)); - vl->append(new AddrVal(dstaddr)); - int opt_offset = 2 * sizeof(in6_addr); - vl->append(BuildNDOptionsVal(caplen - opt_offset, data + opt_offset)); - ConnectionEvent(f, vl); + ConnectionEventFast(f, { + BuildConnVal(), + BuildICMPVal(icmpp, len, 1, ip_hdr), + new AddrVal(tgtaddr), + new AddrVal(dstaddr), + BuildNDOptionsVal(caplen - opt_offset, data + opt_offset), + }); } @@ -630,12 +642,14 @@ void ICMP_Analyzer::RouterSolicit(double t, const struct icmp* icmpp, int len, { EventHandlerPtr f = icmp_router_solicitation; - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); - vl->append(BuildNDOptionsVal(caplen, data)); + if ( ! 
f ) + return; - ConnectionEvent(f, vl); + ConnectionEventFast(f, { + BuildConnVal(), + BuildICMPVal(icmpp, len, 1, ip_hdr), + BuildNDOptionsVal(caplen, data), + }); } @@ -657,12 +671,12 @@ void ICMP_Analyzer::Context4(double t, const struct icmp* icmpp, if ( f ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 0, ip_hdr)); - vl->append(val_mgr->GetCount(icmpp->icmp_code)); - vl->append(ExtractICMP4Context(caplen, data)); - ConnectionEvent(f, vl); + ConnectionEventFast(f, { + BuildConnVal(), + BuildICMPVal(icmpp, len, 0, ip_hdr), + val_mgr->GetCount(icmpp->icmp_code), + ExtractICMP4Context(caplen, data), + }); } } @@ -697,12 +711,12 @@ void ICMP_Analyzer::Context6(double t, const struct icmp* icmpp, if ( f ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); - vl->append(val_mgr->GetCount(icmpp->icmp_code)); - vl->append(ExtractICMP6Context(caplen, data)); - ConnectionEvent(f, vl); + ConnectionEventFast(f, { + BuildConnVal(), + BuildICMPVal(icmpp, len, 1, ip_hdr), + val_mgr->GetCount(icmpp->icmp_code), + ExtractICMP6Context(caplen, data), + }); } } diff --git a/src/analyzer/protocol/icmp/Plugin.cc b/src/analyzer/protocol/icmp/Plugin.cc index f216bcbbe9..390eb751d1 100644 --- a/src/analyzer/protocol/icmp/Plugin.cc +++ b/src/analyzer/protocol/icmp/Plugin.cc @@ -6,7 +6,7 @@ #include "ICMP.h" namespace plugin { -namespace Bro_ICMP { +namespace Zeek_ICMP { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("ICMP", ::analyzer::icmp::ICMP_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::ICMP"; + config.name = "Zeek::ICMP"; config.description = "ICMP analyzer"; return config; } diff --git a/src/analyzer/protocol/icmp/events.bif b/src/analyzer/protocol/icmp/events.bif index bd55f17b27..ada3fe48a0 100644 --- a/src/analyzer/protocol/icmp/events.bif +++ b/src/analyzer/protocol/icmp/events.bif @@ -1,5 +1,5 @@ ## Generated for all ICMP messages that are not handled separately with -## dedicated ICMP events. Bro's ICMP analyzer handles a number of ICMP messages +## dedicated ICMP events. Zeek's ICMP analyzer handles a number of ICMP messages ## directly with dedicated events. This event acts as a fallback for those it ## doesn't. ## @@ -12,10 +12,10 @@ ## icmp: Additional ICMP-specific information augmenting the standard ## connection record *c*. ## -## .. bro:see:: icmp_error_message icmp_sent_payload +## .. zeek:see:: icmp_error_message icmp_sent_payload event icmp_sent%(c: connection, icmp: icmp_conn%); -## The same as :bro:see:`icmp_sent` except containing the ICMP payload. +## The same as :zeek:see:`icmp_sent` except containing the ICMP payload. ## ## c: The connection record for the corresponding ICMP flow. ## @@ -24,7 +24,7 @@ event icmp_sent%(c: connection, icmp: icmp_conn%); ## ## payload: The payload of the ICMP message. ## -## .. bro:see:: icmp_error_message icmp_sent_payload +## .. zeek:see:: icmp_error_message icmp_sent_payload event icmp_sent_payload%(c: connection, icmp: icmp_conn, payload: string%); ## Generated for ICMP *echo request* messages. @@ -45,7 +45,7 @@ event icmp_sent_payload%(c: connection, icmp: icmp_conn, payload: string%); ## payload: The message-specific data of the packet payload, i.e., everything ## after the first 8 bytes of the ICMP header. ## -## .. bro:see:: icmp_echo_reply +## .. 
zeek:see:: icmp_echo_reply event icmp_echo_request%(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string%); ## Generated for ICMP *echo reply* messages. @@ -66,11 +66,11 @@ event icmp_echo_request%(c: connection, icmp: icmp_conn, id: count, seq: count, ## payload: The message-specific data of the packet payload, i.e., everything ## after the first 8 bytes of the ICMP header. ## -## .. bro:see:: icmp_echo_request +## .. zeek:see:: icmp_echo_request event icmp_echo_reply%(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string%); ## Generated for all ICMPv6 error messages that are not handled -## separately with dedicated events. Bro's ICMP analyzer handles a number +## separately with dedicated events. Zeek's ICMP analyzer handles a number ## of ICMP error messages directly with dedicated events. This event acts ## as a fallback for those it doesn't. ## @@ -88,7 +88,7 @@ event icmp_echo_reply%(c: connection, icmp: icmp_conn, id: count, seq: count, pa ## context: A record with specifics of the original packet that the message ## refers to. ## -## .. bro:see:: icmp_unreachable icmp_packet_too_big +## .. zeek:see:: icmp_unreachable icmp_packet_too_big ## icmp_time_exceeded icmp_parameter_problem event icmp_error_message%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); @@ -107,12 +107,12 @@ event icmp_error_message%(c: connection, icmp: icmp_conn, code: count, context: ## ## context: A record with specifics of the original packet that the message ## refers to. *Unreachable* messages should include the original IP -## header from the packet that triggered them, and Bro parses that +## header from the packet that triggered them, and Zeek parses that ## into the *context* structure. Note that if the *unreachable* ## includes only a partial IP header for some reason, no ## fields of *context* will be filled out. ## -## .. bro:see:: icmp_error_message icmp_packet_too_big +## .. zeek:see:: icmp_error_message icmp_packet_too_big ## icmp_time_exceeded icmp_parameter_problem event icmp_unreachable%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); @@ -131,12 +131,12 @@ event icmp_unreachable%(c: connection, icmp: icmp_conn, code: count, context: ic ## ## context: A record with specifics of the original packet that the message ## refers to. *Too big* messages should include the original IP header -## from the packet that triggered them, and Bro parses that into +## from the packet that triggered them, and Zeek parses that into ## the *context* structure. Note that if the *too big* includes only ## a partial IP header for some reason, no fields of *context* will ## be filled out. ## -## .. bro:see:: icmp_error_message icmp_unreachable +## .. zeek:see:: icmp_error_message icmp_unreachable ## icmp_time_exceeded icmp_parameter_problem event icmp_packet_too_big%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); @@ -155,12 +155,12 @@ event icmp_packet_too_big%(c: connection, icmp: icmp_conn, code: count, context: ## ## context: A record with specifics of the original packet that the message ## refers to. *Unreachable* messages should include the original IP -## header from the packet that triggered them, and Bro parses that +## header from the packet that triggered them, and Zeek parses that ## into the *context* structure. Note that if the *exceeded* includes ## only a partial IP header for some reason, no fields of *context* ## will be filled out. ## -## .. 
bro:see:: icmp_error_message icmp_unreachable icmp_packet_too_big +## .. zeek:see:: icmp_error_message icmp_unreachable icmp_packet_too_big ## icmp_parameter_problem event icmp_time_exceeded%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); @@ -179,12 +179,12 @@ event icmp_time_exceeded%(c: connection, icmp: icmp_conn, code: count, context: ## ## context: A record with specifics of the original packet that the message ## refers to. *Parameter problem* messages should include the original -## IP header from the packet that triggered them, and Bro parses that +## IP header from the packet that triggered them, and Zeek parses that ## into the *context* structure. Note that if the *parameter problem* ## includes only a partial IP header for some reason, no fields ## of *context* will be filled out. ## -## .. bro:see:: icmp_error_message icmp_unreachable icmp_packet_too_big +## .. zeek:see:: icmp_error_message icmp_unreachable icmp_packet_too_big ## icmp_time_exceeded event icmp_parameter_problem%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); @@ -201,7 +201,7 @@ event icmp_parameter_problem%(c: connection, icmp: icmp_conn, code: count, conte ## ## options: Any Neighbor Discovery options included with message (:rfc:`4861`). ## -## .. bro:see:: icmp_router_advertisement +## .. zeek:see:: icmp_router_advertisement ## icmp_neighbor_solicitation icmp_neighbor_advertisement icmp_redirect event icmp_router_solicitation%(c: connection, icmp: icmp_conn, options: icmp6_nd_options%); @@ -239,7 +239,7 @@ event icmp_router_solicitation%(c: connection, icmp: icmp_conn, options: icmp6_n ## ## options: Any Neighbor Discovery options included with message (:rfc:`4861`). ## -## .. bro:see:: icmp_router_solicitation +## .. zeek:see:: icmp_router_solicitation ## icmp_neighbor_solicitation icmp_neighbor_advertisement icmp_redirect event icmp_router_advertisement%(c: connection, icmp: icmp_conn, cur_hop_limit: count, managed: bool, other: bool, home_agent: bool, pref: count, proxy: bool, rsv: count, router_lifetime: interval, reachable_time: interval, retrans_timer: interval, options: icmp6_nd_options%); @@ -258,7 +258,7 @@ event icmp_router_advertisement%(c: connection, icmp: icmp_conn, cur_hop_limit: ## ## options: Any Neighbor Discovery options included with message (:rfc:`4861`). ## -## .. bro:see:: icmp_router_solicitation icmp_router_advertisement +## .. zeek:see:: icmp_router_solicitation icmp_router_advertisement ## icmp_neighbor_advertisement icmp_redirect event icmp_neighbor_solicitation%(c: connection, icmp: icmp_conn, tgt: addr, options: icmp6_nd_options%); @@ -284,7 +284,7 @@ event icmp_neighbor_solicitation%(c: connection, icmp: icmp_conn, tgt: addr, opt ## ## options: Any Neighbor Discovery options included with message (:rfc:`4861`). ## -## .. bro:see:: icmp_router_solicitation icmp_router_advertisement +## .. zeek:see:: icmp_router_solicitation icmp_router_advertisement ## icmp_neighbor_solicitation icmp_redirect event icmp_neighbor_advertisement%(c: connection, icmp: icmp_conn, router: bool, solicited: bool, override: bool, tgt: addr, options: icmp6_nd_options%); @@ -306,7 +306,7 @@ event icmp_neighbor_advertisement%(c: connection, icmp: icmp_conn, router: bool, ## ## options: Any Neighbor Discovery options included with message (:rfc:`4861`). ## -## .. bro:see:: icmp_router_solicitation icmp_router_advertisement +## .. 
zeek:see:: icmp_router_solicitation icmp_router_advertisement ## icmp_neighbor_solicitation icmp_neighbor_advertisement event icmp_redirect%(c: connection, icmp: icmp_conn, tgt: addr, dest: addr, options: icmp6_nd_options%); diff --git a/src/analyzer/protocol/ident/CMakeLists.txt b/src/analyzer/protocol/ident/CMakeLists.txt index 658dff141e..22ac6e94a1 100644 --- a/src/analyzer/protocol/ident/CMakeLists.txt +++ b/src/analyzer/protocol/ident/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro Ident) -bro_plugin_cc(Ident.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek Ident) +zeek_plugin_cc(Ident.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/ident/Ident.cc b/src/analyzer/protocol/ident/Ident.cc index 125f2d7f64..b24675ee53 100644 --- a/src/analyzer/protocol/ident/Ident.cc +++ b/src/analyzer/protocol/ident/Ident.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include @@ -83,12 +83,11 @@ void Ident_Analyzer::DeliverStream(int length, const u_char* data, bool is_orig) Weird("ident_request_addendum", s.CheckString()); } - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetPort(local_port, TRANSPORT_TCP)); - vl->append(val_mgr->GetPort(remote_port, TRANSPORT_TCP)); - - ConnectionEvent(ident_request, vl); + ConnectionEventFast(ident_request, { + BuildConnVal(), + val_mgr->GetPort(local_port, TRANSPORT_TCP), + val_mgr->GetPort(remote_port, TRANSPORT_TCP), + }); did_deliver = 1; } @@ -144,13 +143,13 @@ void Ident_Analyzer::DeliverStream(int length, const u_char* data, bool is_orig) if ( is_error ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetPort(local_port, TRANSPORT_TCP)); - vl->append(val_mgr->GetPort(remote_port, TRANSPORT_TCP)); - vl->append(new StringVal(end_of_line - line, line)); - - ConnectionEvent(ident_error, vl); + if ( ident_error ) + ConnectionEventFast(ident_error, { + BuildConnVal(), + val_mgr->GetPort(local_port, TRANSPORT_TCP), + val_mgr->GetPort(remote_port, TRANSPORT_TCP), + new StringVal(end_of_line - line, line), + }); } else @@ -178,14 +177,13 @@ void Ident_Analyzer::DeliverStream(int length, const u_char* data, bool is_orig) line = skip_whitespace(colon + 1, end_of_line); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetPort(local_port, TRANSPORT_TCP)); - vl->append(val_mgr->GetPort(remote_port, TRANSPORT_TCP)); - vl->append(new StringVal(end_of_line - line, line)); - vl->append(new StringVal(sys_type_s)); - - ConnectionEvent(ident_reply, vl); + ConnectionEventFast(ident_reply, { + BuildConnVal(), + val_mgr->GetPort(local_port, TRANSPORT_TCP), + val_mgr->GetPort(remote_port, TRANSPORT_TCP), + new StringVal(end_of_line - line, line), + new StringVal(sys_type_s), + }); } } } diff --git a/src/analyzer/protocol/ident/Plugin.cc b/src/analyzer/protocol/ident/Plugin.cc index e495210f08..23a798a72f 100644 --- a/src/analyzer/protocol/ident/Plugin.cc +++ b/src/analyzer/protocol/ident/Plugin.cc @@ -6,7 +6,7 @@ #include "Ident.h" namespace plugin { -namespace Bro_Ident { +namespace Zeek_Ident { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("Ident", ::analyzer::ident::Ident_Analyzer::Instantiate)); 
plugin::Configuration config; - config.name = "Bro::Ident"; + config.name = "Zeek::Ident"; config.description = "Ident analyzer"; return config; } diff --git a/src/analyzer/protocol/ident/events.bif b/src/analyzer/protocol/ident/events.bif index 96a7f37a31..d348c0307f 100644 --- a/src/analyzer/protocol/ident/events.bif +++ b/src/analyzer/protocol/ident/events.bif @@ -9,11 +9,11 @@ ## ## rport: The request's remote port. ## -## .. bro:see:: ident_error ident_reply +## .. zeek:see:: ident_error ident_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event ident_request%(c: connection, lport: port, rport: port%); @@ -32,11 +32,11 @@ event ident_request%(c: connection, lport: port, rport: port%); ## ## system: The operating system returned by the reply. ## -## .. bro:see:: ident_error ident_request +## .. zeek:see:: ident_error ident_request ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event ident_reply%(c: connection, lport: port, rport: port, user_id: string, system: string%); @@ -53,11 +53,11 @@ event ident_reply%(c: connection, lport: port, rport: port, user_id: string, sys ## ## line: The error description returned by the reply. ## -## .. bro:see:: ident_reply ident_request +## .. zeek:see:: ident_reply ident_request ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. 
event ident_error%(c: connection, lport: port, rport: port, line: string%); diff --git a/src/analyzer/protocol/imap/CMakeLists.txt b/src/analyzer/protocol/imap/CMakeLists.txt index 921dde2444..472b465b71 100644 --- a/src/analyzer/protocol/imap/CMakeLists.txt +++ b/src/analyzer/protocol/imap/CMakeLists.txt @@ -1,12 +1,12 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro IMAP) -bro_plugin_cc(Plugin.cc) -bro_plugin_cc(IMAP.cc) -bro_plugin_bif(events.bif) -bro_plugin_pac(imap.pac imap-analyzer.pac imap-protocol.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek IMAP) +zeek_plugin_cc(Plugin.cc) +zeek_plugin_cc(IMAP.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_pac(imap.pac imap-analyzer.pac imap-protocol.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/imap/Plugin.cc b/src/analyzer/protocol/imap/Plugin.cc index 63358f1aeb..3192ea8f28 100644 --- a/src/analyzer/protocol/imap/Plugin.cc +++ b/src/analyzer/protocol/imap/Plugin.cc @@ -3,7 +3,7 @@ #include "IMAP.h" namespace plugin { -namespace Bro_IMAP { +namespace Zeek_IMAP { class Plugin : public plugin::Plugin { public: @@ -12,7 +12,7 @@ public: AddComponent(new ::analyzer::Component("IMAP", ::analyzer::imap::IMAP_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::IMAP"; + config.name = "Zeek::IMAP"; config.description = "IMAP analyzer (StartTLS only)"; return config; } diff --git a/src/analyzer/protocol/imap/imap-analyzer.pac b/src/analyzer/protocol/imap/imap-analyzer.pac index 353aadb7ce..ac1652086e 100644 --- a/src/analyzer/protocol/imap/imap-analyzer.pac +++ b/src/analyzer/protocol/imap/imap-analyzer.pac @@ -43,7 +43,9 @@ refine connection IMAP_Conn += { if ( commands == "ok" ) { bro_analyzer()->StartTLS(); - BifEvent::generate_imap_starttls(bro_analyzer(), bro_analyzer()->Conn()); + + if ( imap_starttls ) + BifEvent::generate_imap_starttls(bro_analyzer(), bro_analyzer()->Conn()); } else reporter->Weird(bro_analyzer()->Conn(), "IMAP: server refused StartTLS"); @@ -54,6 +56,9 @@ refine connection IMAP_Conn += { function proc_server_capability(capabilities: Capability[]): bool %{ + if ( ! imap_capabilities ) + return true; + VectorVal* capv = new VectorVal(internal_type("string_vec")->AsVectorType()); for ( unsigned int i = 0; i< capabilities->size(); i++ ) { diff --git a/src/analyzer/protocol/interconn/CMakeLists.txt b/src/analyzer/protocol/interconn/CMakeLists.txt index ef5ca13a9a..c1cf40da3f 100644 --- a/src/analyzer/protocol/interconn/CMakeLists.txt +++ b/src/analyzer/protocol/interconn/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro InterConn) -bro_plugin_cc(InterConn.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek InterConn) +zeek_plugin_cc(InterConn.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/interconn/InterConn.cc b/src/analyzer/protocol/interconn/InterConn.cc index 8d9dd72774..e9a9378c90 100644 --- a/src/analyzer/protocol/interconn/InterConn.cc +++ b/src/analyzer/protocol/interconn/InterConn.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. 
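# Editor's note: several event descriptions above (the gnutella_* and ident_*
# events) state that their analyzer is not active in the default configuration
# and that one needs to register a port for it or add a DPD payload signature.
# A hypothetical sketch of the port-registration route is below, assuming the
# Analyzer framework script API and the zeek_init event name available in this
# tree; the choice of 113/tcp for Ident is illustrative.
event zeek_init()
	{
	# Attach the Ident analyzer to its conventional port so ident_* events
	# can be generated.
	Analyzer::register_for_ports(Analyzer::ANALYZER_IDENT, set(113/tcp));
	# Alternatively, a DPD payload signature (in a .sig file with an
	# "enable" action) can activate the analyzer on matching traffic.
	}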
-#include "bro-config.h" +#include "zeek-config.h" #include "InterConn.h" #include "Event.h" @@ -241,20 +241,18 @@ void InterConn_Analyzer::StatTimer(double t, int is_expire) void InterConn_Analyzer::StatEvent() { - val_list* vl = new val_list; - vl->append(Conn()->BuildConnVal()); - vl->append(orig_endp->BuildStats()); - vl->append(resp_endp->BuildStats()); - - Conn()->ConnectionEvent(interconn_stats, this, vl); + if ( interconn_stats ) + Conn()->ConnectionEventFast(interconn_stats, this, { + Conn()->BuildConnVal(), + orig_endp->BuildStats(), + resp_endp->BuildStats(), + }); } void InterConn_Analyzer::RemoveEvent() { - val_list* vl = new val_list; - vl->append(Conn()->BuildConnVal()); - - Conn()->ConnectionEvent(interconn_remove_conn, this, vl); + if ( interconn_remove_conn ) + Conn()->ConnectionEventFast(interconn_remove_conn, this, {Conn()->BuildConnVal()}); } InterConnTimer::InterConnTimer(double t, InterConn_Analyzer* a) diff --git a/src/analyzer/protocol/interconn/Plugin.cc b/src/analyzer/protocol/interconn/Plugin.cc index a4ee39ca07..bbd1b866ed 100644 --- a/src/analyzer/protocol/interconn/Plugin.cc +++ b/src/analyzer/protocol/interconn/Plugin.cc @@ -6,7 +6,7 @@ #include "InterConn.h" namespace plugin { -namespace Bro_InterConn { +namespace Zeek_InterConn { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("InterConn", ::analyzer::interconn::InterConn_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::InterConn"; + config.name = "Zeek::InterConn"; config.description = "InterConn analyzer deprecated"; return config; } diff --git a/src/analyzer/protocol/irc/CMakeLists.txt b/src/analyzer/protocol/irc/CMakeLists.txt index 5f97482365..4538172d75 100644 --- a/src/analyzer/protocol/irc/CMakeLists.txt +++ b/src/analyzer/protocol/irc/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro IRC) -bro_plugin_cc(IRC.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek IRC) +zeek_plugin_cc(IRC.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/irc/IRC.cc b/src/analyzer/protocol/irc/IRC.cc index 25d568d627..c5db109434 100644 --- a/src/analyzer/protocol/irc/IRC.cc +++ b/src/analyzer/protocol/irc/IRC.cc @@ -233,14 +233,13 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) // else ### } - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(val_mgr->GetInt(users)); - vl->append(val_mgr->GetInt(services)); - vl->append(val_mgr->GetInt(servers)); - - ConnectionEvent(irc_network_info, vl); + ConnectionEventFast(irc_network_info, { + BuildConnVal(), + val_mgr->GetBool(orig), + val_mgr->GetInt(users), + val_mgr->GetInt(services), + val_mgr->GetInt(servers), + }); } break; @@ -271,13 +270,8 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) if ( parts.size() > 0 && parts[0][0] == ':' ) parts[0] = parts[0].substr(1); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(type.c_str())); - vl->append(new StringVal(channel.c_str())); - TableVal* set = new TableVal(string_set); + for ( unsigned int i = 0; i < parts.size(); ++i ) { if ( parts[i][0] == '@' ) @@ -286,9 +280,14 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool 
orig) set->Assign(idx, 0); Unref(idx); } - vl->append(set); - ConnectionEvent(irc_names_info, vl); + ConnectionEventFast(irc_names_info, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(type.c_str()), + new StringVal(channel.c_str()), + set, + }); } break; @@ -316,14 +315,13 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) // else ### } - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(val_mgr->GetInt(users)); - vl->append(val_mgr->GetInt(services)); - vl->append(val_mgr->GetInt(servers)); - - ConnectionEvent(irc_server_info, vl); + ConnectionEventFast(irc_server_info, { + BuildConnVal(), + val_mgr->GetBool(orig), + val_mgr->GetInt(users), + val_mgr->GetInt(services), + val_mgr->GetInt(servers), + }); } break; @@ -339,12 +337,11 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) if ( parts[i] == ":channels" ) channels = atoi(parts[i - 1].c_str()); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(val_mgr->GetInt(channels)); - - ConnectionEvent(irc_channel_info, vl); + ConnectionEventFast(irc_channel_info, { + BuildConnVal(), + val_mgr->GetBool(orig), + val_mgr->GetInt(channels), + }); } break; @@ -372,12 +369,12 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) break; } - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(eop - prefix, prefix)); - vl->append(new StringVal(++msg)); - ConnectionEvent(irc_global_users, vl); + ConnectionEventFast(irc_global_users, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(eop - prefix, prefix), + new StringVal(++msg), + }); break; } @@ -397,12 +394,12 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) return; } - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(parts[0].c_str())); - vl->append(new StringVal(parts[1].c_str())); - vl->append(new StringVal(parts[2].c_str())); + val_list vl(6); + vl.append(BuildConnVal()); + vl.append(val_mgr->GetBool(orig)); + vl.append(new StringVal(parts[0].c_str())); + vl.append(new StringVal(parts[1].c_str())); + vl.append(new StringVal(parts[2].c_str())); parts.erase(parts.begin(), parts.begin() + 4); @@ -413,9 +410,9 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) if ( real_name[0] == ':' ) real_name = real_name.substr(1); - vl->append(new StringVal(real_name.c_str())); + vl.append(new StringVal(real_name.c_str())); - ConnectionEvent(irc_whois_user_line, vl); + ConnectionEventFast(irc_whois_user_line, std::move(vl)); } break; @@ -436,12 +433,11 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) return; } - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(parts[0].c_str())); - - ConnectionEvent(irc_whois_operator_line, vl); + ConnectionEventFast(irc_whois_operator_line, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(parts[0].c_str()), + }); } break; @@ -467,11 +463,8 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) if ( parts.size() > 0 && parts[0][0] == ':' ) parts[0] = parts[0].substr(1); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(nick.c_str())); 
TableVal* set = new TableVal(string_set); + for ( unsigned int i = 0; i < parts.size(); ++i ) { Val* idx = new StringVal(parts[i].c_str()); @@ -479,9 +472,12 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) Unref(idx); } - vl->append(set); - - ConnectionEvent(irc_whois_channel_line, vl); + ConnectionEventFast(irc_whois_channel_line, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(nick.c_str()), + set, + }); } break; @@ -502,19 +498,17 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) if ( pos < params.size() ) { string topic = params.substr(pos + 1); - val_list* vl = new val_list; - - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(parts[1].c_str())); - const char* t = topic.c_str(); + if ( *t == ':' ) ++t; - vl->append(new StringVal(t)); - - ConnectionEvent(irc_channel_topic, vl); + ConnectionEventFast(irc_channel_topic, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(parts[1].c_str()), + new StringVal(t), + }); } else { @@ -537,24 +531,25 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) return; } - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(parts[0].c_str())); - vl->append(new StringVal(parts[1].c_str())); if ( parts[2][0] == '~' ) parts[2] = parts[2].substr(1); - vl->append(new StringVal(parts[2].c_str())); - vl->append(new StringVal(parts[3].c_str())); - vl->append(new StringVal(parts[4].c_str())); - vl->append(new StringVal(parts[5].c_str())); - vl->append(new StringVal(parts[6].c_str())); + if ( parts[7][0] == ':' ) parts[7] = parts[7].substr(1); - vl->append(val_mgr->GetInt(atoi(parts[7].c_str()))); - vl->append(new StringVal(parts[8].c_str())); - ConnectionEvent(irc_who_line, vl); + ConnectionEventFast(irc_who_line, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(parts[0].c_str()), + new StringVal(parts[1].c_str()), + new StringVal(parts[2].c_str()), + new StringVal(parts[3].c_str()), + new StringVal(parts[4].c_str()), + new StringVal(parts[5].c_str()), + new StringVal(parts[6].c_str()), + val_mgr->GetInt(atoi(parts[7].c_str())), + new StringVal(parts[8].c_str()), + }); } break; @@ -565,10 +560,10 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) case 436: if ( irc_invalid_nick ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - ConnectionEvent(irc_invalid_nick, vl); + ConnectionEventFast(irc_invalid_nick, { + BuildConnVal(), + val_mgr->GetBool(orig), + }); } break; @@ -577,11 +572,11 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) case 491: // user is not operator if ( irc_oper_response ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(val_mgr->GetBool(code == 381)); - ConnectionEvent(irc_oper_response, vl); + ConnectionEventFast(irc_oper_response, { + BuildConnVal(), + val_mgr->GetBool(orig), + val_mgr->GetBool(code == 381), + }); } break; @@ -592,14 +587,14 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) // All other server replies. 
default: - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(prefix.c_str())); - vl->append(val_mgr->GetCount(code)); - vl->append(new StringVal(params.c_str())); - - ConnectionEvent(irc_reply, vl); + if ( irc_reply ) + ConnectionEventFast(irc_reply, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(prefix.c_str()), + val_mgr->GetCount(code), + new StringVal(params.c_str()), + }); break; } return; @@ -662,33 +657,33 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) raw_ip = (10 * raw_ip) + atoi(s.c_str()); } - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(prefix.c_str())); - vl->append(new StringVal(target.c_str())); - vl->append(new StringVal(parts[1].c_str())); - vl->append(new StringVal(parts[2].c_str())); - vl->append(new AddrVal(htonl(raw_ip))); - vl->append(val_mgr->GetCount(atoi(parts[4].c_str()))); - if ( parts.size() >= 6 ) - vl->append(val_mgr->GetCount(atoi(parts[5].c_str()))); - else - vl->append(val_mgr->GetCount(0)); - ConnectionEvent(irc_dcc_message, vl); + if ( irc_dcc_message ) + ConnectionEventFast(irc_dcc_message, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(prefix.c_str()), + new StringVal(target.c_str()), + new StringVal(parts[1].c_str()), + new StringVal(parts[2].c_str()), + new AddrVal(htonl(raw_ip)), + val_mgr->GetCount(atoi(parts[4].c_str())), + parts.size() >= 6 ? + val_mgr->GetCount(atoi(parts[5].c_str())) : + val_mgr->GetCount(0), + }); } else { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(prefix.c_str())); - vl->append(new StringVal(target.c_str())); - vl->append(new StringVal(message.c_str())); - - ConnectionEvent(irc_privmsg_message, vl); + if ( irc_privmsg_message ) + ConnectionEventFast(irc_privmsg_message, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(prefix.c_str()), + new StringVal(target.c_str()), + new StringVal(message.c_str()), + }); } } @@ -707,14 +702,13 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) if ( message[0] == ':' ) message = message.substr(1); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(prefix.c_str())); - vl->append(new StringVal(target.c_str())); - vl->append(new StringVal(message.c_str())); - - ConnectionEvent(irc_notice_message, vl); + ConnectionEventFast(irc_notice_message, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(prefix.c_str()), + new StringVal(target.c_str()), + new StringVal(message.c_str()), + }); } else if ( irc_squery_message && command == "SQUERY" ) @@ -732,35 +726,34 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) if ( message[0] == ':' ) message = message.substr(1); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(prefix.c_str())); - vl->append(new StringVal(target.c_str())); - vl->append(new StringVal(message.c_str())); - - ConnectionEvent(irc_squery_message, vl); + ConnectionEventFast(irc_squery_message, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(prefix.c_str()), + new StringVal(target.c_str()), + new StringVal(message.c_str()), + }); } else if ( irc_user_message && command == "USER" ) { // extract username and real name vector parts = SplitWords(params, ' '); - 
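
Several of the conversions above (irc_reply, irc_dcc_message, irc_privmsg_message) also add an explicit if ( <handler> ) test before calling ConnectionEventFast(). The apparent intent, consistent with the "Fast" name, is that the fast path no longer performs the handler check and argument cleanup on the caller's behalf, so the caller tests the handler first and avoids allocating Vals that nobody will consume. A hedged sketch of that guard, again with stand-in types rather than Zeek's:

// Sketch of the handler guard added around the fast event calls above.
// event_handler, Val, val_list and raise_event_fast() are stand-ins; in the
// real code the test is against generated handler globals such as irc_reply.
#include <iostream>
#include <string>
#include <vector>

struct Val { std::string repr; };
using val_list = std::vector<Val*>;

struct event_handler
	{
	bool has_handlers = false;
	explicit operator bool() const { return has_handlers; }
	};

// Assumes the caller has already verified that someone handles the event.
void raise_event_fast(const event_handler&, val_list args)
	{
	for ( auto* v : args )
		delete v;
	}

int main()
	{
	event_handler irc_reply; // no script handler bound

	if ( irc_reply )
		raise_event_fast(irc_reply, { new Val{"conn"}, new Val{"prefix"}, new Val{"params"} });
	else
		std::cout << "irc_reply unhandled: no argument Vals were allocated\n";
	}
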
val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); + val_list vl(6); + vl.append(BuildConnVal()); + vl.append(val_mgr->GetBool(orig)); if ( parts.size() > 0 ) - vl->append(new StringVal(parts[0].c_str())); - else vl->append(val_mgr->GetEmptyString()); + vl.append(new StringVal(parts[0].c_str())); + else vl.append(val_mgr->GetEmptyString()); if ( parts.size() > 1 ) - vl->append(new StringVal(parts[1].c_str())); - else vl->append(val_mgr->GetEmptyString()); + vl.append(new StringVal(parts[1].c_str())); + else vl.append(val_mgr->GetEmptyString()); if ( parts.size() > 2 ) - vl->append(new StringVal(parts[2].c_str())); - else vl->append(val_mgr->GetEmptyString()); + vl.append(new StringVal(parts[2].c_str())); + else vl.append(val_mgr->GetEmptyString()); string realname; for ( unsigned int i = 3; i < parts.size(); i++ ) @@ -771,9 +764,9 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) } const char* name = realname.c_str(); - vl->append(new StringVal(*name == ':' ? name + 1 : name)); + vl.append(new StringVal(*name == ':' ? name + 1 : name)); - ConnectionEvent(irc_user_message, vl); + ConnectionEventFast(irc_user_message, std::move(vl)); } else if ( irc_oper_message && command == "OPER" ) @@ -782,13 +775,12 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) vector parts = SplitWords(params, ' '); if ( parts.size() == 2 ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(parts[0].c_str())); - vl->append(new StringVal(parts[1].c_str())); - - ConnectionEvent(irc_oper_message, vl); + ConnectionEventFast(irc_oper_message, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(parts[0].c_str()), + new StringVal(parts[1].c_str()), + }); } else @@ -805,12 +797,12 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) return; } - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(prefix.c_str())); - vl->append(new StringVal(parts[0].c_str())); - vl->append(new StringVal(parts[1].c_str())); + val_list vl(6); + vl.append(BuildConnVal()); + vl.append(val_mgr->GetBool(orig)); + vl.append(new StringVal(prefix.c_str())); + vl.append(new StringVal(parts[0].c_str())); + vl.append(new StringVal(parts[1].c_str())); if ( parts.size() > 2 ) { string comment = parts[2]; @@ -820,12 +812,12 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) if ( comment[0] == ':' ) comment = comment.substr(1); - vl->append(new StringVal(comment.c_str())); + vl.append(new StringVal(comment.c_str())); } else - vl->append(val_mgr->GetEmptyString()); + vl.append(val_mgr->GetEmptyString()); - ConnectionEvent(irc_kick_message, vl); + ConnectionEventFast(irc_kick_message, std::move(vl)); } else if ( irc_join_message && command == "JOIN" ) @@ -849,11 +841,8 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) nickname = prefix.substr(0, pos); } - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - TableVal* list = new TableVal(irc_join_list); + vector channels = SplitWords(parts[0], ','); vector passwords; @@ -876,9 +865,11 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) Unref(info); } - vl->append(list); - - ConnectionEvent(irc_join_message, vl); + ConnectionEventFast(irc_join_message, { + BuildConnVal(), + val_mgr->GetBool(orig), + 
list, + }); } else if ( irc_join_message && command == "NJOIN" ) @@ -895,12 +886,8 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) parts[1] = parts[1].substr(1); vector users = SplitWords(parts[1], ','); - - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - TableVal* list = new TableVal(irc_join_list); + string empty_string = ""; for ( unsigned int i = 0; i < users.size(); ++i ) @@ -939,9 +926,11 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) Unref(info); } - vl->append(list); - - ConnectionEvent(irc_join_message, vl); + ConnectionEventFast(irc_join_message, { + BuildConnVal(), + val_mgr->GetBool(orig), + list, + }); } else if ( irc_part_message && command == "PART" ) @@ -977,14 +966,13 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) Unref(idx); } - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(nick.c_str())); - vl->append(set); - vl->append(new StringVal(message.c_str())); - - ConnectionEvent(irc_part_message, vl); + ConnectionEventFast(irc_part_message, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(nick.c_str()), + set, + new StringVal(message.c_str()), + }); } else if ( irc_quit_message && command == "QUIT" ) @@ -1001,13 +989,12 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) nickname = prefix.substr(0, pos); } - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(nickname.c_str())); - vl->append(new StringVal(message.c_str())); - - ConnectionEvent(irc_quit_message, vl); + ConnectionEventFast(irc_quit_message, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(nickname.c_str()), + new StringVal(message.c_str()), + }); } else if ( irc_nick_message && command == "NICK" ) @@ -1016,13 +1003,12 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) if ( nick[0] == ':' ) nick = nick.substr(1); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(prefix.c_str())); - vl->append(new StringVal(nick.c_str())); - - ConnectionEvent(irc_nick_message, vl); + ConnectionEventFast(irc_nick_message, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(prefix.c_str()), + new StringVal(nick.c_str()) + }); } else if ( irc_who_message && command == "WHO" ) @@ -1042,16 +1028,14 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) if ( parts.size() > 0 && parts[0].size() > 0 && parts[0][0] == ':' ) parts[0] = parts[0].substr(1); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - if ( parts.size() > 0 ) - vl->append(new StringVal(parts[0].c_str())); - else - vl->append(val_mgr->GetEmptyString()); - vl->append(val_mgr->GetBool(oper)); - - ConnectionEvent(irc_who_message, vl); + ConnectionEventFast(irc_who_message, { + BuildConnVal(), + val_mgr->GetBool(orig), + parts.size() > 0 ? 
+ new StringVal(parts[0].c_str()) : + val_mgr->GetEmptyString(), + val_mgr->GetBool(oper), + }); } else if ( irc_whois_message && command == "WHOIS" ) @@ -1074,26 +1058,25 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) else users = parts[0]; - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(server.c_str())); - vl->append(new StringVal(users.c_str())); - - ConnectionEvent(irc_whois_message, vl); + ConnectionEventFast(irc_whois_message, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(server.c_str()), + new StringVal(users.c_str()), + }); } else if ( irc_error_message && command == "ERROR" ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(prefix.c_str())); if ( params[0] == ':' ) params = params.substr(1); - vl->append(new StringVal(params.c_str())); - ConnectionEvent(irc_error_message, vl); + ConnectionEventFast(irc_error_message, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(prefix.c_str()), + new StringVal(params.c_str()), + }); } else if ( irc_invite_message && command == "INVITE" ) @@ -1104,14 +1087,13 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) if ( parts[1].size() > 0 && parts[1][0] == ':' ) parts[1] = parts[1].substr(1); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(prefix.c_str())); - vl->append(new StringVal(parts[0].c_str())); - vl->append(new StringVal(parts[1].c_str())); - - ConnectionEvent(irc_invite_message, vl); + ConnectionEventFast(irc_invite_message, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(prefix.c_str()), + new StringVal(parts[0].c_str()), + new StringVal(parts[1].c_str()), + }); } else Weird("irc_invalid_invite_message_format"); @@ -1121,13 +1103,12 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) { if ( params.size() > 0 ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(prefix.c_str())); - vl->append(new StringVal(params.c_str())); - - ConnectionEvent(irc_mode_message, vl); + ConnectionEventFast(irc_mode_message, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(prefix.c_str()), + new StringVal(params.c_str()), + }); } else @@ -1136,11 +1117,11 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) else if ( irc_password_message && command == "PASS" ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(params.c_str())); - ConnectionEvent(irc_password_message, vl); + ConnectionEventFast(irc_password_message, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(params.c_str()), + }); } else if ( irc_squit_message && command == "SQUIT" ) @@ -1158,14 +1139,13 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) message = message.substr(1); } - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(prefix.c_str())); - vl->append(new StringVal(server.c_str())); - vl->append(new StringVal(message.c_str())); - - ConnectionEvent(irc_squit_message, vl); + ConnectionEventFast(irc_squit_message, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(prefix.c_str()), + new StringVal(server.c_str()), + new 
StringVal(message.c_str()), + }); } @@ -1173,14 +1153,13 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) { if ( irc_request ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(prefix.c_str())); - vl->append(new StringVal(command.c_str())); - vl->append(new StringVal(params.c_str())); - - ConnectionEvent(irc_request, vl); + ConnectionEventFast(irc_request, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(prefix.c_str()), + new StringVal(command.c_str()), + new StringVal(params.c_str()), + }); } } @@ -1188,14 +1167,13 @@ void IRC_Analyzer::DeliverStream(int length, const u_char* line, bool orig) { if ( irc_message ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(prefix.c_str())); - vl->append(new StringVal(command.c_str())); - vl->append(new StringVal(params.c_str())); - - ConnectionEvent(irc_message, vl); + ConnectionEventFast(irc_message, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(prefix.c_str()), + new StringVal(command.c_str()), + new StringVal(params.c_str()), + }); } } @@ -1224,10 +1202,8 @@ void IRC_Analyzer::StartTLS() if ( ssl ) AddChildAnalyzer(ssl); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - - ConnectionEvent(irc_starttls, vl); + if ( irc_starttls ) + ConnectionEventFast(irc_starttls, {BuildConnVal()}); } vector IRC_Analyzer::SplitWords(const string input, const char split) diff --git a/src/analyzer/protocol/irc/Plugin.cc b/src/analyzer/protocol/irc/Plugin.cc index 54769ba0b0..fc63baad12 100644 --- a/src/analyzer/protocol/irc/Plugin.cc +++ b/src/analyzer/protocol/irc/Plugin.cc @@ -6,7 +6,7 @@ #include "IRC.h" namespace plugin { -namespace Bro_IRC { +namespace Zeek_IRC { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("IRC", ::analyzer::irc::IRC_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::IRC"; + config.name = "Zeek::IRC"; config.description = "IRC analyzer"; return config; } diff --git a/src/analyzer/protocol/irc/events.bif b/src/analyzer/protocol/irc/events.bif index be425817b2..d6af5fbae1 100644 --- a/src/analyzer/protocol/irc/events.bif +++ b/src/analyzer/protocol/irc/events.bif @@ -15,7 +15,7 @@ ## ## arguments: The arguments for the command. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_notice_message irc_oper_message irc_oper_response @@ -23,7 +23,7 @@ ## ## .. note:: This event is generated only for messages that originate ## at the client-side. Commands coming in from remote trigger -## the :bro:id:`irc_message` event instead. +## the :zeek:id:`irc_message` event instead. event irc_request%(c: connection, is_orig: bool, prefix: string, command: string, arguments: string%); @@ -45,7 +45,7 @@ event irc_request%(c: connection, is_orig: bool, prefix: string, ## ## params: The reply's parameters. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. 
zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_notice_message irc_oper_message irc_oper_response @@ -69,7 +69,7 @@ event irc_reply%(c: connection, is_orig: bool, prefix: string, ## ## message: TODO. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_notice_message irc_oper_message irc_oper_response @@ -79,7 +79,7 @@ event irc_reply%(c: connection, is_orig: bool, prefix: string, ## ## This event is generated only for messages that are forwarded by the server ## to the client. Commands coming from client trigger the -## :bro:id:`irc_request` event instead. +## :zeek:id:`irc_request` event instead. event irc_message%(c: connection, is_orig: bool, prefix: string, command: string, message: string%); @@ -98,7 +98,7 @@ event irc_message%(c: connection, is_orig: bool, prefix: string, ## ## message: The text included with the message. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_notice_message irc_oper_message irc_oper_response @@ -122,7 +122,7 @@ event irc_quit_message%(c: connection, is_orig: bool, nick: string, message: str ## ## message: The text of communication. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_notice_message irc_oper_message irc_oper_response @@ -147,7 +147,7 @@ event irc_privmsg_message%(c: connection, is_orig: bool, source: string, ## ## message: The text of communication. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_oper_message irc_oper_response irc_part_message @@ -172,7 +172,7 @@ event irc_notice_message%(c: connection, is_orig: bool, source: string, ## ## message: The text of communication. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_notice_message irc_oper_message irc_oper_response @@ -193,7 +193,7 @@ event irc_squery_message%(c: connection, is_orig: bool, source: string, ## ## info_list: The user information coming with the command. ## -## .. 
bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_kick_message ## irc_message irc_mode_message irc_names_info irc_network_info irc_nick_message ## irc_notice_message irc_oper_message irc_oper_response irc_part_message @@ -217,7 +217,7 @@ event irc_join_message%(c: connection, is_orig: bool, info_list: irc_join_list%) ## ## message: The text coming with the message. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_notice_message irc_oper_message irc_oper_response @@ -240,7 +240,7 @@ event irc_part_message%(c: connection, is_orig: bool, nick: string, ## ## newnick: The new nickname. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_notice_message irc_oper_message irc_oper_response irc_part_message @@ -257,7 +257,7 @@ event irc_nick_message%(c: connection, is_orig: bool, who: string, newnick: stri ## is_orig: True if the command was sent by the originator of the TCP ## connection. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invite_message irc_join_message irc_kick_message ## irc_message irc_mode_message irc_names_info irc_network_info irc_nick_message ## irc_notice_message irc_oper_message irc_oper_response irc_part_message @@ -280,7 +280,7 @@ event irc_invalid_nick%(c: connection, is_orig: bool%); ## ## servers: The number of servers as returned in the reply. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_nick_message ## irc_notice_message irc_oper_message irc_oper_response irc_part_message @@ -304,7 +304,7 @@ event irc_network_info%(c: connection, is_orig: bool, users: count, ## ## servers: The number of servers as returned in the reply. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_notice_message irc_oper_message irc_oper_response @@ -324,7 +324,7 @@ event irc_server_info%(c: connection, is_orig: bool, users: count, ## ## chans: The number of channels as returned in the reply. ## -## .. bro:see:: irc_channel_topic irc_dcc_message irc_error_message irc_global_users +## .. 
zeek:see:: irc_channel_topic irc_dcc_message irc_error_message irc_global_users ## irc_invalid_nick irc_invite_message irc_join_message irc_kick_message ## irc_message irc_mode_message irc_names_info irc_network_info irc_nick_message ## irc_notice_message irc_oper_message irc_oper_response irc_part_message @@ -359,7 +359,7 @@ event irc_channel_info%(c: connection, is_orig: bool, chans: count%); ## ## real_name: The real name. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_notice_message irc_oper_message irc_oper_response @@ -386,7 +386,7 @@ event irc_who_line%(c: connection, is_orig: bool, target_nick: string, ## ## users: The set of users. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_network_info irc_nick_message ## irc_notice_message irc_oper_message irc_oper_response irc_part_message @@ -406,7 +406,7 @@ event irc_names_info%(c: connection, is_orig: bool, c_type: string, ## ## nick: The nickname specified in the reply. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_notice_message irc_oper_message irc_oper_response @@ -427,7 +427,7 @@ event irc_whois_operator_line%(c: connection, is_orig: bool, nick: string%); ## ## chans: The set of channels returned. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_notice_message irc_oper_message irc_oper_response @@ -453,7 +453,7 @@ event irc_whois_channel_line%(c: connection, is_orig: bool, nick: string, ## ## real_name: The real name specified in the reply. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_notice_message irc_oper_message irc_oper_response @@ -474,7 +474,7 @@ event irc_whois_user_line%(c: connection, is_orig: bool, nick: string, ## got_oper: True if the *oper* command was executed successfully ## (*youreport*) and false otherwise (*nooperhost*). ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. 
zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_notice_message irc_oper_message irc_part_message @@ -496,7 +496,7 @@ event irc_oper_response%(c: connection, is_orig: bool, got_oper: bool%); ## ## msg: The message coming with the reply. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_invalid_nick irc_invite_message irc_join_message irc_kick_message ## irc_message irc_mode_message irc_names_info irc_network_info irc_nick_message ## irc_notice_message irc_oper_message irc_oper_response irc_part_message @@ -517,7 +517,7 @@ event irc_global_users%(c: connection, is_orig: bool, prefix: string, msg: strin ## ## topic: The topic specified in the reply. ## -## .. bro:see:: irc_channel_info irc_dcc_message irc_error_message irc_global_users +## .. zeek:see:: irc_channel_info irc_dcc_message irc_error_message irc_global_users ## irc_invalid_nick irc_invite_message irc_join_message irc_kick_message ## irc_message irc_mode_message irc_names_info irc_network_info irc_nick_message ## irc_notice_message irc_oper_message irc_oper_response irc_part_message @@ -539,7 +539,7 @@ event irc_channel_topic%(c: connection, is_orig: bool, channel: string, topic: s ## ## oper: True if the operator flag was set. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_notice_message irc_oper_message irc_oper_response @@ -561,7 +561,7 @@ event irc_who_message%(c: connection, is_orig: bool, mask: string, oper: bool%); ## ## users: TODO. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_notice_message irc_oper_message irc_oper_response @@ -583,7 +583,7 @@ event irc_whois_message%(c: connection, is_orig: bool, server: string, users: st ## ## password: The password specified in the message. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_notice_message irc_oper_response irc_part_message @@ -610,7 +610,7 @@ event irc_oper_message%(c: connection, is_orig: bool, user: string, password: st ## ## comment: The comment specified in the message. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. 
zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_message irc_mode_message irc_names_info irc_network_info irc_nick_message ## irc_notice_message irc_oper_message irc_oper_response irc_part_message @@ -634,7 +634,7 @@ event irc_kick_message%(c: connection, is_orig: bool, prefix: string, ## ## message: The textual description specified in the message. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_global_users +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_global_users ## irc_invalid_nick irc_invite_message irc_join_message irc_kick_message ## irc_message irc_mode_message irc_names_info irc_network_info irc_nick_message ## irc_notice_message irc_oper_message irc_oper_response irc_part_message @@ -659,7 +659,7 @@ event irc_error_message%(c: connection, is_orig: bool, prefix: string, message: ## ## channel: The channel specified in the message. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_join_message irc_kick_message ## irc_message irc_mode_message irc_names_info irc_network_info irc_nick_message ## irc_notice_message irc_oper_message irc_oper_response irc_part_message @@ -683,7 +683,7 @@ event irc_invite_message%(c: connection, is_orig: bool, prefix: string, ## ## params: The parameters coming with the message. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_names_info irc_network_info irc_nick_message ## irc_notice_message irc_oper_message irc_oper_response irc_part_message @@ -708,7 +708,7 @@ event irc_mode_message%(c: connection, is_orig: bool, prefix: string, params: st ## ## message: The textual description specified in the message. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_notice_message irc_oper_message irc_oper_response @@ -742,7 +742,7 @@ event irc_squit_message%(c: connection, is_orig: bool, prefix: string, ## ## size: The size specified in the message. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_error_message irc_global_users +## .. zeek:see:: irc_channel_info irc_channel_topic irc_error_message irc_global_users ## irc_invalid_nick irc_invite_message irc_join_message irc_kick_message ## irc_message irc_mode_message irc_names_info irc_network_info irc_nick_message ## irc_notice_message irc_oper_message irc_oper_response irc_part_message @@ -771,7 +771,7 @@ event irc_dcc_message%(c: connection, is_orig: bool, ## ## real_name: The real name specified in the message. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. 
zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_notice_message irc_oper_message irc_oper_response @@ -791,7 +791,7 @@ event irc_user_message%(c: connection, is_orig: bool, user: string, host: string ## ## password: The password specified in the message. ## -## .. bro:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message +## .. zeek:see:: irc_channel_info irc_channel_topic irc_dcc_message irc_error_message ## irc_global_users irc_invalid_nick irc_invite_message irc_join_message ## irc_kick_message irc_message irc_mode_message irc_names_info irc_network_info ## irc_nick_message irc_notice_message irc_oper_message irc_oper_response diff --git a/src/analyzer/protocol/krb/CMakeLists.txt b/src/analyzer/protocol/krb/CMakeLists.txt index 1cac35d626..d052e9bb6c 100644 --- a/src/analyzer/protocol/krb/CMakeLists.txt +++ b/src/analyzer/protocol/krb/CMakeLists.txt @@ -1,26 +1,26 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro KRB) -bro_plugin_cc(Plugin.cc) -bro_plugin_cc(KRB.cc) -bro_plugin_cc(KRB_TCP.cc) -bro_plugin_bif(types.bif) -bro_plugin_bif(events.bif) -bro_plugin_pac(krb.pac krb-protocol.pac krb-analyzer.pac +zeek_plugin_begin(Zeek KRB) +zeek_plugin_cc(Plugin.cc) +zeek_plugin_cc(KRB.cc) +zeek_plugin_cc(KRB_TCP.cc) +zeek_plugin_bif(types.bif) +zeek_plugin_bif(events.bif) +zeek_plugin_pac(krb.pac krb-protocol.pac krb-analyzer.pac krb-asn1.pac krb-defs.pac krb-types.pac krb-padata.pac ../asn1/asn1.pac ) -bro_plugin_pac(krb_TCP.pac krb-protocol.pac krb-analyzer.pac +zeek_plugin_pac(krb_TCP.pac krb-protocol.pac krb-analyzer.pac krb-asn1.pac krb-defs.pac krb-types.pac krb-padata.pac ../asn1/asn1.pac ) -bro_plugin_end() +zeek_plugin_end() diff --git a/src/analyzer/protocol/krb/KRB.cc b/src/analyzer/protocol/krb/KRB.cc index 4ee663dcf1..e6bd42b961 100644 --- a/src/analyzer/protocol/krb/KRB.cc +++ b/src/analyzer/protocol/krb/KRB.cc @@ -1,5 +1,7 @@ // See the file "COPYING" in the main distribution directory for copyright. 
+#include + #include "KRB.h" #include "types.bif.h" #include "events.bif.h" diff --git a/src/analyzer/protocol/krb/KRB.h b/src/analyzer/protocol/krb/KRB.h index 7eee46d838..6a6af93c45 100644 --- a/src/analyzer/protocol/krb/KRB.h +++ b/src/analyzer/protocol/krb/KRB.h @@ -9,6 +9,8 @@ #include #endif +#include + namespace analyzer { namespace krb { class KRB_Analyzer : public analyzer::Analyzer { diff --git a/src/analyzer/protocol/krb/Plugin.cc b/src/analyzer/protocol/krb/Plugin.cc index ffbefb5b1c..707498f729 100644 --- a/src/analyzer/protocol/krb/Plugin.cc +++ b/src/analyzer/protocol/krb/Plugin.cc @@ -5,7 +5,7 @@ #include "KRB_TCP.h" namespace plugin { - namespace Bro_KRB { + namespace Zeek_KRB { class Plugin : public plugin::Plugin { public: plugin::Configuration Configure() @@ -13,7 +13,7 @@ namespace plugin { AddComponent(new ::analyzer::Component("KRB", ::analyzer::krb::KRB_Analyzer::Instantiate)); AddComponent(new ::analyzer::Component("KRB_TCP", ::analyzer::krb_tcp::KRB_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::KRB"; + config.name = "Zeek::KRB"; config.description = "Kerberos analyzer"; return config; } diff --git a/src/analyzer/protocol/krb/events.bif b/src/analyzer/protocol/krb/events.bif index 19b165a4be..26405442ed 100644 --- a/src/analyzer/protocol/krb/events.bif +++ b/src/analyzer/protocol/krb/events.bif @@ -11,7 +11,7 @@ ## ## msg: A Kerberos KDC request message data structure. ## -## .. bro:see:: krb_as_response krb_tgs_request krb_tgs_response krb_ap_request +## .. zeek:see:: krb_as_response krb_tgs_request krb_tgs_response krb_ap_request ## krb_ap_response krb_priv krb_safe krb_cred krb_error event krb_as_request%(c: connection, msg: KRB::KDC_Request%); @@ -27,7 +27,7 @@ event krb_as_request%(c: connection, msg: KRB::KDC_Request%); ## ## msg: A Kerberos KDC reply message data structure. ## -## .. bro:see:: krb_as_request krb_tgs_request krb_tgs_response krb_ap_request +## .. zeek:see:: krb_as_request krb_tgs_request krb_tgs_response krb_ap_request ## krb_ap_response krb_priv krb_safe krb_cred krb_error event krb_as_response%(c: connection, msg: KRB::KDC_Response%); @@ -44,7 +44,7 @@ event krb_as_response%(c: connection, msg: KRB::KDC_Response%); ## ## msg: A Kerberos KDC request message data structure. ## -## .. bro:see:: krb_as_request krb_as_response krb_tgs_response krb_ap_request +## .. zeek:see:: krb_as_request krb_as_response krb_tgs_response krb_ap_request ## krb_ap_response krb_priv krb_safe krb_cred krb_error event krb_tgs_request%(c: connection, msg: KRB::KDC_Request%); @@ -60,7 +60,7 @@ event krb_tgs_request%(c: connection, msg: KRB::KDC_Request%); ## ## msg: A Kerberos KDC reply message data structure. ## -## .. bro:see:: krb_as_request krb_as_response krb_tgs_request krb_ap_request +## .. zeek:see:: krb_as_request krb_as_response krb_tgs_request krb_ap_request ## krb_ap_response krb_priv krb_safe krb_cred krb_error event krb_tgs_response%(c: connection, msg: KRB::KDC_Response%); @@ -78,7 +78,7 @@ event krb_tgs_response%(c: connection, msg: KRB::KDC_Response%); ## ## opts: A Kerberos AP options data structure. ## -## .. bro:see:: krb_as_request krb_as_response krb_tgs_request krb_tgs_response +## .. 
zeek:see:: krb_as_request krb_as_response krb_tgs_request krb_tgs_response ## krb_ap_response krb_priv krb_safe krb_cred krb_error event krb_ap_request%(c: connection, ticket: KRB::Ticket, opts: KRB::AP_Options%); @@ -93,7 +93,7 @@ event krb_ap_request%(c: connection, ticket: KRB::Ticket, opts: KRB::AP_Options% ## ## c: The connection over which this Kerberos message was sent. ## -## .. bro:see:: krb_as_request krb_as_response krb_tgs_request krb_tgs_response +## .. zeek:see:: krb_as_request krb_as_response krb_tgs_request krb_tgs_response ## krb_ap_request krb_priv krb_safe krb_cred krb_error event krb_ap_response%(c: connection%); @@ -109,7 +109,7 @@ event krb_ap_response%(c: connection%); ## ## is_orig: Whether the originator of the connection sent this message. ## -## .. bro:see:: krb_as_request krb_as_response krb_tgs_request krb_tgs_response +## .. zeek:see:: krb_as_request krb_as_response krb_tgs_request krb_tgs_response ## krb_ap_request krb_ap_response krb_safe krb_cred krb_error event krb_priv%(c: connection, is_orig: bool%); @@ -125,7 +125,7 @@ event krb_priv%(c: connection, is_orig: bool%); ## ## msg: A Kerberos SAFE message data structure. ## -## .. bro:see:: krb_as_request krb_as_response krb_tgs_request krb_tgs_response +## .. zeek:see:: krb_as_request krb_as_response krb_tgs_request krb_tgs_response ## krb_ap_request krb_ap_response krb_priv krb_cred krb_error event krb_safe%(c: connection, is_orig: bool, msg: KRB::SAFE_Msg%); @@ -141,7 +141,7 @@ event krb_safe%(c: connection, is_orig: bool, msg: KRB::SAFE_Msg%); ## ## tickets: Tickets obtained from the KDC that are being forwarded. ## -## .. bro:see:: krb_as_request krb_as_response krb_tgs_request krb_tgs_response +## .. zeek:see:: krb_as_request krb_as_response krb_tgs_request krb_tgs_response ## krb_ap_request krb_ap_response krb_priv krb_safe krb_error event krb_cred%(c: connection, is_orig: bool, tickets: KRB::Ticket_Vector%); @@ -154,6 +154,6 @@ event krb_cred%(c: connection, is_orig: bool, tickets: KRB::Ticket_Vector%); ## ## msg: A Kerberos error message data structure. ## -## .. bro:see:: krb_as_request krb_as_response krb_tgs_request krb_tgs_response +## .. zeek:see:: krb_as_request krb_as_response krb_tgs_request krb_tgs_response ## krb_ap_request krb_ap_response krb_priv krb_safe krb_cred event krb_error%(c: connection, msg: KRB::Error_Msg%); diff --git a/src/analyzer/protocol/login/CMakeLists.txt b/src/analyzer/protocol/login/CMakeLists.txt index 66f8eb1568..cb8217aaeb 100644 --- a/src/analyzer/protocol/login/CMakeLists.txt +++ b/src/analyzer/protocol/login/CMakeLists.txt @@ -1,10 +1,10 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro Login) -bro_plugin_cc(Login.cc RSH.cc Telnet.cc Rlogin.cc NVT.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_bif(functions.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek Login) +zeek_plugin_cc(Login.cc RSH.cc Telnet.cc Rlogin.cc NVT.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_bif(functions.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/login/Login.cc b/src/analyzer/protocol/login/Login.cc index f8eb233a29..277bb752ff 100644 --- a/src/analyzer/protocol/login/Login.cc +++ b/src/analyzer/protocol/login/Login.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. 
-#include "bro-config.h" +#include "zeek-config.h" #include #include @@ -289,9 +289,7 @@ void Login_Analyzer::AuthenticationDialog(bool orig, char* line) { if ( authentication_skipped ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - ConnectionEvent(authentication_skipped, vl); + ConnectionEventFast(authentication_skipped, {BuildConnVal()}); } state = LOGIN_STATE_SKIP; @@ -334,32 +332,26 @@ void Login_Analyzer::SetEnv(bool orig, char* name, char* val) else if ( login_terminal && streq(name, "TERM") ) { - val_list* vl = new val_list; - - vl->append(BuildConnVal()); - vl->append(new StringVal(val)); - - ConnectionEvent(login_terminal, vl); + ConnectionEventFast(login_terminal, { + BuildConnVal(), + new StringVal(val), + }); } else if ( login_display && streq(name, "DISPLAY") ) { - val_list* vl = new val_list; - - vl->append(BuildConnVal()); - vl->append(new StringVal(val)); - - ConnectionEvent(login_display, vl); + ConnectionEventFast(login_display, { + BuildConnVal(), + new StringVal(val), + }); } else if ( login_prompt && streq(name, "TTYPROMPT") ) { - val_list* vl = new val_list; - - vl->append(BuildConnVal()); - vl->append(new StringVal(val)); - - ConnectionEvent(login_prompt, vl); + ConnectionEventFast(login_prompt, { + BuildConnVal(), + new StringVal(val), + }); } } @@ -433,15 +425,13 @@ void Login_Analyzer::LoginEvent(EventHandlerPtr f, const char* line, Val* password = HaveTypeahead() ? PopUserTextVal() : new StringVal(""); - val_list* vl = new val_list; - - vl->append(BuildConnVal()); - vl->append(username->Ref()); - vl->append(client_name ? client_name->Ref() : val_mgr->GetEmptyString()); - vl->append(password); - vl->append(new StringVal(line)); - - ConnectionEvent(f, vl); + ConnectionEventFast(f, { + BuildConnVal(), + username->Ref(), + client_name ? client_name->Ref() : val_mgr->GetEmptyString(), + password, + new StringVal(line), + }); } const char* Login_Analyzer::GetUsername(const char* line) const @@ -454,12 +444,13 @@ const char* Login_Analyzer::GetUsername(const char* line) const void Login_Analyzer::LineEvent(EventHandlerPtr f, const char* line) { - val_list* vl = new val_list; + if ( ! f ) + return; - vl->append(BuildConnVal()); - vl->append(new StringVal(line)); - - ConnectionEvent(f, vl); + ConnectionEventFast(f, { + BuildConnVal(), + new StringVal(line), + }); } @@ -469,12 +460,11 @@ void Login_Analyzer::Confused(const char* msg, const char* line) if ( login_confused ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(new StringVal(msg)); - vl->append(new StringVal(line)); - - ConnectionEvent(login_confused, vl); + ConnectionEventFast(login_confused, { + BuildConnVal(), + new StringVal(msg), + new StringVal(line), + }); } if ( login_confused_text ) @@ -496,10 +486,10 @@ void Login_Analyzer::ConfusionText(const char* line) { if ( login_confused_text ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(new StringVal(line)); - ConnectionEvent(login_confused_text, vl); + ConnectionEventFast(login_confused_text, { + BuildConnVal(), + new StringVal(line), + }); } } diff --git a/src/analyzer/protocol/login/NVT.cc b/src/analyzer/protocol/login/NVT.cc index 11952103bf..9f2e6a2de4 100644 --- a/src/analyzer/protocol/login/NVT.cc +++ b/src/analyzer/protocol/login/NVT.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. 
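
Where the argument list is assembled conditionally, the patch keeps a local val_list, fills it step by step, and std::move's it into ConnectionEventFast(); simple either/or arguments are instead folded into ternaries inside the braced list (as in Login_Analyzer::LoginEvent() above). Below is a sketch of that pattern with stand-in types; the real code pre-sizes the list with val_list vl(6), which the mock approximates with reserve().

// Sketch of the "build locally, then move" pattern used where arguments are
// conditional (cf. the USER/KICK handling and Login_Analyzer::LoginEvent()
// above). Val, val_list and raise_event_fast() are stand-ins, not Zeek's API.
#include <string>
#include <utility>
#include <vector>

struct Val { std::string repr; };
using val_list = std::vector<Val*>;

void raise_event_fast(const char*, val_list args)
	{
	for ( auto* v : args )
		delete v;
	}

void deliver_user_command(const std::vector<std::string>& parts)
	{
	val_list vl;
	vl.reserve(6);
	vl.push_back(new Val{"conn"});
	vl.push_back(new Val{"is_orig"});

	// Optional fields fall back to an empty string, mirroring the
	// val_mgr->GetEmptyString() branches in the real code.
	vl.push_back(new Val{parts.size() > 0 ? parts[0] : std::string()});
	vl.push_back(new Val{parts.size() > 1 ? parts[1] : std::string()});
	vl.push_back(new Val{parts.size() > 2 ? parts[2] : std::string()});

	// Hand the list over without copying; vl must not be reused afterwards.
	raise_event_fast("irc_user_message", std::move(vl));
	}

int main()
	{
	deliver_user_command({ "user", "host", "server" });
	deliver_user_command({ });
	}
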
-#include "bro-config.h" +#include "zeek-config.h" #include @@ -461,11 +461,10 @@ void NVT_Analyzer::SetTerminal(const u_char* terminal, int len) { if ( login_terminal ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(new StringVal(new BroString(terminal, len, 0))); - - ConnectionEvent(login_terminal, vl); + ConnectionEventFast(login_terminal, { + BuildConnVal(), + new StringVal(new BroString(terminal, len, 0)), + }); } } diff --git a/src/analyzer/protocol/login/Plugin.cc b/src/analyzer/protocol/login/Plugin.cc index 3e4a83ceae..182c070592 100644 --- a/src/analyzer/protocol/login/Plugin.cc +++ b/src/analyzer/protocol/login/Plugin.cc @@ -9,7 +9,7 @@ #include "Rlogin.h" namespace plugin { -namespace Bro_Login { +namespace Zeek_Login { class Plugin : public plugin::Plugin { public: @@ -24,7 +24,7 @@ public: AddComponent(new ::analyzer::Component("Contents_Rlogin", 0)); plugin::Configuration config; - config.name = "Bro::Login"; + config.name = "Zeek::Login"; config.description = "Telnet/Rsh/Rlogin analyzers"; return config; } diff --git a/src/analyzer/protocol/login/RSH.cc b/src/analyzer/protocol/login/RSH.cc index 0afacb2f2b..9485e6269e 100644 --- a/src/analyzer/protocol/login/RSH.cc +++ b/src/analyzer/protocol/login/RSH.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include "NetVar.h" #include "Event.h" @@ -156,31 +156,38 @@ void Rsh_Analyzer::DeliverStream(int len, const u_char* data, bool orig) { Login_Analyzer::DeliverStream(len, data, orig); + if ( orig ) + { + if ( ! rsh_request ) + return; + } + else + { + if ( ! rsh_reply ) + return; + } + + val_list vl(4 + orig); const char* line = (const char*) data; - val_list* vl = new val_list; - line = skip_whitespace(line); - vl->append(BuildConnVal()); - vl->append(client_name ? client_name->Ref() : new StringVal("")); - vl->append(username ? username->Ref() : new StringVal("")); - vl->append(new StringVal(line)); + vl.append(BuildConnVal()); + vl.append(client_name ? client_name->Ref() : new StringVal("")); + vl.append(username ? username->Ref() : new StringVal("")); + vl.append(new StringVal(line)); - if ( orig && rsh_request ) + if ( orig ) { if ( contents_orig->RshSaveState() == RSH_SERVER_USER_NAME ) // First input - vl->append(val_mgr->GetTrue()); + vl.append(val_mgr->GetTrue()); else - vl->append(val_mgr->GetFalse()); + vl.append(val_mgr->GetFalse()); - ConnectionEvent(rsh_request, vl); + ConnectionEventFast(rsh_request, std::move(vl)); } - else if ( rsh_reply ) - ConnectionEvent(rsh_reply, vl); - else - delete_vals(vl); + ConnectionEventFast(rsh_reply, std::move(vl)); } void Rsh_Analyzer::ClientUserName(const char* s) diff --git a/src/analyzer/protocol/login/Rlogin.cc b/src/analyzer/protocol/login/Rlogin.cc index 6979148676..62b391849b 100644 --- a/src/analyzer/protocol/login/Rlogin.cc +++ b/src/analyzer/protocol/login/Rlogin.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. 
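
Rsh_Analyzer::DeliverStream() above gets a slightly larger restructure: it returns early when the direction-appropriate handler (rsh_request or rsh_reply) is absent, sizes the argument list as 4 + orig because only the request event carries the extra new_session flag, and moves the list into whichever event fires. The sketch below reproduces that control flow with stand-in types; treating the reply side as the plain else branch is an assumption of the sketch, since the flattened hunk does not show that line explicitly.

// Sketch of the restructured Rsh_Analyzer::DeliverStream() control flow.
// Everything here is a stand-in for illustration; it is not Zeek's API, and
// dispatching the reply as the else branch is an assumption of this sketch.
#include <string>
#include <utility>
#include <vector>

struct Val { std::string repr; };
using val_list = std::vector<Val*>;
using event_handler = bool; // stand-in: "does any script handle this event?"

void raise_event_fast(const char*, val_list args)
	{
	for ( auto* v : args )
		delete v;
	}

void deliver_rsh(bool orig, event_handler rsh_request, event_handler rsh_reply)
	{
	// Return before building any arguments if nobody will consume them.
	if ( orig )
		{
		if ( ! rsh_request )
			return;
		}
	else if ( ! rsh_reply )
		return;

	val_list vl;
	vl.reserve(4 + orig); // the request event carries one extra flag argument
	vl.push_back(new Val{"conn"});
	vl.push_back(new Val{"client_user"});
	vl.push_back(new Val{"server_user"});
	vl.push_back(new Val{"line"});

	if ( orig )
		{
		vl.push_back(new Val{"new_session"});
		raise_event_fast("rsh_request", std::move(vl));
		}
	else
		raise_event_fast("rsh_reply", std::move(vl));
	}

int main()
	{
	deliver_rsh(true, true, false);  // originator side, rsh_request handled
	deliver_rsh(false, false, true); // responder side, rsh_reply handled
	}
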
-#include "bro-config.h" +#include "zeek-config.h" #include "NetVar.h" #include "Event.h" @@ -244,11 +244,9 @@ void Rlogin_Analyzer::TerminalType(const char* s) { if ( login_terminal ) { - val_list* vl = new val_list; - - vl->append(BuildConnVal()); - vl->append(new StringVal(s)); - - ConnectionEvent(login_terminal, vl); + ConnectionEventFast(login_terminal, { + BuildConnVal(), + new StringVal(s), + }); } } diff --git a/src/analyzer/protocol/login/Telnet.cc b/src/analyzer/protocol/login/Telnet.cc index 78a3289931..5a187a8221 100644 --- a/src/analyzer/protocol/login/Telnet.cc +++ b/src/analyzer/protocol/login/Telnet.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include "Telnet.h" #include "NVT.h" diff --git a/src/analyzer/protocol/login/events.bif b/src/analyzer/protocol/login/events.bif index 91c58f21c4..45b82ea11e 100644 --- a/src/analyzer/protocol/login/events.bif +++ b/src/analyzer/protocol/login/events.bif @@ -14,16 +14,16 @@ ## ## new_session: True if this is the first command of the Rsh session. ## -## .. bro:see:: rsh_reply login_confused login_confused_text login_display +## .. zeek:see:: rsh_reply login_confused login_confused_text login_display ## login_failure login_input_line login_output_line login_prompt login_success ## login_terminal ## ## .. note:: For historical reasons, these events are separate from the ## ``login_`` events. Ideally, they would all be handled uniquely. ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event rsh_request%(c: connection, client_user: string, server_user: string, line: string, new_session: bool%); @@ -41,16 +41,16 @@ event rsh_request%(c: connection, client_user: string, server_user: string, line ## ## line: The command line sent in the request. ## -## .. bro:see:: rsh_request login_confused login_confused_text login_display +## .. zeek:see:: rsh_request login_confused login_confused_text login_display ## login_failure login_input_line login_output_line login_prompt login_success ## login_terminal ## ## .. note:: For historical reasons, these events are separate from the ## ``login_`` events. Ideally, they would all be handled uniquely. ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event rsh_reply%(c: connection, client_user: string, server_user: string, line: string%); @@ -72,20 +72,20 @@ event rsh_reply%(c: connection, client_user: string, server_user: string, line: ## line: The line of text that led the analyzer to conclude that the ## authentication had failed. ## -## .. bro:see:: login_confused login_confused_text login_display login_input_line +## .. 
zeek:see:: login_confused login_confused_text login_display login_input_line ## login_output_line login_prompt login_success login_terminal direct_login_prompts ## get_login_state login_failure_msgs login_non_failure_msgs login_prompts login_success_msgs ## login_timeouts set_login_state ## ## .. note:: The login analyzer depends on a set of script-level variables that ## need to be configured with patterns identifying login attempts. This -## configuration has not yet been ported over from Bro 1.5 to Bro 2.x, and +## configuration has not yet been ported, and ## the analyzer is therefore not directly usable at the moment. ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeeks's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to add a -## call to :bro:see:`Analyzer::register_for_ports` or a DPD payload +## been ported. To still enable this event, one needs to add a +## call to :zeek:see:`Analyzer::register_for_ports` or a DPD payload ## signature. event login_failure%(c: connection, user: string, client_user: string, password: string, line: string%); @@ -107,20 +107,20 @@ event login_failure%(c: connection, user: string, client_user: string, password: ## line: The line of text that led the analyzer to conclude that the ## authentication had succeeded. ## -## .. bro:see:: login_confused login_confused_text login_display login_failure +## .. zeek:see:: login_confused login_confused_text login_display login_failure ## login_input_line login_output_line login_prompt login_terminal ## direct_login_prompts get_login_state login_failure_msgs login_non_failure_msgs ## login_prompts login_success_msgs login_timeouts set_login_state ## ## .. note:: The login analyzer depends on a set of script-level variables that ## need to be configured with patterns identifying login attempts. This -## configuration has not yet been ported over from Bro 1.5 to Bro 2.x, and +## configuration has not yet been ported, and ## the analyzer is therefore not directly usable at the moment. ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to add a -## call to :bro:see:`Analyzer::register_for_ports` or a DPD payload +## been ported. To still enable this event, one needs to add a +## call to :zeek:see:`Analyzer::register_for_ports` or a DPD payload ## signature. event login_success%(c: connection, user: string, client_user: string, password: string, line: string%); @@ -131,13 +131,13 @@ event login_success%(c: connection, user: string, client_user: string, password: ## ## line: The input line. ## -## .. bro:see:: login_confused login_confused_text login_display login_failure +## .. zeek:see:: login_confused login_confused_text login_display login_failure ## login_output_line login_prompt login_success login_terminal rsh_request ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. 
To still enable this event, one needs to add a -## call to :bro:see:`Analyzer::register_for_ports` or a DPD payload +## been ported. To still enable this event, one needs to add a +## call to :zeek:see:`Analyzer::register_for_ports` or a DPD payload ## signature. event login_input_line%(c: connection, line: string%); @@ -148,17 +148,17 @@ event login_input_line%(c: connection, line: string%); ## ## line: The ouput line. ## -## .. bro:see:: login_confused login_confused_text login_display login_failure +## .. zeek:see:: login_confused login_confused_text login_display login_failure ## login_input_line login_prompt login_success login_terminal rsh_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to add a -## call to :bro:see:`Analyzer::register_for_ports` or a DPD payload +## been ported. To still enable this event, one needs to add a +## call to :zeek:see:`Analyzer::register_for_ports` or a DPD payload ## signature. event login_output_line%(c: connection, line: string%); -## Generated when tracking of Telnet/Rlogin authentication failed. As Bro's +## Generated when tracking of Telnet/Rlogin authentication failed. As Zeek's ## *login* analyzer uses a number of heuristics to extract authentication ## information, it may become confused. If it can no longer correctly track ## the authentication dialog, it raises this event. @@ -173,36 +173,36 @@ event login_output_line%(c: connection, line: string%); ## line: The line of text that caused the heuristics to conclude they were ## confused. ## -## .. bro:see:: login_confused_text login_display login_failure login_input_line login_output_line +## .. zeek:see:: login_confused_text login_display login_failure login_input_line login_output_line ## login_prompt login_success login_terminal direct_login_prompts get_login_state ## login_failure_msgs login_non_failure_msgs login_prompts login_success_msgs ## login_timeouts set_login_state ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to add a -## call to :bro:see:`Analyzer::register_for_ports` or a DPD payload +## been ported. To still enable this event, one needs to add a +## call to :zeek:see:`Analyzer::register_for_ports` or a DPD payload ## signature. event login_confused%(c: connection, msg: string, line: string%); ## Generated after getting confused while tracking a Telnet/Rlogin ## authentication dialog. The *login* analyzer generates this even for every -## line of user input after it has reported :bro:id:`login_confused` for a +## line of user input after it has reported :zeek:id:`login_confused` for a ## connection. ## ## c: The connection. ## ## line: The line the user typed. ## -## .. bro:see:: login_confused login_display login_failure login_input_line +## .. zeek:see:: login_confused login_display login_failure login_input_line ## login_output_line login_prompt login_success login_terminal direct_login_prompts ## get_login_state login_failure_msgs login_non_failure_msgs login_prompts ## login_success_msgs login_timeouts set_login_state ## -## .. 
todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to add a -## call to :bro:see:`Analyzer::register_for_ports` or a DPD payload +## been ported. To still enable this event, one needs to add a +## call to :zeek:see:`Analyzer::register_for_ports` or a DPD payload ## signature. event login_confused_text%(c: connection, line: string%); @@ -213,13 +213,13 @@ event login_confused_text%(c: connection, line: string%); ## ## terminal: The TERM value transmitted. ## -## .. bro:see:: login_confused login_confused_text login_display login_failure +## .. zeek:see:: login_confused login_confused_text login_display login_failure ## login_input_line login_output_line login_prompt login_success ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to add a -## call to :bro:see:`Analyzer::register_for_ports` or a DPD payload +## been ported. To still enable this event, one needs to add a +## call to :zeek:see:`Analyzer::register_for_ports` or a DPD payload ## signature. event login_terminal%(c: connection, terminal: string%); @@ -230,13 +230,13 @@ event login_terminal%(c: connection, terminal: string%); ## ## display: The DISPLAY transmitted. ## -## .. bro:see:: login_confused login_confused_text login_failure login_input_line +## .. zeek:see:: login_confused login_confused_text login_failure login_input_line ## login_output_line login_prompt login_success login_terminal ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to add a -## call to :bro:see:`Analyzer::register_for_ports` or a DPD payload +## been ported. To still enable this event, one needs to add a +## call to :zeek:see:`Analyzer::register_for_ports` or a DPD payload ## signature. event login_display%(c: connection, display: string%); @@ -252,16 +252,16 @@ event login_display%(c: connection, display: string%); ## ## c: The connection. ## -## .. bro:see:: authentication_rejected authentication_skipped login_success +## .. zeek:see:: authentication_rejected authentication_skipped login_success ## ## .. note:: This event inspects the corresponding Telnet option -## while :bro:id:`login_success` heuristically determines success by watching +## while :zeek:id:`login_success` heuristically determines success by watching ## session data. ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to add a -## call to :bro:see:`Analyzer::register_for_ports` or a DPD payload +## been ported. To still enable this event, one needs to add a +## call to :zeek:see:`Analyzer::register_for_ports` or a DPD payload ## signature. 
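For illustration, here is a minimal Zeek script sketch of the workaround the todo notes above keep pointing at: registering the analyzer for a port so that these login events can fire. It is not part of this patch; the analyzer tag and port are assumptions and need to match the traffic in question.

    # Sketch only: activate the login/Telnet analyzer by port, since the default
    # configuration does not enable it (the corresponding policy script has not
    # been ported).
    event zeek_init()
        {
        Analyzer::register_for_ports(Analyzer::ANALYZER_TELNET, set(23/tcp));
        }

    # With the analyzer active, the events documented above can be handled as usual.
    event login_failure(c: connection, user: string, client_user: string,
                        password: string, line: string)
        {
        print fmt("login failure for user %s from %s", user, c$id$orig_h);
        }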
event authentication_accepted%(name: string, c: connection%); @@ -277,16 +277,16 @@ event authentication_accepted%(name: string, c: connection%); ## ## c: The connection. ## -## .. bro:see:: authentication_accepted authentication_skipped login_failure +## .. zeek:see:: authentication_accepted authentication_skipped login_failure ## ## .. note:: This event inspects the corresponding Telnet option -## while :bro:id:`login_success` heuristically determines failure by watching +## while :zeek:id:`login_success` heuristically determines failure by watching ## session data. ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to add a -## call to :bro:see:`Analyzer::register_for_ports` or a DPD payload +## been ported. To still enable this event, one needs to add a +## call to :zeek:see:`Analyzer::register_for_ports` or a DPD payload ## signature. event authentication_rejected%(name: string, c: connection%); @@ -298,19 +298,19 @@ event authentication_rejected%(name: string, c: connection%); ## ## c: The connection. ## -## .. bro:see:: authentication_accepted authentication_rejected direct_login_prompts +## .. zeek:see:: authentication_accepted authentication_rejected direct_login_prompts ## get_login_state login_failure_msgs login_non_failure_msgs login_prompts ## login_success_msgs login_timeouts set_login_state ## ## .. note:: The login analyzer depends on a set of script-level variables that ## need to be configured with patterns identifying activity. This -## configuration has not yet been ported over from Bro 1.5 to Bro 2.x, and +## configuration has not yet been ported, and ## the analyzer is therefore not directly usable at the moment. ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to add a -## call to :bro:see:`Analyzer::register_for_ports` or a DPD payload +## been ported. To still enable this event, one needs to add a +## call to :zeek:see:`Analyzer::register_for_ports` or a DPD payload ## signature. event authentication_skipped%(c: connection%); @@ -325,13 +325,13 @@ event authentication_skipped%(c: connection%); ## ## prompt: The TTYPROMPT transmitted. ## -## .. bro:see:: login_confused login_confused_text login_display login_failure +## .. zeek:see:: login_confused login_confused_text login_display login_failure ## login_input_line login_output_line login_success login_terminal ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to add a -## call to :bro:see:`Analyzer::register_for_ports` or a DPD payload +## been ported. To still enable this event, one needs to add a +## call to :zeek:see:`Analyzer::register_for_ports` or a DPD payload ## signature. event login_prompt%(c: connection, prompt: string%); @@ -344,7 +344,7 @@ event login_prompt%(c: connection, prompt: string%); ## ## c: The connection. ## -## .. 
bro:see:: authentication_accepted authentication_rejected authentication_skipped +## .. zeek:see:: authentication_accepted authentication_rejected authentication_skipped ## login_confused login_confused_text login_display login_failure login_input_line ## login_output_line login_prompt login_success login_terminal event activating_encryption%(c: connection%); @@ -362,7 +362,7 @@ event activating_encryption%(c: connection%); ## ## c: The connection. ## -## .. bro:see:: bad_option bad_option_termination authentication_accepted +## .. zeek:see:: bad_option bad_option_termination authentication_accepted ## authentication_rejected authentication_skipped login_confused ## login_confused_text login_display login_failure login_input_line ## login_output_line login_prompt login_success login_terminal @@ -375,15 +375,15 @@ event inconsistent_option%(c: connection%); ## ## c: The connection. ## -## .. bro:see:: inconsistent_option bad_option_termination authentication_accepted +## .. zeek:see:: inconsistent_option bad_option_termination authentication_accepted ## authentication_rejected authentication_skipped login_confused ## login_confused_text login_display login_failure login_input_line ## login_output_line login_prompt login_success login_terminal ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to add a -## call to :bro:see:`Analyzer::register_for_ports` or a DPD payload +## been ported. To still enable this event, one needs to add a +## call to :zeek:see:`Analyzer::register_for_ports` or a DPD payload ## signature. event bad_option%(c: connection%); @@ -394,14 +394,14 @@ event bad_option%(c: connection%); ## ## c: The connection. ## -## .. bro:see:: inconsistent_option bad_option authentication_accepted +## .. zeek:see:: inconsistent_option bad_option authentication_accepted ## authentication_rejected authentication_skipped login_confused ## login_confused_text login_display login_failure login_input_line ## login_output_line login_prompt login_success login_terminal ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to add a -## call to :bro:see:`Analyzer::register_for_ports` or a DPD payload +## been ported. To still enable this event, one needs to add a +## call to :zeek:see:`Analyzer::register_for_ports` or a DPD payload ## signature. event bad_option_termination%(c: connection%); diff --git a/src/analyzer/protocol/login/functions.bif b/src/analyzer/protocol/login/functions.bif index bc4b2a7104..932020595c 100644 --- a/src/analyzer/protocol/login/functions.bif +++ b/src/analyzer/protocol/login/functions.bif @@ -21,7 +21,7 @@ ## does not correctly know the state of the connection, and/or ## the username associated with it. ## -## .. bro:see:: set_login_state +## .. zeek:see:: set_login_state function get_login_state%(cid: conn_id%): count %{ Connection* c = sessions->FindConnection(cid); @@ -40,12 +40,12 @@ function get_login_state%(cid: conn_id%): count ## cid: The connection ID. ## ## new_state: The new state of the login analyzer. 
See -## :bro:id:`get_login_state` for possible values. +## :zeek:id:`get_login_state` for possible values. ## ## Returns: Returns false if *cid* is not an active connection ## or is not tagged as a login analyzer, and true otherwise. ## -## .. bro:see:: get_login_state +## .. zeek:see:: get_login_state function set_login_state%(cid: conn_id, new_state: count%): bool %{ Connection* c = sessions->FindConnection(cid); diff --git a/src/analyzer/protocol/mime/CMakeLists.txt b/src/analyzer/protocol/mime/CMakeLists.txt index 0a038625f8..6275297dc9 100644 --- a/src/analyzer/protocol/mime/CMakeLists.txt +++ b/src/analyzer/protocol/mime/CMakeLists.txt @@ -4,12 +4,12 @@ # it's also parsing a protocol just like them. The current structure # is merely a left-over from when this code was written. -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro MIME) -bro_plugin_cc(MIME.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek MIME) +zeek_plugin_cc(MIME.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/mime/MIME.cc b/src/analyzer/protocol/mime/MIME.cc index 931e155fdf..8fb027f8e8 100644 --- a/src/analyzer/protocol/mime/MIME.cc +++ b/src/analyzer/protocol/mime/MIME.cc @@ -1,4 +1,4 @@ -#include "bro-config.h" +#include "zeek-config.h" #include "NetVar.h" #include "MIME.h" @@ -1358,11 +1358,11 @@ void MIME_Mail::Done() hash_final(md5_hash, digest); md5_hash = nullptr; - val_list* vl = new val_list; - vl->append(analyzer->BuildConnVal()); - vl->append(val_mgr->GetCount(content_hash_length)); - vl->append(new StringVal(new BroString(1, digest, 16))); - analyzer->ConnectionEvent(mime_content_hash, vl); + analyzer->ConnectionEventFast(mime_content_hash, { + analyzer->BuildConnVal(), + val_mgr->GetCount(content_hash_length), + new StringVal(new BroString(1, digest, 16)), + }); } MIME_Message::Done(); @@ -1386,11 +1386,7 @@ void MIME_Mail::BeginEntity(MIME_Entity* /* entity */) cur_entity_id.clear(); if ( mime_begin_entity ) - { - val_list* vl = new val_list; - vl->append(analyzer->BuildConnVal()); - analyzer->ConnectionEvent(mime_begin_entity, vl); - } + analyzer->ConnectionEventFast(mime_begin_entity, {analyzer->BuildConnVal()}); buffer_start = data_start = 0; ASSERT(entity_content.size() == 0); @@ -1402,12 +1398,11 @@ void MIME_Mail::EndEntity(MIME_Entity* /* entity */) { BroString* s = concatenate(entity_content); - val_list* vl = new val_list(); - vl->append(analyzer->BuildConnVal()); - vl->append(val_mgr->GetCount(s->Len())); - vl->append(new StringVal(s)); - - analyzer->ConnectionEvent(mime_entity_data, vl); + analyzer->ConnectionEventFast(mime_entity_data, { + analyzer->BuildConnVal(), + val_mgr->GetCount(s->Len()), + new StringVal(s), + }); if ( ! 
mime_all_data ) delete_strings(entity_content); @@ -1416,11 +1411,7 @@ void MIME_Mail::EndEntity(MIME_Entity* /* entity */) } if ( mime_end_entity ) - { - val_list* vl = new val_list; - vl->append(analyzer->BuildConnVal()); - analyzer->ConnectionEvent(mime_end_entity, vl); - } + analyzer->ConnectionEventFast(mime_end_entity, {analyzer->BuildConnVal()}); file_mgr->EndOfFile(analyzer->GetAnalyzerTag(), analyzer->Conn()); cur_entity_id.clear(); @@ -1430,10 +1421,10 @@ void MIME_Mail::SubmitHeader(MIME_Header* h) { if ( mime_one_header ) { - val_list* vl = new val_list(); - vl->append(analyzer->BuildConnVal()); - vl->append(BuildHeaderVal(h)); - analyzer->ConnectionEvent(mime_one_header, vl); + analyzer->ConnectionEventFast(mime_one_header, { + analyzer->BuildConnVal(), + BuildHeaderVal(h), + }); } } @@ -1441,10 +1432,10 @@ void MIME_Mail::SubmitAllHeaders(MIME_HeaderList& hlist) { if ( mime_all_headers ) { - val_list* vl = new val_list(); - vl->append(analyzer->BuildConnVal()); - vl->append(BuildHeaderTable(hlist)); - analyzer->ConnectionEvent(mime_all_headers, vl); + analyzer->ConnectionEventFast(mime_all_headers, { + analyzer->BuildConnVal(), + BuildHeaderTable(hlist), + }); } } @@ -1478,11 +1469,11 @@ void MIME_Mail::SubmitData(int len, const char* buf) const char* data = (char*) data_buffer->Bytes() + data_start; int data_len = (buf + len) - data; - val_list* vl = new val_list(); - vl->append(analyzer->BuildConnVal()); - vl->append(val_mgr->GetCount(data_len)); - vl->append(new StringVal(data_len, data)); - analyzer->ConnectionEvent(mime_segment_data, vl); + analyzer->ConnectionEventFast(mime_segment_data, { + analyzer->BuildConnVal(), + val_mgr->GetCount(data_len), + new StringVal(data_len, data), + }); } cur_entity_id = file_mgr->DataIn(reinterpret_cast(buf), len, @@ -1525,12 +1516,11 @@ void MIME_Mail::SubmitAllData() BroString* s = concatenate(all_content); delete_strings(all_content); - val_list* vl = new val_list(); - vl->append(analyzer->BuildConnVal()); - vl->append(val_mgr->GetCount(s->Len())); - vl->append(new StringVal(s)); - - analyzer->ConnectionEvent(mime_all_data, vl); + analyzer->ConnectionEventFast(mime_all_data, { + analyzer->BuildConnVal(), + val_mgr->GetCount(s->Len()), + new StringVal(s), + }); } } @@ -1555,10 +1545,10 @@ void MIME_Mail::SubmitEvent(int event_type, const char* detail) if ( mime_event ) { - val_list* vl = new val_list(); - vl->append(analyzer->BuildConnVal()); - vl->append(new StringVal(category)); - vl->append(new StringVal(detail)); - analyzer->ConnectionEvent(mime_event, vl); + analyzer->ConnectionEventFast(mime_event, { + analyzer->BuildConnVal(), + new StringVal(category), + new StringVal(detail), + }); } } diff --git a/src/analyzer/protocol/mime/Plugin.cc b/src/analyzer/protocol/mime/Plugin.cc index f7a1c22f3e..6cff9f0a5a 100644 --- a/src/analyzer/protocol/mime/Plugin.cc +++ b/src/analyzer/protocol/mime/Plugin.cc @@ -4,14 +4,14 @@ #include "plugin/Plugin.h" namespace plugin { -namespace Bro_MIME { +namespace Zeek_MIME { class Plugin : public plugin::Plugin { public: plugin::Configuration Configure() { plugin::Configuration config; - config.name = "Bro::MIME"; + config.name = "Zeek::MIME"; config.description = "MIME parsing"; return config; } diff --git a/src/analyzer/protocol/mime/events.bif b/src/analyzer/protocol/mime/events.bif index c0b2e66132..2b38d60481 100644 --- a/src/analyzer/protocol/mime/events.bif +++ b/src/analyzer/protocol/mime/events.bif @@ -1,46 +1,46 @@ ## Generated when starting to parse an email MIME entity. 
MIME is a ## protocol-independent data format for encoding text and files, along with -## corresponding metadata, for transmission. Bro raises this event when it +## corresponding metadata, for transmission. Zeek raises this event when it ## begins parsing a MIME entity extracted from an email protocol. ## -## Bro's MIME analyzer for emails currently supports SMTP and POP3. See +## Zeek's MIME analyzer for emails currently supports SMTP and POP3. See ## `Wikipedia `__ for more information ## about MIME. ## ## c: The connection. ## -## .. bro:see:: mime_all_data mime_all_headers mime_content_hash mime_end_entity +## .. zeek:see:: mime_all_data mime_all_headers mime_content_hash mime_end_entity ## mime_entity_data mime_event mime_one_header mime_segment_data smtp_data ## http_begin_entity ## -## .. note:: Bro also extracts MIME entities from HTTP sessions. For those, -## however, it raises :bro:id:`http_begin_entity` instead. +## .. note:: Zeek also extracts MIME entities from HTTP sessions. For those, +## however, it raises :zeek:id:`http_begin_entity` instead. event mime_begin_entity%(c: connection%); ## Generated when finishing parsing an email MIME entity. MIME is a ## protocol-independent data format for encoding text and files, along with -## corresponding metadata, for transmission. Bro raises this event when it +## corresponding metadata, for transmission. Zeek raises this event when it ## finished parsing a MIME entity extracted from an email protocol. ## -## Bro's MIME analyzer for emails currently supports SMTP and POP3. See +## Zeek's MIME analyzer for emails currently supports SMTP and POP3. See ## `Wikipedia `__ for more information ## about MIME. ## ## c: The connection. ## -## .. bro:see:: mime_all_data mime_all_headers mime_begin_entity mime_content_hash +## .. zeek:see:: mime_all_data mime_all_headers mime_begin_entity mime_content_hash ## mime_entity_data mime_event mime_one_header mime_segment_data smtp_data ## http_end_entity ## -## .. note:: Bro also extracts MIME entities from HTTP sessions. For those, -## however, it raises :bro:id:`http_end_entity` instead. +## .. note:: Zeek also extracts MIME entities from HTTP sessions. For those, +## however, it raises :zeek:id:`http_end_entity` instead. event mime_end_entity%(c: connection%); ## Generated for individual MIME headers extracted from email MIME ## entities. MIME is a protocol-independent data format for encoding text and ## files, along with corresponding metadata, for transmission. ## -## Bro's MIME analyzer for emails currently supports SMTP and POP3. See +## Zeek's MIME analyzer for emails currently supports SMTP and POP3. See ## `Wikipedia `__ for more information ## about MIME. ## @@ -48,19 +48,19 @@ event mime_end_entity%(c: connection%); ## ## h: The parsed MIME header. ## -## .. bro:see:: mime_all_data mime_all_headers mime_begin_entity mime_content_hash +## .. zeek:see:: mime_all_data mime_all_headers mime_begin_entity mime_content_hash ## mime_end_entity mime_entity_data mime_event mime_segment_data ## http_header http_all_headers ## -## .. note:: Bro also extracts MIME headers from HTTP sessions. For those, -## however, it raises :bro:id:`http_header` instead. +## .. note:: Zeek also extracts MIME headers from HTTP sessions. For those, +## however, it raises :zeek:id:`http_header` instead. event mime_one_header%(c: connection, h: mime_header_rec%); ## Generated for MIME headers extracted from email MIME entities, passing all ## headers at once. 
MIME is a protocol-independent data format for encoding ## text and files, along with corresponding metadata, for transmission. ## -## Bro's MIME analyzer for emails currently supports SMTP and POP3. See +## Zeek's MIME analyzer for emails currently supports SMTP and POP3. See ## `Wikipedia `__ for more information ## about MIME. ## @@ -70,25 +70,25 @@ event mime_one_header%(c: connection, h: mime_header_rec%); ## The table is indexed by the position of the header (1 for the first, ## 2 for the second, etc.). ## -## .. bro:see:: mime_all_data mime_begin_entity mime_content_hash mime_end_entity +## .. zeek:see:: mime_all_data mime_begin_entity mime_content_hash mime_end_entity ## mime_entity_data mime_event mime_one_header mime_segment_data ## http_header http_all_headers ## -## .. note:: Bro also extracts MIME headers from HTTP sessions. For those, -## however, it raises :bro:id:`http_header` instead. +## .. note:: Zeek also extracts MIME headers from HTTP sessions. For those, +## however, it raises :zeek:id:`http_header` instead. event mime_all_headers%(c: connection, hlist: mime_header_list%); ## Generated for chunks of decoded MIME data from email MIME entities. MIME ## is a protocol-independent data format for encoding text and files, along with -## corresponding metadata, for transmission. As Bro parses the data of an +## corresponding metadata, for transmission. As Zeek parses the data of an ## entity, it raises a sequence of these events, each coming as soon as a new ## chunk of data is available. In contrast, there is also -## :bro:id:`mime_entity_data`, which passes all of an entities data at once +## :zeek:id:`mime_entity_data`, which passes all of an entity's data at once ## in a single block. While the latter is more convenient to handle, -## ``mime_segment_data`` is more efficient as Bro does not need to buffer +## ``mime_segment_data`` is more efficient as Zeek does not need to buffer ## the data. Thus, if possible, this event should be preferred. ## -## Bro's MIME analyzer for emails currently supports SMTP and POP3. See +## Zeek's MIME analyzer for emails currently supports SMTP and POP3. See ## `Wikipedia `__ for more information ## about MIME. ## @@ -98,23 +98,23 @@ event mime_all_headers%(c: connection, hlist: mime_header_list%); ## ## data: The raw data of one segment of the current entity. ## -## .. bro:see:: mime_all_data mime_all_headers mime_begin_entity mime_content_hash +## .. zeek:see:: mime_all_data mime_all_headers mime_begin_entity mime_content_hash ## mime_end_entity mime_entity_data mime_event mime_one_header http_entity_data ## mime_segment_length mime_segment_overlap_length ## -## .. note:: Bro also extracts MIME data from HTTP sessions. For those, -## however, it raises :bro:id:`http_entity_data` (sic!) instead. +## .. note:: Zeek also extracts MIME data from HTTP sessions. For those, +## however, it raises :zeek:id:`http_entity_data` (sic!) instead. event mime_segment_data%(c: connection, length: count, data: string%); ## Generated for data decoded from an email MIME entity. This event delivers ## the complete content of a single MIME entity with the quoted-printable and -## and base64 data decoded. In contrast, there is also :bro:id:`mime_segment_data`, +## base64 data decoded. In contrast, there is also :zeek:id:`mime_segment_data`, ## which passes on a sequence of data chunks as they come in. While ## ``mime_entity_data`` is more convenient to handle, ``mime_segment_data`` is -## more efficient as Bro does not need to buffer the data.
Thus, if possible, +## more efficient as Zeek does not need to buffer the data. Thus, if possible, ## the latter should be preferred. ## -## Bro's MIME analyzer for emails currently supports SMTP and POP3. See +## Zeek's MIME analyzer for emails currently supports SMTP and POP3. See ## `Wikipedia `__ for more information ## about MIME. ## @@ -124,10 +124,10 @@ event mime_segment_data%(c: connection, length: count, data: string%); ## ## data: The raw data of the complete entity. ## -## .. bro:see:: mime_all_data mime_all_headers mime_begin_entity mime_content_hash +## .. zeek:see:: mime_all_data mime_all_headers mime_begin_entity mime_content_hash ## mime_end_entity mime_event mime_one_header mime_segment_data ## -## .. note:: While Bro also decodes MIME entities extracted from HTTP +## .. note:: While Zeek also decodes MIME entities extracted from HTTP ## sessions, there's no corresponding event for that currently. event mime_entity_data%(c: connection, length: count, data: string%); @@ -137,7 +137,7 @@ event mime_entity_data%(c: connection, length: count, data: string%); ## of the potentially significant buffering necessary, using this event can be ## expensive. ## -## Bro's MIME analyzer for emails currently supports SMTP and POP3. See +## Zeek's MIME analyzer for emails currently supports SMTP and POP3. See ## `Wikipedia `__ for more information ## about MIME. ## @@ -147,16 +147,16 @@ event mime_entity_data%(c: connection, length: count, data: string%); ## ## data: The raw data of all MIME entities concatenated. ## -## .. bro:see:: mime_all_headers mime_begin_entity mime_content_hash mime_end_entity +## .. zeek:see:: mime_all_headers mime_begin_entity mime_content_hash mime_end_entity ## mime_entity_data mime_event mime_one_header mime_segment_data ## -## .. note:: While Bro also decodes MIME entities extracted from HTTP +## .. note:: While Zeek also decodes MIME entities extracted from HTTP ## sessions, there's no corresponding event for that currently. event mime_all_data%(c: connection, length: count, data: string%); ## Generated for errors found when decoding email MIME entities. ## -## Bro's MIME analyzer for emails currently supports SMTP and POP3. See +## Zeek's MIME analyzer for emails currently supports SMTP and POP3. See ## `Wikipedia `__ for more information ## about MIME. ## @@ -167,18 +167,18 @@ event mime_all_data%(c: connection, length: count, data: string%); ## ## detail: Further more detailed description of the error. ## -## .. bro:see:: mime_all_data mime_all_headers mime_begin_entity mime_content_hash +## .. zeek:see:: mime_all_data mime_all_headers mime_begin_entity mime_content_hash ## mime_end_entity mime_entity_data mime_one_header mime_segment_data http_event ## -## .. note:: Bro also extracts MIME headers from HTTP sessions. For those, -## however, it raises :bro:id:`http_event` instead. +## .. note:: Zeek also extracts MIME headers from HTTP sessions. For those, +## however, it raises :zeek:id:`http_event` instead. event mime_event%(c: connection, event_type: string, detail: string%); ## Generated for decoded MIME entities extracted from email messages, passing on -## their MD5 checksums. Bro computes the MD5 over the complete decoded data of +## their MD5 checksums. Zeek computes the MD5 over the complete decoded data of ## each MIME entity. ## -## Bro's MIME analyzer for emails currently supports SMTP and POP3. See +## Zeek's MIME analyzer for emails currently supports SMTP and POP3. See ## `Wikipedia `__ for more information ## about MIME. 
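The trade-off documented above comes down to where the buffering happens: Zeek must hold the whole entity to deliver mime_entity_data, while mime_segment_data streams chunks as they are decoded. A minimal, hypothetical handler sketch of the preferred streaming style, not part of this patch:

    # Sketch only: consume entity data chunk by chunk as it is decoded,
    # instead of waiting for the fully buffered mime_entity_data event.
    event mime_segment_data(c: connection, length: count, data: string)
        {
        print fmt("MIME segment: %d bytes on connection %s", length, c$uid);
        }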
## @@ -188,10 +188,10 @@ event mime_event%(c: connection, event_type: string, detail: string%); ## ## hash_value: The MD5 hash. ## -## .. bro:see:: mime_all_data mime_all_headers mime_begin_entity mime_end_entity +## .. zeek:see:: mime_all_data mime_all_headers mime_begin_entity mime_end_entity ## mime_entity_data mime_event mime_one_header mime_segment_data ## -## .. note:: While Bro also decodes MIME entities extracted from HTTP +## .. note:: While Zeek also decodes MIME entities extracted from HTTP ## sessions, there's no corresponding event for that currently. event mime_content_hash%(c: connection, content_len: count, hash_value: string%); diff --git a/src/analyzer/protocol/modbus/CMakeLists.txt b/src/analyzer/protocol/modbus/CMakeLists.txt index e6705cdd22..2560f18a60 100644 --- a/src/analyzer/protocol/modbus/CMakeLists.txt +++ b/src/analyzer/protocol/modbus/CMakeLists.txt @@ -1,10 +1,10 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro Modbus) -bro_plugin_cc(Modbus.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_pac(modbus.pac modbus-analyzer.pac modbus-protocol.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek Modbus) +zeek_plugin_cc(Modbus.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_pac(modbus.pac modbus-analyzer.pac modbus-protocol.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/modbus/Plugin.cc b/src/analyzer/protocol/modbus/Plugin.cc index 8a01878113..68b78fcbe7 100644 --- a/src/analyzer/protocol/modbus/Plugin.cc +++ b/src/analyzer/protocol/modbus/Plugin.cc @@ -6,7 +6,7 @@ #include "Modbus.h" namespace plugin { -namespace Bro_Modbus { +namespace Zeek_Modbus { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("MODBUS", ::analyzer::modbus::ModbusTCP_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::Modbus"; + config.name = "Zeek::Modbus"; config.description = "Modbus analyzer"; return config; } diff --git a/src/analyzer/protocol/mysql/CMakeLists.txt b/src/analyzer/protocol/mysql/CMakeLists.txt index 13558417ec..3ac448c665 100644 --- a/src/analyzer/protocol/mysql/CMakeLists.txt +++ b/src/analyzer/protocol/mysql/CMakeLists.txt @@ -1,10 +1,10 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro MySQL) - bro_plugin_cc(MySQL.cc Plugin.cc) - bro_plugin_bif(events.bif) - bro_plugin_pac(mysql.pac mysql-analyzer.pac mysql-protocol.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek MySQL) + zeek_plugin_cc(MySQL.cc Plugin.cc) + zeek_plugin_bif(events.bif) + zeek_plugin_pac(mysql.pac mysql-analyzer.pac mysql-protocol.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/mysql/Plugin.cc b/src/analyzer/protocol/mysql/Plugin.cc index 48bfd04a97..0f484e29ce 100644 --- a/src/analyzer/protocol/mysql/Plugin.cc +++ b/src/analyzer/protocol/mysql/Plugin.cc @@ -5,14 +5,14 @@ #include "MySQL.h" namespace plugin { - namespace Bro_MySQL { + namespace Zeek_MySQL { class Plugin : public plugin::Plugin { public: plugin::Configuration Configure() { AddComponent(new ::analyzer::Component("MySQL", ::analyzer::MySQL::MySQL_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::MySQL"; + config.name = "Zeek::MySQL"; config.description = "MySQL analyzer"; return config; } diff --git a/src/analyzer/protocol/mysql/events.bif b/src/analyzer/protocol/mysql/events.bif index 
34cbc54b4b..7ce65276a6 100644 --- a/src/analyzer/protocol/mysql/events.bif +++ b/src/analyzer/protocol/mysql/events.bif @@ -9,7 +9,7 @@ ## ## arg: The argument for the command (empty string if not provided). ## -## .. bro:see:: mysql_error mysql_ok mysql_server_version mysql_handshake +## .. zeek:see:: mysql_error mysql_ok mysql_server_version mysql_handshake event mysql_command_request%(c: connection, command: count, arg: string%); ## Generated for an unsuccessful MySQL response. @@ -23,7 +23,7 @@ event mysql_command_request%(c: connection, command: count, arg: string%); ## ## msg: Any extra details about the error (empty string if not provided). ## -## .. bro:see:: mysql_command_request mysql_ok mysql_server_version mysql_handshake +## .. zeek:see:: mysql_command_request mysql_ok mysql_server_version mysql_handshake event mysql_error%(c: connection, code: count, msg: string%); ## Generated for a successful MySQL response. @@ -35,7 +35,7 @@ event mysql_error%(c: connection, code: count, msg: string%); ## ## affected_rows: The number of rows that were affected. ## -## .. bro:see:: mysql_command_request mysql_error mysql_server_version mysql_handshake +## .. zeek:see:: mysql_command_request mysql_error mysql_server_version mysql_handshake event mysql_ok%(c: connection, affected_rows: count%); ## Generated for each MySQL ResultsetRow response packet. @@ -47,7 +47,7 @@ event mysql_ok%(c: connection, affected_rows: count%); ## ## row: The result row data. ## -## .. bro:see:: mysql_command_request mysql_error mysql_server_version mysql_handshake mysql_ok +## .. zeek:see:: mysql_command_request mysql_error mysql_server_version mysql_handshake mysql_ok event mysql_result_row%(c: connection, row: string_vec%); ## Generated for the initial server handshake packet, which includes the MySQL server version. @@ -59,7 +59,7 @@ event mysql_result_row%(c: connection, row: string_vec%); ## ## ver: The server version string. ## -## .. bro:see:: mysql_command_request mysql_error mysql_ok mysql_handshake +## .. zeek:see:: mysql_command_request mysql_error mysql_ok mysql_handshake event mysql_server_version%(c: connection, ver: string%); ## Generated for a client handshake response packet, which includes the username the client is attempting @@ -72,6 +72,6 @@ event mysql_server_version%(c: connection, ver: string%); ## ## username: The username supplied by the client ## -## .. bro:see:: mysql_command_request mysql_error mysql_ok mysql_server_version +## .. 
zeek:see:: mysql_command_request mysql_error mysql_ok mysql_server_version event mysql_handshake%(c: connection, username: string%); diff --git a/src/analyzer/protocol/ncp/CMakeLists.txt b/src/analyzer/protocol/ncp/CMakeLists.txt index 1ec5cf2e67..62b198553b 100644 --- a/src/analyzer/protocol/ncp/CMakeLists.txt +++ b/src/analyzer/protocol/ncp/CMakeLists.txt @@ -1,10 +1,10 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro NCP) -bro_plugin_cc(NCP.cc Plugin.cc) -bro_plugin_bif(events.bif consts.bif) -bro_plugin_pac(ncp.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek NCP) +zeek_plugin_cc(NCP.cc Plugin.cc) +zeek_plugin_bif(events.bif consts.bif) +zeek_plugin_pac(ncp.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/ncp/NCP.cc b/src/analyzer/protocol/ncp/NCP.cc index b59358b703..e8407b9fc4 100644 --- a/src/analyzer/protocol/ncp/NCP.cc +++ b/src/analyzer/protocol/ncp/NCP.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include #include @@ -61,21 +61,27 @@ void NCP_Session::DeliverFrame(const binpac::NCP::ncp_frame* frame) EventHandlerPtr f = frame->is_orig() ? ncp_request : ncp_reply; if ( f ) { - val_list* vl = new val_list; - vl->append(analyzer->BuildConnVal()); - vl->append(val_mgr->GetCount(frame->frame_type())); - vl->append(val_mgr->GetCount(frame->body_length())); - if ( frame->is_orig() ) - vl->append(val_mgr->GetCount(req_func)); + { + analyzer->ConnectionEventFast(f, { + analyzer->BuildConnVal(), + val_mgr->GetCount(frame->frame_type()), + val_mgr->GetCount(frame->body_length()), + val_mgr->GetCount(req_func), + }); + } else { - vl->append(val_mgr->GetCount(req_frame_type)); - vl->append(val_mgr->GetCount(req_func)); - vl->append(val_mgr->GetCount(frame->reply()->completion_code())); + analyzer->ConnectionEventFast(f, { + analyzer->BuildConnVal(), + val_mgr->GetCount(frame->frame_type()), + val_mgr->GetCount(frame->body_length()), + val_mgr->GetCount(req_frame_type), + val_mgr->GetCount(req_func), + val_mgr->GetCount(frame->reply()->completion_code()), + }); } - analyzer->ConnectionEvent(f, vl); } } diff --git a/src/analyzer/protocol/ncp/Plugin.cc b/src/analyzer/protocol/ncp/Plugin.cc index fe1de9a250..9ea75a4674 100644 --- a/src/analyzer/protocol/ncp/Plugin.cc +++ b/src/analyzer/protocol/ncp/Plugin.cc @@ -6,7 +6,7 @@ #include "NCP.h" namespace plugin { -namespace Bro_NCP { +namespace Zeek_NCP { class Plugin : public plugin::Plugin { public: @@ -16,7 +16,7 @@ public: AddComponent(new ::analyzer::Component("Contents_NCP", 0)); plugin::Configuration config; - config.name = "Bro::NCP"; + config.name = "Zeek::NCP"; config.description = "NCP analyzer"; return config; } diff --git a/src/analyzer/protocol/ncp/events.bif b/src/analyzer/protocol/ncp/events.bif index 9b5b7d77a7..d7b87d2e27 100644 --- a/src/analyzer/protocol/ncp/events.bif +++ b/src/analyzer/protocol/ncp/events.bif @@ -11,11 +11,11 @@ ## ## func: The requested function, as specified by the protocol. ## -## .. bro:see:: ncp_reply +## .. zeek:see:: ncp_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. 
To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event ncp_request%(c: connection, frame_type: count, length: count, func: count%); @@ -36,11 +36,11 @@ event ncp_request%(c: connection, frame_type: count, length: count, func: count% ## ## completion_code: The reply's completion code, as specified by the protocol. ## -## .. bro:see:: ncp_request +## .. zeek:see:: ncp_request ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event ncp_reply%(c: connection, frame_type: count, length: count, req_frame: count, req_func: count, completion_code: count%); diff --git a/src/analyzer/protocol/netbios/CMakeLists.txt b/src/analyzer/protocol/netbios/CMakeLists.txt index ad6009d171..4ae22a6f42 100644 --- a/src/analyzer/protocol/netbios/CMakeLists.txt +++ b/src/analyzer/protocol/netbios/CMakeLists.txt @@ -1,13 +1,13 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) include_directories(AFTER ${CMAKE_CURRENT_BINARY_DIR}/../dce-rpc) include_directories(AFTER ${CMAKE_CURRENT_BINARY_DIR}/../smb) -bro_plugin_begin(Bro NetBIOS) -bro_plugin_cc(NetbiosSSN.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_bif(functions.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek NetBIOS) +zeek_plugin_cc(NetbiosSSN.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_bif(functions.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/netbios/NetbiosSSN.cc b/src/analyzer/protocol/netbios/NetbiosSSN.cc index 492375b7aa..94812d816c 100644 --- a/src/analyzer/protocol/netbios/NetbiosSSN.cc +++ b/src/analyzer/protocol/netbios/NetbiosSSN.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include @@ -58,12 +58,12 @@ int NetbiosSSN_Interpreter::ParseMessage(unsigned int type, unsigned int flags, { if ( netbios_session_message ) { - val_list* vl = new val_list; - vl->append(analyzer->BuildConnVal()); - vl->append(val_mgr->GetBool(is_query)); - vl->append(val_mgr->GetCount(type)); - vl->append(val_mgr->GetCount(len)); - analyzer->ConnectionEvent(netbios_session_message, vl); + analyzer->ConnectionEventFast(netbios_session_message, { + analyzer->BuildConnVal(), + val_mgr->GetBool(is_query), + val_mgr->GetCount(type), + val_mgr->GetCount(len), + }); } switch ( type ) { @@ -328,13 +328,19 @@ void NetbiosSSN_Interpreter::Event(EventHandlerPtr event, const u_char* data, if ( ! 
event ) return; - val_list* vl = new val_list; - vl->append(analyzer->BuildConnVal()); if ( is_orig >= 0 ) - vl->append(val_mgr->GetBool(is_orig)); - vl->append(new StringVal(new BroString(data, len, 0))); - - analyzer->ConnectionEvent(event, vl); + { + analyzer->ConnectionEventFast(event, { + analyzer->BuildConnVal(), + val_mgr->GetBool(is_orig), + new StringVal(new BroString(data, len, 0)), + }); + } + else + analyzer->ConnectionEventFast(event, { + analyzer->BuildConnVal(), + new StringVal(new BroString(data, len, 0)), + }); } diff --git a/src/analyzer/protocol/netbios/Plugin.cc b/src/analyzer/protocol/netbios/Plugin.cc index 0ec730889d..7f49cdfb09 100644 --- a/src/analyzer/protocol/netbios/Plugin.cc +++ b/src/analyzer/protocol/netbios/Plugin.cc @@ -6,7 +6,7 @@ #include "NetbiosSSN.h" namespace plugin { -namespace Bro_NetBIOS { +namespace Zeek_NetBIOS { class Plugin : public plugin::Plugin { public: @@ -16,7 +16,7 @@ public: AddComponent(new ::analyzer::Component("Contents_NetbiosSSN", 0)); plugin::Configuration config; - config.name = "Bro::NetBIOS"; + config.name = "Zeek::NetBIOS"; config.description = "NetBIOS analyzer support"; return config; } diff --git a/src/analyzer/protocol/netbios/events.bif b/src/analyzer/protocol/netbios/events.bif index 72933f1e49..6d109368f4 100644 --- a/src/analyzer/protocol/netbios/events.bif +++ b/src/analyzer/protocol/netbios/events.bif @@ -1,10 +1,10 @@ -## Generated for all NetBIOS SSN and DGM messages. Bro's NetBIOS analyzer +## Generated for all NetBIOS SSN and DGM messages. Zeek's NetBIOS analyzer ## processes the NetBIOS session service running on TCP port 139, and (despite ## its name!) the NetBIOS datagram service on UDP port 138. ## ## See `Wikipedia `__ for more information ## about NetBIOS. :rfc:`1002` describes -## the packet format for NetBIOS over TCP/IP, which Bro parses. +## the packet format for NetBIOS over TCP/IP, which Zeek parses. ## ## c: The connection, which may be TCP or UDP, depending on the type of the ## NetBIOS session. @@ -16,27 +16,27 @@ ## ## data_len: The length of the message's payload. ## -## .. bro:see:: netbios_session_accepted netbios_session_keepalive +## .. zeek:see:: netbios_session_accepted netbios_session_keepalive ## netbios_session_raw_message netbios_session_rejected netbios_session_request ## netbios_session_ret_arg_resp decode_netbios_name decode_netbios_name_type ## ## .. note:: These days, NetBIOS is primarily used as a transport mechanism for -## `SMB/CIFS `__. Bro's +## `SMB/CIFS `__. Zeek's ## SMB analyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event netbios_session_message%(c: connection, is_orig: bool, msg_type: count, data_len: count%); -## Generated for NetBIOS messages of type *session request*. Bro's NetBIOS +## Generated for NetBIOS messages of type *session request*. Zeek's NetBIOS ## analyzer processes the NetBIOS session service running on TCP port 139, and ## (despite its name!) the NetBIOS datagram service on UDP port 138. ## ## See `Wikipedia `__ for more information ## about NetBIOS. 
:rfc:`1002` describes -## the packet format for NetBIOS over TCP/IP, which Bro parses. +## the packet format for NetBIOS over TCP/IP, which Zeek parses. ## ## c: The connection, which may be TCP or UDP, depending on the type of the ## NetBIOS session. @@ -44,27 +44,27 @@ event netbios_session_message%(c: connection, is_orig: bool, msg_type: count, da ## msg: The raw payload of the message sent, excluding the common NetBIOS ## header. ## -## .. bro:see:: netbios_session_accepted netbios_session_keepalive +## .. zeek:see:: netbios_session_accepted netbios_session_keepalive ## netbios_session_message netbios_session_raw_message netbios_session_rejected ## netbios_session_ret_arg_resp decode_netbios_name decode_netbios_name_type ## ## .. note:: These days, NetBIOS is primarily used as a transport mechanism for -## `SMB/CIFS `__. Bro's +## `SMB/CIFS `__. Zeek's ## SMB analyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event netbios_session_request%(c: connection, msg: string%); -## Generated for NetBIOS messages of type *positive session response*. Bro's +## Generated for NetBIOS messages of type *positive session response*. Zeek's ## NetBIOS analyzer processes the NetBIOS session service running on TCP port ## 139, and (despite its name!) the NetBIOS datagram service on UDP port 138. ## ## See `Wikipedia `__ for more information ## about NetBIOS. :rfc:`1002` describes -## the packet format for NetBIOS over TCP/IP, which Bro parses. +## the packet format for NetBIOS over TCP/IP, which Zeek parses. ## ## c: The connection, which may be TCP or UDP, depending on the type of the ## NetBIOS session. @@ -72,27 +72,27 @@ event netbios_session_request%(c: connection, msg: string%); ## msg: The raw payload of the message sent, excluding the common NetBIOS ## header. ## -## .. bro:see:: netbios_session_keepalive netbios_session_message +## .. zeek:see:: netbios_session_keepalive netbios_session_message ## netbios_session_raw_message netbios_session_rejected netbios_session_request ## netbios_session_ret_arg_resp decode_netbios_name decode_netbios_name_type ## ## .. note:: These days, NetBIOS is primarily used as a transport mechanism for -## `SMB/CIFS `__. Bro's +## `SMB/CIFS `__. Zeek's ## SMB analyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event netbios_session_accepted%(c: connection, msg: string%); -## Generated for NetBIOS messages of type *negative session response*. Bro's +## Generated for NetBIOS messages of type *negative session response*. Zeek's ## NetBIOS analyzer processes the NetBIOS session service running on TCP port ## 139, and (despite its name!) the NetBIOS datagram service on UDP port 138. 
## ## See `Wikipedia `__ for more information ## about NetBIOS. :rfc:`1002` describes -## the packet format for NetBIOS over TCP/IP, which Bro parses. +## the packet format for NetBIOS over TCP/IP, which Zeek parses. ## ## c: The connection, which may be TCP or UDP, depending on the type of the ## NetBIOS session. @@ -100,17 +100,17 @@ event netbios_session_accepted%(c: connection, msg: string%); ## msg: The raw payload of the message sent, excluding the common NetBIOS ## header. ## -## .. bro:see:: netbios_session_accepted netbios_session_keepalive +## .. zeek:see:: netbios_session_accepted netbios_session_keepalive ## netbios_session_message netbios_session_raw_message netbios_session_request ## netbios_session_ret_arg_resp decode_netbios_name decode_netbios_name_type ## ## .. note:: These days, NetBIOS is primarily used as a transport mechanism for -## `SMB/CIFS `__. Bro's +## `SMB/CIFS `__. Zeek's ## SMB analyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event netbios_session_rejected%(c: connection, msg: string%); @@ -122,7 +122,7 @@ event netbios_session_rejected%(c: connection, msg: string%); ## ## See `Wikipedia `__ for more information ## about NetBIOS. :rfc:`1002` describes -## the packet format for NetBIOS over TCP/IP, which Bro parses. +## the packet format for NetBIOS over TCP/IP, which Zeek parses. ## ## c: The connection, which may be TCP or UDP, depending on the type of the ## NetBIOS session. @@ -132,30 +132,30 @@ event netbios_session_rejected%(c: connection, msg: string%); ## msg: The raw payload of the message sent, excluding the common NetBIOS ## header (i.e., the ``user_data``). ## -## .. bro:see:: netbios_session_accepted netbios_session_keepalive +## .. zeek:see:: netbios_session_accepted netbios_session_keepalive ## netbios_session_message netbios_session_rejected netbios_session_request ## netbios_session_ret_arg_resp decode_netbios_name decode_netbios_name_type ## ## .. note:: These days, NetBIOS is primarily used as a transport mechanism for -## `SMB/CIFS `__. Bro's +## `SMB/CIFS `__. Zeek's ## SMB analyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. ## ## .. todo:: This is an oddly named event. In fact, it's probably an odd event ## to have to begin with. ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event netbios_session_raw_message%(c: connection, is_orig: bool, msg: string%); -## Generated for NetBIOS messages of type *retarget response*. Bro's NetBIOS +## Generated for NetBIOS messages of type *retarget response*. Zeek's NetBIOS ## analyzer processes the NetBIOS session service running on TCP port 139, and ## (despite its name!) the NetBIOS datagram service on UDP port 138. 
## ## See `Wikipedia `__ for more information ## about NetBIOS. :rfc:`1002` describes -## the packet format for NetBIOS over TCP/IP, which Bro parses. +## the packet format for NetBIOS over TCP/IP, which Zeek parses. ## ## c: The connection, which may be TCP or UDP, depending on the type of the ## NetBIOS session. @@ -163,29 +163,29 @@ event netbios_session_raw_message%(c: connection, is_orig: bool, msg: string%); ## msg: The raw payload of the message sent, excluding the common NetBIOS ## header. ## -## .. bro:see:: netbios_session_accepted netbios_session_keepalive +## .. zeek:see:: netbios_session_accepted netbios_session_keepalive ## netbios_session_message netbios_session_raw_message netbios_session_rejected ## netbios_session_request decode_netbios_name decode_netbios_name_type ## ## .. note:: These days, NetBIOS is primarily used as a transport mechanism for -## `SMB/CIFS `__. Bro's +## `SMB/CIFS `__. Zeek's ## SMB analyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. ## ## .. todo:: This is an oddly named event. ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event netbios_session_ret_arg_resp%(c: connection, msg: string%); -## Generated for NetBIOS messages of type *keep-alive*. Bro's NetBIOS analyzer +## Generated for NetBIOS messages of type *keep-alive*. Zeek's NetBIOS analyzer ## processes the NetBIOS session service running on TCP port 139, and (despite ## its name!) the NetBIOS datagram service on UDP port 138. ## ## See `Wikipedia `__ for more information ## about NetBIOS. :rfc:`1002` describes -## the packet format for NetBIOS over TCP/IP, which Bro parses. +## the packet format for NetBIOS over TCP/IP, which Zeek parses. ## ## c: The connection, which may be TCP or UDP, depending on the type of the ## NetBIOS session. @@ -193,17 +193,17 @@ event netbios_session_ret_arg_resp%(c: connection, msg: string%); ## msg: The raw payload of the message sent, excluding the common NetBIOS ## header. ## -## .. bro:see:: netbios_session_accepted netbios_session_message +## .. zeek:see:: netbios_session_accepted netbios_session_message ## netbios_session_raw_message netbios_session_rejected netbios_session_request ## netbios_session_ret_arg_resp decode_netbios_name decode_netbios_name_type ## ## .. note:: These days, NetBIOS is primarily used as a transport mechanism for -## `SMB/CIFS `__. Bro's +## `SMB/CIFS `__. Zeek's ## SMB analyzer parses both SMB-over-NetBIOS and SMB-over-TCP on port 445. ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. 
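As with the other events in this family, a handler only becomes useful once the analyzer is enabled (see the port-registration sketch earlier, using 139/tcp for the NetBIOS session service). A hypothetical handler for the session-message event documented above, not part of this patch:

    # Sketch only: report basic NetBIOS session-service activity.
    event netbios_session_message(c: connection, is_orig: bool, msg_type: count, data_len: count)
        {
        print fmt("NetBIOS %s message, type %d, %d bytes of payload",
                  is_orig ? "originator" : "responder", msg_type, data_len);
        }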
event netbios_session_keepalive%(c: connection, msg: string%); diff --git a/src/analyzer/protocol/netbios/functions.bif b/src/analyzer/protocol/netbios/functions.bif index f92402a3e8..c86156931f 100644 --- a/src/analyzer/protocol/netbios/functions.bif +++ b/src/analyzer/protocol/netbios/functions.bif @@ -5,7 +5,7 @@ ## ## Returns: The decoded NetBIOS name, e.g., ``"THE NETBIOS NAME"``. ## -## .. bro:see:: decode_netbios_name_type +## .. zeek:see:: decode_netbios_name_type function decode_netbios_name%(name: string%): string %{ char buf[16]; @@ -41,7 +41,7 @@ function decode_netbios_name%(name: string%): string ## ## Returns: The numeric value of *name*. ## -## .. bro:see:: decode_netbios_name +## .. zeek:see:: decode_netbios_name function decode_netbios_name_type%(name: string%): count %{ const u_char* s = name->Bytes(); diff --git a/src/analyzer/protocol/ntlm/CMakeLists.txt b/src/analyzer/protocol/ntlm/CMakeLists.txt index fe2d4115e9..e2e627f36b 100644 --- a/src/analyzer/protocol/ntlm/CMakeLists.txt +++ b/src/analyzer/protocol/ntlm/CMakeLists.txt @@ -1,15 +1,15 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro NTLM) -bro_plugin_cc(NTLM.cc Plugin.cc) -bro_plugin_bif(types.bif events.bif) -bro_plugin_pac( +zeek_plugin_begin(Zeek NTLM) +zeek_plugin_cc(NTLM.cc Plugin.cc) +zeek_plugin_bif(types.bif events.bif) +zeek_plugin_pac( ntlm.pac ntlm-protocol.pac ntlm-analyzer.pac ) -bro_plugin_end() +zeek_plugin_end() diff --git a/src/analyzer/protocol/ntlm/Plugin.cc b/src/analyzer/protocol/ntlm/Plugin.cc index a9450537b5..e85b0cff17 100644 --- a/src/analyzer/protocol/ntlm/Plugin.cc +++ b/src/analyzer/protocol/ntlm/Plugin.cc @@ -5,7 +5,7 @@ #include "NTLM.h" namespace plugin { -namespace Bro_NTLM { +namespace Zeek_NTLM { class Plugin : public plugin::Plugin { public: @@ -14,7 +14,7 @@ public: AddComponent(new ::analyzer::Component("NTLM", ::analyzer::ntlm::NTLM_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::NTLM"; + config.name = "Zeek::NTLM"; config.description = "NTLM analyzer"; return config; } diff --git a/src/analyzer/protocol/ntlm/events.bif b/src/analyzer/protocol/ntlm/events.bif index a36d653968..88def089fa 100644 --- a/src/analyzer/protocol/ntlm/events.bif +++ b/src/analyzer/protocol/ntlm/events.bif @@ -4,7 +4,7 @@ ## ## negotiate: The parsed data of the :abbr:`NTLM (NT LAN Manager)` message. See init-bare for more details. ## -## .. bro:see:: ntlm_challenge ntlm_authenticate +## .. zeek:see:: ntlm_challenge ntlm_authenticate event ntlm_negotiate%(c: connection, negotiate: NTLM::Negotiate%); ## Generated for :abbr:`NTLM (NT LAN Manager)` messages of type *challenge*. @@ -13,7 +13,7 @@ event ntlm_negotiate%(c: connection, negotiate: NTLM::Negotiate%); ## ## negotiate: The parsed data of the :abbr:`NTLM (NT LAN Manager)` message. See init-bare for more details. ## -## .. bro:see:: ntlm_negotiate ntlm_authenticate +## .. zeek:see:: ntlm_negotiate ntlm_authenticate event ntlm_challenge%(c: connection, challenge: NTLM::Challenge%); ## Generated for :abbr:`NTLM (NT LAN Manager)` messages of type *authenticate*. @@ -22,5 +22,5 @@ event ntlm_challenge%(c: connection, challenge: NTLM::Challenge%); ## ## request: The parsed data of the :abbr:`NTLM (NT LAN Manager)` message. See init-bare for more details. ## -## .. bro:see:: ntlm_negotiate ntlm_challenge +## .. 
zeek:see:: ntlm_negotiate ntlm_challenge event ntlm_authenticate%(c: connection, request: NTLM::Authenticate%); diff --git a/src/analyzer/protocol/ntlm/ntlm-analyzer.pac b/src/analyzer/protocol/ntlm/ntlm-analyzer.pac index c72a9d249a..0f0d842570 100644 --- a/src/analyzer/protocol/ntlm/ntlm-analyzer.pac +++ b/src/analyzer/protocol/ntlm/ntlm-analyzer.pac @@ -94,6 +94,9 @@ refine connection NTLM_Conn += { function proc_ntlm_negotiate(val: NTLM_Negotiate): bool %{ + if ( ! ntlm_negotiate ) + return true; + RecordVal* result = new RecordVal(BifType::Record::NTLM::Negotiate); result->Assign(0, build_negotiate_flag_record(${val.flags})); @@ -115,6 +118,9 @@ refine connection NTLM_Conn += { function proc_ntlm_challenge(val: NTLM_Challenge): bool %{ + if ( ! ntlm_challenge ) + return true; + RecordVal* result = new RecordVal(BifType::Record::NTLM::Challenge); result->Assign(0, build_negotiate_flag_record(${val.flags})); @@ -136,6 +142,9 @@ refine connection NTLM_Conn += { function proc_ntlm_authenticate(val: NTLM_Authenticate): bool %{ + if ( ! ntlm_authenticate ) + return true; + RecordVal* result = new RecordVal(BifType::Record::NTLM::Authenticate); result->Assign(0, build_negotiate_flag_record(${val.flags})); diff --git a/src/analyzer/protocol/ntp/CMakeLists.txt b/src/analyzer/protocol/ntp/CMakeLists.txt index a8b8bb1872..863e9798aa 100644 --- a/src/analyzer/protocol/ntp/CMakeLists.txt +++ b/src/analyzer/protocol/ntp/CMakeLists.txt @@ -1,9 +1,10 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro NTP) -bro_plugin_cc(NTP.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek NTP) +zeek_plugin_cc(NTP.cc Plugin.cc) +zeek_plugin_bif(types.bif events.bif) +zeek_plugin_pac(ntp.pac ntp-analyzer.pac ntp-mode7.pac ntp-protocol.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/ntp/NTP.cc b/src/analyzer/protocol/ntp/NTP.cc index 631d5bc3e9..2442c7ee68 100644 --- a/src/analyzer/protocol/ntp/NTP.cc +++ b/src/analyzer/protocol/ntp/NTP.cc @@ -1,22 +1,15 @@ -// See the file "COPYING" in the main distribution directory for copyright. - -#include "bro-config.h" - -#include "NetVar.h" #include "NTP.h" -#include "Sessions.h" -#include "Event.h" + +#include "Reporter.h" #include "events.bif.h" -using namespace analyzer::ntp; +using namespace analyzer::NTP; -NTP_Analyzer::NTP_Analyzer(Connection* conn) - : Analyzer("NTP", conn) +NTP_Analyzer::NTP_Analyzer(Connection* c) + : analyzer::Analyzer("NTP", c) { - ADD_ANALYZER_TIMER(&NTP_Analyzer::ExpireTimer, - network_time + ntp_session_timeout, 1, - TIMER_NTP_EXPIRE); + interp = new binpac::NTP::NTP_Conn(this); } void NTP_Analyzer::Done() @@ -25,88 +18,22 @@ void NTP_Analyzer::Done() Event(udp_session_done); } -void NTP_Analyzer::DeliverPacket(int len, const u_char* data, bool is_orig, uint64 seq, const IP_Hdr* ip, int caplen) +NTP_Analyzer::~NTP_Analyzer() { - Analyzer::DeliverPacket(len, data, is_orig, seq, ip, caplen); - - // Actually we could just get rid of the Request/Reply and simply use - // the code of Message(). But for now we use it as an example of how - // to convert an old-style UDP analyzer. 
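The guards added to ntlm-analyzer.pac above, and the analogous ones in the RDP and RFB analyzers further below, follow one pattern: test whether an event has any handlers before building its argument RecordVals, since the generated handler object evaluates to false when no script cares. A self-contained C++ sketch of the idiom, in which EventHandler and the placeholder "record" string are hypothetical stand-ins for Zeek's EventHandlerPtr and RecordVal machinery:

    #include <functional>
    #include <string>
    #include <vector>

    // Hypothetical stand-ins; only the shape of the guard matters here.
    struct EventHandler
        {
        std::vector<std::function<void(const std::string&)>> bodies;
        explicit operator bool() const { return ! bodies.empty(); }
        };

    static EventHandler ntlm_negotiate;  // no script handler registered

    static bool proc_ntlm_negotiate(const std::string& raw)
        {
        if ( ! ntlm_negotiate )
            return true;  // nobody listening: skip building the argument record

        std::string record = "NTLM::Negotiate built from " + raw;  // placeholder work

        for ( const auto& body : ntlm_negotiate.bodies )
            body(record);

        return true;
        }

    int main()
        {
        proc_ntlm_negotiate("\x01\x02");  // returns early, nothing built for the event
        return 0;
        }

The early return costs one branch per message, whereas unconditionally materializing the record costs allocations that are thrown away whenever the event is unhandled.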
- if ( is_orig ) - Request(data, len); - else - Reply(data, len); + delete interp; } -int NTP_Analyzer::Request(const u_char* data, int len) +void NTP_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, + uint64 seq, const IP_Hdr* ip, int caplen) { - Message(data, len); - return 1; - } + Analyzer::DeliverPacket(len, data, orig, seq, ip, caplen); -int NTP_Analyzer::Reply(const u_char* data, int len) - { - Message(data, len); - return 1; - } - -void NTP_Analyzer::Message(const u_char* data, int len) - { - if ( (unsigned) len < sizeof(struct ntpdata) ) + try { - Weird("truncated_NTP"); - return; + interp->NewData(orig, data, data + len); + } + catch ( const binpac::Exception& e ) + { + ProtocolViolation(fmt("Binpac exception: %s", e.c_msg())); } - - struct ntpdata* ntp_data = (struct ntpdata *) data; - len -= sizeof *ntp_data; - data += sizeof *ntp_data; - - RecordVal* msg = new RecordVal(ntp_msg); - - unsigned int code = ntp_data->status & 0x7; - - msg->Assign(0, val_mgr->GetCount((unsigned int) (ntohl(ntp_data->refid)))); - msg->Assign(1, val_mgr->GetCount(code)); - msg->Assign(2, val_mgr->GetCount((unsigned int) ntp_data->stratum)); - msg->Assign(3, val_mgr->GetCount((unsigned int) ntp_data->ppoll)); - msg->Assign(4, val_mgr->GetInt((unsigned int) ntp_data->precision)); - msg->Assign(5, new Val(ShortFloat(ntp_data->distance), TYPE_INTERVAL)); - msg->Assign(6, new Val(ShortFloat(ntp_data->dispersion), TYPE_INTERVAL)); - msg->Assign(7, new Val(LongFloat(ntp_data->reftime), TYPE_TIME)); - msg->Assign(8, new Val(LongFloat(ntp_data->org), TYPE_TIME)); - msg->Assign(9, new Val(LongFloat(ntp_data->rec), TYPE_TIME)); - msg->Assign(10, new Val(LongFloat(ntp_data->xmt), TYPE_TIME)); - - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(msg); - vl->append(new StringVal(new BroString(data, len, 0))); - - ConnectionEvent(ntp_message, vl); - } - -double NTP_Analyzer::ShortFloat(struct s_fixedpt fp) - { - return ConvertToDouble(ntohs(fp.int_part), ntohs(fp.fraction), 65536.0); - } - -double NTP_Analyzer::LongFloat(struct l_fixedpt fp) - { - double t = ConvertToDouble(ntohl(fp.int_part), ntohl(fp.fraction), - 4294967296.0); - - return t ? t - JAN_1970 : 0.0; - } - -double NTP_Analyzer::ConvertToDouble(unsigned int int_part, - unsigned int fraction, double frac_base) - { - return double(int_part) + double(fraction) / frac_base; - } - -void NTP_Analyzer::ExpireTimer(double /* t */) - { - Event(connection_timeout); - sessions->Remove(Conn()); } diff --git a/src/analyzer/protocol/ntp/NTP.h b/src/analyzer/protocol/ntp/NTP.h index 5b5d3d7baa..c155fb3135 100644 --- a/src/analyzer/protocol/ntp/NTP.h +++ b/src/analyzer/protocol/ntp/NTP.h @@ -1,69 +1,32 @@ -// See the file "COPYING" in the main distribution directory for copyright. - #ifndef ANALYZER_PROTOCOL_NTP_NTP_H #define ANALYZER_PROTOCOL_NTP_NTP_H +#include "events.bif.h" +#include "types.bif.h" + #include "analyzer/protocol/udp/UDP.h" -// The following are from the tcpdump distribution, credited there -// to the U of MD implementation. 
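The removed ShortFloat()/LongFloat()/ConvertToDouble() helpers above, and proc_ntp_timestamp() in the new ntp-analyzer.pac further below, perform the same arithmetic: an NTP timestamp is seconds since 1900-01-01 plus a 32-bit binary fraction, so Unix time falls out by adding fraction/2^32 and subtracting the 2208988800-second offset between the two epochs. A minimal standalone sketch of that conversion:

    #include <cstdint>
    #include <cstdio>

    // Convert a 64-bit NTP timestamp (seconds since 1900-01-01 plus a 32-bit
    // binary fraction) to a Unix-epoch double, mirroring the removed
    // LongFloat()/ConvertToDouble() pair and proc_ntp_timestamp() in the
    // new ntp-analyzer.pac.
    static double ntp_to_unix(uint32_t seconds, uint32_t fraction)
        {
        const double epoch_offset = 2208988800.0;  // seconds from 1900 to 1970

        if ( seconds == 0 && fraction == 0 )
            return 0.0;  // treated as "not set", as the analyzer does

        return (static_cast<double>(seconds) - epoch_offset)
               + static_cast<double>(fraction) / 4294967296.0;  // fraction * 2^-32
        }

    int main()
        {
        // An arbitrary timestamp; a fraction of 0x80000000 contributes exactly 0.5s.
        std::printf("%.6f\n", ntp_to_unix(0xE5E6A77Eu, 0x80000000u));
        return 0;
        }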
+#include "ntp_pac.h" -#define JAN_1970 2208988800.0 /* 1970 - 1900 in seconds */ - -namespace analyzer { namespace ntp { - -struct l_fixedpt { - unsigned int int_part; - unsigned int fraction; -}; - -struct s_fixedpt { - unsigned short int_part; - unsigned short fraction; -}; - -struct ntpdata { - unsigned char status; /* status of local clock and leap info */ - unsigned char stratum; /* Stratum level */ - unsigned char ppoll; /* poll value */ - int precision:8; - struct s_fixedpt distance; - struct s_fixedpt dispersion; - unsigned int refid; - struct l_fixedpt reftime; - struct l_fixedpt org; - struct l_fixedpt rec; - struct l_fixedpt xmt; -}; +namespace analyzer { namespace NTP { class NTP_Analyzer : public analyzer::Analyzer { public: explicit NTP_Analyzer(Connection* conn); + ~NTP_Analyzer() override; + + // Overriden from Analyzer. + void Done() override; + void DeliverPacket(int len, const u_char* data, bool orig, + uint64 seq, const IP_Hdr* ip, int caplen) override; static analyzer::Analyzer* Instantiate(Connection* conn) { return new NTP_Analyzer(conn); } protected: - void Done() override; - void DeliverPacket(int len, const u_char* data, bool orig, - uint64 seq, const IP_Hdr* ip, int caplen) override; - - int Request(const u_char* data, int len); - int Reply(const u_char* data, int len); - - // NTP is a unidirectional protocol, so no notion of "requests" - // as separate from "replies". - void Message(const u_char* data, int len); - - double ShortFloat(struct s_fixedpt fp); - double LongFloat(struct l_fixedpt fp); - double ConvertToDouble(unsigned int int_part, unsigned int fraction, - double frac_base); - - friend class ConnectionTimer; - void ExpireTimer(double t); + binpac::NTP::NTP_Conn* interp; }; -} } // namespace analyzer::* +} } // namespace analyzer::* #endif diff --git a/src/analyzer/protocol/ntp/Plugin.cc b/src/analyzer/protocol/ntp/Plugin.cc index 3399fbb867..edb2b8c3d7 100644 --- a/src/analyzer/protocol/ntp/Plugin.cc +++ b/src/analyzer/protocol/ntp/Plugin.cc @@ -1,21 +1,20 @@ // See the file in the main distribution directory for copyright. - #include "plugin/Plugin.h" #include "NTP.h" namespace plugin { -namespace Bro_NTP { +namespace Zeek_NTP { class Plugin : public plugin::Plugin { public: plugin::Configuration Configure() { - AddComponent(new ::analyzer::Component("NTP", ::analyzer::ntp::NTP_Analyzer::Instantiate)); + AddComponent(new ::analyzer::Component("NTP", ::analyzer::NTP::NTP_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::NTP"; + config.name = "Zeek::NTP"; config.description = "NTP analyzer"; return config; } diff --git a/src/analyzer/protocol/ntp/events.bif b/src/analyzer/protocol/ntp/events.bif index bba2dfbbe5..11e1fcfe0e 100644 --- a/src/analyzer/protocol/ntp/events.bif +++ b/src/analyzer/protocol/ntp/events.bif @@ -1,21 +1,12 @@ -## Generated for all NTP messages. Different from many other of Bro's events, +## Generated for all NTP messages. Different from many other of Zeek's events, ## this one is generated for both client-side and server-side messages. ## ## See `Wikipedia `__ for ## more information about the NTP protocol. ## -## u: The connection record describing the corresponding UDP flow. +## c: The connection record describing the corresponding UDP flow. +## +## is_orig: True if the message was sent by the originator. ## ## msg: The parsed NTP message. -## -## excess: The raw bytes of any optional parts of the NTP packet. Bro does not -## further parse any optional fields. -## -## .. 
bro:see:: ntp_session_timeout -## -## .. todo:: Bro's current default configuration does not activate the protocol -## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to -## register a port for it or add a DPD payload signature. -event ntp_message%(u: connection, msg: ntp_msg, excess: string%); - +event ntp_message%(c: connection, is_orig: bool, msg: NTP::Message%); diff --git a/src/analyzer/protocol/ntp/ntp-analyzer.pac b/src/analyzer/protocol/ntp/ntp-analyzer.pac new file mode 100644 index 0000000000..226a71236e --- /dev/null +++ b/src/analyzer/protocol/ntp/ntp-analyzer.pac @@ -0,0 +1,160 @@ + +%extern{ + #include + #define FRAC_16 pow(2,-16) + #define FRAC_32 pow(2,-32) + // NTP defines the epoch from 1900, not 1970 + #define EPOCH_OFFSET -2208988800 +%} + +%header{ + Val* proc_ntp_short(const NTP_Short_Time* t); + Val* proc_ntp_timestamp(const NTP_Time* t); +%} + + +%code{ + Val* proc_ntp_short(const NTP_Short_Time* t) + { + if ( t->seconds() == 0 && t->fractions() == 0 ) + return new Val(0.0, TYPE_INTERVAL); + return new Val(t->seconds() + t->fractions()*FRAC_16, TYPE_INTERVAL); + } + + Val* proc_ntp_timestamp(const NTP_Time* t) + { + if ( t->seconds() == 0 && t->fractions() == 0) + return new Val(0.0, TYPE_TIME); + return new Val(EPOCH_OFFSET + t->seconds() + t->fractions()*FRAC_32, TYPE_TIME); + } +%} + + +refine flow NTP_Flow += { + + # This builds the standard msg record + function BuildNTPStdMsg(nsm: NTP_std_msg): BroVal + %{ + RecordVal* rv = new RecordVal(BifType::Record::NTP::StandardMessage); + + rv->Assign(0, val_mgr->GetCount(${nsm.stratum})); + rv->Assign(1, new Val(pow(2, ${nsm.poll}), TYPE_INTERVAL)); + rv->Assign(2, new Val(pow(2, ${nsm.precision}), TYPE_INTERVAL)); + rv->Assign(3, proc_ntp_short(${nsm.root_delay})); + rv->Assign(4, proc_ntp_short(${nsm.root_dispersion})); + + switch ( ${nsm.stratum} ) { + case 0: + // unknown stratum => kiss code + rv->Assign(5, bytestring_to_val(${nsm.reference_id})); + break; + case 1: + // reference clock => ref clock string + rv->Assign(6, bytestring_to_val(${nsm.reference_id})); + break; + default: + { + const uint8* d = ${nsm.reference_id}.data(); + rv->Assign(7, new AddrVal(IPAddr(IPv4, (const uint32*) d, IPAddr::Network))); + } + break; + } + + rv->Assign(8, proc_ntp_timestamp(${nsm.reference_ts})); + rv->Assign(9, proc_ntp_timestamp(${nsm.origin_ts})); + rv->Assign(10, proc_ntp_timestamp(${nsm.receive_ts})); + rv->Assign(11, proc_ntp_timestamp(${nsm.transmit_ts})); + + if ( ${nsm.mac_len} == 20 ) + { + rv->Assign(12, val_mgr->GetCount(${nsm.mac.key_id})); + rv->Assign(13, bytestring_to_val(${nsm.mac.digest})); + } + else if ( ${nsm.mac_len} == 24 ) + { + rv->Assign(12, val_mgr->GetCount(${nsm.mac_ext.key_id})); + rv->Assign(13, bytestring_to_val(${nsm.mac_ext.digest})); + } + + if ( ${nsm.has_exts} ) + { + // TODO: add extension fields + rv->Assign(14, val_mgr->GetCount((uint32) ${nsm.exts}->size())); + } + + return rv; + %} + + # This builds the control msg record + function BuildNTPControlMsg(ncm: NTP_control_msg): BroVal + %{ + RecordVal* rv = new RecordVal(BifType::Record::NTP::ControlMessage); + + rv->Assign(0, val_mgr->GetCount(${ncm.OpCode})); + rv->Assign(1, val_mgr->GetBool(${ncm.R})); + rv->Assign(2, val_mgr->GetBool(${ncm.E})); + rv->Assign(3, val_mgr->GetBool(${ncm.M})); + rv->Assign(4, val_mgr->GetCount(${ncm.sequence})); + rv->Assign(5, val_mgr->GetCount(${ncm.status})); + rv->Assign(6, 
val_mgr->GetCount(${ncm.association_id})); + + if ( ${ncm.c} > 0 ) + rv->Assign(7, bytestring_to_val(${ncm.data})); + + if ( ${ncm.has_control_mac} ) + { + rv->Assign(8, val_mgr->GetCount(${ncm.mac.key_id})); + rv->Assign(9, bytestring_to_val(${ncm.mac.crypto_checksum})); + } + + return rv; + %} + + # This builds the mode7 msg record + function BuildNTPMode7Msg(m7: NTP_mode7_msg): BroVal + %{ + RecordVal* rv = new RecordVal(BifType::Record::NTP::Mode7Message); + + rv->Assign(0, val_mgr->GetCount(${m7.request_code})); + rv->Assign(1, val_mgr->GetBool(${m7.auth_bit})); + rv->Assign(2, val_mgr->GetCount(${m7.sequence})); + rv->Assign(3, val_mgr->GetCount(${m7.implementation})); + rv->Assign(4, val_mgr->GetCount(${m7.error_code})); + + if ( ${m7.data_len} > 0 ) + rv->Assign(5, bytestring_to_val(${m7.data})); + + return rv; + %} + + + function proc_ntp_message(msg: NTP_PDU): bool + %{ + connection()->bro_analyzer()->ProtocolConfirmation(); + + if ( ! ntp_message ) + return false; + + RecordVal* rv = new RecordVal(BifType::Record::NTP::Message); + rv->Assign(0, val_mgr->GetCount(${msg.version})); + rv->Assign(1, val_mgr->GetCount(${msg.mode})); + + // The standard record + if ( ${msg.mode} > 0 && ${msg.mode} < 6 ) + rv->Assign(2, BuildNTPStdMsg(${msg.std})); + else if ( ${msg.mode} == 6 ) + rv->Assign(3, BuildNTPControlMsg(${msg.control})); + else if ( ${msg.mode} == 7 ) + rv->Assign(4, BuildNTPMode7Msg(${msg.mode7})); + + BifEvent::generate_ntp_message(connection()->bro_analyzer(), + connection()->bro_analyzer()->Conn(), + is_orig(), rv); + return true; + %} +}; + +refine typeattr NTP_PDU += &let { + proc: bool = $context.flow.proc_ntp_message(this); +}; + diff --git a/src/analyzer/protocol/ntp/ntp-mode7.pac b/src/analyzer/protocol/ntp/ntp-mode7.pac new file mode 100644 index 0000000000..0e4f16738d --- /dev/null +++ b/src/analyzer/protocol/ntp/ntp-mode7.pac @@ -0,0 +1,123 @@ +# This is a mode7 message as described below. +# The documentation below is taken from the NTP official project (www.ntp.org), +# code v. ntp-4.2.8p13, in include/ntp_request.h. +# +# A mode 7 packet is used exchanging data between an NTP server +# and a client for purposes other than time synchronization, e.g. +# monitoring, statistics gathering and configuration. A mode 7 +# packet has the following format: +# +# 0 1 2 3 +# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# |R|M| VN | Mode|A| Sequence | Implementation| Req Code | +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# | Err | Number of data items | MBZ | Size of data item | +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# | | +# | Data (Minimum 0 octets, maximum 500 octets) | +# | | +# [...] +# | | +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# | Encryption Keyid (when A bit set) | +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# | | +# | Message Authentication Code (when A bit set) | +# | | +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# +# where the fields are (note that the client sends requests, the server +# responses): +# +# Response Bit: This packet is a response (if clear, packet is a request). +# +# More Bit: Set for all packets but the last in a response which +# requires more than one packet. +# +# Version Number: 2 for current version +# +# Mode: Always 7 +# +# Authenticated bit: If set, this packet is authenticated. 
+# +# Sequence number: For a multipacket response, contains the sequence +# number of this packet. 0 is the first in the sequence, +# 127 (or less) is the last. The More Bit must be set in +# all packets but the last. +# +# Implementation number: The number of the implementation this request code +# is defined by. An implementation number of zero is used +# for requst codes/data formats which all implementations +# agree on. Implementation number 255 is reserved (for +# extensions, in case we run out). +# +# Request code: An implementation-specific code which specifies the +# operation to be (which has been) performed and/or the +# format and semantics of the data included in the packet. +# +# Err: Must be 0 for a request. For a response, holds an error +# code relating to the request. If nonzero, the operation +# requested wasn't performed. +# +# 0 - no error +# 1 - incompatible implementation number +# 2 - unimplemented request code +# 3 - format error (wrong data items, data size, packet size etc.) +# 4 - no data available (e.g. request for details on unknown peer) +# 5-6 I don't know +# 7 - authentication failure (i.e. permission denied) +# +# Number of data items: number of data items in packet. 0 to 500 +# +# MBZ: A reserved data field, must be zero in requests and responses. +# +# Size of data item: size of each data item in packet. 0 to 500 +# +# Data: Variable sized area containing request/response data. For +# requests and responses the size in octets must be greater +# than or equal to the product of the number of data items +# and the size of a data item. For requests the data area +# must be exactly 40 octets in length. For responses the +# data area may be any length between 0 and 500 octets +# inclusive. +# +# Message Authentication Code: Same as NTP spec, in definition and function. +# May optionally be included in requests which require +# authentication, is never included in responses. +# +# The version number, mode and keyid have the same function and are +# in the same location as a standard NTP packet. The request packet +# is the same size as a standard NTP packet to ease receive buffer +# management, and to allow the same encryption procedure to be used +# both on mode 7 and standard NTP packets. The mac is included when +# it is required that a request be authenticated, the keyid should be +# zero in requests in which the mac is not included. +# +# The data format depends on the implementation number/request code pair +# and whether the packet is a request or a response. The only requirement +# is that data items start in the octet immediately following the size +# word and that data items be concatenated without padding between (i.e. +# if the data area is larger than data_items*size, all padding is at +# the end). Padding is ignored, other than for encryption purposes. +# Implementations using encryption might want to include a time stamp +# or other data in the request packet padding. The key used for requests +# is implementation defined, but key 15 is suggested as a default. 
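The NTP_mode7_msg record defined next derives its flag and length fields from these packed header words with &let expressions. The same bit arithmetic in plain C++, purely as an illustration (the struct, its field names, and the sample packet bytes are invented here):

    #include <cstdint>
    #include <cstdio>

    // Field extraction for the first six octets of a mode-7 packet, mirroring
    // the &let derivations in NTP_PDU and NTP_mode7_msg.
    struct Mode7Header
        {
        bool response;            // R
        bool more;                // M
        uint8_t version;          // VN
        uint8_t mode;             // always 7 for these packets
        bool authenticated;       // A
        uint8_t sequence;
        uint8_t implementation;
        uint8_t request_code;
        uint8_t error_code;
        uint16_t num_items;       // the pac record exposes this as data_len
        };

    static Mode7Header parse_mode7_header(const uint8_t* p)
        {
        Mode7Header h;
        h.response       = p[0] & 0x80;
        h.more           = p[0] & 0x40;
        h.version        = (p[0] & 0x38) >> 3;
        h.mode           = p[0] & 0x07;
        h.authenticated  = p[1] & 0x80;
        h.sequence       = p[1] & 0x7F;
        h.implementation = p[2];
        h.request_code   = p[3];

        uint16_t err_and_len = static_cast<uint16_t>(p[4] << 8 | p[5]);  // big-endian
        h.error_code = (err_and_len & 0xF000) >> 12;
        h.num_items  = err_and_len & 0x0FFF;
        return h;
        }

    int main()
        {
        // A made-up request: version 2, mode 7, implementation 3, request code 42.
        const uint8_t pkt[6] = { 0x17, 0x00, 0x03, 0x2a, 0x00, 0x00 };
        Mode7Header h = parse_mode7_header(pkt);
        std::printf("mode=%u req=%u err=%u items=%u\n",
                    unsigned(h.mode), unsigned(h.request_code),
                    unsigned(h.error_code), unsigned(h.num_items));
        return 0;
        }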
+ +type NTP_mode7_msg = record { + second_byte : uint8; + implementation : uint8; + request_code : uint8; + err_and_data_len : uint16; + data : bytestring &length=data_len; + have_mac : case(auth_bit) of { + true -> mac: NTP_MAC; + false -> nil: empty; + }; +} &let { + auth_bit : bool = (second_byte & 0x80) > 0; + sequence : uint8 = (second_byte & 0x7F); + error_code: uint8 = (err_and_data_len & 0xF000) >> 12; + data_len : uint16 = (err_and_data_len & 0x0FFF); +} &byteorder=bigendian &exportsourcedata; + diff --git a/src/analyzer/protocol/ntp/ntp-protocol.pac b/src/analyzer/protocol/ntp/ntp-protocol.pac new file mode 100644 index 0000000000..2c31832f09 --- /dev/null +++ b/src/analyzer/protocol/ntp/ntp-protocol.pac @@ -0,0 +1,131 @@ +# This is the common part in the header format. +# See RFC 5905 for details +type NTP_PDU(is_orig: bool) = record { + # The first byte of the NTP header contains the leap indicator, + # the version and the mode + first_byte: uint8; + + # Modes 1-5 are standard NTP time sync + standard_modes: case ( mode >= 1 && mode <=5 ) of { + true -> std : NTP_std_msg; + false -> emp : empty; + }; + + modes_6_7: case ( mode ) of { + # Mode 6 is for control messages (format is different from modes 6-7) + 6 -> control: NTP_control_msg; + # Mode 7 is reserved or private (and implementation dependent). + # For example used for some commands such as MONLIST + 7 -> mode7: NTP_mode7_msg; + default -> unknown: bytestring &restofdata; + }; +} &let { + leap: uint8 = (first_byte & 0xc0)>>6; # First 2 bits of 8-bits value + version: uint8 = (first_byte & 0x38)>>3; # Bits 3-5 of 8-bits value + mode: uint8 = (first_byte & 0x07); # Bits 6-8 of 8-bits value +} &byteorder=bigendian &exportsourcedata; + +# This is the most common type of message, corresponding to modes 1-5 +# This kind of msg are used for normal operation of syncronization +# See RFC 5905 for details +type NTP_std_msg = record { + stratum: uint8; + poll: int8; + precision: int8; + + root_delay: NTP_Short_Time; + root_dispersion: NTP_Short_Time; + + reference_id: bytestring &length=4; + reference_ts: NTP_Time; + + origin_ts: NTP_Time; + receive_ts: NTP_Time; + transmit_ts: NTP_Time; + + extensions: case ( has_exts ) of { + true -> exts: Extension_Field[] &until($input.length() <= 24); + false -> nil: empty; + } &requires(has_exts); + + mac_fields: case ( mac_len ) of { + 20 -> mac: NTP_MAC; + 24 -> mac_ext: NTP_MAC_ext; + default -> nil2: empty; + } &requires(mac_len); +} &let { + length = sourcedata.length(); + has_exts: bool = (length - offsetof(extensions)) > 24; + mac_len: uint32 = (length - offsetof(mac_fields)); +} &byteorder=bigendian &exportsourcedata; + +# This format is for mode==6, control msg +# See RFC 1119 for details +type NTP_control_msg = record { + second_byte: uint8; + sequence: uint16; + status: uint16; #TODO: this can be further parsed internally + association_id: uint16; + offs: uint16; + c: uint16; + data: bytestring &length=c; + + mac_fields: case ( has_control_mac ) of { + true -> mac: NTP_CONTROL_MAC; + false -> nil: empty; + } &requires(has_control_mac); +} &let { + R: bool = (second_byte & 0x80) > 0; # First bit of 8-bits value + E: bool = (second_byte & 0x40) > 0; # Second bit of 8-bits value + M: bool = (second_byte & 0x20) > 0; # Third bit of 8-bits value + OpCode: uint8 = (second_byte & 0x1F); # Last 5 bits of 8-bits value + length = sourcedata.length(); + has_control_mac: bool = (length - offsetof(mac_fields)) == 12; +} &byteorder=bigendian &exportsourcedata; + +# As in RFC 5905 +type NTP_MAC = 
record { + key_id: uint32; + digest: bytestring &length=16; +} &length=20; + +# As in RFC 5906, same as NTP_MAC but with a 160 bit digest +type NTP_MAC_ext = record { + key_id: uint32; + digest: bytestring &length=20; +} &length=24; + +# As in RFC 1119 +type NTP_CONTROL_MAC = record { + key_id: uint32; + crypto_checksum: bytestring &length=8; +} &length=12; + +# As defined in RFC 5906 +type Extension_Field = record { + first_byte_ext: uint8; + field_type: uint8; + len: uint16; + association_id: uint16; + timestamp: uint32; + filestamp: uint32; + value_len: uint32; + value: bytestring &length=value_len; + sig_len: uint32; + signature: bytestring &length=sig_len; + pad: padding to (len - offsetof(first_byte_ext)); +} &let { + R: bool = (first_byte_ext & 0x80) > 0; # First bit of 8-bits value + E: bool = (first_byte_ext & 0x40) > 0; # Second bit of 8-bits value + Code: uint8 = (first_byte_ext & 0x3F); # Last 6 bits of 8-bits value +}; + +type NTP_Short_Time = record { + seconds: int16; + fractions: int16; +}; + +type NTP_Time = record { + seconds: uint32; + fractions: uint32; +}; diff --git a/src/analyzer/protocol/ntp/ntp.pac b/src/analyzer/protocol/ntp/ntp.pac new file mode 100644 index 0000000000..4da2ae0f6a --- /dev/null +++ b/src/analyzer/protocol/ntp/ntp.pac @@ -0,0 +1,27 @@ + +%include binpac.pac +%include bro.pac + +%extern{ + #include "types.bif.h" + #include "events.bif.h" +%} + +analyzer NTP withcontext { + connection: NTP_Conn; + flow: NTP_Flow; +}; + +connection NTP_Conn(bro_analyzer: BroAnalyzer) { + upflow = NTP_Flow(true); + downflow = NTP_Flow(false); +}; + +%include ntp-mode7.pac +%include ntp-protocol.pac + +flow NTP_Flow(is_orig: bool) { + datagram = NTP_PDU(is_orig) withcontext(connection, this); +}; + +%include ntp-analyzer.pac diff --git a/src/analyzer/protocol/ntp/types.bif b/src/analyzer/protocol/ntp/types.bif new file mode 100644 index 0000000000..2855d39a42 --- /dev/null +++ b/src/analyzer/protocol/ntp/types.bif @@ -0,0 +1,6 @@ +module NTP; + +type NTP::StandardMessage: record; +type NTP::ControlMessage: record; +type NTP::Mode7Message: record; +type NTP::Message: record; diff --git a/src/analyzer/protocol/pia/CMakeLists.txt b/src/analyzer/protocol/pia/CMakeLists.txt index 02397f7aff..b2bcf0c70c 100644 --- a/src/analyzer/protocol/pia/CMakeLists.txt +++ b/src/analyzer/protocol/pia/CMakeLists.txt @@ -1,8 +1,8 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro PIA) -bro_plugin_cc(PIA.cc Plugin.cc) -bro_plugin_end() +zeek_plugin_begin(Zeek PIA) +zeek_plugin_cc(PIA.cc Plugin.cc) +zeek_plugin_end() diff --git a/src/analyzer/protocol/pia/Plugin.cc b/src/analyzer/protocol/pia/Plugin.cc index 983617be66..c46e710f9d 100644 --- a/src/analyzer/protocol/pia/Plugin.cc +++ b/src/analyzer/protocol/pia/Plugin.cc @@ -6,7 +6,7 @@ #include "PIA.h" namespace plugin { -namespace Bro_PIA { +namespace Zeek_PIA { class Plugin : public plugin::Plugin { public: @@ -16,7 +16,7 @@ public: AddComponent(new ::analyzer::Component("PIA_UDP", ::analyzer::pia::PIA_UDP::Instantiate)); plugin::Configuration config; - config.name = "Bro::PIA"; + config.name = "Zeek::PIA"; config.description = "Analyzers implementing Dynamic Protocol"; return config; } diff --git a/src/analyzer/protocol/pop3/CMakeLists.txt b/src/analyzer/protocol/pop3/CMakeLists.txt index 8071d6a74d..dcca381140 100644 --- a/src/analyzer/protocol/pop3/CMakeLists.txt +++ b/src/analyzer/protocol/pop3/CMakeLists.txt @@ -1,9 +1,9 @@ 
-include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro POP3) -bro_plugin_cc(POP3.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek POP3) +zeek_plugin_cc(POP3.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/pop3/POP3.cc b/src/analyzer/protocol/pop3/POP3.cc index 2cd5041a70..62b57674e1 100644 --- a/src/analyzer/protocol/pop3/POP3.cc +++ b/src/analyzer/protocol/pop3/POP3.cc @@ -1,7 +1,7 @@ // This code contributed to Bro by Florian Schimandl, Hugh Dollman and // Robin Sommer. -#include "bro-config.h" +#include "zeek-config.h" #include #include @@ -833,10 +833,8 @@ void POP3_Analyzer::StartTLS() if ( ssl ) AddChildAnalyzer(ssl); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - - ConnectionEvent(pop3_starttls, vl); + if ( pop3_starttls ) + ConnectionEventFast(pop3_starttls, {BuildConnVal()}); } void POP3_Analyzer::AuthSuccessfull() @@ -926,14 +924,14 @@ void POP3_Analyzer::POP3Event(EventHandlerPtr event, bool is_orig, if ( ! event ) return; - val_list* vl = new val_list; + val_list vl(2 + (bool)arg1 + (bool)arg2); - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(is_orig)); + vl.append(BuildConnVal()); + vl.append(val_mgr->GetBool(is_orig)); if ( arg1 ) - vl->append(new StringVal(arg1)); + vl.append(new StringVal(arg1)); if ( arg2 ) - vl->append(new StringVal(arg2)); + vl.append(new StringVal(arg2)); - ConnectionEvent(event, vl); + ConnectionEventFast(event, std::move(vl)); } diff --git a/src/analyzer/protocol/pop3/Plugin.cc b/src/analyzer/protocol/pop3/Plugin.cc index f6a97b824e..0fed697e83 100644 --- a/src/analyzer/protocol/pop3/Plugin.cc +++ b/src/analyzer/protocol/pop3/Plugin.cc @@ -6,7 +6,7 @@ #include "POP3.h" namespace plugin { -namespace Bro_POP3 { +namespace Zeek_POP3 { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("POP3", ::analyzer::pop3::POP3_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::POP3"; + config.name = "Zeek::POP3"; config.description = "POP3 analyzer"; return config; } diff --git a/src/analyzer/protocol/pop3/events.bif b/src/analyzer/protocol/pop3/events.bif index 74cf1f6f68..7f06008a88 100644 --- a/src/analyzer/protocol/pop3/events.bif +++ b/src/analyzer/protocol/pop3/events.bif @@ -12,12 +12,12 @@ ## ## arg: The argument to the command. ## -## .. bro:see:: pop3_data pop3_login_failure pop3_login_success pop3_reply +## .. zeek:see:: pop3_data pop3_login_failure pop3_login_success pop3_reply ## pop3_unexpected ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event pop3_request%(c: connection, is_orig: bool, command: string, arg: string%); @@ -37,14 +37,14 @@ event pop3_request%(c: connection, is_orig: bool, ## ## msg: The textual description the server sent along with *cmd*. ## -## .. bro:see:: pop3_data pop3_login_failure pop3_login_success pop3_request +## .. zeek:see:: pop3_data pop3_login_failure pop3_login_success pop3_request ## pop3_unexpected ## ## .. 
todo:: This event is receiving odd parameters, should unify. ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event pop3_reply%(c: connection, is_orig: bool, cmd: string, msg: string%); @@ -62,12 +62,12 @@ event pop3_reply%(c: connection, is_orig: bool, cmd: string, msg: string%); ## ## data: The data sent. ## -## .. bro:see:: pop3_login_failure pop3_login_success pop3_reply pop3_request +## .. zeek:see:: pop3_login_failure pop3_login_success pop3_reply pop3_request ## pop3_unexpected ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event pop3_data%(c: connection, is_orig: bool, data: string%); @@ -86,11 +86,11 @@ event pop3_data%(c: connection, is_orig: bool, data: string%); ## ## detail: The input that triggered the event. ## -## .. bro:see:: pop3_data pop3_login_failure pop3_login_success pop3_reply pop3_request +## .. zeek:see:: pop3_data pop3_login_failure pop3_login_success pop3_reply pop3_request ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event pop3_unexpected%(c: connection, is_orig: bool, msg: string, detail: string%); @@ -105,12 +105,12 @@ event pop3_unexpected%(c: connection, is_orig: bool, ## ## c: The connection. ## -## .. bro:see:: pop3_data pop3_login_failure pop3_login_success pop3_reply +## .. zeek:see:: pop3_data pop3_login_failure pop3_login_success pop3_reply ## pop3_request pop3_unexpected ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event pop3_starttls%(c: connection%); @@ -128,12 +128,12 @@ event pop3_starttls%(c: connection%); ## ## password: The password used for authentication. ## -## .. bro:see:: pop3_data pop3_login_failure pop3_reply pop3_request +## .. zeek:see:: pop3_data pop3_login_failure pop3_reply pop3_request ## pop3_unexpected ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. 
To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event pop3_login_success%(c: connection, is_orig: bool, user: string, password: string%); @@ -152,12 +152,12 @@ event pop3_login_success%(c: connection, is_orig: bool, ## ## password: The password attempted for authentication. ## -## .. bro:see:: pop3_data pop3_login_success pop3_reply pop3_request +## .. zeek:see:: pop3_data pop3_login_success pop3_reply pop3_request ## pop3_unexpected ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event pop3_login_failure%(c: connection, is_orig: bool, user: string, password: string%); diff --git a/src/analyzer/protocol/radius/CMakeLists.txt b/src/analyzer/protocol/radius/CMakeLists.txt index 077d71d7c7..3e5477be9e 100644 --- a/src/analyzer/protocol/radius/CMakeLists.txt +++ b/src/analyzer/protocol/radius/CMakeLists.txt @@ -1,10 +1,10 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro RADIUS) -bro_plugin_cc(RADIUS.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_pac(radius.pac radius-analyzer.pac radius-protocol.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek RADIUS) +zeek_plugin_cc(RADIUS.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_pac(radius.pac radius-analyzer.pac radius-protocol.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/radius/Plugin.cc b/src/analyzer/protocol/radius/Plugin.cc index c2729289ef..8b6efe15b8 100644 --- a/src/analyzer/protocol/radius/Plugin.cc +++ b/src/analyzer/protocol/radius/Plugin.cc @@ -6,7 +6,7 @@ #include "RADIUS.h" namespace plugin { -namespace Bro_RADIUS { +namespace Zeek_RADIUS { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("RADIUS", ::analyzer::RADIUS::RADIUS_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::RADIUS"; + config.name = "Zeek::RADIUS"; config.description = "RADIUS analyzer"; return config; } diff --git a/src/analyzer/protocol/rdp/CMakeLists.txt b/src/analyzer/protocol/rdp/CMakeLists.txt index c94afaa052..67ad09c18c 100644 --- a/src/analyzer/protocol/rdp/CMakeLists.txt +++ b/src/analyzer/protocol/rdp/CMakeLists.txt @@ -1,10 +1,10 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro RDP) - bro_plugin_cc(RDP.cc Plugin.cc) - bro_plugin_bif(events.bif) - bro_plugin_bif(types.bif) - bro_plugin_pac(rdp.pac rdp-analyzer.pac rdp-protocol.pac ../asn1/asn1.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek RDP) + zeek_plugin_cc(RDP.cc Plugin.cc) + zeek_plugin_bif(events.bif) + zeek_plugin_bif(types.bif) + zeek_plugin_pac(rdp.pac rdp-analyzer.pac rdp-protocol.pac ../asn1/asn1.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/rdp/Plugin.cc b/src/analyzer/protocol/rdp/Plugin.cc index 770bdfc730..169c7501d6 100644 --- a/src/analyzer/protocol/rdp/Plugin.cc +++ b/src/analyzer/protocol/rdp/Plugin.cc @@ -3,7 +3,7 @@ #include "RDP.h" namespace plugin { -namespace Bro_RDP { +namespace Zeek_RDP { class Plugin : public plugin::Plugin { public: 
@@ -12,7 +12,7 @@ public: AddComponent(new ::analyzer::Component("RDP", ::analyzer::rdp::RDP_Analyzer::InstantiateAnalyzer)); plugin::Configuration config; - config.name = "Bro::RDP"; + config.name = "Zeek::RDP"; config.description = "RDP analyzer"; return config; } diff --git a/src/analyzer/protocol/rdp/RDP.cc b/src/analyzer/protocol/rdp/RDP.cc index f3ceaae699..0f252952bd 100644 --- a/src/analyzer/protocol/rdp/RDP.cc +++ b/src/analyzer/protocol/rdp/RDP.cc @@ -10,7 +10,7 @@ RDP_Analyzer::RDP_Analyzer(Connection* c) : tcp::TCP_ApplicationAnalyzer("RDP", c) { interp = new binpac::RDP::RDP_Conn(this); - + had_gap = false; pia = 0; } @@ -72,6 +72,13 @@ void RDP_Analyzer::DeliverStream(int len, const u_char* data, bool orig) ForwardStream(len, data, orig); } + else + { + if ( rdp_native_encrypted_data ) + BifEvent::generate_rdp_native_encrypted_data( + interp->bro_analyzer(), interp->bro_analyzer()->Conn(), + orig, len); + } } else // if not encrypted { diff --git a/src/analyzer/protocol/rdp/events.bif b/src/analyzer/protocol/rdp/events.bif index 3a86e45773..178860bd42 100644 --- a/src/analyzer/protocol/rdp/events.bif +++ b/src/analyzer/protocol/rdp/events.bif @@ -1,3 +1,12 @@ +## Generated for each packet after RDP native encryption begins +## +## c: The connection record for the underlying transport-layer session/flow. +## +## orig: True if the packet was sent by the originator of the connection. +## +## len: The length of the encrypted data. +event rdp_native_encrypted_data%(c: connection, orig: bool, len: count%); + ## Generated for X.224 client requests. ## ## c: The connection record for the underlying transport-layer session/flow. @@ -26,6 +35,27 @@ event rdp_negotiation_failure%(c: connection, failure_code: count%); ## data: The data contained in the client core data structure. event rdp_client_core_data%(c: connection, data: RDP::ClientCoreData%); +## Generated for client security data packets. +## +## c: The connection record for the underlying transport-layer session/flow. +## +## data: The data contained in the client security data structure. +event rdp_client_security_data%(c: connection, data: RDP::ClientSecurityData%); + +## Generated for Client Network Data (TS_UD_CS_NET) packets +## +## c: The connection record for the underlying transport-layer session/flow. +## +## channels: The channels that were requested +event rdp_client_network_data%(c: connection, channels: RDP::ClientChannelList%); + +## Generated for client cluster data packets. +## +## c: The connection record for the underlying transport-layer session/flow. +## +## data: The data contained in the client security data structure. +event rdp_client_cluster_data%(c: connection, data: RDP::ClientClusterData%); + ## Generated for MCS server responses. ## ## c: The connection record for the underlying transport-layer session/flow. @@ -58,4 +88,4 @@ event rdp_server_certificate%(c: connection, cert_type: count, permanently_issue ## c: The connection record for the underlying transport-layer session/flow. ## ## security_protocol: The security protocol being used for the session. 
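The new rdp_client_network_data event carries one RDP::ClientChannelDef per requested channel, and the Client_Channel_Def record added to rdp-protocol.pac further below breaks the 32-bit options word into individual booleans. A standalone sketch of that decoding with the same masks (the struct name and the sample value are made up for illustration):

    #include <cstdint>
    #include <cstdio>

    // Decode the per-channel "options" word of a TS_UD_CS_NET channel
    // definition into the flags the patch adds to RDP::ClientChannelDef.
    struct ChannelOptions
        {
        bool initialized, encrypt_rdp, encrypt_sc, encrypt_cs;
        bool pri_high, pri_med, pri_low;
        bool compress_rdp, compress, show_protocol, remote_control_persistent;
        };

    static ChannelOptions decode_channel_options(uint32_t options)
        {
        ChannelOptions o;
        o.remote_control_persistent = options & 0x00100000;
        o.show_protocol             = options & 0x00200000;
        o.compress                  = options & 0x00400000;
        o.compress_rdp              = options & 0x00800000;
        o.pri_low                   = options & 0x02000000;
        o.pri_med                   = options & 0x04000000;
        o.pri_high                  = options & 0x08000000;
        o.encrypt_cs                = options & 0x10000000;
        o.encrypt_sc                = options & 0x20000000;
        o.encrypt_rdp               = options & 0x40000000;
        o.initialized               = options & 0x80000000;
        return o;
        }

    int main()
        {
        // A made-up channel asking for initialization, RDP encryption and compression.
        ChannelOptions o = decode_channel_options(0x80000000u | 0x40000000u | 0x00800000u);
        std::printf("init=%d encrypt_rdp=%d compress_rdp=%d\n",
                    o.initialized, o.encrypt_rdp, o.compress_rdp);
        return 0;
        }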
-event rdp_begin_encryption%(c: connection, security_protocol: count%); \ No newline at end of file +event rdp_begin_encryption%(c: connection, security_protocol: count%); diff --git a/src/analyzer/protocol/rdp/rdp-analyzer.pac b/src/analyzer/protocol/rdp/rdp-analyzer.pac index 9c370887a2..dd76d07a87 100644 --- a/src/analyzer/protocol/rdp/rdp-analyzer.pac +++ b/src/analyzer/protocol/rdp/rdp-analyzer.pac @@ -101,6 +101,79 @@ refine flow RDP_Flow += { return true; %} + function proc_rdp_client_security_data(csec: Client_Security_Data): bool + %{ + if ( ! rdp_client_security_data ) + return false; + + RecordVal* csd = new RecordVal(BifType::Record::RDP::ClientSecurityData); + csd->Assign(0, val_mgr->GetCount(${csec.encryption_methods})); + csd->Assign(1, val_mgr->GetCount(${csec.ext_encryption_methods})); + + BifEvent::generate_rdp_client_security_data(connection()->bro_analyzer(), + connection()->bro_analyzer()->Conn(), + csd); + return true; + %} + + function proc_rdp_client_network_data(cnetwork: Client_Network_Data): bool + %{ + if ( ! rdp_client_network_data ) + return false; + + if ( ${cnetwork.channel_def_array}->size() ) + { + VectorVal* channels = new VectorVal(BifType::Vector::RDP::ClientChannelList); + + for ( uint i = 0; i < ${cnetwork.channel_def_array}->size(); ++i ) + { + RecordVal* channel_def = new RecordVal(BifType::Record::RDP::ClientChannelDef); + + channel_def->Assign(0, bytestring_to_val(${cnetwork.channel_def_array[i].name})); + channel_def->Assign(1, val_mgr->GetCount(${cnetwork.channel_def_array[i].options})); + + channel_def->Assign(2, val_mgr->GetBool(${cnetwork.channel_def_array[i].CHANNEL_OPTION_INITIALIZED})); + channel_def->Assign(3, val_mgr->GetBool(${cnetwork.channel_def_array[i].CHANNEL_OPTION_ENCRYPT_RDP})); + channel_def->Assign(4, val_mgr->GetBool(${cnetwork.channel_def_array[i].CHANNEL_OPTION_ENCRYPT_SC})); + channel_def->Assign(5, val_mgr->GetBool(${cnetwork.channel_def_array[i].CHANNEL_OPTION_ENCRYPT_CS})); + channel_def->Assign(6, val_mgr->GetBool(${cnetwork.channel_def_array[i].CHANNEL_OPTION_PRI_HIGH})); + channel_def->Assign(7, val_mgr->GetBool(${cnetwork.channel_def_array[i].CHANNEL_OPTION_PRI_MED})); + channel_def->Assign(8, val_mgr->GetBool(${cnetwork.channel_def_array[i].CHANNEL_OPTION_PRI_LOW})); + channel_def->Assign(9, val_mgr->GetBool(${cnetwork.channel_def_array[i].CHANNEL_OPTION_COMPRESS_RDP})); + channel_def->Assign(10, val_mgr->GetBool(${cnetwork.channel_def_array[i].CHANNEL_OPTION_COMPRESS})); + channel_def->Assign(11, val_mgr->GetBool(${cnetwork.channel_def_array[i].CHANNEL_OPTION_SHOW_PROTOCOL})); + channel_def->Assign(12, val_mgr->GetBool(${cnetwork.channel_def_array[i].REMOTE_CONTROL_PERSISTENT})); + + channels->Assign(channels->Size(), channel_def); + } + + BifEvent::generate_rdp_client_network_data(connection()->bro_analyzer(), + connection()->bro_analyzer()->Conn(), + channels); + } + + return true; + %} + + function proc_rdp_client_cluster_data(ccluster: Client_Cluster_Data): bool + %{ + if ( ! 
rdp_client_cluster_data ) + return false; + + RecordVal* ccld = new RecordVal(BifType::Record::RDP::ClientClusterData); + ccld->Assign(0, val_mgr->GetCount(${ccluster.flags})); + ccld->Assign(1, val_mgr->GetCount(${ccluster.redir_session_id})); + ccld->Assign(2, val_mgr->GetBool(${ccluster.REDIRECTION_SUPPORTED})); + ccld->Assign(3, val_mgr->GetCount(${ccluster.SERVER_SESSION_REDIRECTION_VERSION_MASK})); + ccld->Assign(4, val_mgr->GetBool(${ccluster.REDIRECTED_SESSIONID_FIELD_VALID})); + ccld->Assign(5, val_mgr->GetBool(${ccluster.REDIRECTED_SMARTCARD})); + + BifEvent::generate_rdp_client_cluster_data(connection()->bro_analyzer(), + connection()->bro_analyzer()->Conn(), + ccld); + return true; + %} + function proc_rdp_server_security(ssd: Server_Security_Data): bool %{ connection()->bro_analyzer()->ProtocolConfirmation(); @@ -165,6 +238,18 @@ refine typeattr Client_Core_Data += &let { proc: bool = $context.flow.proc_rdp_client_core_data(this); }; +refine typeattr Client_Security_Data += &let { + proc: bool = $context.flow.proc_rdp_client_security_data(this); +}; + +refine typeattr Client_Network_Data += &let { + proc: bool = $context.flow.proc_rdp_client_network_data(this); +}; + +refine typeattr Client_Cluster_Data += &let { + proc: bool = $context.flow.proc_rdp_client_cluster_data(this); +}; + refine typeattr GCC_Server_Create_Response += &let { proc: bool = $context.flow.proc_rdp_gcc_server_create_response(this); }; @@ -180,3 +265,4 @@ refine typeattr Server_Certificate += &let { refine typeattr X509_Cert_Data += &let { proc: bool = $context.flow.proc_x509_cert_data(this); }; + diff --git a/src/analyzer/protocol/rdp/rdp-protocol.pac b/src/analyzer/protocol/rdp/rdp-protocol.pac index 602e104b2a..12eb5aee94 100644 --- a/src/analyzer/protocol/rdp/rdp-protocol.pac +++ b/src/analyzer/protocol/rdp/rdp-protocol.pac @@ -52,9 +52,9 @@ type Data_Block = record { header: Data_Header; block: case header.type of { 0xc001 -> client_core: Client_Core_Data; - #0xc002 -> client_security: Client_Security_Data; - #0xc003 -> client_network: Client_Network_Data; - #0xc004 -> client_cluster: Client_Cluster_Data; + 0xc002 -> client_security: Client_Security_Data; + 0xc003 -> client_network: Client_Network_Data; + 0xc004 -> client_cluster: Client_Cluster_Data; #0xc005 -> client_monitor: Client_Monitor_Data; #0xc006 -> client_msgchannel: Client_MsgChannel_Data; #0xc008 -> client_monitor_ex: Client_MonitorExtended_Data; @@ -220,6 +220,43 @@ type Client_Core_Data = record { SUPPORT_HEARTBEAT_PDU: bool = early_capability_flags & 0x0400; } &byteorder=littleendian; +type Client_Security_Data = record { + encryption_methods: uint32; + ext_encryption_methods: uint32; +} &byteorder=littleendian; + +type Client_Network_Data = record { + channel_count: uint32; + channel_def_array: Client_Channel_Def[channel_count]; +} &byteorder=littleendian; + +type Client_Cluster_Data = record { + flags: uint32; + redir_session_id: uint32; +} &let { + REDIRECTION_SUPPORTED: bool = redir_session_id & 0x00000001; + SERVER_SESSION_REDIRECTION_VERSION_MASK: uint8 = (redir_session_id & 0x0000003C); + REDIRECTED_SESSIONID_FIELD_VALID: bool = (redir_session_id & 0x00000002); + REDIRECTED_SMARTCARD: bool = redir_session_id & 0x00000040; +} &byteorder=littleendian; + +type Client_Channel_Def = record { + name: bytestring &length=8; + options: uint32; +} &let { + REMOTE_CONTROL_PERSISTENT: bool = options & 0x00100000; + CHANNEL_OPTION_SHOW_PROTOCOL: bool = options & 0x00200000; + CHANNEL_OPTION_COMPRESS: bool = options & 0x00400000; + 
CHANNEL_OPTION_COMPRESS_RDP: bool = options & 0x00800000; + CHANNEL_OPTION_PRI_LOW: bool = options & 0x02000000; + CHANNEL_OPTION_PRI_MED: bool = options & 0x04000000; + CHANNEL_OPTION_PRI_HIGH: bool = options & 0x08000000; + CHANNEL_OPTION_ENCRYPT_CS: bool = options & 0x10000000; + CHANNEL_OPTION_ENCRYPT_SC: bool = options & 0x20000000; + CHANNEL_OPTION_ENCRYPT_RDP: bool = options & 0x40000000; + CHANNEL_OPTION_INITIALIZED: bool = options & 0x80000000; +} &byteorder=littleendian; + ###################################################################### # Server MCS ###################################################################### diff --git a/src/analyzer/protocol/rdp/types.bif b/src/analyzer/protocol/rdp/types.bif index 8222560331..366676d017 100644 --- a/src/analyzer/protocol/rdp/types.bif +++ b/src/analyzer/protocol/rdp/types.bif @@ -3,3 +3,9 @@ module RDP; type EarlyCapabilityFlags: record; type ClientCoreData: record; + +type ClientSecurityData: record; +type ClientClusterData: record; + +type ClientChannelList: vector; +type ClientChannelDef: record; diff --git a/src/analyzer/protocol/rfb/CMakeLists.txt b/src/analyzer/protocol/rfb/CMakeLists.txt index 28523bfe2d..10c8b2de12 100644 --- a/src/analyzer/protocol/rfb/CMakeLists.txt +++ b/src/analyzer/protocol/rfb/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro RFB) - bro_plugin_cc(RFB.cc Plugin.cc) - bro_plugin_bif(events.bif) - bro_plugin_pac(rfb.pac rfb-analyzer.pac rfb-protocol.pac) -bro_plugin_end() \ No newline at end of file +zeek_plugin_begin(Zeek RFB) + zeek_plugin_cc(RFB.cc Plugin.cc) + zeek_plugin_bif(events.bif) + zeek_plugin_pac(rfb.pac rfb-analyzer.pac rfb-protocol.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/rfb/Plugin.cc b/src/analyzer/protocol/rfb/Plugin.cc index b3bed0f093..8cf53bb007 100644 --- a/src/analyzer/protocol/rfb/Plugin.cc +++ b/src/analyzer/protocol/rfb/Plugin.cc @@ -3,7 +3,7 @@ #include "RFB.h" namespace plugin { -namespace Bro_RFB { +namespace Zeek_RFB { class Plugin : public plugin::Plugin { public: @@ -13,11 +13,11 @@ public: ::analyzer::rfb::RFB_Analyzer::InstantiateAnalyzer)); plugin::Configuration config; - config.name = "Bro::RFB"; + config.name = "Zeek::RFB"; config.description = "Parser for rfb (VNC) analyzer"; return config; } } plugin; } -} \ No newline at end of file +} diff --git a/src/analyzer/protocol/rfb/rfb-analyzer.pac b/src/analyzer/protocol/rfb/rfb-analyzer.pac index 49d6e9f420..ee0c4657bc 100644 --- a/src/analyzer/protocol/rfb/rfb-analyzer.pac +++ b/src/analyzer/protocol/rfb/rfb-analyzer.pac @@ -1,7 +1,8 @@ refine flow RFB_Flow += { function proc_rfb_message(msg: RFB_PDU): bool %{ - BifEvent::generate_rfb_event(connection()->bro_analyzer(), connection()->bro_analyzer()->Conn()); + if ( rfb_event ) + BifEvent::generate_rfb_event(connection()->bro_analyzer(), connection()->bro_analyzer()->Conn()); return true; %} @@ -9,50 +10,59 @@ refine flow RFB_Flow += { %{ if (client) { - BifEvent::generate_rfb_client_version(connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), bytestring_to_val(major), bytestring_to_val(minor)); + if ( rfb_client_version ) + BifEvent::generate_rfb_client_version(connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), bytestring_to_val(major), bytestring_to_val(minor)); connection()->bro_analyzer()->ProtocolConfirmation(); } else { - 
BifEvent::generate_rfb_server_version(connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), bytestring_to_val(major), bytestring_to_val(minor)); + if ( rfb_server_version ) + BifEvent::generate_rfb_server_version(connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), bytestring_to_val(major), bytestring_to_val(minor)); } return true; %} function proc_rfb_share_flag(shared: bool) : bool %{ - BifEvent::generate_rfb_share_flag(connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), shared); + if ( rfb_share_flag ) + BifEvent::generate_rfb_share_flag(connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), shared); return true; %} function proc_security_types(msg: RFBSecurityType) : bool %{ - BifEvent::generate_rfb_authentication_type(connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), ${msg.sectype}); + if ( rfb_authentication_type ) + BifEvent::generate_rfb_authentication_type(connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), ${msg.sectype}); return true; %} function proc_security_types37(msg: RFBAuthTypeSelected) : bool %{ - BifEvent::generate_rfb_authentication_type(connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), ${msg.type}); + if ( rfb_authentication_type ) + BifEvent::generate_rfb_authentication_type(connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), ${msg.type}); return true; %} function proc_handle_server_params(msg:RFBServerInit) : bool %{ - auto vec_ptr = ${msg.name}; - auto name_ptr = &((*vec_ptr)[0]); - BifEvent::generate_rfb_server_parameters( - connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), - new StringVal(${msg.name}->size(), (const char*)name_ptr), - ${msg.width}, - ${msg.height}); + if ( rfb_server_parameters ) + { + auto vec_ptr = ${msg.name}; + auto name_ptr = &((*vec_ptr)[0]); + BifEvent::generate_rfb_server_parameters( + connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), + new StringVal(${msg.name}->size(), (const char*)name_ptr), + ${msg.width}, + ${msg.height}); + } return true; %} function proc_handle_security_result(result : uint32) : bool %{ - BifEvent::generate_rfb_auth_result(connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), result); + if ( rfb_auth_result ) + BifEvent::generate_rfb_auth_result(connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), result); return true; %} }; diff --git a/src/analyzer/protocol/rpc/CMakeLists.txt b/src/analyzer/protocol/rpc/CMakeLists.txt index c71c6ddd9a..f1da2c9692 100644 --- a/src/analyzer/protocol/rpc/CMakeLists.txt +++ b/src/analyzer/protocol/rpc/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro RPC) -bro_plugin_cc(RPC.cc NFS.cc MOUNT.cc Portmap.cc XDR.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek RPC) +zeek_plugin_cc(RPC.cc NFS.cc MOUNT.cc Portmap.cc XDR.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/rpc/MOUNT.cc b/src/analyzer/protocol/rpc/MOUNT.cc index 604d2e3ed1..643aa21891 100644 --- a/src/analyzer/protocol/rpc/MOUNT.cc +++ b/src/analyzer/protocol/rpc/MOUNT.cc @@ -3,7 +3,7 @@ #include #include -#include "bro-config.h" +#include "zeek-config.h" #include "NetVar.h" #include "XDR.h" @@ -93,9 +93,9 @@ int MOUNT_Interp::RPC_BuildReply(RPC_CallInfo* c, BifEnum::rpc_status rpc_status if ( mount_reply_status ) { - val_list* vl = 
event_common_vl(c, rpc_status, mount_status, - start_time, last_time, reply_len); - analyzer->ConnectionEvent(mount_reply_status, vl); + auto vl = event_common_vl(c, rpc_status, mount_status, + start_time, last_time, reply_len, 0); + analyzer->ConnectionEventFast(mount_reply_status, std::move(vl)); } if ( ! rpc_success ) @@ -162,34 +162,34 @@ int MOUNT_Interp::RPC_BuildReply(RPC_CallInfo* c, BifEnum::rpc_status rpc_status // optional and all are set to 0 ... if ( event ) { - val_list* vl = event_common_vl(c, rpc_status, mount_status, - start_time, last_time, reply_len); - Val *request = c->TakeRequestVal(); + auto vl = event_common_vl(c, rpc_status, mount_status, + start_time, last_time, reply_len, (bool)request + (bool)reply); + if ( request ) - vl->append(request); + vl.append(request); if ( reply ) - vl->append(reply); + vl.append(reply); - analyzer->ConnectionEvent(event, vl); + analyzer->ConnectionEventFast(event, std::move(vl)); } else Unref(reply); return 1; } -val_list* MOUNT_Interp::event_common_vl(RPC_CallInfo *c, +val_list MOUNT_Interp::event_common_vl(RPC_CallInfo *c, BifEnum::rpc_status rpc_status, BifEnum::MOUNT3::status_t mount_status, double rep_start_time, - double rep_last_time, int reply_len) + double rep_last_time, int reply_len, int extra_elements) { // Returns a new val_list that already has a conn_val, and mount3_info. // These are the first parameters for each mount_* event ... - val_list *vl = new val_list; - vl->append(analyzer->BuildConnVal()); + val_list vl(2 + extra_elements); + vl.append(analyzer->BuildConnVal()); VectorVal* auxgids = new VectorVal(internal_type("index_vec")->AsVectorType()); for (size_t i = 0; i < c->AuxGIDs().size(); ++i) @@ -212,7 +212,7 @@ val_list* MOUNT_Interp::event_common_vl(RPC_CallInfo *c, info->Assign(11, new StringVal(c->MachineName())); info->Assign(12, auxgids); - vl->append(info); + vl.append(info); return vl; } diff --git a/src/analyzer/protocol/rpc/MOUNT.h b/src/analyzer/protocol/rpc/MOUNT.h index 42da4f61ed..7c243f96a0 100644 --- a/src/analyzer/protocol/rpc/MOUNT.h +++ b/src/analyzer/protocol/rpc/MOUNT.h @@ -22,10 +22,10 @@ protected: // Returns a new val_list that already has a conn_val, rpc_status and // mount_status. These are the first parameters for each mount_* event // ... - val_list* event_common_vl(RPC_CallInfo *c, BifEnum::rpc_status rpc_status, + val_list event_common_vl(RPC_CallInfo *c, BifEnum::rpc_status rpc_status, BifEnum::MOUNT3::status_t mount_status, double rep_start_time, double rep_last_time, - int reply_len); + int reply_len, int extra_elements); // These methods parse the appropriate MOUNTv3 "type" out of buf. If // there are any errors (i.e., buffer to short, etc), buf will be set diff --git a/src/analyzer/protocol/rpc/NFS.cc b/src/analyzer/protocol/rpc/NFS.cc index ff16812d65..9eb9e88d95 100644 --- a/src/analyzer/protocol/rpc/NFS.cc +++ b/src/analyzer/protocol/rpc/NFS.cc @@ -3,7 +3,7 @@ #include #include -#include "bro-config.h" +#include "zeek-config.h" #include "NetVar.h" #include "XDR.h" @@ -147,9 +147,9 @@ int NFS_Interp::RPC_BuildReply(RPC_CallInfo* c, BifEnum::rpc_status rpc_status, if ( nfs_reply_status ) { - val_list* vl = event_common_vl(c, rpc_status, nfs_status, - start_time, last_time, reply_len); - analyzer->ConnectionEvent(nfs_reply_status, vl); + auto vl = event_common_vl(c, rpc_status, nfs_status, + start_time, last_time, reply_len, 0); + analyzer->ConnectionEventFast(nfs_reply_status, std::move(vl)); } if ( ! 
rpc_success ) @@ -274,18 +274,18 @@ int NFS_Interp::RPC_BuildReply(RPC_CallInfo* c, BifEnum::rpc_status rpc_status, // optional and all are set to 0 ... if ( event ) { - val_list* vl = event_common_vl(c, rpc_status, nfs_status, - start_time, last_time, reply_len); - Val *request = c->TakeRequestVal(); + auto vl = event_common_vl(c, rpc_status, nfs_status, + start_time, last_time, reply_len, (bool)request + (bool)reply); + if ( request ) - vl->append(request); + vl.append(request); if ( reply ) - vl->append(reply); + vl.append(reply); - analyzer->ConnectionEvent(event, vl); + analyzer->ConnectionEventFast(event, std::move(vl)); } else Unref(reply); @@ -317,15 +317,15 @@ StringVal* NFS_Interp::nfs3_file_data(const u_char*& buf, int& n, uint64_t offse return 0; } -val_list* NFS_Interp::event_common_vl(RPC_CallInfo *c, BifEnum::rpc_status rpc_status, +val_list NFS_Interp::event_common_vl(RPC_CallInfo *c, BifEnum::rpc_status rpc_status, BifEnum::NFS3::status_t nfs_status, double rep_start_time, - double rep_last_time, int reply_len) + double rep_last_time, int reply_len, int extra_elements) { // Returns a new val_list that already has a conn_val, and nfs3_info. // These are the first parameters for each nfs_* event ... - val_list *vl = new val_list; - vl->append(analyzer->BuildConnVal()); + val_list vl(2 + extra_elements); + vl.append(analyzer->BuildConnVal()); VectorVal* auxgids = new VectorVal(internal_type("index_vec")->AsVectorType()); for ( size_t i = 0; i < c->AuxGIDs().size(); ++i ) @@ -346,7 +346,7 @@ val_list* NFS_Interp::event_common_vl(RPC_CallInfo *c, BifEnum::rpc_status rpc_s info->Assign(11, new StringVal(c->MachineName())); info->Assign(12, auxgids); - vl->append(info); + vl.append(info); return vl; } diff --git a/src/analyzer/protocol/rpc/NFS.h b/src/analyzer/protocol/rpc/NFS.h index 2ec4047946..56a368bfdc 100644 --- a/src/analyzer/protocol/rpc/NFS.h +++ b/src/analyzer/protocol/rpc/NFS.h @@ -22,10 +22,10 @@ protected: // Returns a new val_list that already has a conn_val, rpc_status and // nfs_status. These are the first parameters for each nfs_* event // ... - val_list* event_common_vl(RPC_CallInfo *c, BifEnum::rpc_status rpc_status, + val_list event_common_vl(RPC_CallInfo *c, BifEnum::rpc_status rpc_status, BifEnum::NFS3::status_t nfs_status, double rep_start_time, double rep_last_time, - int reply_len); + int reply_len, int extra_elements); // These methods parse the appropriate NFSv3 "type" out of buf. If // there are any errors (i.e., buffer to short, etc), buf will be set diff --git a/src/analyzer/protocol/rpc/Plugin.cc b/src/analyzer/protocol/rpc/Plugin.cc index abc2f679f2..2fff0ff6cf 100644 --- a/src/analyzer/protocol/rpc/Plugin.cc +++ b/src/analyzer/protocol/rpc/Plugin.cc @@ -9,7 +9,7 @@ #include "Portmap.h" namespace plugin { -namespace Bro_RPC { +namespace Zeek_RPC { class Plugin : public plugin::Plugin { public: @@ -22,7 +22,7 @@ public: AddComponent(new ::analyzer::Component("Contents_NFS", 0)); plugin::Configuration config; - config.name = "Bro::RPC"; + config.name = "Zeek::RPC"; config.description = "Analyzers for RPC-based protocols"; return config; } diff --git a/src/analyzer/protocol/rpc/Portmap.cc b/src/analyzer/protocol/rpc/Portmap.cc index 95beab6b62..eb26991921 100644 --- a/src/analyzer/protocol/rpc/Portmap.cc +++ b/src/analyzer/protocol/rpc/Portmap.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. 
-#include "bro-config.h" +#include "zeek-config.h" #include "NetVar.h" #include "XDR.h" @@ -261,10 +261,10 @@ uint32 PortmapperInterp::CheckPort(uint32 port) { if ( pm_bad_port ) { - val_list* vl = new val_list; - vl->append(analyzer->BuildConnVal()); - vl->append(val_mgr->GetCount(port)); - analyzer->ConnectionEvent(pm_bad_port, vl); + analyzer->ConnectionEventFast(pm_bad_port, { + analyzer->BuildConnVal(), + val_mgr->GetCount(port), + }); } port = 0; @@ -282,25 +282,25 @@ void PortmapperInterp::Event(EventHandlerPtr f, Val* request, BifEnum::rpc_statu return; } - val_list* vl = new val_list; + val_list vl; - vl->append(analyzer->BuildConnVal()); + vl.append(analyzer->BuildConnVal()); if ( status == BifEnum::RPC_SUCCESS ) { if ( request ) - vl->append(request); + vl.append(request); if ( reply ) - vl->append(reply); + vl.append(reply); } else { - vl->append(BifType::Enum::rpc_status->GetVal(status)); + vl.append(BifType::Enum::rpc_status->GetVal(status)); if ( request ) - vl->append(request); + vl.append(request); } - analyzer->ConnectionEvent(f, vl); + analyzer->ConnectionEventFast(f, std::move(vl)); } Portmapper_Analyzer::Portmapper_Analyzer(Connection* conn) diff --git a/src/analyzer/protocol/rpc/RPC.cc b/src/analyzer/protocol/rpc/RPC.cc index 5bd748d1ea..57de09b4d6 100644 --- a/src/analyzer/protocol/rpc/RPC.cc +++ b/src/analyzer/protocol/rpc/RPC.cc @@ -4,7 +4,7 @@ #include -#include "bro-config.h" +#include "zeek-config.h" #include "NetVar.h" #include "XDR.h" @@ -286,7 +286,7 @@ int RPC_Interpreter::DeliverRPC(const u_char* buf, int n, int rpclen, else Weird("bad_RPC"); - if ( n > 0 ) + if ( n > 0 && buf ) { // If it's just padded with zeroes, don't complain. for ( ; n > 0; --n, ++buf ) @@ -317,7 +317,7 @@ void RPC_Interpreter::Timeout() if ( c->IsValidCall() ) { - const u_char* buf; + const u_char* buf = nullptr; int n = 0; if ( ! 
RPC_BuildReply(c, BifEnum::RPC_TIMEOUT, buf, n, network_time, network_time, 0) ) @@ -330,16 +330,16 @@ void RPC_Interpreter::Event_RPC_Dialogue(RPC_CallInfo* c, BifEnum::rpc_status st { if ( rpc_dialogue ) { - val_list* vl = new val_list; - vl->append(analyzer->BuildConnVal()); - vl->append(val_mgr->GetCount(c->Program())); - vl->append(val_mgr->GetCount(c->Version())); - vl->append(val_mgr->GetCount(c->Proc())); - vl->append(BifType::Enum::rpc_status->GetVal(status)); - vl->append(new Val(c->StartTime(), TYPE_TIME)); - vl->append(val_mgr->GetCount(c->CallLen())); - vl->append(val_mgr->GetCount(reply_len)); - analyzer->ConnectionEvent(rpc_dialogue, vl); + analyzer->ConnectionEventFast(rpc_dialogue, { + analyzer->BuildConnVal(), + val_mgr->GetCount(c->Program()), + val_mgr->GetCount(c->Version()), + val_mgr->GetCount(c->Proc()), + BifType::Enum::rpc_status->GetVal(status), + new Val(c->StartTime(), TYPE_TIME), + val_mgr->GetCount(c->CallLen()), + val_mgr->GetCount(reply_len), + }); } } @@ -347,14 +347,14 @@ void RPC_Interpreter::Event_RPC_Call(RPC_CallInfo* c) { if ( rpc_call ) { - val_list* vl = new val_list; - vl->append(analyzer->BuildConnVal()); - vl->append(val_mgr->GetCount(c->XID())); - vl->append(val_mgr->GetCount(c->Program())); - vl->append(val_mgr->GetCount(c->Version())); - vl->append(val_mgr->GetCount(c->Proc())); - vl->append(val_mgr->GetCount(c->CallLen())); - analyzer->ConnectionEvent(rpc_call, vl); + analyzer->ConnectionEventFast(rpc_call, { + analyzer->BuildConnVal(), + val_mgr->GetCount(c->XID()), + val_mgr->GetCount(c->Program()), + val_mgr->GetCount(c->Version()), + val_mgr->GetCount(c->Proc()), + val_mgr->GetCount(c->CallLen()), + }); } } @@ -362,12 +362,12 @@ void RPC_Interpreter::Event_RPC_Reply(uint32_t xid, BifEnum::rpc_status status, { if ( rpc_reply ) { - val_list* vl = new val_list; - vl->append(analyzer->BuildConnVal()); - vl->append(val_mgr->GetCount(xid)); - vl->append(BifType::Enum::rpc_status->GetVal(status)); - vl->append(val_mgr->GetCount(reply_len)); - analyzer->ConnectionEvent(rpc_reply, vl); + analyzer->ConnectionEventFast(rpc_reply, { + analyzer->BuildConnVal(), + val_mgr->GetCount(xid), + BifType::Enum::rpc_status->GetVal(status), + val_mgr->GetCount(reply_len), + }); } } diff --git a/src/analyzer/protocol/rpc/XDR.cc b/src/analyzer/protocol/rpc/XDR.cc index 9ae1ba1236..33973327ee 100644 --- a/src/analyzer/protocol/rpc/XDR.cc +++ b/src/analyzer/protocol/rpc/XDR.cc @@ -2,7 +2,7 @@ #include -#include "bro-config.h" +#include "zeek-config.h" #include "XDR.h" diff --git a/src/analyzer/protocol/rpc/events.bif b/src/analyzer/protocol/rpc/events.bif index b811a60cda..9b96dcb9de 100644 --- a/src/analyzer/protocol/rpc/events.bif +++ b/src/analyzer/protocol/rpc/events.bif @@ -10,14 +10,14 @@ ## ## info: Reports the status of the dialogue, along with some meta information. ## -## .. bro:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir +## .. zeek:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir ## nfs_proc_not_implemented nfs_proc_read nfs_proc_readdir nfs_proc_readlink ## nfs_proc_remove nfs_proc_rmdir nfs_proc_write nfs_reply_status rpc_call ## rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. 
To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event nfs_proc_null%(c: connection, info: NFS3::info_t%); @@ -38,14 +38,14 @@ event nfs_proc_null%(c: connection, info: NFS3::info_t%); ## attrs: The attributes returned in the reply. The values may not be valid if ## the request was unsuccessful. ## -## .. bro:see:: nfs_proc_create nfs_proc_lookup nfs_proc_mkdir +## .. zeek:see:: nfs_proc_create nfs_proc_lookup nfs_proc_mkdir ## nfs_proc_not_implemented nfs_proc_null nfs_proc_read nfs_proc_readdir ## nfs_proc_readlink nfs_proc_remove nfs_proc_rmdir nfs_proc_write nfs_reply_status ## rpc_call rpc_dialogue rpc_reply file_mode ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event nfs_proc_getattr%(c: connection, info: NFS3::info_t, fh: string, attrs: NFS3::fattr_t%); @@ -66,14 +66,14 @@ event nfs_proc_getattr%(c: connection, info: NFS3::info_t, fh: string, attrs: NF ## rep: The attributes returned in the reply. The values may not be ## valid if the request was unsuccessful. ## -## .. bro:see:: nfs_proc_create nfs_proc_lookup nfs_proc_mkdir +## .. zeek:see:: nfs_proc_create nfs_proc_lookup nfs_proc_mkdir ## nfs_proc_not_implemented nfs_proc_null nfs_proc_read nfs_proc_readdir ## nfs_proc_readlink nfs_proc_remove nfs_proc_rmdir nfs_proc_write nfs_reply_status ## rpc_call rpc_dialogue rpc_reply file_mode ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event nfs_proc_sattr%(c: connection, info: NFS3::info_t, req: NFS3::sattrargs_t, rep: NFS3::sattr_reply_t%); @@ -94,14 +94,14 @@ event nfs_proc_sattr%(c: connection, info: NFS3::info_t, req: NFS3::sattrargs_t, ## rep: The response returned in the reply. The values may not be valid if the ## request was unsuccessful. ## -## .. bro:see:: nfs_proc_create nfs_proc_getattr nfs_proc_mkdir +## .. zeek:see:: nfs_proc_create nfs_proc_getattr nfs_proc_mkdir ## nfs_proc_not_implemented nfs_proc_null nfs_proc_read nfs_proc_readdir ## nfs_proc_readlink nfs_proc_remove nfs_proc_rmdir nfs_proc_write nfs_reply_status ## rpc_call rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event nfs_proc_lookup%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t, rep: NFS3::lookup_reply_t%); @@ -122,14 +122,14 @@ event nfs_proc_lookup%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t ## rep: The response returned in the reply. 
The values may not be valid if the ## request was unsuccessful. ## -## .. bro:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir +## .. zeek:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir ## nfs_proc_not_implemented nfs_proc_null nfs_proc_remove nfs_proc_rmdir ## nfs_proc_write nfs_reply_status rpc_call rpc_dialogue rpc_reply ## NFS3::return_data NFS3::return_data_first_only NFS3::return_data_max ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event nfs_proc_read%(c: connection, info: NFS3::info_t, req: NFS3::readargs_t, rep: NFS3::read_reply_t%); @@ -150,14 +150,14 @@ event nfs_proc_read%(c: connection, info: NFS3::info_t, req: NFS3::readargs_t, r ## rep: The response returned in the reply. The values may not be valid if the ## request was unsuccessful. ## -## .. bro:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir +## .. zeek:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir ## nfs_proc_not_implemented nfs_proc_null nfs_proc_read nfs_proc_readdir ## nfs_proc_remove nfs_proc_rmdir nfs_proc_write nfs_reply_status ## nfs_proc_symlink rpc_call rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event nfs_proc_readlink%(c: connection, info: NFS3::info_t, fh: string, rep: NFS3::readlink_reply_t%); @@ -178,14 +178,14 @@ event nfs_proc_readlink%(c: connection, info: NFS3::info_t, fh: string, rep: NFS ## rep: The attributes returned in the reply. The values may not be ## valid if the request was unsuccessful. ## -## .. bro:see:: nfs_proc_create nfs_proc_lookup nfs_proc_mkdir +## .. zeek:see:: nfs_proc_create nfs_proc_lookup nfs_proc_mkdir ## nfs_proc_not_implemented nfs_proc_null nfs_proc_read nfs_proc_readdir ## nfs_proc_readlink nfs_proc_remove nfs_proc_rmdir nfs_proc_write nfs_reply_status ## nfs_proc_link rpc_call rpc_dialogue rpc_reply file_mode ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event nfs_proc_symlink%(c: connection, info: NFS3::info_t, req: NFS3::symlinkargs_t, rep: NFS3::newobj_reply_t%); @@ -206,14 +206,14 @@ event nfs_proc_symlink%(c: connection, info: NFS3::info_t, req: NFS3::symlinkarg ## rep: The response returned in the reply. The values may not be valid if the ## request was unsuccessful. ## -## .. bro:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir +## .. 
zeek:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir ## nfs_proc_not_implemented nfs_proc_null nfs_proc_read nfs_proc_readdir ## nfs_proc_remove nfs_proc_rmdir nfs_proc_write nfs_reply_status rpc_call ## nfs_proc_symlink rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event nfs_proc_link%(c: connection, info: NFS3::info_t, req: NFS3::linkargs_t, rep: NFS3::link_reply_t%); @@ -234,15 +234,15 @@ event nfs_proc_link%(c: connection, info: NFS3::info_t, req: NFS3::linkargs_t, r ## rep: The response returned in the reply. The values may not be valid if the ## request was unsuccessful. ## -## .. bro:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir +## .. zeek:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir ## nfs_proc_not_implemented nfs_proc_null nfs_proc_read nfs_proc_readdir ## nfs_proc_readlink nfs_proc_remove nfs_proc_rmdir nfs_reply_status rpc_call ## rpc_dialogue rpc_reply NFS3::return_data NFS3::return_data_first_only ## NFS3::return_data_max ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event nfs_proc_write%(c: connection, info: NFS3::info_t, req: NFS3::writeargs_t, rep: NFS3::write_reply_t%); @@ -263,14 +263,14 @@ event nfs_proc_write%(c: connection, info: NFS3::info_t, req: NFS3::writeargs_t, ## rep: The response returned in the reply. The values may not be valid if the ## request was unsuccessful. ## -## .. bro:see:: nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir +## .. zeek:see:: nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir ## nfs_proc_not_implemented nfs_proc_null nfs_proc_read nfs_proc_readdir ## nfs_proc_readlink nfs_proc_remove nfs_proc_rmdir nfs_proc_write nfs_reply_status ## rpc_call rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event nfs_proc_create%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t, rep: NFS3::newobj_reply_t%); @@ -291,14 +291,14 @@ event nfs_proc_create%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t ## rep: The response returned in the reply. The values may not be valid if the ## request was unsuccessful. ## -## .. bro:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup +## .. 
zeek:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup ## nfs_proc_not_implemented nfs_proc_null nfs_proc_read nfs_proc_readdir ## nfs_proc_readlink nfs_proc_remove nfs_proc_rmdir nfs_proc_write nfs_reply_status ## rpc_call rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event nfs_proc_mkdir%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t, rep: NFS3::newobj_reply_t%); @@ -319,14 +319,14 @@ event nfs_proc_mkdir%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t, ## rep: The response returned in the reply. The values may not be valid if the ## request was unsuccessful. ## -## .. bro:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir +## .. zeek:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir ## nfs_proc_not_implemented nfs_proc_null nfs_proc_read nfs_proc_readdir ## nfs_proc_readlink nfs_proc_rmdir nfs_proc_write nfs_reply_status rpc_call ## rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event nfs_proc_remove%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t, rep: NFS3::delobj_reply_t%); @@ -347,14 +347,14 @@ event nfs_proc_remove%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t ## rep: The response returned in the reply. The values may not be valid if the ## request was unsuccessful. ## -## .. bro:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir +## .. zeek:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir ## nfs_proc_not_implemented nfs_proc_null nfs_proc_read nfs_proc_readdir ## nfs_proc_readlink nfs_proc_remove nfs_proc_write nfs_reply_status rpc_call ## rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event nfs_proc_rmdir%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t, rep: NFS3::delobj_reply_t%); @@ -375,14 +375,14 @@ event nfs_proc_rmdir%(c: connection, info: NFS3::info_t, req: NFS3::diropargs_t, ## rep: The response returned in the reply. The values may not be valid if the ## request was unsuccessful. ## -## .. bro:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir +## .. zeek:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir ## nfs_proc_not_implemented nfs_proc_null nfs_proc_read nfs_proc_readdir ## nfs_proc_readlink nfs_proc_remove nfs_proc_rename nfs_proc_write ## nfs_reply_status rpc_call rpc_dialogue rpc_reply ## -## .. 
todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event nfs_proc_rename%(c: connection, info: NFS3::info_t, req: NFS3::renameopargs_t, rep: NFS3::renameobj_reply_t%); @@ -403,18 +403,18 @@ event nfs_proc_rename%(c: connection, info: NFS3::info_t, req: NFS3::renameoparg ## rep: The response returned in the reply. The values may not be valid if the ## request was unsuccessful. ## -## .. bro:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir +## .. zeek:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir ## nfs_proc_not_implemented nfs_proc_null nfs_proc_read nfs_proc_readlink ## nfs_proc_remove nfs_proc_rmdir nfs_proc_write nfs_reply_status rpc_call ## rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event nfs_proc_readdir%(c: connection, info: NFS3::info_t, req: NFS3::readdirargs_t, rep: NFS3::readdir_reply_t%); -## Generated for NFSv3 request/reply dialogues of a type that Bro's NFSv3 +## Generated for NFSv3 request/reply dialogues of a type that Zeek's NFSv3 ## analyzer does not implement. ## ## NFS is a service running on top of RPC. See `Wikipedia @@ -425,15 +425,15 @@ event nfs_proc_readdir%(c: connection, info: NFS3::info_t, req: NFS3::readdirarg ## ## info: Reports the status of the dialogue, along with some meta information. ## -## proc: The procedure called that Bro does not implement. +## proc: The procedure called that Zeek does not implement. ## -## .. bro:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir +## .. zeek:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir ## nfs_proc_null nfs_proc_read nfs_proc_readdir nfs_proc_readlink nfs_proc_remove ## nfs_proc_rmdir nfs_proc_write nfs_reply_status rpc_call rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event nfs_proc_not_implemented%(c: connection, info: NFS3::info_t, proc: NFS3::proc_t%); @@ -444,14 +444,14 @@ event nfs_proc_not_implemented%(c: connection, info: NFS3::info_t, proc: NFS3::p ## ## info: Reports the status included in the reply. ## -## .. bro:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir +## .. zeek:see:: nfs_proc_create nfs_proc_getattr nfs_proc_lookup nfs_proc_mkdir ## nfs_proc_not_implemented nfs_proc_null nfs_proc_read nfs_proc_readdir ## nfs_proc_readlink nfs_proc_remove nfs_proc_rmdir nfs_proc_write rpc_call ## rpc_dialogue rpc_reply ## -## .. 
todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event nfs_reply_status%(n: connection, info: NFS3::info_t%); @@ -463,14 +463,14 @@ event nfs_reply_status%(n: connection, info: NFS3::info_t%); ## ## r: The RPC connection. ## -## .. bro:see:: pm_request_set pm_request_unset pm_request_getport +## .. zeek:see:: pm_request_set pm_request_unset pm_request_getport ## pm_request_dump pm_request_callit pm_attempt_null pm_attempt_set ## pm_attempt_unset pm_attempt_getport pm_attempt_dump ## pm_attempt_callit pm_bad_port rpc_call rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event pm_request_null%(r: connection%); @@ -488,14 +488,14 @@ event pm_request_null%(r: connection%); ## reply. If no reply was seen, this will be false once the request ## times out. ## -## .. bro:see:: pm_request_null pm_request_unset pm_request_getport +## .. zeek:see:: pm_request_null pm_request_unset pm_request_getport ## pm_request_dump pm_request_callit pm_attempt_null pm_attempt_set ## pm_attempt_unset pm_attempt_getport pm_attempt_dump ## pm_attempt_callit pm_bad_port rpc_call rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event pm_request_set%(r: connection, m: pm_mapping, success: bool%); @@ -513,14 +513,14 @@ event pm_request_set%(r: connection, m: pm_mapping, success: bool%); ## reply. If no reply was seen, this will be false once the request ## times out. ## -## .. bro:see:: pm_request_null pm_request_set pm_request_getport +## .. zeek:see:: pm_request_null pm_request_set pm_request_getport ## pm_request_dump pm_request_callit pm_attempt_null pm_attempt_set ## pm_attempt_unset pm_attempt_getport pm_attempt_dump ## pm_attempt_callit pm_bad_port rpc_call rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event pm_request_unset%(r: connection, m: pm_mapping, success: bool%); @@ -536,14 +536,14 @@ event pm_request_unset%(r: connection, m: pm_mapping, success: bool%); ## ## p: The port returned by the server. ## -## .. 
bro:see:: pm_request_null pm_request_set pm_request_unset +## .. zeek:see:: pm_request_null pm_request_set pm_request_unset ## pm_request_dump pm_request_callit pm_attempt_null pm_attempt_set ## pm_attempt_unset pm_attempt_getport pm_attempt_dump ## pm_attempt_callit pm_bad_port rpc_call rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event pm_request_getport%(r: connection, pr: pm_port_request, p: port%); @@ -557,15 +557,15 @@ event pm_request_getport%(r: connection, pr: pm_port_request, p: port%); ## ## m: The mappings returned by the server. ## -## .. bro:see:: pm_request_null pm_request_set pm_request_unset +## .. zeek:see:: pm_request_null pm_request_set pm_request_unset ## pm_request_getport pm_request_callit pm_attempt_null ## pm_attempt_set pm_attempt_unset pm_attempt_getport ## pm_attempt_dump pm_attempt_callit pm_bad_port rpc_call ## rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event pm_request_dump%(r: connection, m: pm_mappings%); @@ -581,15 +581,15 @@ event pm_request_dump%(r: connection, m: pm_mappings%); ## ## p: The port value returned by the call. ## -## .. bro:see:: pm_request_null pm_request_set pm_request_unset +## .. zeek:see:: pm_request_null pm_request_set pm_request_unset ## pm_request_getport pm_request_dump pm_attempt_null ## pm_attempt_set pm_attempt_unset pm_attempt_getport ## pm_attempt_dump pm_attempt_callit pm_bad_port rpc_call ## rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event pm_request_callit%(r: connection, call: pm_callit_request, p: port%); @@ -602,17 +602,17 @@ event pm_request_callit%(r: connection, call: pm_callit_request, p: port%); ## r: The RPC connection. ## ## status: The status of the reply, which should be one of the index values of -## :bro:id:`RPC_status`. +## :zeek:id:`RPC_status`. ## -## .. bro:see:: pm_request_null pm_request_set pm_request_unset +## .. zeek:see:: pm_request_null pm_request_set pm_request_unset ## pm_request_getport pm_request_dump pm_request_callit ## pm_attempt_set pm_attempt_unset pm_attempt_getport ## pm_attempt_dump pm_attempt_callit pm_bad_port rpc_call ## rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. 
todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event pm_attempt_null%(r: connection, status: rpc_status%); @@ -625,19 +625,19 @@ event pm_attempt_null%(r: connection, status: rpc_status%); ## r: The RPC connection. ## ## status: The status of the reply, which should be one of the index values of -## :bro:id:`RPC_status`. +## :zeek:id:`RPC_status`. ## ## m: The argument to the original request. ## -## .. bro:see:: pm_request_null pm_request_set pm_request_unset +## .. zeek:see:: pm_request_null pm_request_set pm_request_unset ## pm_request_getport pm_request_dump pm_request_callit ## pm_attempt_null pm_attempt_unset pm_attempt_getport ## pm_attempt_dump pm_attempt_callit pm_bad_port rpc_call ## rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event pm_attempt_set%(r: connection, status: rpc_status, m: pm_mapping%); @@ -650,19 +650,19 @@ event pm_attempt_set%(r: connection, status: rpc_status, m: pm_mapping%); ## r: The RPC connection. ## ## status: The status of the reply, which should be one of the index values of -## :bro:id:`RPC_status`. +## :zeek:id:`RPC_status`. ## ## m: The argument to the original request. ## -## .. bro:see:: pm_request_null pm_request_set pm_request_unset +## .. zeek:see:: pm_request_null pm_request_set pm_request_unset ## pm_request_getport pm_request_dump pm_request_callit ## pm_attempt_null pm_attempt_set pm_attempt_getport ## pm_attempt_dump pm_attempt_callit pm_bad_port rpc_call ## rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event pm_attempt_unset%(r: connection, status: rpc_status, m: pm_mapping%); @@ -675,18 +675,18 @@ event pm_attempt_unset%(r: connection, status: rpc_status, m: pm_mapping%); ## r: The RPC connection. ## ## status: The status of the reply, which should be one of the index values of -## :bro:id:`RPC_status`. +## :zeek:id:`RPC_status`. ## ## pr: The argument to the original request. ## -## .. bro:see:: pm_request_null pm_request_set pm_request_unset +## .. zeek:see:: pm_request_null pm_request_set pm_request_unset ## pm_request_getport pm_request_dump pm_request_callit ## pm_attempt_null pm_attempt_set pm_attempt_unset pm_attempt_dump ## pm_attempt_callit pm_bad_port rpc_call rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. 
To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event pm_attempt_getport%(r: connection, status: rpc_status, pr: pm_port_request%); @@ -699,17 +699,17 @@ event pm_attempt_getport%(r: connection, status: rpc_status, pr: pm_port_request ## r: The RPC connection. ## ## status: The status of the reply, which should be one of the index values of -## :bro:id:`RPC_status`. +## :zeek:id:`RPC_status`. ## -## .. bro:see:: pm_request_null pm_request_set pm_request_unset +## .. zeek:see:: pm_request_null pm_request_set pm_request_unset ## pm_request_getport pm_request_dump pm_request_callit ## pm_attempt_null pm_attempt_set pm_attempt_unset ## pm_attempt_getport pm_attempt_callit pm_bad_port rpc_call ## rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event pm_attempt_dump%(r: connection, status: rpc_status%); @@ -722,19 +722,19 @@ event pm_attempt_dump%(r: connection, status: rpc_status%); ## r: The RPC connection. ## ## status: The status of the reply, which should be one of the index values of -## :bro:id:`RPC_status`. +## :zeek:id:`RPC_status`. ## ## call: The argument to the original request. ## -## .. bro:see:: pm_request_null pm_request_set pm_request_unset +## .. zeek:see:: pm_request_null pm_request_set pm_request_unset ## pm_request_getport pm_request_dump pm_request_callit ## pm_attempt_null pm_attempt_set pm_attempt_unset ## pm_attempt_getport pm_attempt_dump pm_bad_port rpc_call ## rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event pm_attempt_callit%(r: connection, status: rpc_status, call: pm_callit_request%); @@ -751,15 +751,15 @@ event pm_attempt_callit%(r: connection, status: rpc_status, call: pm_callit_requ ## ## bad_p: The invalid port value. ## -## .. bro:see:: pm_request_null pm_request_set pm_request_unset +## .. zeek:see:: pm_request_null pm_request_set pm_request_unset ## pm_request_getport pm_request_dump pm_request_callit ## pm_attempt_null pm_attempt_set pm_attempt_unset ## pm_attempt_getport pm_attempt_dump pm_attempt_callit rpc_call ## rpc_dialogue rpc_reply ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. 
event pm_bad_port%(r: connection, bad_p: count%); @@ -767,7 +767,7 @@ event pm_bad_port%(r: connection, bad_p: count%); ## and reply by their transaction identifiers and raises this event once both ## have been seen. If there's not a reply, this event will still be generated ## eventually on timeout. In that case, *status* will be set to -## :bro:enum:`RPC_TIMEOUT`. +## :zeek:enum:`RPC_TIMEOUT`. ## ## See `Wikipedia `__ for more information ## about the ONC RPC protocol. @@ -781,7 +781,7 @@ event pm_bad_port%(r: connection, bad_p: count%); ## proc: The procedure of the remote program to call. ## ## status: The status of the reply, which should be one of the index values of -## :bro:id:`RPC_status`. +## :zeek:id:`RPC_status`. ## ## start_time: The time when the *call* was seen. ## @@ -789,13 +789,13 @@ event pm_bad_port%(r: connection, bad_p: count%); ## ## reply_len: The size of the *reply_body* PDU. ## -## .. bro:see:: rpc_call rpc_reply dce_rpc_bind dce_rpc_message dce_rpc_request +## .. zeek:see:: rpc_call rpc_reply dce_rpc_bind dce_rpc_message dce_rpc_request ## dce_rpc_response rpc_timeout ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to add a -## call to :bro:see:`Analyzer::register_for_ports` or a DPD payload +## been ported. To still enable this event, one needs to add a +## call to :zeek:see:`Analyzer::register_for_ports` or a DPD payload ## signature. event rpc_dialogue%(c: connection, prog: count, ver: count, proc: count, status: rpc_status, start_time: time, call_len: count, reply_len: count%); @@ -816,13 +816,13 @@ event rpc_dialogue%(c: connection, prog: count, ver: count, proc: count, status: ## ## call_len: The size of the *call_body* PDU. ## -## .. bro:see:: rpc_dialogue rpc_reply dce_rpc_bind dce_rpc_message dce_rpc_request +## .. zeek:see:: rpc_dialogue rpc_reply dce_rpc_bind dce_rpc_message dce_rpc_request ## dce_rpc_response rpc_timeout ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to add a -## call to :bro:see:`Analyzer::register_for_ports` or a DPD payload +## been ported. To still enable this event, one needs to add a +## call to :zeek:see:`Analyzer::register_for_ports` or a DPD payload ## signature. event rpc_call%(c: connection, xid: count, prog: count, ver: count, proc: count, call_len: count%); @@ -836,17 +836,17 @@ event rpc_call%(c: connection, xid: count, prog: count, ver: count, proc: count, ## xid: The transaction identifier allowing to match requests with replies. ## ## status: The status of the reply, which should be one of the index values of -## :bro:id:`RPC_status`. +## :zeek:id:`RPC_status`. ## ## reply_len: The size of the *reply_body* PDU. ## -## .. bro:see:: rpc_call rpc_dialogue dce_rpc_bind dce_rpc_message dce_rpc_request +## .. zeek:see:: rpc_call rpc_dialogue dce_rpc_bind dce_rpc_message dce_rpc_request ## dce_rpc_response rpc_timeout ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. 
todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to add a -## call to :bro:see:`Analyzer::register_for_ports` or a DPD payload +## been ported. To still enable this event, one needs to add a +## call to :zeek:see:`Analyzer::register_for_ports` or a DPD payload ## signature. event rpc_reply%(c: connection, xid: count, status: rpc_status, reply_len: count%); @@ -859,12 +859,12 @@ event rpc_reply%(c: connection, xid: count, status: rpc_status, reply_len: count ## ## info: Reports the status of the dialogue, along with some meta information. ## -## .. bro:see:: mount_proc_mnt mount_proc_umnt +## .. zeek:see:: mount_proc_mnt mount_proc_umnt ## mount_proc_umnt_all mount_proc_not_implemented ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event mount_proc_null%(c: connection, info: MOUNT3::info_t%); @@ -882,12 +882,12 @@ event mount_proc_null%(c: connection, info: MOUNT3::info_t%); ## rep: The response returned in the reply. The values may not be valid if the ## request was unsuccessful. ## -## .. bro:see:: mount_proc_mnt mount_proc_umnt +## .. zeek:see:: mount_proc_mnt mount_proc_umnt ## mount_proc_umnt_all mount_proc_not_implemented ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event mount_proc_mnt%(c: connection, info: MOUNT3::info_t, req: MOUNT3::dirmntargs_t, rep: MOUNT3::mnt_reply_t%); @@ -902,12 +902,12 @@ event mount_proc_mnt%(c: connection, info: MOUNT3::info_t, req: MOUNT3::dirmntar ## ## req: The arguments passed in the request. ## -## .. bro:see:: mount_proc_mnt mount_proc_umnt +## .. zeek:see:: mount_proc_mnt mount_proc_umnt ## mount_proc_umnt_all mount_proc_not_implemented ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event mount_proc_umnt%(c: connection, info: MOUNT3::info_t, req: MOUNT3::dirmntargs_t%); @@ -922,30 +922,30 @@ event mount_proc_umnt%(c: connection, info: MOUNT3::info_t, req: MOUNT3::dirmnta ## ## req: The arguments passed in the request. ## -## .. bro:see:: mount_proc_mnt mount_proc_umnt +## .. zeek:see:: mount_proc_mnt mount_proc_umnt ## mount_proc_umnt_all mount_proc_not_implemented ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. 
todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event mount_proc_umnt_all%(c: connection, info: MOUNT3::info_t, req: MOUNT3::dirmntargs_t%); -## Generated for MOUNT3 request/reply dialogues of a type that Bro's MOUNTv3 +## Generated for MOUNT3 request/reply dialogues of a type that Zeek's MOUNTv3 ## analyzer does not implement. ## ## c: The RPC connection. ## ## info: Reports the status of the dialogue, along with some meta information. ## -## proc: The procedure called that Bro does not implement. +## proc: The procedure called that Zeek does not implement. ## -## .. bro:see:: mount_proc_mnt mount_proc_umnt +## .. zeek:see:: mount_proc_mnt mount_proc_umnt ## mount_proc_umnt_all mount_proc_not_implemented ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. event mount_proc_not_implemented%(c: connection, info: MOUNT3::info_t, proc: MOUNT3::proc_t%); @@ -956,11 +956,11 @@ event mount_proc_not_implemented%(c: connection, info: MOUNT3::info_t, proc: MOU ## ## info: Reports the status included in the reply. ## -## .. bro:see:: mount_proc_mnt mount_proc_umnt +## .. zeek:see:: mount_proc_mnt mount_proc_umnt ## mount_proc_umnt_all mount_proc_not_implemented ## -## .. todo:: Bro's current default configuration does not activate the protocol +## .. todo:: Zeek's current default configuration does not activate the protocol ## analyzer that generates this event; the corresponding script has not yet -## been ported to Bro 2.x. To still enable this event, one needs to +## been ported. To still enable this event, one needs to ## register a port for it or add a DPD payload signature. 
event mount_reply_status%(n: connection, info: MOUNT3::info_t%); diff --git a/src/analyzer/protocol/sip/CMakeLists.txt b/src/analyzer/protocol/sip/CMakeLists.txt index 6b42d2519a..e0ae9d2b90 100644 --- a/src/analyzer/protocol/sip/CMakeLists.txt +++ b/src/analyzer/protocol/sip/CMakeLists.txt @@ -1,14 +1,14 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro SIP) -bro_plugin_cc(Plugin.cc) -bro_plugin_cc(SIP.cc) -bro_plugin_cc(SIP_TCP.cc) -bro_plugin_bif(events.bif) -bro_plugin_pac(sip.pac sip-analyzer.pac sip-protocol.pac) -bro_plugin_pac(sip_TCP.pac sip-protocol.pac sip-analyzer.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek SIP) +zeek_plugin_cc(Plugin.cc) +zeek_plugin_cc(SIP.cc) +zeek_plugin_cc(SIP_TCP.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_pac(sip.pac sip-analyzer.pac sip-protocol.pac) +zeek_plugin_pac(sip_TCP.pac sip-protocol.pac sip-analyzer.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/sip/Plugin.cc b/src/analyzer/protocol/sip/Plugin.cc index cb8d49ddb6..23ddebc12c 100644 --- a/src/analyzer/protocol/sip/Plugin.cc +++ b/src/analyzer/protocol/sip/Plugin.cc @@ -7,7 +7,7 @@ #include "SIP_TCP.h" namespace plugin { -namespace Bro_SIP { +namespace Zeek_SIP { class Plugin : public plugin::Plugin { public: @@ -19,7 +19,7 @@ public: // AddComponent(new ::analyzer::Component("SIP_TCP", ::analyzer::sip_tcp::SIP_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::SIP"; + config.name = "Zeek::SIP"; config.description = "SIP analyzer UDP-only"; return config; } diff --git a/src/analyzer/protocol/sip/events.bif b/src/analyzer/protocol/sip/events.bif index f8ab6f4f37..fb8f9b77d1 100644 --- a/src/analyzer/protocol/sip/events.bif +++ b/src/analyzer/protocol/sip/events.bif @@ -13,7 +13,7 @@ ## ## version: The version number specified in the request (e.g., ``2.0``). ## -## .. bro:see:: sip_reply sip_header sip_all_headers sip_begin_entity sip_end_entity +## .. zeek:see:: sip_reply sip_header sip_all_headers sip_begin_entity sip_end_entity event sip_request%(c: connection, method: string, original_URI: string, version: string%); ## Generated for :abbr:`SIP (Session Initiation Protocol)` replies, used in Voice over IP (VoIP). @@ -31,7 +31,7 @@ event sip_request%(c: connection, method: string, original_URI: string, version: ## ## reason: Textual details for the response code. ## -## .. bro:see:: sip_request sip_header sip_all_headers sip_begin_entity sip_end_entity +## .. zeek:see:: sip_request sip_header sip_all_headers sip_begin_entity sip_end_entity event sip_reply%(c: connection, version: string, code: count, reason: string%); ## Generated for each :abbr:`SIP (Session Initiation Protocol)` header. @@ -47,7 +47,7 @@ event sip_reply%(c: connection, version: string, code: count, reason: string%); ## ## value: Header value. ## -## .. bro:see:: sip_request sip_reply sip_all_headers sip_begin_entity sip_end_entity +## .. zeek:see:: sip_request sip_reply sip_all_headers sip_begin_entity sip_end_entity event sip_header%(c: connection, is_orig: bool, name: string, value: string%); ## Generated once for all :abbr:`SIP (Session Initiation Protocol)` headers from the originator or responder. @@ -61,7 +61,7 @@ event sip_header%(c: connection, is_orig: bool, name: string, value: string%); ## ## hlist: All the headers, and their values ## -## .. bro:see:: sip_request sip_reply sip_header sip_begin_entity sip_end_entity +## .. 
zeek:see:: sip_request sip_reply sip_header sip_begin_entity sip_end_entity event sip_all_headers%(c: connection, is_orig: bool, hlist: mime_header_list%); ## Generated at the beginning of a :abbr:`SIP (Session Initiation Protocol)` message. @@ -75,7 +75,7 @@ event sip_all_headers%(c: connection, is_orig: bool, hlist: mime_header_list%); ## ## is_orig: Whether the message came from the originator. ## -## .. bro:see:: sip_request sip_reply sip_header sip_all_headers sip_end_entity +## .. zeek:see:: sip_request sip_reply sip_header sip_all_headers sip_end_entity event sip_begin_entity%(c: connection, is_orig: bool%); ## Generated at the end of a :abbr:`SIP (Session Initiation Protocol)` message. @@ -87,5 +87,5 @@ event sip_begin_entity%(c: connection, is_orig: bool%); ## ## is_orig: Whether the message came from the originator. ## -## .. bro:see:: sip_request sip_reply sip_header sip_all_headers sip_begin_entity +## .. zeek:see:: sip_request sip_reply sip_header sip_all_headers sip_begin_entity event sip_end_entity%(c: connection, is_orig: bool%); diff --git a/src/analyzer/protocol/smb/CMakeLists.txt b/src/analyzer/protocol/smb/CMakeLists.txt index b156d185bc..5fbbe190d0 100644 --- a/src/analyzer/protocol/smb/CMakeLists.txt +++ b/src/analyzer/protocol/smb/CMakeLists.txt @@ -1,11 +1,11 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) include_directories(AFTER ${CMAKE_CURRENT_BINARY_DIR}/../dce-rpc) -bro_plugin_begin(Bro SMB) -bro_plugin_cc(SMB.cc Plugin.cc) -bro_plugin_bif( +zeek_plugin_begin(Zeek SMB) +zeek_plugin_cc(SMB.cc Plugin.cc) +zeek_plugin_bif( smb1_com_check_directory.bif smb1_com_close.bif smb1_com_create_directory.bif @@ -42,7 +42,7 @@ bro_plugin_bif( consts.bif types.bif) -bro_plugin_pac( +zeek_plugin_pac( smb.pac smb-common.pac smb-strings.pac @@ -87,4 +87,4 @@ bro_plugin_pac( smb2-com-write.pac smb2-com-transform-header.pac ) -bro_plugin_end() +zeek_plugin_end() diff --git a/src/analyzer/protocol/smb/Plugin.cc b/src/analyzer/protocol/smb/Plugin.cc index 7af28aa671..788333bb7c 100644 --- a/src/analyzer/protocol/smb/Plugin.cc +++ b/src/analyzer/protocol/smb/Plugin.cc @@ -5,7 +5,7 @@ #include "SMB.h" namespace plugin { -namespace Bro_SMB { +namespace Zeek_SMB { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("Contents_SMB", 0)); plugin::Configuration config; - config.name = "Bro::SMB"; + config.name = "Zeek::SMB"; config.description = "SMB analyzer"; return config; } diff --git a/src/analyzer/protocol/smb/events.bif b/src/analyzer/protocol/smb/events.bif index d0091589fe..77746c2a09 100644 --- a/src/analyzer/protocol/smb/events.bif +++ b/src/analyzer/protocol/smb/events.bif @@ -3,7 +3,7 @@ ## up is when the drive mapping isn't seen so the analyzer is not able ## to determine whether to send the data to the files framework or to ## the DCE_RPC analyzer. This heuristic can be tuned by adding or -## removing "named pipe" names from the :bro:see:`SMB::pipe_filenames` +## removing "named pipe" names from the :zeek:see:`SMB::pipe_filenames` ## const. ## ## c: The connection. 
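
The analyzer hunks before and after this point keep repeating two mechanical idioms: BifEvent::generate_*() and ConnectionEvent*() calls are now guarded by a check that a script-level handler for the event actually exists, and event arguments are built as plain val_list values (or brace-initialized lists) handed to ConnectionEventFast() instead of heap-allocated val_list* objects passed to ConnectionEvent(). The C++ sketch below is illustrative only; it reuses names visible in the surrounding hunks (the rpc_reply event handle, BuildConnVal(), val_mgr->GetCount(), ConnectionEventFast()), while ExampleInterp and its analyzer member are hypothetical stand-ins and are not part of this patch.

	// Illustrative sketch, not part of the diff: the handler-exists guard
	// plus a brace-initialized argument list passed to ConnectionEventFast().
	void ExampleInterp::Event_Example_Reply(uint32_t xid, int reply_len)
		{
		if ( ! rpc_reply )
			// No script handler is defined, so skip building the arguments.
			return;

		analyzer->ConnectionEventFast(rpc_reply, {
			analyzer->BuildConnVal(),
			val_mgr->GetCount(xid),
			val_mgr->GetCount(reply_len),
			});
		}
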
diff --git a/src/analyzer/protocol/smb/smb1-com-nt-create-andx.pac b/src/analyzer/protocol/smb/smb1-com-nt-create-andx.pac index 0cdae1cefb..01eae48d0b 100644 --- a/src/analyzer/protocol/smb/smb1-com-nt-create-andx.pac +++ b/src/analyzer/protocol/smb/smb1-com-nt-create-andx.pac @@ -6,8 +6,10 @@ refine connection SMB_Conn += { BifConst::SMB::pipe_filenames->AsTable()->Lookup(filename->CheckString()) ) { set_tree_is_pipe(${header.tid}); - BifEvent::generate_smb_pipe_connect_heuristic(bro_analyzer(), - bro_analyzer()->Conn()); + + if ( smb_pipe_connect_heuristic ) + BifEvent::generate_smb_pipe_connect_heuristic(bro_analyzer(), + bro_analyzer()->Conn()); } if ( smb1_nt_create_andx_request ) diff --git a/src/analyzer/protocol/smb/smb1-protocol.pac b/src/analyzer/protocol/smb/smb1-protocol.pac index 4ba86d1b75..d5df7a3fca 100644 --- a/src/analyzer/protocol/smb/smb1-protocol.pac +++ b/src/analyzer/protocol/smb/smb1-protocol.pac @@ -66,9 +66,10 @@ refine connection SMB_Conn += { } else { - BifEvent::generate_smb1_error(bro_analyzer(), - bro_analyzer()->Conn(), - BuildHeaderVal(h), is_orig); + if ( smb1_error ) + BifEvent::generate_smb1_error(bro_analyzer(), + bro_analyzer()->Conn(), + BuildHeaderVal(h), is_orig); } return true; %} diff --git a/src/analyzer/protocol/smb/smb1_com_check_directory.bif b/src/analyzer/protocol/smb/smb1_com_check_directory.bif index 15feb3ad59..26f83210ff 100644 --- a/src/analyzer/protocol/smb/smb1_com_check_directory.bif +++ b/src/analyzer/protocol/smb/smb1_com_check_directory.bif @@ -10,7 +10,7 @@ ## ## directory_name: The directory name to check for existence. ## -## .. bro:see:: smb1_message smb1_check_directory_response +## .. zeek:see:: smb1_message smb1_check_directory_response event smb1_check_directory_request%(c: connection, hdr: SMB1::Header, directory_name: string%); ## Generated for :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` @@ -23,5 +23,5 @@ event smb1_check_directory_request%(c: connection, hdr: SMB1::Header, directory_ ## ## hdr: The parsed header of the :abbr:`SMB (Server Message Block)` version 1 message. ## -## .. bro:see:: smb1_message smb1_check_directory_request +## .. zeek:see:: smb1_message smb1_check_directory_request event smb1_check_directory_response%(c: connection, hdr: SMB1::Header%); \ No newline at end of file diff --git a/src/analyzer/protocol/smb/smb1_com_close.bif b/src/analyzer/protocol/smb/smb1_com_close.bif index 37958e1d19..8d2d8f0747 100644 --- a/src/analyzer/protocol/smb/smb1_com_close.bif +++ b/src/analyzer/protocol/smb/smb1_com_close.bif @@ -10,6 +10,6 @@ ## ## file_id: The file identifier being closed. ## -## .. bro:see:: smb1_message +## .. zeek:see:: smb1_message event smb1_close_request%(c: connection, hdr: SMB1::Header, file_id: count%); diff --git a/src/analyzer/protocol/smb/smb1_com_create_directory.bif b/src/analyzer/protocol/smb/smb1_com_create_directory.bif index f5e29b467b..40ddf44c8d 100644 --- a/src/analyzer/protocol/smb/smb1_com_create_directory.bif +++ b/src/analyzer/protocol/smb/smb1_com_create_directory.bif @@ -11,7 +11,7 @@ ## ## directory_name: The name of the directory to create. ## -## .. bro:see:: smb1_message smb1_create_directory_response smb1_transaction2_request +## .. 
zeek:see:: smb1_message smb1_create_directory_response smb1_transaction2_request event smb1_create_directory_request%(c: connection, hdr: SMB1::Header, directory_name: string%); ## Generated for :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` @@ -25,5 +25,5 @@ event smb1_create_directory_request%(c: connection, hdr: SMB1::Header, directory ## ## hdr: The parsed header of the :abbr:`SMB (Server Message Block)` version 1 message. ## -## .. bro:see:: smb1_message smb1_create_directory_request smb1_transaction2_request +## .. zeek:see:: smb1_message smb1_create_directory_request smb1_transaction2_request event smb1_create_directory_response%(c: connection, hdr: SMB1::Header%); \ No newline at end of file diff --git a/src/analyzer/protocol/smb/smb1_com_echo.bif b/src/analyzer/protocol/smb/smb1_com_echo.bif index 5b255af371..f95261ca3c 100644 --- a/src/analyzer/protocol/smb/smb1_com_echo.bif +++ b/src/analyzer/protocol/smb/smb1_com_echo.bif @@ -12,7 +12,7 @@ ## ## data: The data for the server to echo. ## -## .. bro:see:: smb1_message smb1_echo_response +## .. zeek:see:: smb1_message smb1_echo_response event smb1_echo_request%(c: connection, echo_count: count, data: string%); ## Generated for :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` @@ -28,5 +28,5 @@ event smb1_echo_request%(c: connection, echo_count: count, data: string%); ## ## data: The data echoed back from the client. ## -## .. bro:see:: smb1_message smb1_echo_request +## .. zeek:see:: smb1_message smb1_echo_request event smb1_echo_response%(c: connection, seq_num: count, data: string%); \ No newline at end of file diff --git a/src/analyzer/protocol/smb/smb1_com_logoff_andx.bif b/src/analyzer/protocol/smb/smb1_com_logoff_andx.bif index 88b5016328..ff5168e4dd 100644 --- a/src/analyzer/protocol/smb/smb1_com_logoff_andx.bif +++ b/src/analyzer/protocol/smb/smb1_com_logoff_andx.bif @@ -10,6 +10,6 @@ ## ## is_orig: Indicates which host sent the logoff message. ## -## .. bro:see:: smb1_message +## .. zeek:see:: smb1_message event smb1_logoff_andx%(c: connection, is_orig: bool%); diff --git a/src/analyzer/protocol/smb/smb1_com_negotiate.bif b/src/analyzer/protocol/smb/smb1_com_negotiate.bif index fdb2201c1f..7dfe02cb68 100644 --- a/src/analyzer/protocol/smb/smb1_com_negotiate.bif +++ b/src/analyzer/protocol/smb/smb1_com_negotiate.bif @@ -11,7 +11,7 @@ ## ## dialects: The SMB dialects supported by the client. ## -## .. bro:see:: smb1_message smb1_negotiate_response +## .. zeek:see:: smb1_message smb1_negotiate_response event smb1_negotiate_request%(c: connection, hdr: SMB1::Header, dialects: string_vec%); ## Generated for :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` @@ -26,7 +26,7 @@ event smb1_negotiate_request%(c: connection, hdr: SMB1::Header, dialects: string ## ## response: A record structure containing more information from the response. ## -## .. bro:see:: smb1_message smb1_negotiate_request +## .. zeek:see:: smb1_message smb1_negotiate_request event smb1_negotiate_response%(c: connection, hdr: SMB1::Header, response: SMB1::NegotiateResponse%); #### Types diff --git a/src/analyzer/protocol/smb/smb1_com_nt_cancel.bif b/src/analyzer/protocol/smb/smb1_com_nt_cancel.bif index f04fc839ec..66bbbc5fb9 100644 --- a/src/analyzer/protocol/smb/smb1_com_nt_cancel.bif +++ b/src/analyzer/protocol/smb/smb1_com_nt_cancel.bif @@ -8,5 +8,5 @@ ## ## hdr: The parsed header of the :abbr:`SMB (Server Message Block)` version 1 message. ## -## .. 
bro:see:: smb1_message +## .. zeek:see:: smb1_message event smb1_nt_cancel_request%(c: connection, hdr: SMB1::Header%); \ No newline at end of file diff --git a/src/analyzer/protocol/smb/smb1_com_nt_create_andx.bif b/src/analyzer/protocol/smb/smb1_com_nt_create_andx.bif index f8008e878b..d19d59fd50 100644 --- a/src/analyzer/protocol/smb/smb1_com_nt_create_andx.bif +++ b/src/analyzer/protocol/smb/smb1_com_nt_create_andx.bif @@ -11,7 +11,7 @@ ## ## name: The ``name`` attribute specified in the message. ## -## .. bro:see:: smb1_message smb1_nt_create_andx_response +## .. zeek:see:: smb1_message smb1_nt_create_andx_response event smb1_nt_create_andx_request%(c: connection, hdr: SMB1::Header, file_name: string%); ## Generated for :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` @@ -30,7 +30,7 @@ event smb1_nt_create_andx_request%(c: connection, hdr: SMB1::Header, file_name: ## ## times: Timestamps associated with the file in question. ## -## .. bro:see:: smb1_message smb1_nt_create_andx_request +## .. zeek:see:: smb1_message smb1_nt_create_andx_request event smb1_nt_create_andx_response%(c: connection, hdr: SMB1::Header, file_id: count, file_size: count, times: SMB::MACTimes%); diff --git a/src/analyzer/protocol/smb/smb1_com_query_information.bif b/src/analyzer/protocol/smb/smb1_com_query_information.bif index 64a5150dc9..e2f1ded6bd 100644 --- a/src/analyzer/protocol/smb/smb1_com_query_information.bif +++ b/src/analyzer/protocol/smb/smb1_com_query_information.bif @@ -11,6 +11,6 @@ ## ## filename: The filename that the client is querying. ## -## .. bro:see:: smb1_message smb1_transaction2_request +## .. zeek:see:: smb1_message smb1_transaction2_request event smb1_query_information_request%(c: connection, hdr: SMB1::Header, filename: string%); diff --git a/src/analyzer/protocol/smb/smb1_com_read_andx.bif b/src/analyzer/protocol/smb/smb1_com_read_andx.bif index 73cacf0a65..a7c04bffca 100644 --- a/src/analyzer/protocol/smb/smb1_com_read_andx.bif +++ b/src/analyzer/protocol/smb/smb1_com_read_andx.bif @@ -15,7 +15,7 @@ ## ## length: The number of bytes being requested. ## -## .. bro:see:: smb1_message smb1_read_andx_response +## .. zeek:see:: smb1_message smb1_read_andx_response event smb1_read_andx_request%(c: connection, hdr: SMB1::Header, file_id: count, offset: count, length: count%); ## Generated for :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` @@ -29,6 +29,6 @@ event smb1_read_andx_request%(c: connection, hdr: SMB1::Header, file_id: count, ## ## data_len: The length of data from the requested file. ## -## .. bro:see:: smb1_message smb1_read_andx_request +## .. zeek:see:: smb1_message smb1_read_andx_request event smb1_read_andx_response%(c: connection, hdr: SMB1::Header, data_len: count%); diff --git a/src/analyzer/protocol/smb/smb1_com_session_setup_andx.bif b/src/analyzer/protocol/smb/smb1_com_session_setup_andx.bif index 7971a4977c..b50fa5d875 100644 --- a/src/analyzer/protocol/smb/smb1_com_session_setup_andx.bif +++ b/src/analyzer/protocol/smb/smb1_com_session_setup_andx.bif @@ -9,7 +9,7 @@ ## ## request: The parsed request data of the SMB message. See init-bare for more details. ## -## .. bro:see:: smb1_message smb1_session_setup_andx_response +## .. 
zeek:see:: smb1_message smb1_session_setup_andx_response event smb1_session_setup_andx_request%(c: connection, hdr: SMB1::Header, request: SMB1::SessionSetupAndXRequest%); ## Generated for :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` @@ -23,7 +23,7 @@ event smb1_session_setup_andx_request%(c: connection, hdr: SMB1::Header, request ## ## response: The parsed response data of the SMB message. See init-bare for more details. ## -## .. bro:see:: smb1_message smb1_session_setup_andx_request +## .. zeek:see:: smb1_message smb1_session_setup_andx_request event smb1_session_setup_andx_response%(c: connection, hdr: SMB1::Header, response: SMB1::SessionSetupAndXResponse%); #### Types diff --git a/src/analyzer/protocol/smb/smb1_com_transaction.bif b/src/analyzer/protocol/smb/smb1_com_transaction.bif index 0c411b55c3..cd80a668dc 100644 --- a/src/analyzer/protocol/smb/smb1_com_transaction.bif +++ b/src/analyzer/protocol/smb/smb1_com_transaction.bif @@ -18,7 +18,7 @@ ## ## data: content of the SMB_Data.Trans_Data field ## -## .. bro:see:: smb1_message smb1_transaction2_request +## .. zeek:see:: smb1_message smb1_transaction2_request event smb1_transaction_request%(c: connection, hdr: SMB1::Header, name: string, sub_cmd: count, parameters: string, data: string%); ## Generated for :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` diff --git a/src/analyzer/protocol/smb/smb1_com_transaction2.bif b/src/analyzer/protocol/smb/smb1_com_transaction2.bif index aa30aeebe1..48e2f7cdd6 100644 --- a/src/analyzer/protocol/smb/smb1_com_transaction2.bif +++ b/src/analyzer/protocol/smb/smb1_com_transaction2.bif @@ -15,7 +15,7 @@ ## ## sub_cmd: The sub command, some are parsed and have their own events. ## -## .. bro:see:: smb1_message smb1_trans2_find_first2_request smb1_trans2_query_path_info_request +## .. zeek:see:: smb1_message smb1_trans2_find_first2_request smb1_trans2_query_path_info_request ## smb1_trans2_get_dfs_referral_request smb1_transaction_request event smb1_transaction2_request%(c: connection, hdr: SMB1::Header, args: SMB1::Trans2_Args, sub_cmd: count%); @@ -31,7 +31,7 @@ event smb1_transaction2_request%(c: connection, hdr: SMB1::Header, args: SMB1::T ## ## args: A record data structure with arguments given to the command. ## -## .. bro:see:: smb1_message smb1_transaction2_request smb1_trans2_query_path_info_request +## .. zeek:see:: smb1_message smb1_transaction2_request smb1_trans2_query_path_info_request ## smb1_trans2_get_dfs_referral_request event smb1_trans2_find_first2_request%(c: connection, hdr: SMB1::Header, args: SMB1::Find_First2_Request_Args%); @@ -47,7 +47,7 @@ event smb1_trans2_find_first2_request%(c: connection, hdr: SMB1::Header, args: S ## ## file_name: File name the request is in reference to. ## -## .. bro:see:: smb1_message smb1_transaction2_request smb1_trans2_find_first2_request +## .. zeek:see:: smb1_message smb1_transaction2_request smb1_trans2_find_first2_request ## smb1_trans2_get_dfs_referral_request event smb1_trans2_query_path_info_request%(c: connection, hdr: SMB1::Header, file_name: string%); @@ -63,7 +63,7 @@ event smb1_trans2_query_path_info_request%(c: connection, hdr: SMB1::Header, fil ## ## file_name: File name the request is in reference to. ## -## .. bro:see:: smb1_message smb1_transaction2_request smb1_trans2_find_first2_request +## .. 
zeek:see:: smb1_message smb1_transaction2_request smb1_trans2_find_first2_request ## smb1_trans2_query_path_info_request event smb1_trans2_get_dfs_referral_request%(c: connection, hdr: SMB1::Header, file_name: string%); diff --git a/src/analyzer/protocol/smb/smb1_com_tree_connect_andx.bif b/src/analyzer/protocol/smb/smb1_com_tree_connect_andx.bif index 16aeb2bbb6..95274af115 100644 --- a/src/analyzer/protocol/smb/smb1_com_tree_connect_andx.bif +++ b/src/analyzer/protocol/smb/smb1_com_tree_connect_andx.bif @@ -12,7 +12,7 @@ ## ## service: The ``service`` attribute specified in the message. ## -## .. bro:see:: smb1_message smb1_tree_connect_andx_response +## .. zeek:see:: smb1_message smb1_tree_connect_andx_response event smb1_tree_connect_andx_request%(c: connection, hdr: SMB1::Header, path: string, service: string%); ## Generated for :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` @@ -29,6 +29,6 @@ event smb1_tree_connect_andx_request%(c: connection, hdr: SMB1::Header, path: st ## ## native_file_system: The file system of the remote server as indicate by the server. ## -## .. bro:see:: smb1_message smb1_tree_connect_andx_request +## .. zeek:see:: smb1_message smb1_tree_connect_andx_request event smb1_tree_connect_andx_response%(c: connection, hdr: SMB1::Header, service: string, native_file_system: string%); diff --git a/src/analyzer/protocol/smb/smb1_com_tree_disconnect.bif b/src/analyzer/protocol/smb/smb1_com_tree_disconnect.bif index 493ee66238..db94e1ff2a 100644 --- a/src/analyzer/protocol/smb/smb1_com_tree_disconnect.bif +++ b/src/analyzer/protocol/smb/smb1_com_tree_disconnect.bif @@ -10,6 +10,6 @@ ## ## is_orig: True if the message was from the originator. ## -## .. bro:see:: smb1_message +## .. zeek:see:: smb1_message event smb1_tree_disconnect%(c: connection, hdr: SMB1::Header, is_orig: bool%); diff --git a/src/analyzer/protocol/smb/smb1_com_write_andx.bif b/src/analyzer/protocol/smb/smb1_com_write_andx.bif index d30c8af2ba..6bf086e978 100644 --- a/src/analyzer/protocol/smb/smb1_com_write_andx.bif +++ b/src/analyzer/protocol/smb/smb1_com_write_andx.bif @@ -13,7 +13,7 @@ ## ## data: The data being written. ## -## .. bro:see:: smb1_message smb1_write_andx_response +## .. zeek:see:: smb1_message smb1_write_andx_response event smb1_write_andx_request%(c: connection, hdr: SMB1::Header, file_id: count, offset: count, data_len: count%); ## Generated for :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` @@ -28,5 +28,5 @@ event smb1_write_andx_request%(c: connection, hdr: SMB1::Header, file_id: count, ## ## written_bytes: The number of bytes the server reported having actually written. ## -## .. bro:see:: smb1_message smb1_write_andx_request +## .. zeek:see:: smb1_message smb1_write_andx_request event smb1_write_andx_response%(c: connection, hdr: SMB1::Header, written_bytes: count%); diff --git a/src/analyzer/protocol/smb/smb1_events.bif b/src/analyzer/protocol/smb/smb1_events.bif index 4746af34a4..c797f21ff5 100644 --- a/src/analyzer/protocol/smb/smb1_events.bif +++ b/src/analyzer/protocol/smb/smb1_events.bif @@ -2,7 +2,7 @@ ## messages. ## ## See `Wikipedia `__ for more information about the -## :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` protocol. Bro's +## :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` protocol. 
Zeek's ## :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` analyzer parses ## both :abbr:`SMB (Server Message Block)`-over-:abbr:`NetBIOS (Network Basic Input/Output System)` on ## ports 138/139 and :abbr:`SMB (Server Message Block)`-over-TCP on port 445. @@ -14,7 +14,7 @@ ## is_orig: True if the message was sent by the originator of the underlying ## transport-level connection. ## -## .. bro:see:: smb2_message +## .. zeek:see:: smb2_message event smb1_message%(c: connection, hdr: SMB1::Header, is_orig: bool%); ## Generated when there is an :abbr:`SMB (Server Message Block)` version 1 response with no message body. @@ -23,7 +23,7 @@ event smb1_message%(c: connection, hdr: SMB1::Header, is_orig: bool%); ## ## hdr: The parsed header of the :abbr:`SMB (Server Message Block)` message. ## -## .. bro:see:: smb1_message +## .. zeek:see:: smb1_message event smb1_empty_response%(c: connection, hdr: SMB1::Header%); ## Generated for :abbr:`SMB (Server Message Block)` version 1 messages @@ -37,6 +37,6 @@ event smb1_empty_response%(c: connection, hdr: SMB1::Header%); ## is_orig: True if the message was sent by the originator of the underlying ## transport-level connection. ## -## .. bro:see:: smb1_message +## .. zeek:see:: smb1_message event smb1_error%(c: connection, hdr: SMB1::Header, is_orig: bool%); diff --git a/src/analyzer/protocol/smb/smb2-com-create.pac b/src/analyzer/protocol/smb/smb2-com-create.pac index 2f7dfc4d26..d3df094f51 100644 --- a/src/analyzer/protocol/smb/smb2-com-create.pac +++ b/src/analyzer/protocol/smb/smb2-com-create.pac @@ -7,8 +7,10 @@ refine connection SMB_Conn += { BifConst::SMB::pipe_filenames->AsTable()->Lookup(filename->CheckString()) ) { set_tree_is_pipe(${h.tree_id}); - BifEvent::generate_smb_pipe_connect_heuristic(bro_analyzer(), - bro_analyzer()->Conn()); + + if ( smb_pipe_connect_heuristic ) + BifEvent::generate_smb_pipe_connect_heuristic(bro_analyzer(), + bro_analyzer()->Conn()); } if ( smb2_create_request ) diff --git a/src/analyzer/protocol/smb/smb2-com-write.pac b/src/analyzer/protocol/smb/smb2-com-write.pac index 177a3a84bd..c117fc793d 100644 --- a/src/analyzer/protocol/smb/smb2-com-write.pac +++ b/src/analyzer/protocol/smb/smb2-com-write.pac @@ -24,6 +24,15 @@ refine connection SMB_Conn += { function proc_smb2_write_response(h: SMB2_Header, val: SMB2_write_response) : bool %{ + + if ( smb2_write_response ) + { + BifEvent::generate_smb2_write_response(bro_analyzer(), + bro_analyzer()->Conn(), + BuildSMB2HeaderVal(h), + ${val.write_count}); + } + return true; %} diff --git a/src/analyzer/protocol/smb/smb2_com_close.bif b/src/analyzer/protocol/smb/smb2_com_close.bif index 5ac4afa1db..4f8d802c63 100644 --- a/src/analyzer/protocol/smb/smb2_com_close.bif +++ b/src/analyzer/protocol/smb/smb2_com_close.bif @@ -10,7 +10,7 @@ ## ## file_name: The SMB2 GUID of the file being closed. ## -## .. bro:see:: smb2_message smb2_close_response +## .. zeek:see:: smb2_message smb2_close_response event smb2_close_request%(c: connection, hdr: SMB2::Header, file_id: SMB2::GUID%); ## Generated for :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` @@ -25,7 +25,7 @@ event smb2_close_request%(c: connection, hdr: SMB2::Header, file_id: SMB2::GUID% ## ## response: A record of attributes returned from the server from the close. ## -## .. bro:see:: smb2_message smb2_close_request +## .. 
zeek:see:: smb2_message smb2_close_request event smb2_close_response%(c: connection, hdr: SMB2::Header, response: SMB2::CloseResponse%); diff --git a/src/analyzer/protocol/smb/smb2_com_create.bif b/src/analyzer/protocol/smb/smb2_com_create.bif index 9a77878e9f..7d9c4e4895 100644 --- a/src/analyzer/protocol/smb/smb2_com_create.bif +++ b/src/analyzer/protocol/smb/smb2_com_create.bif @@ -10,7 +10,7 @@ ## ## request: A record with more information related to the request. ## -## .. bro:see:: smb2_message smb2_create_response +## .. zeek:see:: smb2_message smb2_create_response event smb2_create_request%(c: connection, hdr: SMB2::Header, request: SMB2::CreateRequest%); ## Generated for :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` @@ -25,7 +25,7 @@ event smb2_create_request%(c: connection, hdr: SMB2::Header, request: SMB2::Crea ## ## response: A record with more information related to the response. ## -## .. bro:see:: smb2_message smb2_create_request +## .. zeek:see:: smb2_message smb2_create_request event smb2_create_response%(c: connection, hdr: SMB2::Header, response: SMB2::CreateResponse%); #### Types diff --git a/src/analyzer/protocol/smb/smb2_com_negotiate.bif b/src/analyzer/protocol/smb/smb2_com_negotiate.bif index 80c7c1aea5..2202064933 100644 --- a/src/analyzer/protocol/smb/smb2_com_negotiate.bif +++ b/src/analyzer/protocol/smb/smb2_com_negotiate.bif @@ -10,7 +10,7 @@ ## ## dialects: A vector of the client's supported dialects. ## -## .. bro:see:: smb2_message smb2_negotiate_response +## .. zeek:see:: smb2_message smb2_negotiate_response event smb2_negotiate_request%(c: connection, hdr: SMB2::Header, dialects: index_vec%); ## Generated for :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` @@ -25,7 +25,7 @@ event smb2_negotiate_request%(c: connection, hdr: SMB2::Header, dialects: index_ ## ## response: The negotiate response data structure. ## -## .. bro:see:: smb2_message smb2_negotiate_request +## .. zeek:see:: smb2_message smb2_negotiate_request event smb2_negotiate_response%(c: connection, hdr: SMB2::Header, response: SMB2::NegotiateResponse%); #### Types diff --git a/src/analyzer/protocol/smb/smb2_com_read.bif b/src/analyzer/protocol/smb/smb2_com_read.bif index 4ccc8d7788..b14874b38b 100644 --- a/src/analyzer/protocol/smb/smb2_com_read.bif +++ b/src/analyzer/protocol/smb/smb2_com_read.bif @@ -14,5 +14,5 @@ ## ## length: The number of bytes of the file being read. ## -## .. bro:see:: smb2_message +## .. zeek:see:: smb2_message event smb2_read_request%(c: connection, hdr: SMB2::Header, file_id: SMB2::GUID, offset: count, length: count%); diff --git a/src/analyzer/protocol/smb/smb2_com_session_setup.bif b/src/analyzer/protocol/smb/smb2_com_session_setup.bif index 99430d5ac9..b3dbe6cc57 100644 --- a/src/analyzer/protocol/smb/smb2_com_session_setup.bif +++ b/src/analyzer/protocol/smb/smb2_com_session_setup.bif @@ -11,7 +11,7 @@ ## ## request: A record containing more information related to the request. ## -## .. bro:see:: smb2_message smb2_session_setup_response +## .. zeek:see:: smb2_message smb2_session_setup_response event smb2_session_setup_request%(c: connection, hdr: SMB2::Header, request: SMB2::SessionSetupRequest%); ## Generated for :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` @@ -26,7 +26,7 @@ event smb2_session_setup_request%(c: connection, hdr: SMB2::Header, request: SMB ## ## response: A record containing more information related to the response. ## -## .. 
bro:see:: smb2_message smb2_session_setup_request +## .. zeek:see:: smb2_message smb2_session_setup_request event smb2_session_setup_response%(c: connection, hdr: SMB2::Header, response: SMB2::SessionSetupResponse%); #### Types diff --git a/src/analyzer/protocol/smb/smb2_com_set_info.bif b/src/analyzer/protocol/smb/smb2_com_set_info.bif index 1f6d9386f8..37a0b8900f 100644 --- a/src/analyzer/protocol/smb/smb2_com_set_info.bif +++ b/src/analyzer/protocol/smb/smb2_com_set_info.bif @@ -11,7 +11,7 @@ ## ## dst_filename: The filename to rename the file into. ## -## .. bro:see:: smb2_message smb2_file_delete smb2_file_sattr +## .. zeek:see:: smb2_message smb2_file_delete smb2_file_sattr event smb2_file_rename%(c: connection, hdr: SMB2::Header, file_id: SMB2::GUID, dst_filename: string%); ## Generated for :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` @@ -28,7 +28,7 @@ event smb2_file_rename%(c: connection, hdr: SMB2::Header, file_id: SMB2::GUID, d ## delete_pending: A boolean value to indicate that a file should be deleted ## when it's closed if set to T. ## -## .. bro:see:: smb2_message smb2_file_rename smb2_file_sattr +## .. zeek:see:: smb2_message smb2_file_rename smb2_file_sattr event smb2_file_delete%(c: connection, hdr: SMB2::Header, file_id: SMB2::GUID, delete_pending: bool%); ## Generated for :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` @@ -46,7 +46,7 @@ event smb2_file_delete%(c: connection, hdr: SMB2::Header, file_id: SMB2::GUID, d ## ## attrs: File attributes. ## -## .. bro:see:: smb2_message smb2_file_rename smb2_file_delete +## .. zeek:see:: smb2_message smb2_file_rename smb2_file_delete event smb2_file_sattr%(c: connection, hdr: SMB2::Header, file_id: SMB2::GUID, times: SMB::MACTimes, attrs: SMB2::FileAttrs%); # TODO - Not implemented @@ -60,7 +60,7 @@ event smb2_file_sattr%(c: connection, hdr: SMB2::Header, file_id: SMB2::GUID, ti # # request: A record containing more information related to the request. # -# .. bro:see:: smb2_message smb2_file_rename smb2_file_delete +# .. zeek:see:: smb2_message smb2_file_rename smb2_file_delete # event smb2_set_info_request%(c: connection, hdr: SMB2::Header, request: SMB2::SetInfoRequest%); # # type SMB2::SetInfoRequest: record; diff --git a/src/analyzer/protocol/smb/smb2_com_transform_header.bif b/src/analyzer/protocol/smb/smb2_com_transform_header.bif index 1506fe3222..629ae27841 100644 --- a/src/analyzer/protocol/smb/smb2_com_transform_header.bif +++ b/src/analyzer/protocol/smb/smb2_com_transform_header.bif @@ -8,7 +8,7 @@ ## ## hdr: The parsed transformed header message, which is starting with \xfdSMB and different from SMB1 and SMB2 headers. ## -## .. bro:see:: smb2_message +## .. zeek:see:: smb2_message event smb2_transform_header%(c: connection, hdr: SMB2::Transform_header%); type SMB2::Transform_header: record; diff --git a/src/analyzer/protocol/smb/smb2_com_tree_connect.bif b/src/analyzer/protocol/smb/smb2_com_tree_connect.bif index 78978f3971..877f5b2c4c 100644 --- a/src/analyzer/protocol/smb/smb2_com_tree_connect.bif +++ b/src/analyzer/protocol/smb/smb2_com_tree_connect.bif @@ -10,7 +10,7 @@ ## ## path: Path of the requested tree. ## -## .. bro:see:: smb2_message smb2_tree_connect_response +## .. 
zeek:see:: smb2_message smb2_tree_connect_response event smb2_tree_connect_request%(c: connection, hdr: SMB2::Header, path: string%); ## Generated for :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` @@ -25,7 +25,7 @@ event smb2_tree_connect_request%(c: connection, hdr: SMB2::Header, path: string% ## ## response: A record with more information related to the response. ## -## .. bro:see:: smb2_message smb2_tree_connect_request +## .. zeek:see:: smb2_message smb2_tree_connect_request event smb2_tree_connect_response%(c: connection, hdr: SMB2::Header, response: SMB2::TreeConnectResponse%); type SMB2::TreeConnectResponse: record; diff --git a/src/analyzer/protocol/smb/smb2_com_tree_disconnect.bif b/src/analyzer/protocol/smb/smb2_com_tree_disconnect.bif index fdcd5d9d8b..6c7f3b7c2d 100644 --- a/src/analyzer/protocol/smb/smb2_com_tree_disconnect.bif +++ b/src/analyzer/protocol/smb/smb2_com_tree_disconnect.bif @@ -6,7 +6,7 @@ ## ## hdr: The parsed header of the :abbr:`SMB (Server Message Block)` version 2 message. ## -## .. bro:see:: smb2_message +## .. zeek:see:: smb2_message event smb2_tree_disconnect_request%(c: connection, hdr: SMB2::Header%); @@ -18,5 +18,5 @@ event smb2_tree_disconnect_request%(c: connection, hdr: SMB2::Header%); ## ## hdr: The parsed header of the :abbr:`SMB (Server Message Block)` version 2 message. ## -## .. bro:see:: smb2_message +## .. zeek:see:: smb2_message event smb2_tree_disconnect_response%(c: connection, hdr: SMB2::Header%); diff --git a/src/analyzer/protocol/smb/smb2_com_write.bif b/src/analyzer/protocol/smb/smb2_com_write.bif index 90efce049c..71df322090 100644 --- a/src/analyzer/protocol/smb/smb2_com_write.bif +++ b/src/analyzer/protocol/smb/smb2_com_write.bif @@ -14,5 +14,20 @@ ## ## length: The number of bytes of the file being written. ## -## .. bro:see:: smb2_message +## .. zeek:see:: smb2_message event smb2_write_request%(c: connection, hdr: SMB2::Header, file_id: SMB2::GUID, offset: count, length: count%); + +## Generated for :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` +## version 2 requests of type *write*. This is sent by the server in response to a write request or +## named pipe on the server. +## +## For more information, see MS-SMB2:2.2.22 +## +## c: The connection. +## +## hdr: The parsed header of the :abbr:`SMB (Server Message Block)` version 2 message. +## +## length: The number of bytes of the file being written. +## +## .. zeek:see:: smb2_message +event smb2_write_response%(c: connection, hdr: SMB2::Header, length: count%); diff --git a/src/analyzer/protocol/smb/smb2_events.bif b/src/analyzer/protocol/smb/smb2_events.bif index a8a2c439fc..2071a0600e 100644 --- a/src/analyzer/protocol/smb/smb2_events.bif +++ b/src/analyzer/protocol/smb/smb2_events.bif @@ -2,7 +2,7 @@ ## version 2 messages. ## ## See `Wikipedia `__ for more information about the -## :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` protocol. Bro's +## :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` protocol. Zeek's ## :abbr:`SMB (Server Message Block)`/:abbr:`CIFS (Common Internet File System)` analyzer parses ## both :abbr:`SMB (Server Message Block)`-over-:abbr:`NetBIOS (Network Basic Input/Output System)` on ## ports 138/139 and :abbr:`SMB (Server Message Block)`-over-TCP on port 445. @@ -13,5 +13,5 @@ ## ## is_orig: True if the message came from the originator side. ## -## .. bro:see:: smb1_message +## .. 
zeek:see:: smb1_message event smb2_message%(c: connection, hdr: SMB2::Header, is_orig: bool%); diff --git a/src/analyzer/protocol/smtp/CMakeLists.txt b/src/analyzer/protocol/smtp/CMakeLists.txt index 82918656a0..3ffebc66a8 100644 --- a/src/analyzer/protocol/smtp/CMakeLists.txt +++ b/src/analyzer/protocol/smtp/CMakeLists.txt @@ -1,10 +1,10 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro SMTP) -bro_plugin_cc(SMTP.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_bif(functions.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek SMTP) +zeek_plugin_cc(SMTP.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_bif(functions.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/smtp/Plugin.cc b/src/analyzer/protocol/smtp/Plugin.cc index ae0ef0e71a..784da4d860 100644 --- a/src/analyzer/protocol/smtp/Plugin.cc +++ b/src/analyzer/protocol/smtp/Plugin.cc @@ -6,7 +6,7 @@ #include "SMTP.h" namespace plugin { -namespace Bro_SMTP { +namespace Zeek_SMTP { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("SMTP", ::analyzer::smtp::SMTP_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::SMTP"; + config.name = "Zeek::SMTP"; config.description = "SMTP analyzer"; return config; } diff --git a/src/analyzer/protocol/smtp/SMTP.cc b/src/analyzer/protocol/smtp/SMTP.cc index 6b92484431..2ba011b8ef 100644 --- a/src/analyzer/protocol/smtp/SMTP.cc +++ b/src/analyzer/protocol/smtp/SMTP.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include @@ -220,11 +220,11 @@ void SMTP_Analyzer::ProcessLine(int length, const char* line, bool orig) if ( smtp_data && ! 
skip_data ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(new StringVal(data_len, line)); - ConnectionEvent(smtp_data, vl); + ConnectionEventFast(smtp_data, { + BuildConnVal(), + val_mgr->GetBool(orig), + new StringVal(data_len, line), + }); } } @@ -350,15 +350,14 @@ void SMTP_Analyzer::ProcessLine(int length, const char* line, bool orig) break; } - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig)); - vl->append(val_mgr->GetCount(reply_code)); - vl->append(new StringVal(cmd)); - vl->append(new StringVal(end_of_line - line, line)); - vl->append(val_mgr->GetBool((pending_reply > 0))); - - ConnectionEvent(smtp_reply, vl); + ConnectionEventFast(smtp_reply, { + BuildConnVal(), + val_mgr->GetBool(orig), + val_mgr->GetCount(reply_code), + new StringVal(cmd), + new StringVal(end_of_line - line, line), + val_mgr->GetBool((pending_reply > 0)), + }); } } @@ -411,10 +410,8 @@ void SMTP_Analyzer::StartTLS() if ( ssl ) AddChildAnalyzer(ssl); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - - ConnectionEvent(smtp_starttls, vl); + if ( smtp_starttls ) + ConnectionEventFast(smtp_starttls, {BuildConnVal()}); } @@ -856,14 +853,14 @@ void SMTP_Analyzer::RequestEvent(int cmd_len, const char* cmd, int arg_len, const char* arg) { ProtocolConfirmation(); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(orig_is_sender)); - vl->append((new StringVal(cmd_len, cmd))->ToUpper()); - vl->append(new StringVal(arg_len, arg)); - - ConnectionEvent(smtp_request, vl); + if ( smtp_request ) + ConnectionEventFast(smtp_request, { + BuildConnVal(), + val_mgr->GetBool(orig_is_sender), + (new StringVal(cmd_len, cmd))->ToUpper(), + new StringVal(arg_len, arg), + }); } void SMTP_Analyzer::Unexpected(const int is_sender, const char* msg, @@ -874,17 +871,16 @@ void SMTP_Analyzer::Unexpected(const int is_sender, const char* msg, if ( smtp_unexpected ) { - val_list* vl = new val_list; int is_orig = is_sender; if ( ! orig_is_sender ) is_orig = ! is_orig; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(is_orig)); - vl->append(new StringVal(msg)); - vl->append(new StringVal(detail_len, detail)); - - ConnectionEvent(smtp_unexpected, vl); + ConnectionEventFast(smtp_unexpected, { + BuildConnVal(), + val_mgr->GetBool(is_orig), + new StringVal(msg), + new StringVal(detail_len, detail), + }); } } diff --git a/src/analyzer/protocol/smtp/events.bif b/src/analyzer/protocol/smtp/events.bif index 898e98e0d1..3dfd82b75e 100644 --- a/src/analyzer/protocol/smtp/events.bif +++ b/src/analyzer/protocol/smtp/events.bif @@ -16,11 +16,11 @@ ## ## arg: The request command's arguments. ## -## .. bro:see:: mime_all_data mime_all_headers mime_begin_entity mime_content_hash +## .. zeek:see:: mime_all_data mime_all_headers mime_begin_entity mime_content_hash ## mime_end_entity mime_entity_data mime_event mime_one_header mime_segment_data ## smtp_data smtp_reply ## -## .. note:: Bro does not support the newer ETRN extension yet. +## .. note:: Zeek does not support the newer ETRN extension yet. event smtp_request%(c: connection, is_orig: bool, command: string, arg: string%); ## Generated for server-side SMTP commands. @@ -47,11 +47,11 @@ event smtp_request%(c: connection, is_orig: bool, command: string, arg: string%) ## line. If so, further events will be raised and a handler may want to ## reassemble the pieces before processing the response any further. ## -## .. 
bro:see:: mime_all_data mime_all_headers mime_begin_entity mime_content_hash +## .. zeek:see:: mime_all_data mime_all_headers mime_begin_entity mime_content_hash ## mime_end_entity mime_entity_data mime_event mime_one_header mime_segment_data ## smtp_data smtp_request ## -## .. note:: Bro doesn't support the newer ETRN extension yet. +## .. note:: Zeek doesn't support the newer ETRN extension yet. event smtp_reply%(c: connection, is_orig: bool, code: count, cmd: string, msg: string, cont_resp: bool%); ## Generated for DATA transmitted on SMTP sessions. This event is raised for @@ -70,7 +70,7 @@ event smtp_reply%(c: connection, is_orig: bool, code: count, cmd: string, msg: s ## data: The raw data. Note that the size of each chunk is undefined and ## depends on specifics of the underlying TCP connection. ## -## .. bro:see:: mime_all_data mime_all_headers mime_begin_entity mime_content_hash +## .. zeek:see:: mime_all_data mime_all_headers mime_begin_entity mime_content_hash ## mime_end_entity mime_entity_data mime_event mime_one_header mime_segment_data ## smtp_reply smtp_request skip_smtp_data ## @@ -96,7 +96,7 @@ event smtp_data%(c: connection, is_orig: bool, data: string%); ## ## detail: The actual SMTP line triggering the event. ## -## .. bro:see:: smtp_data smtp_request smtp_reply +## .. zeek:see:: smtp_data smtp_request smtp_reply event smtp_unexpected%(c: connection, is_orig: bool, msg: string, detail: string%); ## Generated if a connection switched to using TLS using STARTTLS or X-ANONYMOUSTLS. diff --git a/src/analyzer/protocol/smtp/functions.bif b/src/analyzer/protocol/smtp/functions.bif index 8630685096..a5670c7d64 100644 --- a/src/analyzer/protocol/smtp/functions.bif +++ b/src/analyzer/protocol/smtp/functions.bif @@ -7,7 +7,7 @@ ## ## c: The SMTP connection. ## -## .. bro:see:: skip_http_entity_data +## .. 
zeek:see:: skip_http_entity_data function skip_smtp_data%(c: connection%): any %{ analyzer::Analyzer* sa = c->FindAnalyzer("SMTP"); diff --git a/src/analyzer/protocol/snmp/CMakeLists.txt b/src/analyzer/protocol/snmp/CMakeLists.txt index 43cbf45ac4..988949bbad 100644 --- a/src/analyzer/protocol/snmp/CMakeLists.txt +++ b/src/analyzer/protocol/snmp/CMakeLists.txt @@ -1,11 +1,11 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro SNMP) -bro_plugin_cc(SNMP.cc Plugin.cc) -bro_plugin_bif(types.bif) -bro_plugin_bif(events.bif) -bro_plugin_pac(snmp.pac snmp-protocol.pac snmp-analyzer.pac ../asn1/asn1.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek SNMP) +zeek_plugin_cc(SNMP.cc Plugin.cc) +zeek_plugin_bif(types.bif) +zeek_plugin_bif(events.bif) +zeek_plugin_pac(snmp.pac snmp-protocol.pac snmp-analyzer.pac ../asn1/asn1.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/snmp/Plugin.cc b/src/analyzer/protocol/snmp/Plugin.cc index 30f690ec96..d5c6e98309 100644 --- a/src/analyzer/protocol/snmp/Plugin.cc +++ b/src/analyzer/protocol/snmp/Plugin.cc @@ -5,7 +5,7 @@ #include "SNMP.h" namespace plugin { -namespace Bro_SNMP { +namespace Zeek_SNMP { class Plugin : public plugin::Plugin { public: @@ -14,7 +14,7 @@ public: AddComponent(new ::analyzer::Component("SNMP", ::analyzer::snmp::SNMP_Analyzer::InstantiateAnalyzer)); plugin::Configuration config; - config.name = "Bro::SNMP"; + config.name = "Zeek::SNMP"; config.description = "SNMP analyzer"; return config; } diff --git a/src/analyzer/protocol/socks/CMakeLists.txt b/src/analyzer/protocol/socks/CMakeLists.txt index 5157c8d368..93e111814a 100644 --- a/src/analyzer/protocol/socks/CMakeLists.txt +++ b/src/analyzer/protocol/socks/CMakeLists.txt @@ -1,10 +1,10 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro SOCKS) -bro_plugin_cc(SOCKS.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_pac(socks.pac socks-protocol.pac socks-analyzer.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek SOCKS) +zeek_plugin_cc(SOCKS.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_pac(socks.pac socks-protocol.pac socks-analyzer.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/socks/Plugin.cc b/src/analyzer/protocol/socks/Plugin.cc index 661e39efbc..8efbeeb23e 100644 --- a/src/analyzer/protocol/socks/Plugin.cc +++ b/src/analyzer/protocol/socks/Plugin.cc @@ -6,7 +6,7 @@ #include "SOCKS.h" namespace plugin { -namespace Bro_SOCKS { +namespace Zeek_SOCKS { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("SOCKS", ::analyzer::socks::SOCKS_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::SOCKS"; + config.name = "Zeek::SOCKS"; config.description = "SOCKS analyzer"; return config; } diff --git a/src/analyzer/protocol/socks/socks-analyzer.pac b/src/analyzer/protocol/socks/socks-analyzer.pac index f625851d0a..b0ec62e2b9 100644 --- a/src/analyzer/protocol/socks/socks-analyzer.pac +++ b/src/analyzer/protocol/socks/socks-analyzer.pac @@ -22,18 +22,22 @@ refine connection SOCKS_Conn += { function socks4_request(request: SOCKS4_Request): bool %{ - RecordVal* sa = new RecordVal(socks_address); - sa->Assign(0, new AddrVal(htonl(${request.addr}))); - if ( ${request.v4a} ) - sa->Assign(1, array_to_string(${request.name})); + if ( socks_request ) + { + RecordVal* sa = new RecordVal(socks_address); + 
sa->Assign(0, new AddrVal(htonl(${request.addr}))); - BifEvent::generate_socks_request(bro_analyzer(), - bro_analyzer()->Conn(), - 4, - ${request.command}, - sa, - val_mgr->GetPort(${request.port}, TRANSPORT_TCP), - array_to_string(${request.user})); + if ( ${request.v4a} ) + sa->Assign(1, array_to_string(${request.name})); + + BifEvent::generate_socks_request(bro_analyzer(), + bro_analyzer()->Conn(), + 4, + ${request.command}, + sa, + val_mgr->GetPort(${request.port}, TRANSPORT_TCP), + array_to_string(${request.user})); + } static_cast(bro_analyzer())->EndpointDone(true); @@ -42,15 +46,18 @@ refine connection SOCKS_Conn += { function socks4_reply(reply: SOCKS4_Reply): bool %{ - RecordVal* sa = new RecordVal(socks_address); - sa->Assign(0, new AddrVal(htonl(${reply.addr}))); + if ( socks_reply ) + { + RecordVal* sa = new RecordVal(socks_address); + sa->Assign(0, new AddrVal(htonl(${reply.addr}))); - BifEvent::generate_socks_reply(bro_analyzer(), - bro_analyzer()->Conn(), - 4, - ${reply.status}, - sa, - val_mgr->GetPort(${reply.port}, TRANSPORT_TCP)); + BifEvent::generate_socks_reply(bro_analyzer(), + bro_analyzer()->Conn(), + 4, + ${reply.status}, + sa, + val_mgr->GetPort(${reply.port}, TRANSPORT_TCP)); + } bro_analyzer()->ProtocolConfirmation(); static_cast(bro_analyzer())->EndpointDone(false); @@ -97,13 +104,16 @@ refine connection SOCKS_Conn += { return false; } - BifEvent::generate_socks_request(bro_analyzer(), - bro_analyzer()->Conn(), - 5, - ${request.command}, - sa, - val_mgr->GetPort(${request.port}, TRANSPORT_TCP), - val_mgr->GetEmptyString()); + if ( socks_request ) + BifEvent::generate_socks_request(bro_analyzer(), + bro_analyzer()->Conn(), + 5, + ${request.command}, + sa, + val_mgr->GetPort(${request.port}, TRANSPORT_TCP), + val_mgr->GetEmptyString()); + else + Unref(sa); static_cast(bro_analyzer())->EndpointDone(true); @@ -136,12 +146,15 @@ refine connection SOCKS_Conn += { return false; } - BifEvent::generate_socks_reply(bro_analyzer(), - bro_analyzer()->Conn(), - 5, - ${reply.reply}, - sa, - val_mgr->GetPort(${reply.port}, TRANSPORT_TCP)); + if ( socks_reply ) + BifEvent::generate_socks_reply(bro_analyzer(), + bro_analyzer()->Conn(), + 5, + ${reply.reply}, + sa, + val_mgr->GetPort(${reply.port}, TRANSPORT_TCP)); + else + Unref(sa); bro_analyzer()->ProtocolConfirmation(); static_cast(bro_analyzer())->EndpointDone(false); @@ -150,6 +163,9 @@ refine connection SOCKS_Conn += { function socks5_auth_request_userpass(request: SOCKS5_Auth_Request_UserPass_v1): bool %{ + if ( ! 
socks_login_userpass_request ) + return true; + StringVal* user = new StringVal(${request.username}.length(), (const char*) ${request.username}.begin()); StringVal* pass = new StringVal(${request.password}.length(), (const char*) ${request.password}.begin()); @@ -173,9 +189,10 @@ refine connection SOCKS_Conn += { function socks5_auth_reply_userpass(reply: SOCKS5_Auth_Reply_UserPass_v1): bool %{ - BifEvent::generate_socks_login_userpass_reply(bro_analyzer(), - bro_analyzer()->Conn(), - ${reply.code}); + if ( socks_login_userpass_reply ) + BifEvent::generate_socks_login_userpass_reply(bro_analyzer(), + bro_analyzer()->Conn(), + ${reply.code}); return true; %} diff --git a/src/analyzer/protocol/ssh/CMakeLists.txt b/src/analyzer/protocol/ssh/CMakeLists.txt index b7d8b50b4a..a7cb99b353 100644 --- a/src/analyzer/protocol/ssh/CMakeLists.txt +++ b/src/analyzer/protocol/ssh/CMakeLists.txt @@ -1,11 +1,11 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro SSH) - bro_plugin_cc(SSH.cc Plugin.cc) - bro_plugin_bif(types.bif) - bro_plugin_bif(events.bif) - bro_plugin_pac(ssh.pac ssh-analyzer.pac ssh-protocol.pac consts.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek SSH) + zeek_plugin_cc(SSH.cc Plugin.cc) + zeek_plugin_bif(types.bif) + zeek_plugin_bif(events.bif) + zeek_plugin_pac(ssh.pac ssh-analyzer.pac ssh-protocol.pac consts.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/ssh/Plugin.cc b/src/analyzer/protocol/ssh/Plugin.cc index be5d2f428b..7b6ac67c88 100644 --- a/src/analyzer/protocol/ssh/Plugin.cc +++ b/src/analyzer/protocol/ssh/Plugin.cc @@ -4,7 +4,7 @@ #include "SSH.h" namespace plugin { - namespace Bro_SSH { + namespace Zeek_SSH { class Plugin : public plugin::Plugin { public: @@ -13,7 +13,7 @@ namespace plugin { AddComponent(new ::analyzer::Component("SSH", ::analyzer::SSH::SSH_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::SSH"; + config.name = "Zeek::SSH"; config.description = "Secure Shell analyzer"; return config; } diff --git a/src/analyzer/protocol/ssh/events.bif b/src/analyzer/protocol/ssh/events.bif index cb6c5e248e..6ff62e501d 100644 --- a/src/analyzer/protocol/ssh/events.bif +++ b/src/analyzer/protocol/ssh/events.bif @@ -7,7 +7,7 @@ ## ## version: The identification string ## -## .. bro:see:: ssh_client_version ssh_auth_successful ssh_auth_failed +## .. zeek:see:: ssh_client_version ssh_auth_successful ssh_auth_failed ## ssh_auth_result ssh_auth_attempted ssh_capabilities ## ssh2_server_host_key ssh1_server_host_key ssh_server_host_key ## ssh_encrypted_packet ssh2_dh_server_params ssh2_gss_error @@ -23,7 +23,7 @@ event ssh_server_version%(c: connection, version: string%); ## ## version: The identification string ## -## .. bro:see:: ssh_server_version ssh_auth_successful ssh_auth_failed +## .. zeek:see:: ssh_server_version ssh_auth_successful ssh_auth_failed ## ssh_auth_result ssh_auth_attempted ssh_capabilities ## ssh2_server_host_key ssh1_server_host_key ssh_server_host_key ## ssh_encrypted_packet ssh2_dh_server_params ssh2_gss_error @@ -44,7 +44,7 @@ event ssh_client_version%(c: connection, version: string%); ## :abbr:`SSH (Secure Shell)` protocol provides a mechanism for ## unauthenticated access, which some servers support. ## -## .. bro:see:: ssh_server_version ssh_client_version ssh_auth_failed +## .. 
zeek:see:: ssh_server_version ssh_client_version ssh_auth_failed ## ssh_auth_result ssh_auth_attempted ssh_capabilities ## ssh2_server_host_key ssh1_server_host_key ssh_server_host_key ## ssh_encrypted_packet ssh2_dh_server_params ssh2_gss_error @@ -74,7 +74,7 @@ event ssh_auth_successful%(c: connection, auth_method_none: bool%); ## authenticated: This is true if the analyzer detected a ## successful connection from the authentication attempt. ## -## .. bro:see:: ssh_server_version ssh_client_version +## .. zeek:see:: ssh_server_version ssh_client_version ## ssh_auth_successful ssh_auth_failed ssh_auth_result ## ssh_capabilities ssh2_server_host_key ssh1_server_host_key ## ssh_server_host_key ssh_encrypted_packet ssh2_dh_server_params @@ -96,7 +96,7 @@ event ssh_auth_attempted%(c: connection, authenticated: bool%); ## capabilities: The list of algorithms and languages that the sender ## advertises support for, in order of preference. ## -## .. bro:see:: ssh_server_version ssh_client_version +## .. zeek:see:: ssh_server_version ssh_client_version ## ssh_auth_successful ssh_auth_failed ssh_auth_result ## ssh_auth_attempted ssh2_server_host_key ssh1_server_host_key ## ssh_server_host_key ssh_encrypted_packet ssh2_dh_server_params @@ -113,7 +113,7 @@ event ssh_capabilities%(c: connection, cookie: string, capabilities: SSH::Capabi ## key: The server's public host key. Note that this is the public key ## itself, and not just the fingerprint or hash. ## -## .. bro:see:: ssh_server_version ssh_client_version +## .. zeek:see:: ssh_server_version ssh_client_version ## ssh_auth_successful ssh_auth_failed ssh_auth_result ## ssh_auth_attempted ssh_capabilities ssh1_server_host_key ## ssh_server_host_key ssh_encrypted_packet ssh2_dh_server_params @@ -131,7 +131,7 @@ event ssh2_server_host_key%(c: connection, key: string%); ## ## e: The exponent for the serer's public host key. ## -## .. bro:see:: ssh_server_version ssh_client_version +## .. zeek:see:: ssh_server_version ssh_client_version ## ssh_auth_successful ssh_auth_failed ssh_auth_result ## ssh_auth_attempted ssh_capabilities ssh2_server_host_key ## ssh_server_host_key ssh_encrypted_packet ssh2_dh_server_params @@ -141,7 +141,7 @@ event ssh1_server_host_key%(c: connection, p: string, e: string%); ## This event is generated when an :abbr:`SSH (Secure Shell)` ## encrypted packet is seen. This event is not handled by default, but ## is provided for heuristic analysis scripts. Note that you have to set -## :bro:id:`SSH::disable_analyzer_after_detection` to false to use this +## :zeek:id:`SSH::disable_analyzer_after_detection` to false to use this ## event. This carries a performance penalty. ## ## c: The connection over which the :abbr:`SSH (Secure Shell)` @@ -153,7 +153,7 @@ event ssh1_server_host_key%(c: connection, p: string, e: string%); ## len: The length of the :abbr:`SSH (Secure Shell)` payload, in ## bytes. Note that this ignores reassembly, as this is unknown. ## -## .. bro:see:: ssh_server_version ssh_client_version +## .. zeek:see:: ssh_server_version ssh_client_version ## ssh_auth_successful ssh_auth_failed ssh_auth_result ## ssh_auth_attempted ssh_capabilities ssh2_server_host_key ## ssh1_server_host_key ssh_server_host_key ssh2_dh_server_params @@ -171,7 +171,7 @@ event ssh_encrypted_packet%(c: connection, orig: bool, len: count%); ## ## q: The DH generator. ## -## .. bro:see:: ssh_server_version ssh_client_version +## .. 
zeek:see:: ssh_server_version ssh_client_version ## ssh_auth_successful ssh_auth_failed ssh_auth_result ## ssh_auth_attempted ssh_capabilities ssh2_server_host_key ## ssh1_server_host_key ssh_server_host_key ssh_encrypted_packet @@ -191,7 +191,7 @@ event ssh2_dh_server_params%(c: connection, p: string, q: string%); ## ## err_msg: Detailed human-readable error message ## -## .. bro:see:: ssh_server_version ssh_client_version +## .. zeek:see:: ssh_server_version ssh_client_version ## ssh_auth_successful ssh_auth_failed ssh_auth_result ## ssh_auth_attempted ssh_capabilities ssh2_server_host_key ## ssh1_server_host_key ssh_server_host_key ssh_encrypted_packet @@ -211,7 +211,7 @@ event ssh2_gss_error%(c: connection, major_status: count, minor_status: count, e ## ## q: The ephemeral public key ## -## .. bro:see:: ssh_server_version ssh_client_version +## .. zeek:see:: ssh_server_version ssh_client_version ## ssh_auth_successful ssh_auth_failed ssh_auth_result ## ssh_auth_attempted ssh_capabilities ssh2_server_host_key ## ssh1_server_host_key ssh_server_host_key ssh_encrypted_packet diff --git a/src/analyzer/protocol/ssl/CMakeLists.txt b/src/analyzer/protocol/ssl/CMakeLists.txt index 14e41892c8..47093a978e 100644 --- a/src/analyzer/protocol/ssl/CMakeLists.txt +++ b/src/analyzer/protocol/ssl/CMakeLists.txt @@ -1,23 +1,24 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro SSL) -bro_plugin_cc(SSL.cc DTLS.cc Plugin.cc) -bro_plugin_bif(types.bif) -bro_plugin_bif(events.bif) -bro_plugin_bif(functions.bif) -bro_plugin_pac(tls-handshake.pac tls-handshake-protocol.pac tls-handshake-analyzer.pac ssl-defs.pac +zeek_plugin_begin(Zeek SSL) +zeek_plugin_cc(SSL.cc DTLS.cc Plugin.cc) +zeek_plugin_bif(types.bif) +zeek_plugin_bif(events.bif) +zeek_plugin_bif(functions.bif) +zeek_plugin_bif(consts.bif) +zeek_plugin_pac(tls-handshake.pac tls-handshake-protocol.pac tls-handshake-analyzer.pac ssl-defs.pac proc-client-hello.pac proc-server-hello.pac proc-certificate.pac tls-handshake-signed_certificate_timestamp.pac ) -bro_plugin_pac(ssl.pac ssl-dtls-analyzer.pac ssl-analyzer.pac ssl-dtls-protocol.pac ssl-protocol.pac ssl-defs.pac +zeek_plugin_pac(ssl.pac ssl-dtls-analyzer.pac ssl-analyzer.pac ssl-dtls-protocol.pac ssl-protocol.pac ssl-defs.pac proc-client-hello.pac - proc-server-hello.pac + proc-server-hello.pac proc-certificate.pac ) -bro_plugin_pac(dtls.pac ssl-dtls-analyzer.pac dtls-analyzer.pac ssl-dtls-protocol.pac dtls-protocol.pac ssl-defs.pac) -bro_plugin_end() +zeek_plugin_pac(dtls.pac ssl-dtls-analyzer.pac dtls-analyzer.pac ssl-dtls-protocol.pac dtls-protocol.pac ssl-defs.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/ssl/Plugin.cc b/src/analyzer/protocol/ssl/Plugin.cc index 85b65aedfd..60d6b0d4a3 100644 --- a/src/analyzer/protocol/ssl/Plugin.cc +++ b/src/analyzer/protocol/ssl/Plugin.cc @@ -7,7 +7,7 @@ #include "DTLS.h" namespace plugin { -namespace Bro_SSL { +namespace Zeek_SSL { class Plugin : public plugin::Plugin { public: @@ -17,7 +17,7 @@ public: AddComponent(new ::analyzer::Component("DTLS", ::analyzer::dtls::DTLS_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::SSL"; + config.name = "Zeek::SSL"; config.description = "SSL/TLS and DTLS analyzers"; return config; } diff --git a/src/analyzer/protocol/ssl/consts.bif b/src/analyzer/protocol/ssl/consts.bif new file mode 100644 index 0000000000..9dcbaa65d5 --- /dev/null +++ b/src/analyzer/protocol/ssl/consts.bif @@ -0,0 
+1,2 @@ +const SSL::dtls_max_version_errors: count; +const SSL::dtls_max_reported_version_errors: count; diff --git a/src/analyzer/protocol/ssl/dtls-protocol.pac b/src/analyzer/protocol/ssl/dtls-protocol.pac index 771aa267b3..70897a585c 100644 --- a/src/analyzer/protocol/ssl/dtls-protocol.pac +++ b/src/analyzer/protocol/ssl/dtls-protocol.pac @@ -45,15 +45,40 @@ type Handshake(rec: SSLRecord) = record { refine connection SSL_Conn += { + %member{ + uint16 invalid_version_count_; + uint16 reported_errors_; + %} + + %init{ + invalid_version_count_ = 0; + reported_errors_ = 0; + %} + function dtls_version_ok(version: uint16): uint16 %{ switch ( version ) { case DTLSv10: case DTLSv12: + // Reset only to 0 once we have seen a client hello. + // This means the connection gets a limited amount of valid/invalid + // packets before a client hello has to be seen - which seems reasonable. + if ( bro_analyzer()->ProtocolConfirmed() ) + invalid_version_count_ = 0; return true; default: - bro_analyzer()->ProtocolViolation(fmt("Invalid version in DTLS connection. Packet reported version: %d", version)); + invalid_version_count_++; + + if ( bro_analyzer()->ProtocolConfirmed() ) + { + reported_errors_++; + if ( reported_errors_ <= BifConst::SSL::dtls_max_reported_version_errors ) + bro_analyzer()->ProtocolViolation(fmt("Invalid version in DTLS connection. Packet reported version: %d", version)); + } + + if ( invalid_version_count_ > BifConst::SSL::dtls_max_version_errors ) + bro_analyzer()->SetSkip(true); return false; } %} diff --git a/src/analyzer/protocol/ssl/dtls.pac b/src/analyzer/protocol/ssl/dtls.pac index b08dd61f8f..b2aa34d5c5 100644 --- a/src/analyzer/protocol/ssl/dtls.pac +++ b/src/analyzer/protocol/ssl/dtls.pac @@ -10,6 +10,7 @@ namespace analyzer { namespace dtls { class DTLS_Analyzer; } } typedef analyzer::dtls::DTLS_Analyzer* DTLSAnalyzer; #include "DTLS.h" +#include "consts.bif.h" %} extern type DTLSAnalyzer; diff --git a/src/analyzer/protocol/ssl/events.bif b/src/analyzer/protocol/ssl/events.bif index 2ef675554f..c935a0b21a 100644 --- a/src/analyzer/protocol/ssl/events.bif +++ b/src/analyzer/protocol/ssl/events.bif @@ -1,5 +1,5 @@ ## Generated for an SSL/TLS client's initial *hello* message. SSL/TLS sessions -## start with an unencrypted handshake, and Bro extracts as much information out +## start with an unencrypted handshake, and Zeek extracts as much information out ## of that as it can. This event provides access to the initial information ## sent by the client. ## @@ -10,7 +10,7 @@ ## ## version: The protocol version as extracted from the client's message. The ## values are standardized as part of the SSL/TLS protocol. The -## :bro:id:`SSL::version_strings` table maps them to descriptive names. +## :zeek:id:`SSL::version_strings` table maps them to descriptive names. ## ## record_version: TLS version given in the record layer of the message. ## Set to 0 for SSLv2. @@ -25,12 +25,12 @@ ## ## ciphers: The list of ciphers the client offered to use. The values are ## standardized as part of the SSL/TLS protocol. The -## :bro:id:`SSL::cipher_desc` table maps them to descriptive names. +## :zeek:id:`SSL::cipher_desc` table maps them to descriptive names. ## ## comp_methods: The list of compression methods that the client offered to use. ## This value is not sent in TLSv1.3 or SSLv2. ## -## .. bro:see:: ssl_alert ssl_established ssl_extension ssl_server_hello +## .. 
zeek:see:: ssl_alert ssl_established ssl_extension ssl_server_hello ## ssl_session_ticket_handshake x509_certificate ssl_handshake_message ## ssl_change_cipher_spec ## ssl_dh_client_params ssl_ecdh_server_params ssl_ecdh_client_params @@ -38,7 +38,7 @@ event ssl_client_hello%(c: connection, version: count, record_version: count, possible_ts: time, client_random: string, session_id: string, ciphers: index_vec, comp_methods: index_vec%); ## Generated for an SSL/TLS server's initial *hello* message. SSL/TLS sessions -## start with an unencrypted handshake, and Bro extracts as much information out +## start with an unencrypted handshake, and Zeek extracts as much information out ## of that as it can. This event provides access to the initial information ## sent by the client. ## @@ -49,58 +49,61 @@ event ssl_client_hello%(c: connection, version: count, record_version: count, po ## ## version: The protocol version as extracted from the server's message. ## The values are standardized as part of the SSL/TLS protocol. The -## :bro:id:`SSL::version_strings` table maps them to descriptive names. +## :zeek:id:`SSL::version_strings` table maps them to descriptive names. ## ## record_version: TLS version given in the record layer of the message. ## Set to 0 for SSLv2. ## ## possible_ts: The current time as sent by the server. Note that SSL/TLS does ## not require clocks to be set correctly, so treat with care. This value -## is not sent in TLSv1.3. +## is meaningless in SSLv2 and TLSv1.3. ## ## session_id: The session ID as sent back by the server (if any). This value is not ## sent in TLSv1.3. ## ## server_random: The random value sent by the server. For version 2 connections, -## the connection-id is returned. +## the connection-id is returned. Note - the full 32 bytes are included in +## server_random. This means that the 4 bytes present in possible_ts are repeated; +## if you do not want this behavior ignore the first 4 bytes. ## ## cipher: The cipher chosen by the server. The values are standardized as part -## of the SSL/TLS protocol. The :bro:id:`SSL::cipher_desc` table maps +## of the SSL/TLS protocol. The :zeek:id:`SSL::cipher_desc` table maps ## them to descriptive names. ## ## comp_method: The compression method chosen by the client. The values are ## standardized as part of the SSL/TLS protocol. This value is not ## sent in TLSv1.3 or SSLv2. ## -## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_extension -## ssl_session_ticket_handshake x509_certificate ssl_server_curve +## .. zeek:see:: ssl_alert ssl_client_hello ssl_established ssl_extension +## ssl_session_ticket_handshake x509_certificate ## ssl_dh_server_params ssl_handshake_message ssl_change_cipher_spec ## ssl_dh_client_params ssl_ecdh_server_params ssl_ecdh_client_params ## ssl_rsa_client_pms event ssl_server_hello%(c: connection, version: count, record_version: count, possible_ts: time, server_random: string, session_id: string, cipher: count, comp_method: count%); ## Generated for SSL/TLS extensions seen in an initial handshake. SSL/TLS -## sessions start with an unencrypted handshake, and Bro extracts as much +## sessions start with an unencrypted handshake, and Zeek extracts as much ## information out of that as it can. This event provides access to any ## extensions either side sends as part of an extended *hello* message. ## -## Note that Bro offers more specialized events for a few extensions. +## Note that Zeek offers more specialized events for a few extensions. ## ## c: The connection. 
## ## is_orig: True if event is raised for originator side of the connection. ## ## code: The numerical code of the extension. The values are standardized as -## part of the SSL/TLS protocol. The :bro:id:`SSL::extensions` table maps +## part of the SSL/TLS protocol. The :zeek:id:`SSL::extensions` table maps ## them to descriptive names. ## ## val: The raw extension value that was sent in the message. ## -## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello +## .. zeek:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello ## ssl_session_ticket_handshake ssl_extension_ec_point_formats ## ssl_extension_elliptic_curves ssl_extension_application_layer_protocol_negotiation ## ssl_extension_server_name ssl_extension_signature_algorithm ssl_extension_key_share ## ssl_extension_psk_key_exchange_modes ssl_extension_supported_versions +## ssl_extension_pre_shared_key_server_hello ssl_extension_pre_shared_key_client_hello event ssl_extension%(c: connection, is_orig: bool, code: count, val: string%); ## Generated for an SSL/TLS Elliptic Curves extension. This TLS extension is @@ -113,13 +116,14 @@ event ssl_extension%(c: connection, is_orig: bool, code: count, val: string%); ## ## curves: List of supported elliptic curves. ## -## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello +## .. zeek:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello ## ssl_session_ticket_handshake ssl_extension ## ssl_extension_ec_point_formats ssl_extension_application_layer_protocol_negotiation -## ssl_extension_server_name ssl_server_curve ssl_extension_signature_algorithm +## ssl_extension_server_name ssl_extension_signature_algorithm ## ssl_extension_key_share ssl_rsa_client_pms ssl_server_signature ## ssl_extension_psk_key_exchange_modes ssl_extension_supported_versions ## ssl_dh_client_params ssl_ecdh_server_params ssl_ecdh_client_params +## ssl_extension_pre_shared_key_server_hello ssl_extension_pre_shared_key_client_hello event ssl_extension_elliptic_curves%(c: connection, is_orig: bool, curves: index_vec%); ## Generated for an SSL/TLS Supported Point Formats extension. This TLS extension @@ -133,14 +137,15 @@ event ssl_extension_elliptic_curves%(c: connection, is_orig: bool, curves: index ## ## point_formats: List of supported point formats. ## -## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello +## .. zeek:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello ## ssl_session_ticket_handshake ssl_extension ## ssl_extension_elliptic_curves ssl_extension_application_layer_protocol_negotiation -## ssl_extension_server_name ssl_server_curve ssl_extension_signature_algorithm +## ssl_extension_server_name ssl_extension_signature_algorithm ## ssl_extension_key_share ## ssl_extension_psk_key_exchange_modes ssl_extension_supported_versions ## ssl_dh_client_params ssl_ecdh_server_params ssl_ecdh_client_params ## ssl_rsa_client_pms ssl_server_signature +## ssl_extension_pre_shared_key_server_hello ssl_extension_pre_shared_key_client_hello event ssl_extension_ec_point_formats%(c: connection, is_orig: bool, point_formats: index_vec%); ## Generated for an Signature Algorithms extension. This TLS extension @@ -154,13 +159,14 @@ event ssl_extension_ec_point_formats%(c: connection, is_orig: bool, point_format ## ## signature_algorithms: List of supported signature and hash algorithm pairs. ## -## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello +## .. 
zeek:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello ## ssl_session_ticket_handshake ssl_extension ## ssl_extension_elliptic_curves ssl_extension_application_layer_protocol_negotiation -## ssl_extension_server_name ssl_server_curve ssl_extension_key_share +## ssl_extension_server_name ssl_extension_key_share ## ssl_extension_psk_key_exchange_modes ssl_extension_supported_versions ## ssl_dh_client_params ssl_ecdh_server_params ssl_ecdh_client_params ## ssl_rsa_client_pms ssl_server_signature +## ssl_extension_pre_shared_key_server_hello ssl_extension_pre_shared_key_client_hello event ssl_extension_signature_algorithm%(c: connection, is_orig: bool, signature_algorithms: signature_and_hashalgorithm_vec%); ## Generated for a Key Share extension. This TLS extension is defined in TLS1.3-draft16 @@ -169,38 +175,59 @@ event ssl_extension_signature_algorithm%(c: connection, is_orig: bool, signature ## ## c: The connection. ## -## is_orig: True if event is raised for originator side of the connection. +## is_orig: True if event is raised for the originator side of the connection. ## ## curves: List of supported/chosen named groups. ## -## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello +## .. zeek:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello ## ssl_session_ticket_handshake ssl_extension ## ssl_extension_elliptic_curves ssl_extension_application_layer_protocol_negotiation -## ssl_extension_server_name ssl_server_curve +## ssl_extension_server_name ## ssl_extension_psk_key_exchange_modes ssl_extension_supported_versions ## ssl_dh_client_params ssl_ecdh_server_params ssl_ecdh_client_params ## ssl_rsa_client_pms ssl_server_signature +## ssl_extension_pre_shared_key_server_hello ssl_extension_pre_shared_key_client_hello event ssl_extension_key_share%(c: connection, is_orig: bool, curves: index_vec%); -## Generated if a named curve is chosen by the server for an SSL/TLS connection. -## The curve is sent by the server in the ServerKeyExchange message as defined -## in :rfc:`4492`, in case an ECDH or ECDHE cipher suite is chosen. +## Generated for the pre-shared key extension as it is sent in the TLS 1.3 client hello. +## +## The extension lists the identities the client is willing to negotiate with the server; +## they can either be pre-shared or be based on previous handshakes. ## ## c: The connection. ## -## curve: The curve. +## is_orig: True if event is raised for the originator side of the connection ## -## .. note:: This event is deprecated and superseded by the ssl_ecdh_server_params -## event. This event will be removed in a future version of Bro. +## identities: A list of the identities the client is willing to negotiate with the server. ## -## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello +## binders: A series of HMAC values; for computation, see the TLS 1.3 RFC. +## +## .. 
zeek:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello ## ssl_session_ticket_handshake ssl_extension ## ssl_extension_elliptic_curves ssl_extension_application_layer_protocol_negotiation -## ssl_extension_server_name ssl_extension_key_share +## ssl_extension_server_name ## ssl_extension_psk_key_exchange_modes ssl_extension_supported_versions ## ssl_dh_client_params ssl_ecdh_server_params ssl_ecdh_client_params -## ssl_rsa_client_pms ssl_server_signature -event ssl_server_curve%(c: connection, curve: count%) &deprecated; +## ssl_rsa_client_pms ssl_server_signature ssl_extension_pre_shared_key_server_hello +event ssl_extension_pre_shared_key_client_hello%(c: connection, is_orig: bool, identities: psk_identity_vec, binders: string_vec%); + +## Generated for the pre-shared key extension as it is sent in the TLS 1.3 server hello. +## +## c: The connection. +## +## is_orig: True if event is raised for the originator side of the connection +## +## selected_identity: The identity the server chose as a 0-based index into the identities +## the client sent. +## +## .. zeek:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello +## ssl_session_ticket_handshake ssl_extension +## ssl_extension_elliptic_curves ssl_extension_application_layer_protocol_negotiation +## ssl_extension_server_name +## ssl_extension_psk_key_exchange_modes ssl_extension_supported_versions +## ssl_dh_client_params ssl_ecdh_server_params ssl_ecdh_client_params +## ssl_rsa_client_pms ssl_server_signature ssl_extension_pre_shared_key_client_hello +event ssl_extension_pre_shared_key_server_hello%(c: connection, is_orig: bool, selected_identity: count%); ## Generated if a server uses an ECDH-anon or ECDHE cipher suite using a named curve ## This event contains the named curve name and the server ECDH parameters contained @@ -212,8 +239,8 @@ event ssl_server_curve%(c: connection, curve: count%) &deprecated; ## ## point: The server's ECDH public key. ## -## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello -## ssl_session_ticket_handshake ssl_server_curve ssl_server_signature +## .. zeek:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello +## ssl_session_ticket_handshake ssl_server_signature ## ssl_dh_client_params ssl_ecdh_client_params ssl_rsa_client_pms event ssl_ecdh_server_params%(c: connection, curve: count, point: string%); @@ -229,8 +256,8 @@ event ssl_ecdh_server_params%(c: connection, curve: count, point: string%); ## ## Ys: The server's DH public key. ## -## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello -## ssl_session_ticket_handshake ssl_server_curve ssl_server_signature +## .. zeek:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello +## ssl_session_ticket_handshake ssl_server_signature ## ssl_dh_client_params ssl_ecdh_server_params ssl_ecdh_client_params ## ssl_rsa_client_pms event ssl_dh_server_params%(c: connection, p: string, q: string, Ys: string%); @@ -252,8 +279,8 @@ event ssl_dh_server_params%(c: connection, p: string, q: string, Ys: string%); ## corresponding to the certified public key in the server's certificate ## message is used for signing. ## -## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello -## ssl_session_ticket_handshake ssl_server_curve ssl_rsa_client_pms +## .. 
zeek:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello +## ssl_session_ticket_handshake ssl_rsa_client_pms ## ssl_dh_client_params ssl_ecdh_server_params ssl_ecdh_client_params event ssl_server_signature%(c: connection, signature_and_hashalgorithm: SSL::SignatureAndHashAlgorithm, signature: string%); @@ -265,8 +292,8 @@ event ssl_server_signature%(c: connection, signature_and_hashalgorithm: SSL::Sig ## ## point: The client's ECDH public key. ## -## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello -## ssl_session_ticket_handshake ssl_server_curve ssl_server_signature +## .. zeek:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello +## ssl_session_ticket_handshake ssl_server_signature ## ssl_dh_client_params ssl_ecdh_server_params ssl_rsa_client_pms event ssl_ecdh_client_params%(c: connection, point: string%); @@ -278,8 +305,8 @@ event ssl_ecdh_client_params%(c: connection, point: string%); ## ## Yc: The client's DH public key. ## -## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello -## ssl_session_ticket_handshake ssl_server_curve ssl_server_signature +## .. zeek:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello +## ssl_session_ticket_handshake ssl_server_signature ## ssl_ecdh_server_params ssl_ecdh_client_params ssl_rsa_client_pms event ssl_dh_client_params%(c: connection, Yc: string%); @@ -291,8 +318,8 @@ event ssl_dh_client_params%(c: connection, Yc: string%); ## ## pms: The encrypted pre-master secret. ## -## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello -## ssl_session_ticket_handshake ssl_server_curve ssl_server_signature +## .. zeek:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello +## ssl_session_ticket_handshake ssl_server_signature ## ssl_dh_client_params ssl_ecdh_server_params ssl_ecdh_client_params event ssl_rsa_client_pms%(c: connection, pms: string%); @@ -309,12 +336,13 @@ event ssl_rsa_client_pms%(c: connection, pms: string%); ## ## protocols: List of supported application layer protocols. ## -## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello +## .. zeek:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello ## ssl_session_ticket_handshake ssl_extension ## ssl_extension_elliptic_curves ssl_extension_ec_point_formats ## ssl_extension_server_name ssl_extension_key_share ## ssl_extension_psk_key_exchange_modes ssl_extension_supported_versions ## ssl_extension_signed_certificate_timestamp +## ssl_extension_pre_shared_key_server_hello ssl_extension_pre_shared_key_client_hello event ssl_extension_application_layer_protocol_negotiation%(c: connection, is_orig: bool, protocols: string_vec%); ## Generated for an SSL/TLS Server Name extension. This SSL/TLS extension is @@ -329,13 +357,14 @@ event ssl_extension_application_layer_protocol_negotiation%(c: connection, is_or ## ## names: A list of server names (DNS hostnames). ## -## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello +## .. 
zeek:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello ## ssl_session_ticket_handshake ssl_extension ## ssl_extension_elliptic_curves ssl_extension_ec_point_formats ## ssl_extension_application_layer_protocol_negotiation ## ssl_extension_key_share ## ssl_extension_psk_key_exchange_modes ssl_extension_supported_versions ## ssl_extension_signed_certificate_timestamp +## ssl_extension_pre_shared_key_server_hello ssl_extension_pre_shared_key_client_hello event ssl_extension_server_name%(c: connection, is_orig: bool, names: string_vec%); ## Generated for the signed_certificate_timestamp TLS extension as defined in @@ -359,13 +388,14 @@ event ssl_extension_server_name%(c: connection, is_orig: bool, names: string_vec ## ## signature: signature part of the digitally_signed struct ## -## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello +## .. zeek:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello ## ssl_session_ticket_handshake ssl_extension ## ssl_extension_elliptic_curves ssl_extension_ec_point_formats ## ssl_extension_server_name ssl_extension_key_share ## ssl_extension_psk_key_exchange_modes ssl_extension_supported_versions ## ssl_extension_application_layer_protocol_negotiation ## x509_ocsp_ext_signed_certificate_timestamp sct_verify +## ssl_extension_pre_shared_key_server_hello ssl_extension_pre_shared_key_client_hello event ssl_extension_signed_certificate_timestamp%(c: connection, is_orig: bool, version: count, logid: string, timestamp: count, signature_and_hashalgorithm: SSL::SignatureAndHashAlgorithm, signature: string%); ## Generated for a TLS Supported Versions extension. This TLS extension @@ -379,12 +409,13 @@ event ssl_extension_signed_certificate_timestamp%(c: connection, is_orig: bool, ## ## versions: List of supported TLS versions. ## -## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello +## .. zeek:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello ## ssl_session_ticket_handshake ssl_extension ## ssl_extension_elliptic_curves ssl_extension_ec_point_formats ## ssl_extension_application_layer_protocol_negotiation ## ssl_extension_key_share ssl_extension_server_name ## ssl_extension_psk_key_exchange_modes ssl_extension_signed_certificate_timestamp +## ssl_extension_pre_shared_key_server_hello ssl_extension_pre_shared_key_client_hello event ssl_extension_supported_versions%(c: connection, is_orig: bool, versions: index_vec%); ## Generated for a TLS Pre-Shared Key Exchange Modes extension. This TLS extension is defined @@ -396,16 +427,17 @@ event ssl_extension_supported_versions%(c: connection, is_orig: bool, versions: ## ## modes: List of supported Pre-Shared Key Exchange Modes. ## -## .. bro:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello +## .. zeek:see:: ssl_alert ssl_client_hello ssl_established ssl_server_hello ## ssl_session_ticket_handshake ssl_extension ## ssl_extension_elliptic_curves ssl_extension_ec_point_formats ## ssl_extension_application_layer_protocol_negotiation ## ssl_extension_key_share ssl_extension_server_name ## ssl_extension_supported_versions ssl_extension_signed_certificate_timestamp +## ssl_extension_pre_shared_key_server_hello ssl_extension_pre_shared_key_client_hello event ssl_extension_psk_key_exchange_modes%(c: connection, is_orig: bool, modes: index_vec%); ## Generated at the end of an SSL/TLS handshake.
SSL/TLS sessions start with -## an unencrypted handshake, and Bro extracts as much information out of that +## an unencrypted handshake, and Zeek extracts as much information out of that ## as it can. This event signals the time when an SSL/TLS has finished the ## handshake and its endpoints consider it as fully established. Typically, ## everything from now on will be encrypted. @@ -415,12 +447,12 @@ event ssl_extension_psk_key_exchange_modes%(c: connection, is_orig: bool, modes: ## ## c: The connection. ## -## .. bro:see:: ssl_alert ssl_client_hello ssl_extension ssl_server_hello +## .. zeek:see:: ssl_alert ssl_client_hello ssl_extension ssl_server_hello ## ssl_session_ticket_handshake x509_certificate event ssl_established%(c: connection%); ## Generated for SSL/TLS alert records. SSL/TLS sessions start with an -## unencrypted handshake, and Bro extracts as much information out of that as +## unencrypted handshake, and Zeek extracts as much information out of that as ## it can. If during that handshake, an endpoint encounters a fatal error, it ## sends an *alert* record, that in turn triggers this event. After an *alert*, ## any endpoint may close the connection immediately. @@ -438,13 +470,13 @@ event ssl_established%(c: connection%); ## desc: A numerical value identifying the cause of the *alert*. The values are ## defined as part of the SSL/TLS protocol. ## -## .. bro:see:: ssl_client_hello ssl_established ssl_extension ssl_server_hello +## .. zeek:see:: ssl_client_hello ssl_established ssl_extension ssl_server_hello ## ssl_session_ticket_handshake event ssl_alert%(c: connection, is_orig: bool, level: count, desc: count%); ## Generated for SSL/TLS handshake messages that are a part of the ## stateless-server session resumption mechanism. SSL/TLS sessions start with -## an unencrypted handshake, and Bro extracts as much information out of that +## an unencrypted handshake, and Zeek extracts as much information out of that ## as it can. This event is raised when an SSL/TLS server passes a session ## ticket to the client that can later be used for resuming the session. The ## mechanism is described in :rfc:`4507`. @@ -459,7 +491,7 @@ event ssl_alert%(c: connection, is_orig: bool, level: count, desc: count%); ## ## ticket: The raw ticket data. ## -## .. bro:see:: ssl_client_hello ssl_established ssl_extension ssl_server_hello +## .. zeek:see:: ssl_client_hello ssl_established ssl_extension ssl_server_hello ## ssl_alert event ssl_session_ticket_handshake%(c: connection, ticket_lifetime_hint: count, ticket: string%); @@ -481,14 +513,14 @@ event ssl_session_ticket_handshake%(c: connection, ticket_lifetime_hint: count, ## payload: payload contained in the heartbeat message. Size can differ from ## payload_length, if payload_length and actual packet length disagree. ## -## .. bro:see:: ssl_client_hello ssl_established ssl_extension ssl_server_hello +## .. zeek:see:: ssl_client_hello ssl_established ssl_extension ssl_server_hello ## ssl_alert ssl_encrypted_data event ssl_heartbeat%(c: connection, is_orig: bool, length: count, heartbeat_type: count, payload_length: count, payload: string%); ## Generated for SSL/TLS messages that are sent before full session encryption ## starts. Note that "full encryption" is a bit fuzzy, especially for TLSv1.3; ## here this event will be raised for early packets that are already using -## pre-encryption. # This event is also used by Bro internally to determine if +## pre-encryption. 
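As a usage sketch for the alert event documented above (illustrative only, not part of this patch), a handler can single out fatal alerts; in the TLS alert protocol, level 1 is a warning and level 2 is fatal:

    event ssl_alert(c: connection, is_orig: bool, level: count, desc: count)
        {
        if ( level == 2 )
            print fmt("fatal TLS alert %d on %s", desc, c$uid);
        }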
# This event is also used by Zeek internally to determine if ## the connection has been completely setup. This is necessary as TLS 1.3 does ## not have CCS anymore. ## @@ -504,14 +536,14 @@ event ssl_heartbeat%(c: connection, is_orig: bool, length: count, heartbeat_type ## ## length: length of the entire message. ## -## .. bro:see:: ssl_client_hello ssl_established ssl_extension ssl_server_hello +## .. zeek:see:: ssl_client_hello ssl_established ssl_extension ssl_server_hello ## ssl_alert ssl_heartbeat event ssl_plaintext_data%(c: connection, is_orig: bool, record_version: count, content_type: count, length: count%); ## Generated for SSL/TLS messages that are sent after session encryption ## started. ## -## Note that :bro:id:`SSL::disable_analyzer_after_detection` has to be changed +## Note that :zeek:id:`SSL::disable_analyzer_after_detection` has to be changed ## from its default to false for this event to be generated. ## ## c: The connection. @@ -526,7 +558,7 @@ event ssl_plaintext_data%(c: connection, is_orig: bool, record_version: count, c ## ## length: length of the entire message. ## -## .. bro:see:: ssl_client_hello ssl_established ssl_extension ssl_server_hello +## .. zeek:see:: ssl_client_hello ssl_established ssl_extension ssl_server_hello ## ssl_alert ssl_heartbeat event ssl_encrypted_data%(c: connection, is_orig: bool, record_version: count, content_type: count, length: count%); @@ -551,7 +583,7 @@ event ssl_stapled_ocsp%(c: connection, is_orig: bool, response: string%); ## ## length: Length of the handshake message that was seen. ## -## .. bro:see:: ssl_alert ssl_established ssl_extension ssl_server_hello +## .. zeek:see:: ssl_alert ssl_established ssl_extension ssl_server_hello ## ssl_session_ticket_handshake x509_certificate ssl_client_hello ## ssl_change_cipher_spec event ssl_handshake_message%(c: connection, is_orig: bool, msg_type: count, length: count%); @@ -563,7 +595,7 @@ event ssl_handshake_message%(c: connection, is_orig: bool, msg_type: count, leng ## ## is_orig: True if event is raised for originator side of the connection. ## -## .. bro:see:: ssl_alert ssl_established ssl_extension ssl_server_hello +## .. 
zeek:see:: ssl_alert ssl_established ssl_extension ssl_server_hello ## ssl_session_ticket_handshake x509_certificate ssl_client_hello ## ssl_handshake_message event ssl_change_cipher_spec%(c: connection, is_orig: bool%); diff --git a/src/analyzer/protocol/ssl/proc-server-hello.pac b/src/analyzer/protocol/ssl/proc-server-hello.pac index 3fbf688e5d..a684631af5 100644 --- a/src/analyzer/protocol/ssl/proc-server-hello.pac +++ b/src/analyzer/protocol/ssl/proc-server-hello.pac @@ -1,5 +1,5 @@ function proc_server_hello( - version : uint16, ts : double, + version : uint16, v2 : bool, server_random : bytestring, session_id : uint8[], cipher_suites16 : uint16[], @@ -21,6 +21,10 @@ else std::transform(cipher_suites24->begin(), cipher_suites24->end(), std::back_inserter(*ciphers), to_int()); + uint32 ts = 0; + if ( v2 == 0 && server_random.length() >= 4 ) + ts = ntohl(*((uint32*)server_random.data())); + BifEvent::generate_ssl_server_hello(bro_analyzer(), bro_analyzer()->Conn(), version, record_version(), ts, new StringVal(server_random.length(), diff --git a/src/analyzer/protocol/ssl/ssl-analyzer.pac b/src/analyzer/protocol/ssl/ssl-analyzer.pac index bf35218873..f901119f23 100644 --- a/src/analyzer/protocol/ssl/ssl-analyzer.pac +++ b/src/analyzer/protocol/ssl/ssl-analyzer.pac @@ -17,8 +17,8 @@ refine connection SSL_Conn += { function proc_v2_client_master_key(rec: SSLRecord, cipher_kind: int) : bool %{ - BifEvent::generate_ssl_established(bro_analyzer(), - bro_analyzer()->Conn()); + if ( ssl_established ) + BifEvent::generate_ssl_established(bro_analyzer(), bro_analyzer()->Conn()); return true; %} @@ -44,7 +44,7 @@ refine typeattr V2ClientHello += &let { refine typeattr V2ServerHello += &let { check_v2 : bool = $context.connection.proc_check_v2_server_hello_version(server_version); - proc : bool = $context.connection.proc_server_hello(server_version, 0, + proc : bool = $context.connection.proc_server_hello(server_version, 1, conn_id_data, 0, 0, ciphers, 0) &requires(check_v2) &if(check_v2 == true); cert : bool = $context.connection.proc_v2_certificate(rec.is_orig, cert_data) diff --git a/src/analyzer/protocol/ssl/ssl-defs.pac b/src/analyzer/protocol/ssl/ssl-defs.pac index 26eb29bfc5..6c2d6a0bfa 100644 --- a/src/analyzer/protocol/ssl/ssl-defs.pac +++ b/src/analyzer/protocol/ssl/ssl-defs.pac @@ -145,7 +145,7 @@ enum SSLExtensions { EXT_STATUS_REQUEST_V2 = 17, EXT_SIGNED_CERTIFICATE_TIMESTAMP = 18, EXT_SESSIONTICKET_TLS = 35, - EXT_KEY_SHARE = 40, + EXT_KEY_SHARE_OLD = 40, EXT_PRE_SHARED_KEY = 41, EXT_EARLY_DATA = 42, EXT_SUPPORTED_VERSIONS = 43, @@ -154,6 +154,7 @@ enum SSLExtensions { EXT_TICKET_EARLY_DATA_INFO = 46, EXT_CERTIFICATE_AUTHORITIES = 47, EXT_OID_FILTERS = 48, + EXT_KEY_SHARE = 51, EXT_NEXT_PROTOCOL_NEGOTIATION = 13172, EXT_ORIGIN_BOUND_CERTIFICATES = 13175, EXT_ENCRYPTED_CLIENT_CERTIFICATES = 13180, diff --git a/src/analyzer/protocol/ssl/ssl-dtls-analyzer.pac b/src/analyzer/protocol/ssl/ssl-dtls-analyzer.pac index d92f850d28..56573fd48e 100644 --- a/src/analyzer/protocol/ssl/ssl-dtls-analyzer.pac +++ b/src/analyzer/protocol/ssl/ssl-dtls-analyzer.pac @@ -31,8 +31,9 @@ refine connection SSL_Conn += { function proc_alert(rec: SSLRecord, level : int, desc : int) : bool %{ - BifEvent::generate_ssl_alert(bro_analyzer(), bro_analyzer()->Conn(), - ${rec.is_orig}, level, desc); + if ( ssl_alert ) + BifEvent::generate_ssl_alert(bro_analyzer(), bro_analyzer()->Conn(), + ${rec.is_orig}, level, desc); return true; %} function proc_unknown_record(rec: SSLRecord) : bool @@ -50,8 +51,8 @@ refine 
connection SSL_Conn += { established_ == false ) { established_ = true; - BifEvent::generate_ssl_established(bro_analyzer(), - bro_analyzer()->Conn()); + if ( ssl_established ) + BifEvent::generate_ssl_established(bro_analyzer(), bro_analyzer()->Conn()); } if ( ssl_encrypted_data ) @@ -72,9 +73,10 @@ refine connection SSL_Conn += { function proc_heartbeat(rec : SSLRecord, type: uint8, payload_length: uint16, data: bytestring) : bool %{ - BifEvent::generate_ssl_heartbeat(bro_analyzer(), - bro_analyzer()->Conn(), ${rec.is_orig}, ${rec.length}, type, payload_length, - new StringVal(data.length(), (const char*) data.data())); + if ( ssl_heartbeat ) + BifEvent::generate_ssl_heartbeat(bro_analyzer(), + bro_analyzer()->Conn(), ${rec.is_orig}, ${rec.length}, type, payload_length, + new StringVal(data.length(), (const char*) data.data())); return true; %} @@ -93,8 +95,9 @@ refine connection SSL_Conn += { function proc_ccs(rec: SSLRecord) : bool %{ - BifEvent::generate_ssl_change_cipher_spec(bro_analyzer(), - bro_analyzer()->Conn(), ${rec.is_orig}); + if ( ssl_change_cipher_spec ) + BifEvent::generate_ssl_change_cipher_spec(bro_analyzer(), + bro_analyzer()->Conn(), ${rec.is_orig}); return true; %} diff --git a/src/analyzer/protocol/ssl/tls-handshake-analyzer.pac b/src/analyzer/protocol/ssl/tls-handshake-analyzer.pac index 5cf250c366..5e8e31e0b2 100644 --- a/src/analyzer/protocol/ssl/tls-handshake-analyzer.pac +++ b/src/analyzer/protocol/ssl/tls-handshake-analyzer.pac @@ -72,6 +72,9 @@ refine connection Handshake_Conn += { function proc_ec_point_formats(rec: HandshakeRecord, point_format_list: uint8[]) : bool %{ + if ( ! ssl_extension_ec_point_formats ) + return true; + VectorVal* points = new VectorVal(internal_type("index_vec")->AsVectorType()); if ( point_format_list ) @@ -88,6 +91,9 @@ refine connection Handshake_Conn += { function proc_elliptic_curves(rec: HandshakeRecord, list: uint16[]) : bool %{ + if ( ! ssl_extension_elliptic_curves ) + return true; + VectorVal* curves = new VectorVal(internal_type("index_vec")->AsVectorType()); if ( list ) @@ -104,6 +110,9 @@ refine connection Handshake_Conn += { function proc_client_key_share(rec: HandshakeRecord, keyshare: KeyShareEntry[]) : bool %{ + if ( ! ssl_extension_key_share ) + return true; + VectorVal* nglist = new VectorVal(internal_type("index_vec")->AsVectorType()); if ( keyshare ) @@ -113,11 +122,15 @@ refine connection Handshake_Conn += { } BifEvent::generate_ssl_extension_key_share(bro_analyzer(), bro_analyzer()->Conn(), ${rec.is_orig}, nglist); + return true; %} function proc_server_key_share(rec: HandshakeRecord, keyshare: KeyShareEntry) : bool %{ + if ( ! ssl_extension_key_share ) + return true; + VectorVal* nglist = new VectorVal(internal_type("index_vec")->AsVectorType()); nglist->Assign(0u, val_mgr->GetCount(keyshare->namedgroup())); @@ -125,8 +138,23 @@ refine connection Handshake_Conn += { return true; %} + function proc_hello_retry_request_key_share(rec: HandshakeRecord, namedgroup: uint16) : bool + %{ + if ( ! ssl_extension_key_share ) + return true; + + VectorVal* nglist = new VectorVal(internal_type("index_vec")->AsVectorType()); + + nglist->Assign(0u, val_mgr->GetCount(namedgroup)); + BifEvent::generate_ssl_extension_key_share(bro_analyzer(), bro_analyzer()->Conn(), ${rec.is_orig}, nglist); + return true; + %} + function proc_signature_algorithm(rec: HandshakeRecord, supported_signature_algorithms: SignatureAndHashAlgorithm[]) : bool %{ + if ( ! 
ssl_extension_signature_algorithm ) + return true; + VectorVal* slist = new VectorVal(internal_type("signature_and_hashalgorithm_vec")->AsVectorType()); if ( supported_signature_algorithms ) @@ -147,6 +175,9 @@ refine connection Handshake_Conn += { function proc_apnl(rec: HandshakeRecord, protocols: ProtocolName[]) : bool %{ + if ( ! ssl_extension_application_layer_protocol_negotiation ) + return true; + VectorVal* plist = new VectorVal(internal_type("string_vec")->AsVectorType()); if ( protocols ) @@ -183,14 +214,20 @@ refine connection Handshake_Conn += { } } - BifEvent::generate_ssl_extension_server_name(bro_analyzer(), bro_analyzer()->Conn(), - ${rec.is_orig}, servers); + if ( ssl_extension_server_name ) + BifEvent::generate_ssl_extension_server_name(bro_analyzer(), bro_analyzer()->Conn(), + ${rec.is_orig}, servers); + else + Unref(servers); return true; %} function proc_supported_versions(rec: HandshakeRecord, versions_list: uint16[]) : bool %{ + if ( ! ssl_extension_supported_versions ) + return true; + VectorVal* versions = new VectorVal(internal_type("index_vec")->AsVectorType()); if ( versions_list ) @@ -207,6 +244,9 @@ refine connection Handshake_Conn += { function proc_one_supported_version(rec: HandshakeRecord, version: uint16) : bool %{ + if ( ! ssl_extension_supported_versions ) + return true; + VectorVal* versions = new VectorVal(internal_type("index_vec")->AsVectorType()); versions->Assign(0u, val_mgr->GetCount(version)); @@ -218,6 +258,9 @@ refine connection Handshake_Conn += { function proc_psk_key_exchange_modes(rec: HandshakeRecord, mode_list: uint8[]) : bool %{ + if ( ! ssl_extension_psk_key_exchange_modes ) + return true; + VectorVal* modes = new VectorVal(internal_type("index_vec")->AsVectorType()); if ( mode_list ) @@ -272,10 +315,11 @@ refine connection Handshake_Conn += { response.length(), bro_analyzer()->GetAnalyzerTag(), bro_analyzer()->Conn(), false, file_id, "application/ocsp-response"); - BifEvent::generate_ssl_stapled_ocsp(bro_analyzer(), - bro_analyzer()->Conn(), ${rec.is_orig}, - new StringVal(response.length(), - (const char*) response.data())); + if ( ssl_stapled_ocsp ) + BifEvent::generate_ssl_stapled_ocsp(bro_analyzer(), + bro_analyzer()->Conn(), + ${rec.is_orig}, + new StringVal(response.length(), (const char*) response.data())); file_mgr->EndOfFile(file_id); } @@ -288,26 +332,28 @@ refine connection Handshake_Conn += { if ( ${kex.curve_type} != NAMED_CURVE ) return true; - BifEvent::generate_ssl_server_curve(bro_analyzer(), - bro_analyzer()->Conn(), ${kex.params.curve}); - BifEvent::generate_ssl_ecdh_server_params(bro_analyzer(), - bro_analyzer()->Conn(), ${kex.params.curve}, new StringVal(${kex.params.point}.length(), (const char*)${kex.params.point}.data())); + if ( ssl_ecdh_server_params ) + BifEvent::generate_ssl_ecdh_server_params(bro_analyzer(), + bro_analyzer()->Conn(), ${kex.params.curve}, new StringVal(${kex.params.point}.length(), (const char*)${kex.params.point}.data())); - RecordVal* ha = new RecordVal(BifType::Record::SSL::SignatureAndHashAlgorithm); - if ( ${kex.signed_params.uses_signature_and_hashalgorithm} ) + if ( ssl_server_signature ) { - ha->Assign(0, val_mgr->GetCount(${kex.signed_params.algorithm.HashAlgorithm})); - ha->Assign(1, val_mgr->GetCount(${kex.signed_params.algorithm.SignatureAlgorithm})); - } + RecordVal* ha = new RecordVal(BifType::Record::SSL::SignatureAndHashAlgorithm); + if ( ${kex.signed_params.uses_signature_and_hashalgorithm} ) + { + ha->Assign(0, 
val_mgr->GetCount(${kex.signed_params.algorithm.HashAlgorithm})); + ha->Assign(1, val_mgr->GetCount(${kex.signed_params.algorithm.SignatureAlgorithm})); + } else - { - // set to impossible value - ha->Assign(0, val_mgr->GetCount(256)); - ha->Assign(1, val_mgr->GetCount(256)); - } + { + // set to impossible value + ha->Assign(0, val_mgr->GetCount(256)); + ha->Assign(1, val_mgr->GetCount(256)); + } - BifEvent::generate_ssl_server_signature(bro_analyzer(), - bro_analyzer()->Conn(), ha, new StringVal(${kex.signed_params.signature}.length(), (const char*)(${kex.signed_params.signature}).data())); + BifEvent::generate_ssl_server_signature(bro_analyzer(), + bro_analyzer()->Conn(), ha, new StringVal(${kex.signed_params.signature}.length(), (const char*)(${kex.signed_params.signature}).data())); + } return true; %} @@ -317,34 +363,42 @@ refine connection Handshake_Conn += { if ( ${kex.curve_type} != NAMED_CURVE ) return true; - BifEvent::generate_ssl_server_curve(bro_analyzer(), - bro_analyzer()->Conn(), ${kex.params.curve}); - BifEvent::generate_ssl_ecdh_server_params(bro_analyzer(), - bro_analyzer()->Conn(), ${kex.params.curve}, new StringVal(${kex.params.point}.length(), (const char*)${kex.params.point}.data())); + if ( ssl_ecdh_server_params ) + BifEvent::generate_ssl_ecdh_server_params(bro_analyzer(), + bro_analyzer()->Conn(), ${kex.params.curve}, new StringVal(${kex.params.point}.length(), (const char*)${kex.params.point}.data())); return true; %} function proc_rsa_client_key_exchange(rec: HandshakeRecord, rsa_pms: bytestring) : bool %{ - BifEvent::generate_ssl_rsa_client_pms(bro_analyzer(), bro_analyzer()->Conn(), new StringVal(rsa_pms.length(), (const char*)rsa_pms.data())); + if ( ssl_rsa_client_pms ) + BifEvent::generate_ssl_rsa_client_pms(bro_analyzer(), bro_analyzer()->Conn(), new StringVal(rsa_pms.length(), (const char*)rsa_pms.data())); + return true; %} function proc_dh_client_key_exchange(rec: HandshakeRecord, Yc: bytestring) : bool %{ - BifEvent::generate_ssl_dh_client_params(bro_analyzer(), bro_analyzer()->Conn(), new StringVal(Yc.length(), (const char*)Yc.data())); + if ( ssl_dh_client_params ) + BifEvent::generate_ssl_dh_client_params(bro_analyzer(), bro_analyzer()->Conn(), new StringVal(Yc.length(), (const char*)Yc.data())); + return true; %} function proc_ecdh_client_key_exchange(rec: HandshakeRecord, point: bytestring) : bool %{ - BifEvent::generate_ssl_ecdh_client_params(bro_analyzer(), bro_analyzer()->Conn(), new StringVal(point.length(), (const char*)point.data())); + if ( ssl_ecdh_client_params ) + BifEvent::generate_ssl_ecdh_client_params(bro_analyzer(), bro_analyzer()->Conn(), new StringVal(point.length(), (const char*)point.data())); + return true; %} function proc_signedcertificatetimestamp(rec: HandshakeRecord, version: uint8, logid: const_bytestring, timestamp: uint64, digitally_signed_algorithms: SignatureAndHashAlgorithm, digitally_signed_signature: const_bytestring) : bool %{ + if ( ! 
ssl_extension_signed_certificate_timestamp ) + return true; + RecordVal* ha = new RecordVal(BifType::Record::SSL::SignatureAndHashAlgorithm); ha->Assign(0, val_mgr->GetCount(digitally_signed_algorithms->HashAlgorithm())); ha->Assign(1, val_mgr->GetCount(digitally_signed_algorithms->SignatureAlgorithm())); @@ -363,50 +417,98 @@ refine connection Handshake_Conn += { function proc_dhe_server_key_exchange(rec: HandshakeRecord, p: bytestring, g: bytestring, Ys: bytestring, signed_params: ServerKeyExchangeSignature) : bool %{ - BifEvent::generate_ssl_dh_server_params(bro_analyzer(), - bro_analyzer()->Conn(), - new StringVal(p.length(), (const char*) p.data()), - new StringVal(g.length(), (const char*) g.data()), - new StringVal(Ys.length(), (const char*) Ys.data()) - ); + if ( ssl_ecdh_server_params ) + BifEvent::generate_ssl_dh_server_params(bro_analyzer(), + bro_analyzer()->Conn(), + new StringVal(p.length(), (const char*) p.data()), + new StringVal(g.length(), (const char*) g.data()), + new StringVal(Ys.length(), (const char*) Ys.data()) + ); - RecordVal* ha = new RecordVal(BifType::Record::SSL::SignatureAndHashAlgorithm); - if ( ${signed_params.uses_signature_and_hashalgorithm} ) + if ( ssl_server_signature ) { - ha->Assign(0, val_mgr->GetCount(${signed_params.algorithm.HashAlgorithm})); - ha->Assign(1, val_mgr->GetCount(${signed_params.algorithm.SignatureAlgorithm})); - } - else - { - // set to impossible value - ha->Assign(0, val_mgr->GetCount(256)); - ha->Assign(1, val_mgr->GetCount(256)); - } + RecordVal* ha = new RecordVal(BifType::Record::SSL::SignatureAndHashAlgorithm); + if ( ${signed_params.uses_signature_and_hashalgorithm} ) + { + ha->Assign(0, val_mgr->GetCount(${signed_params.algorithm.HashAlgorithm})); + ha->Assign(1, val_mgr->GetCount(${signed_params.algorithm.SignatureAlgorithm})); + } + else + { + // set to impossible value + ha->Assign(0, val_mgr->GetCount(256)); + ha->Assign(1, val_mgr->GetCount(256)); + } - BifEvent::generate_ssl_server_signature(bro_analyzer(), - bro_analyzer()->Conn(), ha, - new StringVal(${signed_params.signature}.length(), (const char*)(${signed_params.signature}).data()) - ); + BifEvent::generate_ssl_server_signature(bro_analyzer(), + bro_analyzer()->Conn(), ha, + new StringVal(${signed_params.signature}.length(), (const char*)(${signed_params.signature}).data()) + ); + } return true; %} function proc_dh_anon_server_key_exchange(rec: HandshakeRecord, p: bytestring, g: bytestring, Ys: bytestring) : bool %{ - BifEvent::generate_ssl_dh_server_params(bro_analyzer(), - bro_analyzer()->Conn(), - new StringVal(p.length(), (const char*) p.data()), - new StringVal(g.length(), (const char*) g.data()), - new StringVal(Ys.length(), (const char*) Ys.data()) - ); + if ( ssl_dh_server_params ) + BifEvent::generate_ssl_dh_server_params(bro_analyzer(), + bro_analyzer()->Conn(), + new StringVal(p.length(), (const char*) p.data()), + new StringVal(g.length(), (const char*) g.data()), + new StringVal(Ys.length(), (const char*) Ys.data()) + ); return true; %} function proc_handshake(is_orig: bool, msg_type: uint8, length: uint24) : bool %{ - BifEvent::generate_ssl_handshake_message(bro_analyzer(), - bro_analyzer()->Conn(), is_orig, msg_type, to_int()(length)); + if ( ssl_handshake_message ) + BifEvent::generate_ssl_handshake_message(bro_analyzer(), + bro_analyzer()->Conn(), is_orig, msg_type, to_int()(length)); + + return true; + %} + + function proc_pre_shared_key_server_hello(rec: HandshakeRecord, identities: PSKIdentitiesList, binders: PSKBindersList) : bool + %{ + if 
( ! ssl_extension_pre_shared_key_server_hello ) + return true; + + VectorVal* slist = new VectorVal(internal_type("psk_identity_vec")->AsVectorType()); + + if ( identities && identities->identities() ) + { + for ( auto&& identity : *(identities->identities()) ) + { + RecordVal* el = new RecordVal(BifType::Record::SSL::PSKIdentity); + el->Assign(0, new StringVal(identity->identity().length(), (const char*) identity->identity().data())); + el->Assign(1, val_mgr->GetCount(identity->obfuscated_ticket_age())); + slist->Assign(slist->Size(), el); + } + } + + VectorVal* blist = new VectorVal(internal_type("string_vec")->AsVectorType()); + if ( binders && binders->binders() ) + { + for ( auto&& binder : *(binders->binders()) ) + blist->Assign(blist->Size(), new StringVal(binder->binder().length(), (const char*) binder->binder().data())); + } + + BifEvent::generate_ssl_extension_pre_shared_key_client_hello(bro_analyzer(), bro_analyzer()->Conn(), + ${rec.is_orig}, slist, blist); + + return true; + %} + + function proc_pre_shared_key_client_hello(rec: HandshakeRecord, selected_identity: uint16) : bool + %{ + if ( ! ssl_extension_pre_shared_key_client_hello ) + return true; + + BifEvent::generate_ssl_extension_pre_shared_key_server_hello(bro_analyzer(), + bro_analyzer()->Conn(), ${rec.is_orig}, selected_identity); return true; %} @@ -421,7 +523,7 @@ refine typeattr ClientHello += &let { refine typeattr ServerHello += &let { proc : bool = $context.connection.proc_server_hello(server_version, - gmt_unix_time, random_bytes, session_id, cipher_suite, 0, + 0, random_bytes, session_id, cipher_suite, 0, compression_method); }; @@ -460,6 +562,10 @@ refine typeattr ServerHelloKeyShare += &let { proc : bool = $context.connection.proc_server_key_share(rec, keyshare); }; +refine typeattr HelloRetryRequestKeyShare += &let { + proc : bool = $context.connection.proc_hello_retry_request_key_share(rec, namedgroup); +}; + refine typeattr ClientHelloKeyShare += &let { proc : bool = $context.connection.proc_client_key_share(rec, keyshares); }; @@ -520,6 +626,14 @@ refine typeattr PSKKeyExchangeModes += &let { proc : bool = $context.connection.proc_psk_key_exchange_modes(rec, modes); }; +refine typeattr OfferedPsks += &let { + proc : bool = $context.connection.proc_pre_shared_key_server_hello(rec, identities, binders); +}; + +refine typeattr SelectedPreSharedKeyIdentity += &let { + proc : bool = $context.connection.proc_pre_shared_key_client_hello(rec, selected_identity); +}; + refine typeattr Handshake += &let { proc : bool = $context.connection.proc_handshake(rec.is_orig, rec.msg_type, rec.msg_length); }; diff --git a/src/analyzer/protocol/ssl/tls-handshake-protocol.pac b/src/analyzer/protocol/ssl/tls-handshake-protocol.pac index f141a6e9b0..3fcf1c595c 100644 --- a/src/analyzer/protocol/ssl/tls-handshake-protocol.pac +++ b/src/analyzer/protocol/ssl/tls-handshake-protocol.pac @@ -116,8 +116,7 @@ type ServerHelloChoice(rec: HandshakeRecord) = record { }; type ServerHello(rec: HandshakeRecord, server_version: uint16) = record { - gmt_unix_time : uint32; - random_bytes : bytestring &length = 28; + random_bytes : bytestring &length = 32; session_len : uint8; session_id : uint8[session_len]; cipher_suite : uint16[1]; @@ -775,9 +774,11 @@ type SSLExtension(rec: HandshakeRecord) = record { EXT_SERVER_NAME -> server_name: ServerNameExt(rec)[] &until($element == 0 || $element != 0); EXT_SIGNATURE_ALGORITHMS -> signature_algorithm: SignatureAlgorithm(rec)[] &until($element == 0 || $element != 0); EXT_SIGNED_CERTIFICATE_TIMESTAMP 
-> certificate_timestamp: SignedCertificateTimestampList(rec)[] &until($element == 0 || $element != 0); - EXT_KEY_SHARE -> key_share: KeyShare(rec)[] &until($element == 0 || $element != 0); + EXT_KEY_SHARE -> key_share: KeyShare(rec, this)[] &until($element == 0 || $element != 0); + EXT_KEY_SHARE_OLD -> key_share_old: KeyShare(rec, this)[] &until($element == 0 || $element != 0); EXT_SUPPORTED_VERSIONS -> supported_versions_selector: SupportedVersionsSelector(rec, data_len)[] &until($element == 0 || $element != 0); EXT_PSK_KEY_EXCHANGE_MODES -> psk_key_exchange_modes: PSKKeyExchangeModes(rec)[] &until($element == 0 || $element != 0); + EXT_PRE_SHARED_KEY -> pre_shared_key: PreSharedKey(rec)[] &until($element == 0 || $element != 0); default -> data: bytestring &restofdata; }; } &length=data_len+4 &exportsourcedata; @@ -852,14 +853,62 @@ type ServerHelloKeyShare(rec: HandshakeRecord) = record { keyshare : KeyShareEntry; }; +type HelloRetryRequestKeyShare(rec: HandshakeRecord) = record { + namedgroup : uint16; +}; + +type ServerHelloKeyShareChoice(rec: HandshakeRecord, ext: SSLExtension) = case (ext.data_len) of { + 2 -> hrr : HelloRetryRequestKeyShare(rec); + default -> server : ServerHelloKeyShare(rec); +}; + type ClientHelloKeyShare(rec: HandshakeRecord) = record { length: uint16; keyshares : KeyShareEntry[] &until($input.length() == 0); +} &length=(length+2); + +type KeyShare(rec: HandshakeRecord, ext: SSLExtension) = case rec.msg_type of { + CLIENT_HELLO -> client_hello_keyshare : ClientHelloKeyShare(rec); + SERVER_HELLO -> server_hello_keyshare : ServerHelloKeyShareChoice(rec, ext); + # in old traces, theoretically hello retry requests might show up as a separate type here. + # If this happens, just ignore the extension - we do not have any example traffic for this. + # And it will not happen in anything speaking TLS 1.3, or not completely ancient drafts of it. + default -> other : bytestring &restofdata &transient; }; -type KeyShare(rec: HandshakeRecord) = case rec.msg_type of { - CLIENT_HELLO -> client_hello_keyshare : ClientHelloKeyShare(rec); - SERVER_HELLO -> server_hello_keyshare : ServerHelloKeyShare(rec); +type SelectedPreSharedKeyIdentity(rec: HandshakeRecord) = record { + selected_identity: uint16; +}; + +type PSKIdentity() = record { + length: uint16; + identity: bytestring &length=length; + obfuscated_ticket_age: uint32; +}; + +type PSKIdentitiesList() = record { + length: uint16; + identities: PSKIdentity[] &until($input.length() == 0); +} &length=length+2; + +type PSKBinder() = record { + length: uint8; + binder: bytestring &length=length; +}; + +type PSKBindersList() = record { + length: uint16; + binders: PSKBinder[] &until($input.length() == 0); +} &length=length+2; + +type OfferedPsks(rec: HandshakeRecord) = record { + identities: PSKIdentitiesList; + binders: PSKBindersList; +}; + +type PreSharedKey(rec: HandshakeRecord) = case rec.msg_type of { + CLIENT_HELLO -> offered_psks : OfferedPsks(rec); + SERVER_HELLO -> selected_identity : SelectedPreSharedKeyIdentity(rec); # ... well, we don't parse hello retry requests yet, because I don't have an example of them on the wire. 
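# Illustrative only, not part of this grammar: once OfferedPsks and
# SelectedPreSharedKeyIdentity are parsed, the two new events can be consumed
# from script-land, e.g.:
#
#   event ssl_extension_pre_shared_key_client_hello(c: connection, is_orig: bool,
#                                                   identities: psk_identity_vec,
#                                                   binders: string_vec)
#       {
#       print fmt("%s offered %d PSK identities and %d binders",
#                 c$uid, |identities|, |binders|);
#       }
#
# The record fields behind psk_identity_vec mirror the Assign() calls in
# tls-handshake-analyzer.pac above (the identity bytes and obfuscated_ticket_age).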
default -> other : bytestring &restofdata &transient; }; diff --git a/src/analyzer/protocol/ssl/types.bif b/src/analyzer/protocol/ssl/types.bif index a6f7f069cf..c2bce5a44f 100644 --- a/src/analyzer/protocol/ssl/types.bif +++ b/src/analyzer/protocol/ssl/types.bif @@ -1,5 +1,6 @@ module SSL; type SignatureAndHashAlgorithm: record; +type PSKIdentity: record; module GLOBAL; diff --git a/src/analyzer/protocol/stepping-stone/CMakeLists.txt b/src/analyzer/protocol/stepping-stone/CMakeLists.txt index 042f5bc858..8975da49f9 100644 --- a/src/analyzer/protocol/stepping-stone/CMakeLists.txt +++ b/src/analyzer/protocol/stepping-stone/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro SteppingStone) -bro_plugin_cc(SteppingStone.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek SteppingStone) +zeek_plugin_cc(SteppingStone.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/stepping-stone/Plugin.cc b/src/analyzer/protocol/stepping-stone/Plugin.cc index f3566eb551..5d76fa7d74 100644 --- a/src/analyzer/protocol/stepping-stone/Plugin.cc +++ b/src/analyzer/protocol/stepping-stone/Plugin.cc @@ -6,7 +6,7 @@ #include "SteppingStone.h" namespace plugin { -namespace Bro_SteppingStone { +namespace Zeek_SteppingStone { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("SteppingStone", ::analyzer::stepping_stone::SteppingStone_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::SteppingStone"; + config.name = "Zeek::SteppingStone"; config.description = "Stepping stone analyzer"; return config; } diff --git a/src/analyzer/protocol/stepping-stone/SteppingStone.cc b/src/analyzer/protocol/stepping-stone/SteppingStone.cc index 3035a0b1a5..d3844846b9 100644 --- a/src/analyzer/protocol/stepping-stone/SteppingStone.cc +++ b/src/analyzer/protocol/stepping-stone/SteppingStone.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include @@ -139,25 +139,23 @@ void SteppingStoneEndpoint::Event(EventHandlerPtr f, int id1, int id2) if ( ! f ) return; - val_list* vl = new val_list; - - vl->append(val_mgr->GetInt(id1)); - if ( id2 >= 0 ) - vl->append(val_mgr->GetInt(id2)); + endp->TCP()->ConnectionEventFast(f, {val_mgr->GetInt(id1), val_mgr->GetInt(id2)}); + else + endp->TCP()->ConnectionEventFast(f, {val_mgr->GetInt(id1)}); - endp->TCP()->ConnectionEvent(f, vl); } void SteppingStoneEndpoint::CreateEndpEvent(int is_orig) { - val_list* vl = new val_list; + if ( ! 
stp_create_endp ) + return; - vl->append(endp->TCP()->BuildConnVal()); - vl->append(val_mgr->GetInt(stp_id)); - vl->append(val_mgr->GetBool(is_orig)); - - endp->TCP()->ConnectionEvent(stp_create_endp, vl); + endp->TCP()->ConnectionEventFast(stp_create_endp, { + endp->TCP()->BuildConnVal(), + val_mgr->GetInt(stp_id), + val_mgr->GetBool(is_orig), + }); } SteppingStone_Analyzer::SteppingStone_Analyzer(Connection* c) diff --git a/src/analyzer/protocol/syslog/CMakeLists.txt b/src/analyzer/protocol/syslog/CMakeLists.txt index 5366f94642..5e1fca87ad 100644 --- a/src/analyzer/protocol/syslog/CMakeLists.txt +++ b/src/analyzer/protocol/syslog/CMakeLists.txt @@ -1,10 +1,10 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro Syslog) -bro_plugin_cc(Syslog.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_pac(syslog.pac syslog-analyzer.pac syslog-protocol.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek Syslog) +zeek_plugin_cc(Syslog.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_pac(syslog.pac syslog-analyzer.pac syslog-protocol.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/syslog/Plugin.cc b/src/analyzer/protocol/syslog/Plugin.cc index c2478bdeb0..e4d5f38fa1 100644 --- a/src/analyzer/protocol/syslog/Plugin.cc +++ b/src/analyzer/protocol/syslog/Plugin.cc @@ -6,7 +6,7 @@ #include "Syslog.h" namespace plugin { -namespace Bro_Syslog { +namespace Zeek_Syslog { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("Syslog", ::analyzer::syslog::Syslog_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::Syslog"; + config.name = "Zeek::Syslog"; config.description = "Syslog analyzer UDP-only"; return config; } diff --git a/src/analyzer/protocol/syslog/events.bif b/src/analyzer/protocol/syslog/events.bif index f82adc7e69..2c4e3d9775 100644 --- a/src/analyzer/protocol/syslog/events.bif +++ b/src/analyzer/protocol/syslog/events.bif @@ -12,6 +12,6 @@ ## ## msg: The message logged. ## -## .. note:: Bro currently parses only UDP syslog traffic. Support for TCP +## .. note:: Zeek currently parses only UDP syslog traffic. Support for TCP ## syslog will be added soon. event syslog_message%(c: connection, facility: count, severity: count, msg: string%); diff --git a/src/analyzer/protocol/syslog/syslog-analyzer.pac b/src/analyzer/protocol/syslog/syslog-analyzer.pac index 46e2cc171d..2bbdfd3754 100644 --- a/src/analyzer/protocol/syslog/syslog-analyzer.pac +++ b/src/analyzer/protocol/syslog/syslog-analyzer.pac @@ -11,6 +11,9 @@ flow Syslog_Flow function process_syslog_message(m: Syslog_Message): bool %{ + if ( ! 
syslog_message ) + return true; + if ( ${m.has_pri} ) BifEvent::generate_syslog_message( connection()->bro_analyzer(), diff --git a/src/analyzer/protocol/tcp/CMakeLists.txt b/src/analyzer/protocol/tcp/CMakeLists.txt index d4b2dc3eab..c00f3e5379 100644 --- a/src/analyzer/protocol/tcp/CMakeLists.txt +++ b/src/analyzer/protocol/tcp/CMakeLists.txt @@ -1,10 +1,10 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro TCP) -bro_plugin_cc(TCP.cc TCP_Endpoint.cc TCP_Reassembler.cc ContentLine.cc Stats.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_bif(functions.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek TCP) +zeek_plugin_cc(TCP.cc TCP_Endpoint.cc TCP_Reassembler.cc ContentLine.cc Stats.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_bif(functions.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/tcp/Plugin.cc b/src/analyzer/protocol/tcp/Plugin.cc index b258135b37..3a99b2036a 100644 --- a/src/analyzer/protocol/tcp/Plugin.cc +++ b/src/analyzer/protocol/tcp/Plugin.cc @@ -6,7 +6,7 @@ #include "TCP.h" namespace plugin { -namespace Bro_TCP { +namespace Zeek_TCP { class Plugin : public plugin::Plugin { public: @@ -18,7 +18,7 @@ public: AddComponent(new ::analyzer::Component("Contents", 0)); plugin::Configuration config; - config.name = "Bro::TCP"; + config.name = "Zeek::TCP"; config.description = "TCP analyzer"; return config; } diff --git a/src/analyzer/protocol/tcp/TCP.cc b/src/analyzer/protocol/tcp/TCP.cc index 9329b103ed..74e73b80e2 100644 --- a/src/analyzer/protocol/tcp/TCP.cc +++ b/src/analyzer/protocol/tcp/TCP.cc @@ -4,7 +4,6 @@ #include "NetVar.h" #include "File.h" -#include "OSFinger.h" #include "Event.h" #include "analyzer/protocol/pia/PIA.h" @@ -115,201 +114,6 @@ static RecordVal* build_syn_packet_val(int is_orig, const IP_Hdr* ip, return v; } -static RecordVal* build_os_val(int is_orig, const IP_Hdr* ip, - const struct tcphdr* tcp, uint32 tcp_hdr_len) - { - if ( ! is_orig ) - // Later we might use SYN-ACK fingerprinting here. - return 0; - - // Passive OS fingerprinting wants to know a lot about IP and TCP - // options: how many options there are, and in which order. - int winscale = 0; - int MSS = 0; - int optcount = 0; - uint32 quirks = 0; - uint32 tstamp = 0; - uint8 op[MAXOPT]; - - if ( ip->HdrLen() > 20 ) - quirks |= QUIRK_IPOPT; - - if ( ip->ID() == 0 ) - quirks |= QUIRK_ZEROID; - - if ( tcp->th_seq == 0 ) - quirks |= QUIRK_SEQ0; - - if ( tcp->th_seq == tcp->th_ack ) - quirks |= QUIRK_SEQEQ; - - if ( tcp->th_flags & ~(TH_SYN|TH_ACK|TH_RST|TH_ECE|TH_CWR) ) - quirks |= QUIRK_FLAGS; - - if ( ip->TotalLen() - ip->HdrLen() - tcp_hdr_len > 0 ) - quirks |= QUIRK_DATA; // SYN with data - - if ( tcp->th_ack ) - quirks |= QUIRK_ACK; - if ( tcp->th_urp ) - quirks |= QUIRK_URG; - if ( tcp->th_x2 ) - quirks |= QUIRK_X2; - - // Parse TCP options. - u_char* options = (u_char*) tcp + sizeof(struct tcphdr); - u_char* opt_end = (u_char*) tcp + tcp_hdr_len; - - while ( options < opt_end ) - { - unsigned int opt = options[0]; - - if ( opt == TCPOPT_EOL ) - { - op[optcount++] = TCPOPT_EOL; - if ( ++options < opt_end ) - quirks |= QUIRK_PAST; - - // All done - could flag if more junk left over .... - break; - } - - if ( opt == TCPOPT_NOP ) - { - op[optcount++] = TCPOPT_NOP; - ++options; - continue; - } - - if ( options + 1 >= opt_end ) - { - // We've run off the end, no room for the length. 
- quirks |= QUIRK_BROKEN; - break; - } - - unsigned int opt_len = options[1]; - - if ( options + opt_len > opt_end ) - { - // No room for rest of the options. - quirks |= QUIRK_BROKEN; - break; - } - - if ( opt_len == 0 ) - // Trashed length field. - break; - - switch ( opt ) { - case TCPOPT_SACK_PERMITTED: - // SACKOK LEN - op[optcount] = TCPOPT_SACK_PERMITTED; - break; - - case TCPOPT_MAXSEG: - // MSS LEN D0 D1 - if ( opt_len < 4 ) - break; // bad length - - op[optcount] = TCPOPT_MAXSEG; - MSS = (options[2] << 8) | options[3]; - break; - - case TCPOPT_WINDOW: - // WSCALE LEN D0 - if ( opt_len < 3 ) - break; // bad length - - op[optcount] = TCPOPT_WINDOW; - winscale = options[2]; - break; - - case TCPOPT_TIMESTAMP: - // TSTAMP LEN T0 T1 T2 T3 A0 A1 A2 A3 - if ( opt_len < 10 ) - break; // bad length - - op[optcount] = TCPOPT_TIMESTAMP; - - tstamp = ntohl(extract_uint32(options + 2)); - - if ( extract_uint32(options + 6) ) - quirks |= QUIRK_T2; - break; - - default: // just skip over - op[optcount]=opt; - break; - } - - if ( optcount < MAXOPT - 1 ) - ++optcount; - else - quirks |= QUIRK_BROKEN; - - options += opt_len; - } - - struct os_type os_from_print; - int id = sessions->Get_OS_From_SYN(&os_from_print, - uint16(ip->TotalLen()), - uint8(ip->DF()), uint8(ip->TTL()), - uint16(ntohs(tcp->th_win)), - uint8(optcount), op, - uint16(MSS), uint8(winscale), - tstamp, quirks, - uint8(tcp->th_flags & (TH_ECE|TH_CWR))); - - if ( sessions->CompareWithPreviousOSMatch(ip->SrcAddr(), id) ) - { - RecordVal* os = new RecordVal(OS_version); - - os->Assign(0, new StringVal(os_from_print.os)); - - if ( os_from_print.desc ) - os->Assign(1, new StringVal(os_from_print.desc)); - else - os->Assign(1, val_mgr->GetEmptyString()); - - os->Assign(2, val_mgr->GetCount(os_from_print.dist)); - os->Assign(3, OS_version_inference->GetVal(os_from_print.match)); - - return os; - } - - return 0; - } - - -static void passive_fingerprint(TCP_Analyzer* tcp, bool is_orig, - const IP_Hdr* ip, const struct tcphdr* tp, - uint32 tcp_hdr_len) - { - // is_orig will be removed once we can do SYN-ACK fingerprinting - if ( OS_version_found && is_orig ) - { - const IPAddr& orig_addr = tcp->Conn()->OrigAddr(); - AddrVal* src_addr_val = new AddrVal(orig_addr); - - if ( generate_OS_version_event->Size() == 0 || - generate_OS_version_event->Lookup(src_addr_val) ) - { - RecordVal* OS_val = build_os_val(is_orig, ip, tp, tcp_hdr_len); - - if ( OS_val ) - { // found new OS version - val_list* vl = new val_list; - vl->append(tcp->BuildConnVal()); - vl->append(src_addr_val->Ref()); - vl->append(OS_val); - tcp->ConnectionEvent(OS_version_found, vl); - } - } - - Unref(src_addr_val); - } - } TCP_Analyzer::TCP_Analyzer(Connection* conn) : TransportLayerAnalyzer("TCP", conn) @@ -965,20 +769,17 @@ void TCP_Analyzer::GeneratePacketEvent( const u_char* data, int len, int caplen, int is_orig, TCP_Flags flags) { - val_list* vl = new val_list(); - - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(is_orig)); - vl->append(new StringVal(flags.AsString())); - vl->append(val_mgr->GetCount(rel_seq)); - vl->append(val_mgr->GetCount(flags.ACK() ? rel_ack : 0)); - vl->append(val_mgr->GetCount(len)); - - // We need the min() here because Ethernet padding can lead to - // caplen > len. 
- vl->append(new StringVal(min(caplen, len), (const char*) data)); - - ConnectionEvent(tcp_packet, vl); + ConnectionEventFast(tcp_packet, { + BuildConnVal(), + val_mgr->GetBool(is_orig), + new StringVal(flags.AsString()), + val_mgr->GetCount(rel_seq), + val_mgr->GetCount(flags.ACK() ? rel_ack : 0), + val_mgr->GetCount(len), + // We need the min() here because Ethernet padding can lead to + // caplen > len. + new StringVal(min(caplen, len), (const char*) data), + }); } int TCP_Analyzer::DeliverData(double t, const u_char* data, int len, int caplen, @@ -1019,9 +820,9 @@ void TCP_Analyzer::CheckPIA_FirstPacket(int is_orig, const IP_Hdr* ip) } } -static uint64 get_relative_seq(const TCP_Endpoint* endpoint, - uint32 cur_base, uint32 last, uint32 wraps, - bool* underflow = 0) +uint64 TCP_Analyzer::get_relative_seq(const TCP_Endpoint* endpoint, + uint32 cur_base, uint32 last, + uint32 wraps, bool* underflow) { int32 delta = seq_delta(cur_base, last); @@ -1052,7 +853,7 @@ static uint64 get_relative_seq(const TCP_Endpoint* endpoint, return endpoint->ToRelativeSeqSpace(cur_base, wraps); } -static int get_segment_len(int payload_len, TCP_Flags flags) +int TCP_Analyzer::get_segment_len(int payload_len, TCP_Flags flags) { int seg_len = payload_len; @@ -1283,14 +1084,12 @@ void TCP_Analyzer::DeliverPacket(int len, const u_char* data, bool is_orig, if ( connection_SYN_packet ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(SYN_vals->Ref()); - ConnectionEvent(connection_SYN_packet, vl); + ConnectionEventFast(connection_SYN_packet, { + BuildConnVal(), + SYN_vals->Ref(), + }); } - passive_fingerprint(this, is_orig, ip, tp, tcp_hdr_len); - Unref(SYN_vals); } @@ -1350,11 +1149,9 @@ void TCP_Analyzer::DeliverPacket(int len, const u_char* data, bool is_orig, Weird("TCP_ack_underflow_or_misorder"); } else if ( ! flags.RST() ) - // Don't trust ack's in RSt packets. + // Don't trust ack's in RST packets. update_ack_seq(peer, ack_seq); } - - peer->AckReceived(rel_ack); } int32 delta_last = update_last_seq(endpoint, seq_one_past_segment, flags, len); @@ -1365,6 +1162,15 @@ void TCP_Analyzer::DeliverPacket(int len, const u_char* data, bool is_orig, UpdateStateMachine(current_timestamp, endpoint, peer, base_seq, ack_seq, len, delta_last, is_orig, flags, do_close, gen_event); + if ( flags.ACK() ) + // We wait on doing this until we've updated the state + // machine so that if the ack reveals a content gap, + // we can tell whether it came at the very end of the + // connection (in a FIN or RST). Those gaps aren't + // reliable - especially those for RSTs - and we refrain + // from flagging them in the connection history. 
+ peer->AckReceived(rel_ack); + if ( tcp_packet ) GeneratePacketEvent(rel_seq, rel_ack, data, len, caplen, is_orig, flags); @@ -1503,14 +1309,12 @@ int TCP_Analyzer::TCPOptionEvent(unsigned int opt, { if ( tcp_option ) { - val_list* vl = new val_list(); - - vl->append(analyzer->BuildConnVal()); - vl->append(val_mgr->GetBool(is_orig)); - vl->append(val_mgr->GetCount(opt)); - vl->append(val_mgr->GetCount(optlen)); - - analyzer->ConnectionEvent(tcp_option, vl); + analyzer->ConnectionEventFast(tcp_option, { + analyzer->BuildConnVal(), + val_mgr->GetBool(is_orig), + val_mgr->GetCount(opt), + val_mgr->GetCount(optlen), + }); } return 0; @@ -1826,10 +1630,10 @@ void TCP_Analyzer::EndpointEOF(TCP_Reassembler* endp) { if ( connection_EOF ) { - val_list* vl = new val_list(); - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(endp->IsOrig())); - ConnectionEvent(connection_EOF, vl); + ConnectionEventFast(connection_EOF, { + BuildConnVal(), + val_mgr->GetBool(endp->IsOrig()), + }); } const analyzer_list& children(GetChildren()); @@ -2108,15 +1912,14 @@ int TCPStats_Endpoint::DataSent(double /* t */, uint64 seq, int len, int caplen, if ( tcp_rexmit ) { - val_list* vl = new val_list(); - vl->append(endp->TCP()->BuildConnVal()); - vl->append(val_mgr->GetBool(endp->IsOrig())); - vl->append(val_mgr->GetCount(seq)); - vl->append(val_mgr->GetCount(len)); - vl->append(val_mgr->GetCount(data_in_flight)); - vl->append(val_mgr->GetCount(endp->peer->window)); - - endp->TCP()->ConnectionEvent(tcp_rexmit, vl); + endp->TCP()->ConnectionEventFast(tcp_rexmit, { + endp->TCP()->BuildConnVal(), + val_mgr->GetBool(endp->IsOrig()), + val_mgr->GetCount(seq), + val_mgr->GetCount(len), + val_mgr->GetCount(data_in_flight), + val_mgr->GetCount(endp->peer->window), + }); } } else @@ -2164,11 +1967,12 @@ void TCPStats_Analyzer::Done() { TCP_ApplicationAnalyzer::Done(); - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(orig_stats->BuildStats()); - vl->append(resp_stats->BuildStats()); - ConnectionEvent(conn_stats, vl); + if ( conn_stats ) + ConnectionEventFast(conn_stats, { + BuildConnVal(), + orig_stats->BuildStats(), + resp_stats->BuildStats(), + }); } void TCPStats_Analyzer::DeliverPacket(int len, const u_char* data, bool is_orig, uint64 seq, const IP_Hdr* ip, int caplen) diff --git a/src/analyzer/protocol/tcp/TCP.h b/src/analyzer/protocol/tcp/TCP.h index 69f3482ae0..95ef5c72d7 100644 --- a/src/analyzer/protocol/tcp/TCP.h +++ b/src/analyzer/protocol/tcp/TCP.h @@ -174,6 +174,13 @@ protected: const u_char* option, TCP_Analyzer* analyzer, bool is_orig, void* cookie); + // A couple utility functions that may also be useful to derived analyzers. 
+ static uint64 get_relative_seq(const TCP_Endpoint* endpoint, + uint32 cur_base, uint32 last, + uint32 wraps, bool* underflow = 0); + + static int get_segment_len(int payload_len, TCP_Flags flags); + private: TCP_Endpoint* orig; TCP_Endpoint* resp; diff --git a/src/analyzer/protocol/tcp/TCP_Endpoint.cc b/src/analyzer/protocol/tcp/TCP_Endpoint.cc index 7e7b316e10..5cfc61ca8a 100644 --- a/src/analyzer/protocol/tcp/TCP_Endpoint.cc +++ b/src/analyzer/protocol/tcp/TCP_Endpoint.cc @@ -32,8 +32,8 @@ TCP_Endpoint::TCP_Endpoint(TCP_Analyzer* arg_analyzer, int arg_is_orig) tcp_analyzer = arg_analyzer; is_orig = arg_is_orig; - chk_cnt = rxmt_cnt = win0_cnt = 0; - chk_thresh = rxmt_thresh = win0_thresh = 1; + gap_cnt = chk_cnt = rxmt_cnt = win0_cnt = 0; + gap_thresh = chk_thresh = rxmt_thresh = win0_thresh = 1; hist_last_SYN = hist_last_FIN = hist_last_RST = 0; @@ -237,11 +237,11 @@ int TCP_Endpoint::DataSent(double t, uint64 seq, int len, int caplen, if ( contents_file_write_failure ) { - val_list* vl = new val_list(); - vl->append(Conn()->BuildConnVal()); - vl->append(val_mgr->GetBool(IsOrig())); - vl->append(new StringVal(buf)); - tcp_analyzer->ConnectionEvent(contents_file_write_failure, vl); + tcp_analyzer->ConnectionEventFast(contents_file_write_failure, { + Conn()->BuildConnVal(), + val_mgr->GetBool(IsOrig()), + new StringVal(buf), + }); } } } @@ -313,3 +313,11 @@ void TCP_Endpoint::ZeroWindow() Conn()->HistoryThresholdEvent(tcp_multiple_zero_windows, IsOrig(), t); } + +void TCP_Endpoint::Gap(uint64 seq, uint64 len) + { + uint32 t = gap_thresh; + if ( Conn()->ScaledHistoryEntry(IsOrig() ? 'G' : 'g', + gap_cnt, gap_thresh) ) + Conn()->HistoryThresholdEvent(tcp_multiple_gap, IsOrig(), t); + } diff --git a/src/analyzer/protocol/tcp/TCP_Endpoint.h b/src/analyzer/protocol/tcp/TCP_Endpoint.h index 4c38aadd93..b17cfef700 100644 --- a/src/analyzer/protocol/tcp/TCP_Endpoint.h +++ b/src/analyzer/protocol/tcp/TCP_Endpoint.h @@ -175,6 +175,9 @@ public: // Called to inform endpoint that it has offered a zero window. void ZeroWindow(); + // Called to inform endpoint that a gap occurred. + void Gap(uint64 seq, uint64 len); + // Returns true if the data was used (and hence should be recorded // in the save file), false otherwise. int DataSent(double t, uint64 seq, int len, int caplen, const u_char* data, @@ -240,6 +243,7 @@ protected: uint32 chk_cnt, chk_thresh; uint32 rxmt_cnt, rxmt_thresh; uint32 win0_cnt, win0_thresh; + uint32 gap_cnt, gap_thresh; }; #define ENDIAN_UNKNOWN 0 diff --git a/src/analyzer/protocol/tcp/TCP_Reassembler.cc b/src/analyzer/protocol/tcp/TCP_Reassembler.cc index ef68f621b5..939497b904 100644 --- a/src/analyzer/protocol/tcp/TCP_Reassembler.cc +++ b/src/analyzer/protocol/tcp/TCP_Reassembler.cc @@ -1,5 +1,6 @@ #include +#include "File.h" #include "analyzer/Analyzer.h" #include "TCP_Reassembler.h" #include "analyzer/protocol/tcp/TCP.h" @@ -105,43 +106,49 @@ void TCP_Reassembler::SetContentsFile(BroFile* f) RecordToSeq(blocks->seq, last_reassem_seq, f); } - // Don't want rotation on these files. 
- f->SetRotateInterval(0); - Ref(f); record_contents_file = f; } -static inline bool established(const TCP_Endpoint* a, const TCP_Endpoint* b) +static inline bool is_clean(const TCP_Endpoint* a) { - return a->state == TCP_ENDPOINT_ESTABLISHED && - b->state == TCP_ENDPOINT_ESTABLISHED; + return a->state == TCP_ENDPOINT_ESTABLISHED || + (a->state == TCP_ENDPOINT_CLOSED && + a->prev_state == TCP_ENDPOINT_ESTABLISHED); + } + +static inline bool established_or_cleanly_closing(const TCP_Endpoint* a, + const TCP_Endpoint* b) + { + return is_clean(a) && is_clean(b); } static inline bool report_gap(const TCP_Endpoint* a, const TCP_Endpoint* b) { return content_gap && - ( BifConst::report_gaps_for_partial || established(a, b) ); + ( BifConst::report_gaps_for_partial || + established_or_cleanly_closing(a, b) ); } void TCP_Reassembler::Gap(uint64 seq, uint64 len) { // Only report on content gaps for connections that - // are in a cleanly established state. In other - // states, these can arise falsely due to things + // are in a cleanly established or closing state. In + // other states, these can arise falsely due to things // like sequence number mismatches in RSTs, or // unseen previous packets in partial connections. - // The one opportunity we lose here is on clean FIN - // handshakes, but Oh Well. + + if ( established_or_cleanly_closing(endp, endp->peer) ) + endp->Gap(seq, len); if ( report_gap(endp, endp->peer) ) { - val_list* vl = new val_list; - vl->append(dst_analyzer->BuildConnVal()); - vl->append(val_mgr->GetBool(IsOrig())); - vl->append(val_mgr->GetCount(seq)); - vl->append(val_mgr->GetCount(len)); - dst_analyzer->ConnectionEvent(content_gap, vl); + dst_analyzer->ConnectionEventFast(content_gap, { + dst_analyzer->BuildConnVal(), + val_mgr->GetBool(IsOrig()), + val_mgr->GetCount(seq), + val_mgr->GetCount(len), + }); } if ( type == Direct ) @@ -335,11 +342,11 @@ void TCP_Reassembler::RecordBlock(DataBlock* b, BroFile* f) if ( contents_file_write_failure ) { - val_list* vl = new val_list(); - vl->append(Endpoint()->Conn()->BuildConnVal()); - vl->append(val_mgr->GetBool(IsOrig())); - vl->append(new StringVal("TCP reassembler content write failure")); - tcp_analyzer->ConnectionEvent(contents_file_write_failure, vl); + tcp_analyzer->ConnectionEventFast(contents_file_write_failure, { + Endpoint()->Conn()->BuildConnVal(), + val_mgr->GetBool(IsOrig()), + new StringVal("TCP reassembler content write failure"), + }); } } @@ -352,11 +359,11 @@ void TCP_Reassembler::RecordGap(uint64 start_seq, uint64 upper_seq, BroFile* f) if ( contents_file_write_failure ) { - val_list* vl = new val_list(); - vl->append(Endpoint()->Conn()->BuildConnVal()); - vl->append(val_mgr->GetBool(IsOrig())); - vl->append(new StringVal("TCP reassembler gap write failure")); - tcp_analyzer->ConnectionEvent(contents_file_write_failure, vl); + tcp_analyzer->ConnectionEventFast(contents_file_write_failure, { + Endpoint()->Conn()->BuildConnVal(), + val_mgr->GetBool(IsOrig()), + new StringVal("TCP reassembler gap write failure"), + }); } } @@ -425,29 +432,15 @@ void TCP_Reassembler::Overlap(const u_char* b1, const u_char* b2, uint64 n) BroString* b1_s = new BroString((const u_char*) b1, n, 0); BroString* b2_s = new BroString((const u_char*) b2, n, 0); - val_list* vl = new val_list(3); - vl->append(tcp_analyzer->BuildConnVal()); - vl->append(new StringVal(b1_s)); - vl->append(new StringVal(b2_s)); - vl->append(new StringVal(flags.AsString())); - tcp_analyzer->ConnectionEvent(rexmit_inconsistency, vl); + 
tcp_analyzer->ConnectionEventFast(rexmit_inconsistency, { + tcp_analyzer->BuildConnVal(), + new StringVal(b1_s), + new StringVal(b2_s), + new StringVal(flags.AsString()), + }); } } -IMPLEMENT_SERIAL(TCP_Reassembler, SER_TCP_REASSEMBLER); - -bool TCP_Reassembler::DoSerialize(SerialInfo* info) const - { - reporter->InternalError("TCP_Reassembler::DoSerialize not implemented"); - return false; // Cannot be reached. - } - -bool TCP_Reassembler::DoUnserialize(UnserialInfo* info) - { - reporter->InternalError("TCP_Reassembler::DoUnserialize not implemented"); - return false; // Cannot be reached. - } - void TCP_Reassembler::Deliver(uint64 seq, int len, const u_char* data) { if ( type == Direct ) @@ -596,13 +589,12 @@ void TCP_Reassembler::DeliverBlock(uint64 seq, int len, const u_char* data) if ( deliver_tcp_contents ) { - val_list* vl = new val_list(); - vl->append(tcp_analyzer->BuildConnVal()); - vl->append(val_mgr->GetBool(IsOrig())); - vl->append(val_mgr->GetCount(seq)); - vl->append(new StringVal(len, (const char*) data)); - - tcp_analyzer->ConnectionEvent(tcp_contents, vl); + tcp_analyzer->ConnectionEventFast(tcp_contents, { + tcp_analyzer->BuildConnVal(), + val_mgr->GetBool(IsOrig()), + val_mgr->GetCount(seq), + new StringVal(len, (const char*) data), + }); } // Q. Can we say this because it is already checked in DataSent()? diff --git a/src/analyzer/protocol/tcp/TCP_Reassembler.h b/src/analyzer/protocol/tcp/TCP_Reassembler.h index bacfa663e0..f4512e4503 100644 --- a/src/analyzer/protocol/tcp/TCP_Reassembler.h +++ b/src/analyzer/protocol/tcp/TCP_Reassembler.h @@ -89,8 +89,6 @@ public: private: TCP_Reassembler() { } - DECLARE_SERIAL(TCP_Reassembler); - void Undelivered(uint64 up_to_seq) override; void Gap(uint64 seq, uint64 len); diff --git a/src/analyzer/protocol/tcp/events.bif b/src/analyzer/protocol/tcp/events.bif index d93ebe4819..032e8f614f 100644 --- a/src/analyzer/protocol/tcp/events.bif +++ b/src/analyzer/protocol/tcp/events.bif @@ -1,11 +1,11 @@ ## Generated when reassembly starts for a TCP connection. This event is raised -## at the moment when Bro's TCP analyzer enables stream reassembly for a +## at the moment when Zeek's TCP analyzer enables stream reassembly for a ## connection. ## ## c: The connection. ## -## .. bro:see:: connection_EOF connection_SYN_packet connection_attempt +## .. zeek:see:: connection_EOF connection_SYN_packet connection_attempt ## connection_established connection_external connection_finished ## connection_first_ACK connection_half_finished connection_partial_close ## connection_pending connection_rejected connection_reset connection_reused @@ -15,13 +15,13 @@ event new_connection_contents%(c: connection%); ## Generated for an unsuccessful connection attempt. This event is raised when ## an originator unsuccessfully attempted to establish a connection. -## "Unsuccessful" is defined as at least :bro:id:`tcp_attempt_delay` seconds +## "Unsuccessful" is defined as at least :zeek:id:`tcp_attempt_delay` seconds ## having elapsed since the originator first sent a connection establishment ## packet to the destination without seeing a reply. ## ## c: The connection. ## -## .. bro:see:: connection_EOF connection_SYN_packet connection_established +## .. 
zeek:see:: connection_EOF connection_SYN_packet connection_established ## connection_external connection_finished connection_first_ACK ## connection_half_finished connection_partial_close connection_pending ## connection_rejected connection_reset connection_reused connection_state_remove @@ -31,15 +31,15 @@ event connection_attempt%(c: connection%); ## Generated when seeing a SYN-ACK packet from the responder in a TCP ## handshake. An associated SYN packet was not seen from the originator -## side if its state is not set to :bro:see:`TCP_ESTABLISHED`. +## side if its state is not set to :zeek:see:`TCP_ESTABLISHED`. ## The final ACK of the handshake in response to SYN-ACK may ## or may not occur later, one way to tell is to check the *history* field of -## :bro:type:`connection` to see if the originator sent an ACK, indicated by +## :zeek:type:`connection` to see if the originator sent an ACK, indicated by ## 'A' in the history string. ## ## c: The connection. ## -## .. bro:see:: connection_EOF connection_SYN_packet connection_attempt +## .. zeek:see:: connection_EOF connection_SYN_packet connection_attempt ## connection_external connection_finished connection_first_ACK ## connection_half_finished connection_partial_close connection_pending ## connection_rejected connection_reset connection_reused connection_state_remove @@ -47,14 +47,14 @@ event connection_attempt%(c: connection%); ## new_connection new_connection_contents partial_connection event connection_established%(c: connection%); -## Generated for a new active TCP connection if Bro did not see the initial -## handshake. This event is raised when Bro has observed traffic from each +## Generated for a new active TCP connection if Zeek did not see the initial +## handshake. This event is raised when Zeek has observed traffic from each ## endpoint, but the activity did not begin with the usual connection ## establishment. ## ## c: The connection. ## -## .. bro:see:: connection_EOF connection_SYN_packet connection_attempt +## .. zeek:see:: connection_EOF connection_SYN_packet connection_attempt ## connection_established connection_external connection_finished ## connection_first_ACK connection_half_finished connection_partial_close ## connection_pending connection_rejected connection_reset connection_reused @@ -65,13 +65,13 @@ event partial_connection%(c: connection%); ## Generated when a previously inactive endpoint attempts to close a TCP ## connection via a normal FIN handshake or an abort RST sequence. When the -## endpoint sent one of these packets, Bro waits -## :bro:id:`tcp_partial_close_delay` prior to generating the event, to give +## endpoint sent one of these packets, Zeek waits +## :zeek:id:`tcp_partial_close_delay` prior to generating the event, to give ## the other endpoint a chance to close the connection normally. ## ## c: The connection. ## -## .. bro:see:: connection_EOF connection_SYN_packet connection_attempt +## .. zeek:see:: connection_EOF connection_SYN_packet connection_attempt ## connection_established connection_external connection_finished ## connection_first_ACK connection_half_finished connection_pending ## connection_rejected connection_reset connection_reused connection_state_remove @@ -84,7 +84,7 @@ event connection_partial_close%(c: connection%); ## ## c: The connection. ## -## .. bro:see:: connection_EOF connection_SYN_packet connection_attempt +## .. 
zeek:see:: connection_EOF connection_SYN_packet connection_attempt ## connection_established connection_external connection_first_ACK ## connection_half_finished connection_partial_close connection_pending ## connection_rejected connection_reset connection_reused connection_state_remove @@ -94,11 +94,11 @@ event connection_finished%(c: connection%); ## Generated when one endpoint of a TCP connection attempted to gracefully close ## the connection, but the other endpoint is in the TCP_INACTIVE state. This can -## happen due to split routing, in which Bro only sees one side of a connection. +## happen due to split routing, in which Zeek only sees one side of a connection. ## ## c: The connection. ## -## .. bro:see:: connection_EOF connection_SYN_packet connection_attempt +## .. zeek:see:: connection_EOF connection_SYN_packet connection_attempt ## connection_established connection_external connection_finished ## connection_first_ACK connection_partial_close connection_pending ## connection_rejected connection_reset connection_reused connection_state_remove @@ -112,7 +112,7 @@ event connection_half_finished%(c: connection%); ## ## c: The connection. ## -## .. bro:see:: connection_EOF connection_SYN_packet connection_attempt +## .. zeek:see:: connection_EOF connection_SYN_packet connection_attempt ## connection_established connection_external connection_finished ## connection_first_ACK connection_half_finished connection_partial_close ## connection_pending connection_reset connection_reused connection_state_remove @@ -121,10 +121,10 @@ event connection_half_finished%(c: connection%); ## ## .. note:: ## -## If the responder does not respond at all, :bro:id:`connection_attempt` is +## If the responder does not respond at all, :zeek:id:`connection_attempt` is ## raised instead. If the responder initially accepts the connection but -## aborts it later, Bro first generates :bro:id:`connection_established` -## and then :bro:id:`connection_reset`. +## aborts it later, Zeek first generates :zeek:id:`connection_established` +## and then :zeek:id:`connection_reset`. event connection_rejected%(c: connection%); ## Generated when an endpoint aborted a TCP connection. The event is raised @@ -133,7 +133,7 @@ event connection_rejected%(c: connection%); ## ## c: The connection. ## -## .. bro:see:: connection_EOF connection_SYN_packet connection_attempt +## .. zeek:see:: connection_EOF connection_SYN_packet connection_attempt ## connection_established connection_external connection_finished ## connection_first_ACK connection_half_finished connection_partial_close ## connection_pending connection_rejected connection_reused @@ -142,26 +142,26 @@ event connection_rejected%(c: connection%); ## partial_connection event connection_reset%(c: connection%); -## Generated for each still-open TCP connection when Bro terminates. +## Generated for each still-open TCP connection when Zeek terminates. ## ## c: The connection. ## -## .. bro:see:: connection_EOF connection_SYN_packet connection_attempt +## .. 
zeek:see:: connection_EOF connection_SYN_packet connection_attempt ## connection_established connection_external connection_finished ## connection_first_ACK connection_half_finished connection_partial_close ## connection_rejected connection_reset connection_reused connection_state_remove ## connection_status_update connection_timeout scheduled_analyzer_applied -## new_connection new_connection_contents partial_connection bro_done +## new_connection new_connection_contents partial_connection zeek_done event connection_pending%(c: connection%); -## Generated for a SYN packet. Bro raises this event for every SYN packet seen +## Generated for a SYN packet. Zeek raises this event for every SYN packet seen ## by its TCP analyzer. ## ## c: The connection. ## ## pkt: Information extracted from the SYN packet. ## -## .. bro:see:: connection_EOF connection_attempt connection_established +## .. zeek:see:: connection_EOF connection_attempt connection_established ## connection_external connection_finished connection_first_ACK ## connection_half_finished connection_partial_close connection_pending ## connection_rejected connection_reset connection_reused connection_state_remove @@ -182,7 +182,7 @@ event connection_SYN_packet%(c: connection, pkt: SYN_packet%); ## ## c: The connection. ## -## .. bro:see:: connection_EOF connection_SYN_packet connection_attempt +## .. zeek:see:: connection_EOF connection_SYN_packet connection_attempt ## connection_established connection_external connection_finished ## connection_half_finished connection_partial_close connection_pending ## connection_rejected connection_reset connection_reused connection_state_remove @@ -202,7 +202,7 @@ event connection_first_ACK%(c: connection%); ## ## is_orig: True if the event is raised for the originator side. ## -## .. bro:see:: connection_SYN_packet connection_attempt connection_established +## .. zeek:see:: connection_SYN_packet connection_attempt connection_established ## connection_external connection_finished connection_first_ACK ## connection_half_finished connection_partial_close connection_pending ## connection_rejected connection_reset connection_reused connection_state_remove @@ -213,7 +213,7 @@ event connection_EOF%(c: connection, is_orig: bool%); ## Generated for every TCP packet. This is a very low-level and expensive event ## that should be avoided when at all possible. It's usually infeasible to ## handle when processing even medium volumes of traffic in real-time. It's -## slightly better than :bro:id:`new_packet` because it affects only TCP, but +## slightly better than :zeek:id:`new_packet` because it affects only TCP, but ## not much. That said, if you work from a trace and want to do some ## packet-level analysis, it may come in handy. ## @@ -235,7 +235,7 @@ event connection_EOF%(c: connection, is_orig: bool%); ## payload: The raw TCP payload. Note that this may be shorter than *len* if ## the packet was not fully captured. ## -## .. bro:see:: new_packet packet_contents tcp_option tcp_contents tcp_rexmit +## .. zeek:see:: new_packet packet_contents tcp_option tcp_contents tcp_rexmit event tcp_packet%(c: connection, is_orig: bool, flags: string, seq: count, ack: count, len: count, payload: string%); ## Generated for each option found in a TCP header. Like many of the ``tcp_*`` @@ -250,16 +250,16 @@ event tcp_packet%(c: connection, is_orig: bool, flags: string, seq: count, ack: ## ## optlen: The length of the options value. ## -## .. bro:see:: tcp_packet tcp_contents tcp_rexmit +## .. 
zeek:see:: tcp_packet tcp_contents tcp_rexmit ## ## .. note:: There is currently no way to get the actual option value, if any. event tcp_option%(c: connection, is_orig: bool, opt: count, optlen: count%); ## Generated for each chunk of reassembled TCP payload. When content delivery is -## enabled for a TCP connection (via :bro:id:`tcp_content_delivery_ports_orig`, -## :bro:id:`tcp_content_delivery_ports_resp`, -## :bro:id:`tcp_content_deliver_all_orig`, -## :bro:id:`tcp_content_deliver_all_resp`), this event is raised for each chunk +## enabled for a TCP connection (via :zeek:id:`tcp_content_delivery_ports_orig`, +## :zeek:id:`tcp_content_delivery_ports_resp`, +## :zeek:id:`tcp_content_deliver_all_orig`, +## :zeek:id:`tcp_content_deliver_all_resp`), this event is raised for each chunk ## of in-order payload reconstructed from the packet stream. Note that this ## event is potentially expensive if many connections carry significant amounts ## of data as then all that data needs to be passed on to the scripting layer. @@ -273,7 +273,7 @@ event tcp_option%(c: connection, is_orig: bool, opt: count, optlen: count%); ## ## contents: The raw payload, which will be non-empty. ## -## .. bro:see:: tcp_packet tcp_option tcp_rexmit +## .. zeek:see:: tcp_packet tcp_option tcp_rexmit ## tcp_content_delivery_ports_orig tcp_content_delivery_ports_resp ## tcp_content_deliver_all_resp tcp_content_deliver_all_orig ## @@ -283,11 +283,25 @@ event tcp_option%(c: connection, is_orig: bool, opt: count, optlen: count%); ## application-layer protocol analyzers internally. Subsequent invocations of ## this event for the same connection receive non-overlapping in-order chunks ## of its TCP payload stream. It is however undefined what size each chunk -## has; while Bro passes the data on as soon as possible, specifics depend on +## has; while Zeek passes the data on as soon as possible, specifics depend on ## network-level effects such as latency, acknowledgements, reordering, etc. event tcp_contents%(c: connection, is_orig: bool, seq: count, contents: string%); -## TODO. +## Generated for each detected TCP segment retransmission. +## +## c: The connection the packet is part of. +## +## is_orig: True if the packet was sent by the connection's originator. +## +## seq: The segment's relative TCP sequence number. +## +## len: The length of the TCP segment, as specified in the packet header. +## +## data_in_flight: The number of bytes corresponding to the difference between +## the last sequence number and last acknowledgement number +## we've seen for a given endpoint. +## +## window: the TCP window size. event tcp_rexmit%(c: connection, is_orig: bool, seq: count, len: count, data_in_flight: count, window: count%); ## Generated if a TCP flow crosses a checksum-error threshold, per @@ -299,8 +313,8 @@ event tcp_rexmit%(c: connection, is_orig: bool, seq: count, len: count, data_in_ ## ## threshold: the threshold that was crossed ## -## .. bro:see:: udp_multiple_checksum_errors -## tcp_multiple_zero_windows tcp_multiple_retransmissions +## .. zeek:see:: udp_multiple_checksum_errors +## tcp_multiple_zero_windows tcp_multiple_retransmissions tcp_multiple_gap event tcp_multiple_checksum_errors%(c: connection, is_orig: bool, threshold: count%); ## Generated if a TCP flow crosses a zero-window threshold, per @@ -312,7 +326,7 @@ event tcp_multiple_checksum_errors%(c: connection, is_orig: bool, threshold: cou ## ## threshold: the threshold that was crossed ## -## .. 
bro:see:: tcp_multiple_checksum_errors tcp_multiple_retransmissions +## .. zeek:see:: tcp_multiple_checksum_errors tcp_multiple_retransmissions tcp_multiple_gap event tcp_multiple_zero_windows%(c: connection, is_orig: bool, threshold: count%); ## Generated if a TCP flow crosses a retransmission threshold, per @@ -324,9 +338,21 @@ event tcp_multiple_zero_windows%(c: connection, is_orig: bool, threshold: count% ## ## threshold: the threshold that was crossed ## -## .. bro:see:: tcp_multiple_checksum_errors tcp_multiple_zero_windows +## .. zeek:see:: tcp_multiple_checksum_errors tcp_multiple_zero_windows tcp_multiple_gap event tcp_multiple_retransmissions%(c: connection, is_orig: bool, threshold: count%); +## Generated if a TCP flow crosses a gap threshold, per 'G'/'g' history +## reporting. +## +## c: The connection record for the TCP connection. +## +## is_orig: True if the event is raised for the originator side. +## +## threshold: the threshold that was crossed +## +## .. zeek:see:: tcp_multiple_checksum_errors tcp_multiple_zero_windows tcp_multiple_retransmissions +event tcp_multiple_gap%(c: connection, is_orig: bool, threshold: count%); + ## Generated when failing to write contents of a TCP stream to a file. ## ## c: The connection whose contents are being recorded. @@ -335,5 +361,5 @@ event tcp_multiple_retransmissions%(c: connection, is_orig: bool, threshold: cou ## ## msg: A reason or description for the failure. ## -## .. bro:see:: set_contents_file get_contents_file +## .. zeek:see:: set_contents_file get_contents_file event contents_file_write_failure%(c: connection, is_orig: bool, msg: string%); diff --git a/src/analyzer/protocol/tcp/functions.bif b/src/analyzer/protocol/tcp/functions.bif index 90c3e5ae2a..af8a894137 100644 --- a/src/analyzer/protocol/tcp/functions.bif +++ b/src/analyzer/protocol/tcp/functions.bif @@ -1,5 +1,6 @@ %%{ +#include "File.h" #include "analyzer/protocol/tcp/TCP.h" %%} @@ -12,7 +13,7 @@ ## Returns: The highest sequence number sent by a connection's originator, or 0 ## if *cid* does not point to an active TCP connection. ## -## .. bro:see:: get_resp_seq +## .. zeek:see:: get_resp_seq function get_orig_seq%(cid: conn_id%): count %{ Connection* c = sessions->FindConnection(cid); @@ -41,7 +42,7 @@ function get_orig_seq%(cid: conn_id%): count ## Returns: The highest sequence number sent by a connection's responder, or 0 ## if *cid* does not point to an active TCP connection. ## -## .. bro:see:: get_orig_seq +## .. zeek:see:: get_orig_seq function get_resp_seq%(cid: conn_id%): count %{ Connection* c = sessions->FindConnection(cid); @@ -76,7 +77,7 @@ function get_resp_seq%(cid: conn_id%): count ## responder (often the server). ## - ``CONTENTS_BOTH``: Record the data sent in both directions. ## Results in the two directions being intermixed in the file, -## in the order the data was seen by Bro. +## in the order the data was seen by Zeek. ## ## f: The file handle of the file to write the contents to. ## @@ -89,9 +90,9 @@ function get_resp_seq%(cid: conn_id%): count ## contents of individual packets. Reordering and duplicates are ## removed. If any data is missing, the recording stops at the ## missing data; this can happen, e.g., due to an -## :bro:id:`content_gap` event. +## :zeek:id:`content_gap` event. ## -## .. bro:see:: get_contents_file set_record_packets contents_file_write_failure +## .. 
zeek:see:: get_contents_file set_record_packets contents_file_write_failure function set_contents_file%(cid: conn_id, direction: count, f: file%): bool %{ Connection* c = sessions->FindConnection(cid); @@ -107,14 +108,14 @@ function set_contents_file%(cid: conn_id, direction: count, f: file%): bool ## cid: The connection ID. ## ## direction: Controls what sides of the connection to record. See -## :bro:id:`set_contents_file` for possible values. +## :zeek:id:`set_contents_file` for possible values. ## -## Returns: The :bro:type:`file` handle for the contents file of the +## Returns: The :zeek:type:`file` handle for the contents file of the ## connection identified by *cid*. If the connection exists ## but there is no contents file for *direction*, then the function ## generates an error and returns a file handle to ``stderr``. ## -## .. bro:see:: set_contents_file set_record_packets contents_file_write_failure +## .. zeek:see:: set_contents_file set_record_packets contents_file_write_failure function get_contents_file%(cid: conn_id, direction: count%): file %{ Connection* c = sessions->FindConnection(cid); diff --git a/src/analyzer/protocol/teredo/CMakeLists.txt b/src/analyzer/protocol/teredo/CMakeLists.txt index c9c4a84db6..da23152c3d 100644 --- a/src/analyzer/protocol/teredo/CMakeLists.txt +++ b/src/analyzer/protocol/teredo/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro Teredo) -bro_plugin_cc(Teredo.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek Teredo) +zeek_plugin_cc(Teredo.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/teredo/Plugin.cc b/src/analyzer/protocol/teredo/Plugin.cc index 226d84a4a2..eeebea870d 100644 --- a/src/analyzer/protocol/teredo/Plugin.cc +++ b/src/analyzer/protocol/teredo/Plugin.cc @@ -6,7 +6,7 @@ #include "Teredo.h" namespace plugin { -namespace Bro_Teredo { +namespace Zeek_Teredo { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("Teredo", ::analyzer::teredo::Teredo_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::Teredo"; + config.name = "Zeek::Teredo"; config.description = "Teredo analyzer"; return config; } diff --git a/src/analyzer/protocol/teredo/events.bif b/src/analyzer/protocol/teredo/events.bif index 62bc7d06cd..080eb1bf6e 100644 --- a/src/analyzer/protocol/teredo/events.bif +++ b/src/analyzer/protocol/teredo/events.bif @@ -5,7 +5,7 @@ ## ## inner: The Teredo-encapsulated IPv6 packet header and transport header. ## -## .. bro:see:: teredo_authentication teredo_origin_indication teredo_bubble +## .. zeek:see:: teredo_authentication teredo_origin_indication teredo_bubble ## ## .. note:: Since this event may be raised on a per-packet basis, handling ## it may become particularly expensive for real-time analysis. @@ -19,7 +19,7 @@ event teredo_packet%(outer: connection, inner: teredo_hdr%); ## ## inner: The Teredo-encapsulated IPv6 packet header and transport header. ## -## .. bro:see:: teredo_packet teredo_origin_indication teredo_bubble +## .. zeek:see:: teredo_packet teredo_origin_indication teredo_bubble ## ## .. note:: Since this event may be raised on a per-packet basis, handling ## it may become particularly expensive for real-time analysis. 
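As a usage sketch for the TCP script-layer pieces touched above (the new tcp_multiple_gap event, the now-documented tcp_rexmit parameters, and set_contents_file / contents_file_write_failure), the handlers below show one way a script might consume them. The print formatting, the output file name, and the choice of connection_established as the trigger for set_contents_file are illustrative assumptions only; the event and function signatures follow the declarations shown above.

event tcp_multiple_gap(c: connection, is_orig: bool, threshold: count)
    {
    # 'G'/'g' history threshold crossed; report which endpoint hit it.
    print fmt("%s: %s side crossed gap threshold %d", c$uid,
              is_orig ? "originator" : "responder", threshold);
    }

event tcp_rexmit(c: connection, is_orig: bool, seq: count, len: count,
                 data_in_flight: count, window: count)
    {
    print fmt("%s: rexmit seq=%d len=%d in_flight=%d window=%d",
              c$uid, seq, len, data_in_flight, window);
    }

event connection_established(c: connection)
    {
    # Record the originator's reassembled stream. CONTENTS_ORIG is the
    # existing direction constant; the file name chosen here is arbitrary.
    local f = open(fmt("contents.%s.orig", c$uid));
    set_contents_file(c$id, CONTENTS_ORIG, f);
    }

event contents_file_write_failure(c: connection, is_orig: bool, msg: string)
    {
    print fmt("%s: contents write failure: %s", c$uid, msg);
    }
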
@@ -33,21 +33,21 @@ event teredo_authentication%(outer: connection, inner: teredo_hdr%); ## ## inner: The Teredo-encapsulated IPv6 packet header and transport header. ## -## .. bro:see:: teredo_packet teredo_authentication teredo_bubble +## .. zeek:see:: teredo_packet teredo_authentication teredo_bubble ## ## .. note:: Since this event may be raised on a per-packet basis, handling ## it may become particularly expensive for real-time analysis. event teredo_origin_indication%(outer: connection, inner: teredo_hdr%); ## Generated for Teredo bubble packets. That is, IPv6 packets encapsulated -## in a Teredo tunnel that have a Next Header value of :bro:id:`IPPROTO_NONE`. +## in a Teredo tunnel that have a Next Header value of :zeek:id:`IPPROTO_NONE`. ## See :rfc:`4380` for more information about the Teredo protocol. ## ## outer: The Teredo tunnel connection. ## ## inner: The Teredo-encapsulated IPv6 packet header and transport header. ## -## .. bro:see:: teredo_packet teredo_authentication teredo_origin_indication +## .. zeek:see:: teredo_packet teredo_authentication teredo_origin_indication ## ## .. note:: Since this event may be raised on a per-packet basis, handling ## it may become particularly expensive for real-time analysis. diff --git a/src/analyzer/protocol/udp/CMakeLists.txt b/src/analyzer/protocol/udp/CMakeLists.txt index 0c92be60a3..47140a9df2 100644 --- a/src/analyzer/protocol/udp/CMakeLists.txt +++ b/src/analyzer/protocol/udp/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro UDP) -bro_plugin_cc(UDP.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek UDP) +zeek_plugin_cc(UDP.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/udp/Plugin.cc b/src/analyzer/protocol/udp/Plugin.cc index 2569d95a86..9a42be6fa8 100644 --- a/src/analyzer/protocol/udp/Plugin.cc +++ b/src/analyzer/protocol/udp/Plugin.cc @@ -6,7 +6,7 @@ #include "analyzer/protocol/udp/UDP.h" namespace plugin { -namespace Bro_UDP { +namespace Zeek_UDP { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("UDP", ::analyzer::udp::UDP_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::UDP"; + config.name = "Zeek::UDP"; config.description = "UDP Analyzer"; return config; } diff --git a/src/analyzer/protocol/udp/UDP.cc b/src/analyzer/protocol/udp/UDP.cc index ca144941b6..8cbb400b9f 100644 --- a/src/analyzer/protocol/udp/UDP.cc +++ b/src/analyzer/protocol/udp/UDP.cc @@ -2,7 +2,7 @@ #include -#include "bro-config.h" +#include "zeek-config.h" #include "Net.h" #include "NetVar.h" @@ -157,11 +157,11 @@ void UDP_Analyzer::DeliverPacket(int len, const u_char* data, bool is_orig, if ( do_udp_contents ) { - val_list* vl = new val_list; - vl->append(BuildConnVal()); - vl->append(val_mgr->GetBool(is_orig)); - vl->append(new StringVal(len, (const char*) data)); - ConnectionEvent(udp_contents, vl); + ConnectionEventFast(udp_contents, { + BuildConnVal(), + val_mgr->GetBool(is_orig), + new StringVal(len, (const char*) data), + }); } Unref(port_val); diff --git a/src/analyzer/protocol/udp/events.bif b/src/analyzer/protocol/udp/events.bif index afcace330b..60326bf601 100644 --- a/src/analyzer/protocol/udp/events.bif +++ b/src/analyzer/protocol/udp/events.bif @@ -4,7 +4,7 @@ ## ## u: The connection record for the corresponding UDP flow. ## -## .. 
bro:see:: udp_contents udp_reply udp_session_done +## .. zeek:see:: udp_contents udp_reply udp_session_done event udp_request%(u: connection%); ## Generated for each packet sent by a UDP flow's responder. This a potentially @@ -13,17 +13,17 @@ event udp_request%(u: connection%); ## ## u: The connection record for the corresponding UDP flow. ## -## .. bro:see:: udp_contents udp_request udp_session_done +## .. zeek:see:: udp_contents udp_request udp_session_done event udp_reply%(u: connection%); ## Generated for UDP packets to pass on their payload. As the number of UDP ## packets can be very large, this event is normally raised only for those on -## ports configured in :bro:id:`udp_content_delivery_ports_orig` (for packets -## sent by the flow's originator) or :bro:id:`udp_content_delivery_ports_resp` +## ports configured in :zeek:id:`udp_content_delivery_ports_orig` (for packets +## sent by the flow's originator) or :zeek:id:`udp_content_delivery_ports_resp` ## (for packets sent by the flow's responder). However, delivery can be enabled ## for all UDP request and reply packets by setting -## :bro:id:`udp_content_deliver_all_orig` or -## :bro:id:`udp_content_deliver_all_resp`, respectively. Note that this +## :zeek:id:`udp_content_deliver_all_orig` or +## :zeek:id:`udp_content_deliver_all_resp`, respectively. Note that this ## event is also raised for all matching UDP packets, including empty ones. ## ## u: The connection record for the corresponding UDP flow. @@ -32,7 +32,7 @@ event udp_reply%(u: connection%); ## ## contents: TODO. ## -## .. bro:see:: udp_reply udp_request udp_session_done +## .. zeek:see:: udp_reply udp_request udp_session_done ## udp_content_deliver_all_orig udp_content_deliver_all_resp ## udp_content_delivery_ports_orig udp_content_delivery_ports_resp event udp_contents%(u: connection, is_orig: bool, contents: string%); @@ -46,6 +46,6 @@ event udp_contents%(u: connection, is_orig: bool, contents: string%); ## ## threshold: the threshold that was crossed ## -## .. bro:see:: udp_reply udp_request udp_session_done +## .. 
zeek:see:: udp_reply udp_request udp_session_done ## tcp_multiple_checksum_errors event udp_multiple_checksum_errors%(u: connection, is_orig: bool, threshold: count%); diff --git a/src/analyzer/protocol/vxlan/CMakeLists.txt b/src/analyzer/protocol/vxlan/CMakeLists.txt index e531555321..64c8600844 100644 --- a/src/analyzer/protocol/vxlan/CMakeLists.txt +++ b/src/analyzer/protocol/vxlan/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro VXLAN) -bro_plugin_cc(VXLAN.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek VXLAN) +zeek_plugin_cc(VXLAN.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_end() diff --git a/src/analyzer/protocol/vxlan/Plugin.cc b/src/analyzer/protocol/vxlan/Plugin.cc index 1c214d691f..73c2cfd53b 100644 --- a/src/analyzer/protocol/vxlan/Plugin.cc +++ b/src/analyzer/protocol/vxlan/Plugin.cc @@ -5,7 +5,7 @@ #include "VXLAN.h" namespace plugin { -namespace Bro_VXLAN { +namespace Zeek_VXLAN { class Plugin : public plugin::Plugin { public: @@ -14,7 +14,7 @@ public: AddComponent(new ::analyzer::Component("VXLAN", ::analyzer::vxlan::VXLAN_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::VXLAN"; + config.name = "Zeek::VXLAN"; config.description = "VXLAN analyzer"; return config; } diff --git a/src/analyzer/protocol/xmpp/CMakeLists.txt b/src/analyzer/protocol/xmpp/CMakeLists.txt index ec5bb84837..5cc55f82a7 100644 --- a/src/analyzer/protocol/xmpp/CMakeLists.txt +++ b/src/analyzer/protocol/xmpp/CMakeLists.txt @@ -1,12 +1,12 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro XMPP) -bro_plugin_cc(Plugin.cc) -bro_plugin_cc(XMPP.cc) -bro_plugin_bif(events.bif) -bro_plugin_pac(xmpp.pac xmpp-analyzer.pac xmpp-protocol.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek XMPP) +zeek_plugin_cc(Plugin.cc) +zeek_plugin_cc(XMPP.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_pac(xmpp.pac xmpp-analyzer.pac xmpp-protocol.pac) +zeek_plugin_end() diff --git a/src/analyzer/protocol/xmpp/Plugin.cc b/src/analyzer/protocol/xmpp/Plugin.cc index d3bfcc5b10..92165e3d99 100644 --- a/src/analyzer/protocol/xmpp/Plugin.cc +++ b/src/analyzer/protocol/xmpp/Plugin.cc @@ -4,7 +4,7 @@ #include "XMPP.h" namespace plugin { -namespace Bro_XMPP { +namespace Zeek_XMPP { class Plugin : public plugin::Plugin { public: @@ -13,7 +13,7 @@ public: AddComponent(new ::analyzer::Component("XMPP", ::analyzer::xmpp::XMPP_Analyzer::Instantiate)); plugin::Configuration config; - config.name = "Bro::XMPP"; + config.name = "Zeek::XMPP"; config.description = "XMPP analyzer (StartTLS only)"; return config; } diff --git a/src/analyzer/protocol/xmpp/xmpp-analyzer.pac b/src/analyzer/protocol/xmpp/xmpp-analyzer.pac index 5253ce050b..26a9c69b5b 100644 --- a/src/analyzer/protocol/xmpp/xmpp-analyzer.pac +++ b/src/analyzer/protocol/xmpp/xmpp-analyzer.pac @@ -32,7 +32,8 @@ refine connection XMPP_Conn += { if ( !is_orig && ( token == "proceed" || token_no_ns == "proceed" ) && client_starttls ) { bro_analyzer()->StartTLS(); - BifEvent::generate_xmpp_starttls(bro_analyzer(), bro_analyzer()->Conn()); + if ( xmpp_starttls ) + BifEvent::generate_xmpp_starttls(bro_analyzer(), bro_analyzer()->Conn()); } else if ( !is_orig && token == "proceed" ) reporter->Weird(bro_analyzer()->Conn(), "XMPP: proceed without starttls"); diff --git a/src/analyzer/protocol/zip/CMakeLists.txt 
b/src/analyzer/protocol/zip/CMakeLists.txt index 40c64afd6e..579d225e5a 100644 --- a/src/analyzer/protocol/zip/CMakeLists.txt +++ b/src/analyzer/protocol/zip/CMakeLists.txt @@ -1,8 +1,8 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro ZIP) -bro_plugin_cc(ZIP.cc Plugin.cc) -bro_plugin_end() +zeek_plugin_begin(Zeek ZIP) +zeek_plugin_cc(ZIP.cc Plugin.cc) +zeek_plugin_end() diff --git a/src/analyzer/protocol/zip/Plugin.cc b/src/analyzer/protocol/zip/Plugin.cc index 7a0bff39ad..f81576e1bb 100644 --- a/src/analyzer/protocol/zip/Plugin.cc +++ b/src/analyzer/protocol/zip/Plugin.cc @@ -6,7 +6,7 @@ #include "ZIP.h" namespace plugin { -namespace Bro_ZIP { +namespace Zeek_ZIP { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::analyzer::Component("ZIP", 0)); plugin::Configuration config; - config.name = "Bro::ZIP"; + config.name = "Zeek::ZIP"; config.description = "Generic ZIP support analyzer"; return config; } diff --git a/src/analyzer/protocol/zip/ZIP.h b/src/analyzer/protocol/zip/ZIP.h index de22803b26..89838729cd 100644 --- a/src/analyzer/protocol/zip/ZIP.h +++ b/src/analyzer/protocol/zip/ZIP.h @@ -3,7 +3,7 @@ #ifndef ANALYZER_PROTOCOL_ZIP_ZIP_H #define ANALYZER_PROTOCOL_ZIP_ZIP_H -#include "bro-config.h" +#include "zeek-config.h" #include "zlib.h" #include "analyzer/protocol/tcp/TCP.h" diff --git a/src/bro.bif b/src/bro.bif deleted file mode 100644 index 96419ab83d..0000000000 --- a/src/bro.bif +++ /dev/null @@ -1,5528 +0,0 @@ -##! A collection of built-in functions that implement a variety of things -##! such as general programming algorithms, string processing, math functions, -##! introspection, type conversion, file/directory manipulation, packet -##! filtering, interprocess communication and controlling protocol analyzer -##! behavior. -##! -##! You'll find most of Bro's built-in functions that aren't protocol-specific -##! in this file. - -%%{ // C segment -#include -#include -#include -#include -#include -#include -#include - -#include "digest.h" -#include "Reporter.h" -#include "IPAddr.h" -#include "util.h" -#include "file_analysis/Manager.h" -#include "iosource/Manager.h" -#include "iosource/Packet.h" - -using namespace std; - -TableType* var_sizes; - -static iosource::PktDumper* addl_pkt_dumper = 0; - -bro_int_t parse_int(const char*& fmt) - { - bro_int_t k = 0; - while ( isdigit(*fmt) ) - { - k = k * 10 + (*fmt - '0'); - ++fmt; - } - - return k; - } - -static TypeTag ok_d_fmt[] = { - TYPE_BOOL, TYPE_ENUM, TYPE_INT, TYPE_COUNT, TYPE_COUNTER, TYPE_PORT, - TYPE_SUBNET, - TYPE_ERROR -}; -static TypeTag ok_f_fmt[] = { - TYPE_DOUBLE, TYPE_TIME, TYPE_INTERVAL, - TYPE_ERROR -}; - -static int check_fmt_type(TypeTag t, TypeTag ok[]) - { - for ( int i = 0; ok[i] != TYPE_ERROR; ++i ) - if ( ok[i] == t ) - return 1; - - return 0; - } - -static void do_fmt(const char*& fmt, Val* v, ODesc* d) - { - TypeTag t = v->Type()->Tag(); - InternalTypeTag it = v->Type()->InternalType(); - - bool zero_pad = false; - bool left_just = false; - int field_width = -1; - - // Left-align, if requested. - if ( *fmt == '-' ) - { - left_just = true; - ++fmt; - } - - // Parse field width, if given. - if ( isdigit(*fmt) ) - { - // If field width starts with zero, do zero-padding. - if ( *fmt == '0' ) - { - zero_pad = true; - ++fmt; - } - - field_width = parse_int(fmt); - } - - int precision = -1; - if ( *fmt == '.' 
) - { - ++fmt; - precision = parse_int(fmt); - } - - if ( field_width > 128 || precision > 128 ) - { - builtin_error("excessive field width or precision"); - return; - } - - // Create the numerical format string. - char num_fmt[64]; - num_fmt[0] = '\0'; - - if ( field_width >= 0 ) - { - // Like sprintf(), ignore '0' if '-' is given. - const char* align = left_just ? "-" : (zero_pad ? "0" : ""); - snprintf(num_fmt, sizeof(num_fmt), "%s%d", align, field_width); - } - - if ( precision >= 0 ) - snprintf(num_fmt + strlen(num_fmt), - sizeof(num_fmt) - strlen(num_fmt), ".%d", precision); - - char fmt_buf[512]; - char out_buf[512]; - - ODesc s; - s.SetStyle(RAW_STYLE); - - if ( precision >= 0 && *fmt != 'e' && *fmt != 'f' && *fmt != 'g' ) - builtin_error("precision specified for non-floating point"); - - switch ( *fmt ) { - case 'D': - case 'T': // ISO Timestamp with microsecond precision. - { - if ( t != TYPE_TIME ) - { - builtin_error("bad type for Date/Time format", v); - break; - } - - time_t time = time_t(v->InternalDouble()); - struct tm t; - - int is_time_fmt = *fmt == 'T'; - - if ( ! localtime_r(&time, &t) ) - s.AddSP(""); - - if ( ! strftime(out_buf, sizeof(out_buf), - is_time_fmt ? - "%Y-%m-%d-%H:%M" : "%Y-%m-%d-%H:%M:%S", - &t) ) - s.AddSP(""); - - else - { - s.Add(out_buf); - - if ( is_time_fmt ) - { - double secs = v->CoerceToUnsigned() % 60; - - secs += v->InternalDouble(); - secs -= v->CoerceToUnsigned(); - - snprintf(out_buf, sizeof(out_buf), - ":%012.9f", secs); - s.Add(out_buf); - } - } - } - break; - - case 'd': - case 'x': - { - if ( *fmt == 'x' && it == TYPE_INTERNAL_ADDR ) - { - // Deficiency: we don't support num_fmt in this case. - // This makes only a very slight difference, so not - // clear it would e worth the hassle. - - snprintf(out_buf, sizeof(out_buf), "%s", - v->AsAddr().AsHexString().c_str()); - } - - else if ( ! check_fmt_type(t, ok_d_fmt) ) - { - builtin_error("bad type for %d/%x format", v); - break; - } - - else if ( it == TYPE_INTERNAL_UNSIGNED ) - { - bro_uint_t u = v->CoerceToUnsigned(); - - if ( v->Type()->IsNetworkOrder() ) - { - if ( v->Type()->Tag() == TYPE_PORT ) - u = v->AsPortVal()->Port(); - else - u = ntohl(uint32(u)); - } - - snprintf(fmt_buf, sizeof(fmt_buf), "%%%s%s", num_fmt, - *fmt == 'd' ? "llu" : "llx"); - snprintf(out_buf, sizeof(out_buf), fmt_buf, u); - } - - else - { - snprintf(fmt_buf, sizeof(fmt_buf), "%%%s%s", num_fmt, - *fmt == 'd' ? "lld" : "llx"); - snprintf(out_buf, sizeof(out_buf), fmt_buf, - v->CoerceToInt()); - } - - s.Add(out_buf); - } - break; - - case 's': - v->Describe(&s); - break; - - case 'e': - case 'f': - case 'g': - { - if ( ! check_fmt_type(t, ok_f_fmt) ) - { - builtin_error("bad type for floating-point format", v); - break; - } - - snprintf(fmt_buf, sizeof(fmt_buf), "%%%s%c", num_fmt, *fmt); - snprintf(out_buf, sizeof(out_buf), fmt_buf, v->CoerceToDouble()); - s.Add(out_buf); - } - break; - - default: - builtin_error("bad format"); - } - - // Left-padding with whitespace, if any. - if ( field_width > 0 && ! left_just ) - { - int sl = strlen(s.Description()); - while ( ++sl <= field_width ) - d->Add(" "); - } - - d->AddN((const char*)(s.Bytes()), s.Len()); - - // Right-padding with whitespace, if any. - if ( field_width > 0 && left_just ) - { - int sl = s.Len(); - while ( ++sl <= field_width ) - d->Add(" "); - } - - ++fmt; - } - -static int next_fmt(const char*& fmt, val_list* args, ODesc* d, int& n) - { - const char* fp = fmt; - - // Skip up to next format indicator. 
- while ( *fp && *fp != '%' ) - ++fp; - - d->AddN(fmt, fp - fmt); - - if ( *fp == '\0' ) - // No more to do. - return 0; - - fmt = fp + 1; - if ( *fmt == '%' ) - { - // "%%" -> '%' - d->Add("%"); - ++fmt; - return next_fmt(fmt, args, d, n); - } - - if ( ++n >= args->length() ) - return 0; - - do_fmt(fmt, (*args)[n], d); - - return *fmt != '\0'; - } -%%} - -# =========================================================================== -# -# Core -# -# =========================================================================== - -## Returns the current wall-clock time. -## -## In general, you should use :bro:id:`network_time` instead -## unless you are using Bro for non-networking uses (such as general -## scripting; not particularly recommended), because otherwise your script -## may behave very differently on live traffic versus played-back traffic -## from a save file. -## -## Returns: The wall-clock time. -## -## .. bro:see:: network_time -function current_time%(%): time - %{ - return new Val(current_time(), TYPE_TIME); - %} - -## Returns the timestamp of the last packet processed. This function returns -## the timestamp of the most recently read packet, whether read from a -## live network interface or from a save file. -## -## Returns: The timestamp of the packet processed. -## -## .. bro:see:: current_time -function network_time%(%): time - %{ - return new Val(network_time, TYPE_TIME); - %} - -## Returns a system environment variable. -## -## var: The name of the variable whose value to request. -## -## Returns: The system environment variable identified by *var*, or an empty -## string if it is not defined. -## -## .. bro:see:: setenv -function getenv%(var: string%): string - %{ - const char* env_val = getenv(var->CheckString()); - if ( ! env_val ) - env_val = ""; // ### - return new StringVal(env_val); - %} - -## Sets a system environment variable. -## -## var: The name of the variable. -## -## val: The (new) value of the variable *var*. -## -## Returns: True on success. -## -## .. bro:see:: getenv -function setenv%(var: string, val: string%): bool - %{ - int result = setenv(var->AsString()->CheckString(), - val->AsString()->CheckString(), 1); - - if ( result < 0 ) - return val_mgr->GetBool(0); - return val_mgr->GetBool(1); - %} - -## Shuts down the Bro process immediately. -## -## code: The exit code to return with. -## -## .. bro:see:: terminate -function exit%(code: int%): any - %{ - exit(code); - return 0; - %} - -## Gracefully shut down Bro by terminating outstanding processing. -## -## Returns: True after successful termination and false when Bro is still in -## the process of shutting down. -## -## .. bro:see:: exit bro_is_terminating -function terminate%(%): bool - %{ - if ( terminating ) - return val_mgr->GetBool(0); - - terminate_processing(); - return val_mgr->GetBool(1); - %} - -%%{ -// Turns the table into environment variables (if 'set' is true) or removes -// all environment variables previously generated from this table (if 'set' -// is false). 
-static bool prepare_environment(TableVal* tbl, bool set) - { - ListVal* idxs = tbl->ConvertToPureList(); - - for ( int i = 0; i < idxs->Length(); ++i ) - { - Val* key = idxs->Index(i); - Val* val = tbl->Lookup(key, false); - - if ( key->Type()->Tag() != TYPE_STRING || - val->Type()->Tag() != TYPE_STRING ) - { - builtin_error("system_env() needs a table[string] of string"); - return false; - } - - char* tmp = copy_string(key->AsString()->CheckString()); - to_upper(tmp); - const char* var = fmt("BRO_ARG_%s", tmp); - delete [] tmp; - - if ( set ) - setenv(var, val->AsString()->CheckString(), 1); - else - unsetenv(var); - } - - return true; - } - -static int do_system(const char* s) - { - const char* system_fmt = "(%s) 1>&2 &"; // output to stderr - char* cmd = new char[strlen(system_fmt) + strlen(s) + 1]; - - sprintf(cmd, system_fmt, s); - int status = system(cmd); - delete [] cmd; - - return status; - } -%%} - -## Invokes a command via the ``system`` function of the OS. -## The command runs in the background with ``stdout`` redirecting to -## ``stderr``. Here is a usage example: -## ``system(fmt("rm %s", safe_shell_quote(sniffed_data)));`` -## -## str: The command to execute. -## -## Returns: The return value from the OS ``system`` function. -## -## .. bro:see:: system_env safe_shell_quote piped_exec -## -## .. note:: -## -## Note that this corresponds to the status of backgrounding the -## given command, not to the exit status of the command itself. A -## value of 127 corresponds to a failure to execute ``sh``, and -1 -## to an internal system failure. -function system%(str: string%): int - %{ - int result = do_system(str->CheckString()); - return val_mgr->GetInt(result); - %} - -## Invokes a command via the ``system`` function of the OS with a prepared -## environment. The function is essentially the same as :bro:id:`system`, -## but changes the environment before invoking the command. -## -## str: The command to execute. -## -## env: A :bro:type:`table` with the environment variables in the form -## of key-value pairs. Each specified environment variable name -## will be automatically prepended with ``BRO_ARG_``. -## -## Returns: The return value from the OS ``system`` function. -## -## .. bro:see:: system safe_shell_quote piped_exec -function system_env%(str: string, env: table_string_of_string%): int - %{ - if ( env->Type()->Tag() != TYPE_TABLE ) - { - builtin_error("system_env() requires a table argument"); - return val_mgr->GetInt(-1); - } - - if ( ! prepare_environment(env->AsTableVal(), true) ) - return val_mgr->GetInt(-1); - - int result = do_system(str->CheckString()); - - prepare_environment(env->AsTableVal(), false); - - return val_mgr->GetInt(result); - %} - -## Opens a program with ``popen`` and writes a given string to the returned -## stream to send it to the opened process's stdin. -## -## program: The program to execute. -## -## to_write: Data to pipe to the opened program's process via ``stdin``. -## -## Returns: True on success. -## -## .. bro:see:: system system_env -function piped_exec%(program: string, to_write: string%): bool - %{ - const char* prog = program->CheckString(); - - FILE* f = popen(prog, "w"); - if ( ! 
f ) - { - reporter->Error("Failed to popen %s", prog); - return val_mgr->GetBool(0); - } - - const u_char* input_data = to_write->Bytes(); - int input_data_len = to_write->Len(); - - int bytes_written = fwrite(input_data, 1, input_data_len, f); - - pclose(f); - - if ( bytes_written != input_data_len ) - { - reporter->Error("Failed to write all given data to %s", prog); - return val_mgr->GetBool(0); - } - - return val_mgr->GetBool(1); - %} - -%%{ -#include "OpaqueVal.h" -%%} - -## Computes the MD5 hash value of the provided list of arguments. -## -## Returns: The MD5 hash value of the concatenated arguments. -## -## .. bro:see:: md5_hmac md5_hash_init md5_hash_update md5_hash_finish -## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish -## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish -## -## .. note:: -## -## This function performs a one-shot computation of its arguments. -## For incremental hash computation, see :bro:id:`md5_hash_init` and -## friends. -function md5_hash%(...%): string - %{ - unsigned char digest[MD5_DIGEST_LENGTH]; - MD5Val::digest(@ARG@, digest); - return new StringVal(md5_digest_print(digest)); - %} - -## Computes the SHA1 hash value of the provided list of arguments. -## -## Returns: The SHA1 hash value of the concatenated arguments. -## -## .. bro:see:: md5_hash md5_hmac md5_hash_init md5_hash_update md5_hash_finish -## sha1_hash_init sha1_hash_update sha1_hash_finish -## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish -## -## .. note:: -## -## This function performs a one-shot computation of its arguments. -## For incremental hash computation, see :bro:id:`sha1_hash_init` and -## friends. -function sha1_hash%(...%): string - %{ - unsigned char digest[SHA_DIGEST_LENGTH]; - SHA1Val::digest(@ARG@, digest); - return new StringVal(sha1_digest_print(digest)); - %} - -## Computes the SHA256 hash value of the provided list of arguments. -## -## Returns: The SHA256 hash value of the concatenated arguments. -## -## .. bro:see:: md5_hash md5_hmac md5_hash_init md5_hash_update md5_hash_finish -## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish -## sha256_hash_init sha256_hash_update sha256_hash_finish -## -## .. note:: -## -## This function performs a one-shot computation of its arguments. -## For incremental hash computation, see :bro:id:`sha256_hash_init` and -## friends. -function sha256_hash%(...%): string - %{ - unsigned char digest[SHA256_DIGEST_LENGTH]; - SHA256Val::digest(@ARG@, digest); - return new StringVal(sha256_digest_print(digest)); - %} - -## Computes an HMAC-MD5 hash value of the provided list of arguments. The HMAC -## secret key is generated from available entropy when Bro starts up, or it can -## be specified for repeatability using the ``-K`` command line flag. -## -## Returns: The HMAC-MD5 hash value of the concatenated arguments. -## -## .. bro:see:: md5_hash md5_hash_init md5_hash_update md5_hash_finish -## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish -## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish -function md5_hmac%(...%): string - %{ - unsigned char hmac[MD5_DIGEST_LENGTH]; - MD5Val::hmac(@ARG@, shared_hmac_md5_key, hmac); - return new StringVal(md5_digest_print(hmac)); - %} - -## Constructs an MD5 handle to enable incremental hash computation. You can -## feed data to the returned opaque value with :bro:id:`md5_hash_update` and -## eventually need to call :bro:id:`md5_hash_finish` to finish the computation -## and get the hash digest. 
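Before the incremental variants below, a hedged usage sketch of the one-shot digest builtins documented above; the argument values are arbitrary::

    event bro_init()
        {
        # Each call hashes the concatenation of its arguments, rendered
        # to text much as cat() would render them.
        print md5_hash("foo", 3, T);       # digest of "foo3T"
        print sha256_hash("foo", 3, T);
        print md5_hmac("foo", 3, T);       # keyed with the per-run HMAC-MD5 key
        }

Since the HMAC key is drawn from entropy at startup, md5_hmac() output differs between runs unless the key is pinned with the -K command line flag.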
-## -## For example, when computing incremental MD5 values of transferred files in -## multiple concurrent HTTP connections, one keeps an optional handle in the -## HTTP session record. Then, one would call -## ``c$http$md5_handle = md5_hash_init()`` once before invoking -## ``md5_hash_update(c$http$md5_handle, some_more_data)`` in the -## :bro:id:`http_entity_data` event handler. When all data has arrived, a call -## to :bro:id:`md5_hash_finish` returns the final hash value. -## -## Returns: The opaque handle associated with this hash computation. -## -## .. bro:see:: md5_hmac md5_hash md5_hash_update md5_hash_finish -## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish -## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish -function md5_hash_init%(%): opaque of md5 - %{ - HashVal* digest = new MD5Val(); - digest->Init(); - return digest; - %} - -## Constructs an SHA1 handle to enable incremental hash computation. You can -## feed data to the returned opaque value with :bro:id:`sha1_hash_update` and -## finally need to call :bro:id:`sha1_hash_finish` to finish the computation -## and get the hash digest. -## -## For example, when computing incremental SHA1 values of transferred files in -## multiple concurrent HTTP connections, one keeps an optional handle in the -## HTTP session record. Then, one would call -## ``c$http$sha1_handle = sha1_hash_init()`` once before invoking -## ``sha1_hash_update(c$http$sha1_handle, some_more_data)`` in the -## :bro:id:`http_entity_data` event handler. When all data has arrived, a call -## to :bro:id:`sha1_hash_finish` returns the final hash value. -## -## Returns: The opaque handle associated with this hash computation. -## -## .. bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish -## sha1_hash sha1_hash_update sha1_hash_finish -## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish -function sha1_hash_init%(%): opaque of sha1 - %{ - HashVal* digest = new SHA1Val(); - digest->Init(); - return digest; - %} - -## Constructs an SHA256 handle to enable incremental hash computation. You can -## feed data to the returned opaque value with :bro:id:`sha256_hash_update` and -## finally need to call :bro:id:`sha256_hash_finish` to finish the computation -## and get the hash digest. -## -## For example, when computing incremental SHA256 values of transferred files in -## multiple concurrent HTTP connections, one keeps an optional handle in the -## HTTP session record. Then, one would call -## ``c$http$sha256_handle = sha256_hash_init()`` once before invoking -## ``sha256_hash_update(c$http$sha256_handle, some_more_data)`` in the -## :bro:id:`http_entity_data` event handler. When all data has arrived, a call -## to :bro:id:`sha256_hash_finish` returns the final hash value. -## -## Returns: The opaque handle associated with this hash computation. -## -## .. bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish -## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish -## sha256_hash sha256_hash_update sha256_hash_finish -function sha256_hash_init%(%): opaque of sha256 - %{ - HashVal* digest = new SHA256Val(); - digest->Init(); - return digest; - %} - -## Updates the MD5 value associated with a given index. It is required to -## call :bro:id:`md5_hash_init` once before calling this -## function. -## -## handle: The opaque handle associated with this hash computation. -## -## data: The data to add to the hash computation. -## -## Returns: True on success. -## -## .. 
bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_finish -## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish -## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish -function md5_hash_update%(handle: opaque of md5, data: string%): bool - %{ - bool rc = static_cast(handle)->Feed(data->Bytes(), data->Len()); - return val_mgr->GetBool(rc); - %} - -## Updates the SHA1 value associated with a given index. It is required to -## call :bro:id:`sha1_hash_init` once before calling this -## function. -## -## handle: The opaque handle associated with this hash computation. -## -## data: The data to add to the hash computation. -## -## Returns: True on success. -## -## .. bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish -## sha1_hash sha1_hash_init sha1_hash_finish -## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish -function sha1_hash_update%(handle: opaque of sha1, data: string%): bool - %{ - bool rc = static_cast(handle)->Feed(data->Bytes(), data->Len()); - return val_mgr->GetBool(rc); - %} - -## Updates the SHA256 value associated with a given index. It is required to -## call :bro:id:`sha256_hash_init` once before calling this -## function. -## -## handle: The opaque handle associated with this hash computation. -## -## data: The data to add to the hash computation. -## -## Returns: True on success. -## -## .. bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish -## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish -## sha256_hash sha256_hash_init sha256_hash_finish -function sha256_hash_update%(handle: opaque of sha256, data: string%): bool - %{ - bool rc = static_cast(handle)->Feed(data->Bytes(), data->Len()); - return val_mgr->GetBool(rc); - %} - -## Returns the final MD5 digest of an incremental hash computation. -## -## handle: The opaque handle associated with this hash computation. -## -## Returns: The hash value associated with the computation of *handle*. -## -## .. bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update -## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish -## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish -function md5_hash_finish%(handle: opaque of md5%): string - %{ - return static_cast(handle)->Get(); - %} - -## Returns the final SHA1 digest of an incremental hash computation. -## -## handle: The opaque handle associated with this hash computation. -## -## Returns: The hash value associated with the computation of *handle*. -## -## .. bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish -## sha1_hash sha1_hash_init sha1_hash_update -## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish -function sha1_hash_finish%(handle: opaque of sha1%): string - %{ - return static_cast(handle)->Get(); - %} - -## Returns the final SHA256 digest of an incremental hash computation. -## -## handle: The opaque handle associated with this hash computation. -## -## Returns: The hash value associated with the computation of *handle*. -## -## .. bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish -## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish -## sha256_hash sha256_hash_init sha256_hash_update -function sha256_hash_finish%(handle: opaque of sha256%): string - %{ - return static_cast(handle)->Get(); - %} - -## Returns 32-bit digest of arbitrary input values using FNV-1a hash algorithm. -## See ``_. -## -## input: The desired input value to hash. -## -## Returns: The hashed value. 
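Putting the incremental hash builtins above together, a minimal sketch (the strings fed in are arbitrary) could look like::

    event bro_init()
        {
        local h = md5_hash_init();
        md5_hash_update(h, "zeek");
        md5_hash_update(h, "rocks");

        # Feeding the pieces incrementally should match the one-shot
        # digest of their concatenation.
        print md5_hash_finish(h);
        print md5_hash("zeekrocks");
        }

The sha1_* and sha256_* families follow the same init/update/finish pattern, just with their own opaque handle types.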
-## -## .. bro:see:: hrw_weight -function fnv1a32%(input: any%): count - %{ - ODesc desc(DESC_BINARY); - input->Describe(&desc); - auto bytes = desc.Bytes(); - - uint32 offset32 = 2166136261; - uint32 prime32 = 16777619; - uint32 rval = offset32; - - for ( auto i = 0; i < desc.Len(); ++i ) - { - rval ^= (uint32) bytes[i]; - rval *= prime32; - } - - return val_mgr->GetCount(rval); - %} - -## Calculates a weight value for use in a Rendezvous Hashing algorithm. -## See ``_. -## The weight function used is the one recommended in the original -## paper: ``_. -## -## key_digest: A 32-bit digest of a key. E.g. use :bro:see:`fnv1a32` to -## produce this. -## -## site_id: A 32-bit site/node identifier. -## -## Returns: The weight value for the key/site pair. -## -## .. bro:see:: fnv1a32 -function hrw_weight%(key_digest: count, site_id: count%): count - %{ - uint32 d = key_digest; - d &= 0x7fffffff; // 31-bit digest - int32 si = site_id; - auto a = 1103515245; - auto b = 12345; - auto m = 2147483648; // 2**31 - - int32 rval = (a * ((a * si + b) ^ d) + b) % m; - - if ( rval < 0 ) - rval += m; // [0, 2**31 - 1] - - return val_mgr->GetCount((uint64) rval); - %} - -## Generates a random number. -## -## max: The maximum value of the random number. -## -## Returns: a random positive integer in the interval *[0, max)*. -## -## .. bro:see:: srand -## -## .. note:: -## -## This function is a wrapper about the function ``random`` -## provided by the OS. -function rand%(max: count%): count - %{ - auto result = bro_uint_t(double(max) * double(bro_random()) / (RAND_MAX + 1.0)); - return val_mgr->GetCount(result); - %} - -## Sets the seed for subsequent :bro:id:`rand` calls. -## -## seed: The seed for the PRNG. -## -## .. bro:see:: rand -## -## .. note:: -## -## This function is a wrapper about the function ``srandom`` -## provided by the OS. -function srand%(seed: count%): any - %{ - bro_srandom(seed); - return 0; - %} - -%%{ -#include -%%} - -## Send a string to syslog. -## -## s: The string to log via syslog -function syslog%(s: string%): any - %{ - reporter->Syslog("%s", s->CheckString()); - return 0; - %} - -## Determines the MIME type of a piece of data using Bro's file magic -## signatures. -## -## data: The data to find the MIME type for. -## -## return_mime: Deprecated argument; does nothing, except emit a warning -## when false. -## -## Returns: The MIME type of *data*, or "" if there was an error -## or no match. This is the strongest signature match. -## -## .. bro:see:: file_magic -function identify_data%(data: string, return_mime: bool &default=T%): string - %{ - if ( ! return_mime ) - reporter->Warning("identify_data() builtin-function only returns MIME types, but verbose file info requested"); - - string strongest_match = file_mgr->DetectMIME(data->Bytes(), data->Len()); - - if ( strongest_match.empty() ) - return new StringVal(""); - - return new StringVal(strongest_match); - %} - -## Determines the MIME type of a piece of data using Bro's file magic -## signatures. -## -## data: The data for which to find matching MIME types. -## -## Returns: All matching signatures, in order of strength. -## -## .. bro:see:: identify_data -function file_magic%(data: string%): mime_matches - %{ - RuleMatcher::MIME_Matches matches; - file_mgr->DetectMIME(data->Bytes(), data->Len(), &matches); - return file_analysis::GenMIMEMatchesVal(matches); - %} - -## Performs an entropy test on the given data. -## See http://www.fourmilab.ch/random. -## -## data: The data to compute the entropy for. 
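Before the entropy-test details continue below, a small rendezvous-hashing sketch built on fnv1a32() and hrw_weight(); the site identifiers and the key are made-up values::

    event bro_init()
        {
        local sites = vector(1, 2, 3);    # hypothetical 32-bit site/node IDs
        local key = "10.0.0.1";           # e.g. an address rendered as text
        local d = fnv1a32(key);           # 32-bit digest of the key

        # Pick the site with the highest weight for this key. Every key
        # maps consistently to one site, and removing a site only
        # relocates the keys that site owned.
        local best = sites[0];
        local best_w = hrw_weight(d, sites[0]);

        for ( i in sites )
            {
            local w = hrw_weight(d, sites[i]);
            if ( w > best_w )
                {
                best_w = w;
                best = sites[i];
                }
            }

        print fmt("key %s -> site %d", key, best);
        }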
-## -## Returns: The result of the entropy test, which contains the following -## fields. -## -## - ``entropy``: The information density expressed as a number of -## bits per character. -## -## - ``chi_square``: The chi-square test value expressed as an -## absolute number and a percentage which indicates how -## frequently a truly random sequence would exceed the value -## calculated, i.e., the degree to which the sequence tested is -## suspected of being non-random. -## -## If the percentage is greater than 99% or less than 1%, the -## sequence is almost certainly not random. If the percentage is -## between 99% and 95% or between 1% and 5%, the sequence is -## suspect. Percentages between 90\% and 95\% and 5\% and 10\% -## indicate the sequence is "almost suspect." -## -## - ``mean``: The arithmetic mean of all the bytes. If the data -## are close to random, it should be around 127.5. -## -## - ``monte_carlo_pi``: Each successive sequence of six bytes is -## used as 24-bit *x* and *y* coordinates within a square. If -## the distance of the randomly-generated point is less than the -## radius of a circle inscribed within the square, the six-byte -## sequence is considered a "hit." The percentage of hits can -## be used to calculate the value of pi. For very large streams -## the value will approach the correct value of pi if the -## sequence is close to random. -## -## - ``serial_correlation``: This quantity measures the extent to -## which each byte in the file depends upon the previous byte. -## For random sequences this value will be close to zero. -## -## .. bro:see:: entropy_test_init entropy_test_add entropy_test_finish -function find_entropy%(data: string%): entropy_test_result - %{ - double montepi, scc, ent, mean, chisq; - montepi = scc = ent = mean = chisq = 0.0; - EntropyVal e; - e.Feed(data->Bytes(), data->Len()); - e.Get(&ent, &chisq, &mean, &montepi, &scc); - - RecordVal* ent_result = new RecordVal(entropy_test_result); - ent_result->Assign(0, new Val(ent, TYPE_DOUBLE)); - ent_result->Assign(1, new Val(chisq, TYPE_DOUBLE)); - ent_result->Assign(2, new Val(mean, TYPE_DOUBLE)); - ent_result->Assign(3, new Val(montepi, TYPE_DOUBLE)); - ent_result->Assign(4, new Val(scc, TYPE_DOUBLE)); - return ent_result; - %} - -## Initializes data structures for incremental entropy calculation. -## -## Returns: An opaque handle to be used in subsequent operations. -## -## .. bro:see:: find_entropy entropy_test_add entropy_test_finish -function entropy_test_init%(%): opaque of entropy - %{ - return new EntropyVal(); - %} - -## Adds data to an incremental entropy calculation. -## -## handle: The opaque handle representing the entropy calculation state. -## -## data: The data to add to the entropy calculation. -## -## Returns: True on success. -## -## .. bro:see:: find_entropy entropy_test_add entropy_test_finish -function entropy_test_add%(handle: opaque of entropy, data: string%): bool - %{ - bool status = static_cast(handle)->Feed(data->Bytes(), - data->Len()); - return val_mgr->GetBool(status); - %} - -## Finishes an incremental entropy calculation. Before using this function, -## one needs to obtain an opaque handle with :bro:id:`entropy_test_init` and -## add data to it via :bro:id:`entropy_test_add`. -## -## handle: The opaque handle representing the entropy calculation state. -## -## Returns: The result of the entropy test. See :bro:id:`find_entropy` for a -## description of the individual components. -## -## .. 
bro:see:: find_entropy entropy_test_init entropy_test_add -function entropy_test_finish%(handle: opaque of entropy%): entropy_test_result - %{ - double montepi, scc, ent, mean, chisq; - montepi = scc = ent = mean = chisq = 0.0; - static_cast(handle)->Get(&ent, &chisq, &mean, &montepi, &scc); - - RecordVal* ent_result = new RecordVal(entropy_test_result); - ent_result->Assign(0, new Val(ent, TYPE_DOUBLE)); - ent_result->Assign(1, new Val(chisq, TYPE_DOUBLE)); - ent_result->Assign(2, new Val(mean, TYPE_DOUBLE)); - ent_result->Assign(3, new Val(montepi, TYPE_DOUBLE)); - ent_result->Assign(4, new Val(scc, TYPE_DOUBLE)); - return ent_result; - %} - -## Creates an identifier that is unique with high probability. -## -## prefix: A custom string prepended to the result. -## -## Returns: A string identifier that is unique. -## -## .. bro:see:: unique_id_from -function unique_id%(prefix: string%) : string - %{ - char tmp[20]; - uint64 uid = calculate_unique_id(UID_POOL_DEFAULT_SCRIPT); - return new StringVal(uitoa_n(uid, tmp, sizeof(tmp), 62, prefix->CheckString())); - %} - -## Creates an identifier that is unique with high probability. -## -## pool: A seed for determinism. -## -## prefix: A custom string prepended to the result. -## -## Returns: A string identifier that is unique. -## -## .. bro:see:: unique_id -function unique_id_from%(pool: int, prefix: string%) : string - %{ - pool += UID_POOL_CUSTOM_SCRIPT; // Make sure we don't conflict with internal pool. - - char tmp[20]; - uint64 uid = calculate_unique_id(pool); - return new StringVal(uitoa_n(uid, tmp, sizeof(tmp), 62, prefix->CheckString())); - %} - -# =========================================================================== -# -# Generic Programming -# -# =========================================================================== - -## Removes all elements from a set or table. -## -## v: The set or table -function clear_table%(v: any%): any - %{ - if ( v->Type()->Tag() == TYPE_TABLE ) - v->AsTableVal()->RemoveAll(); - else - builtin_error("clear_table() requires a table/set argument"); - - return 0; - %} - -## Gets all subnets that contain a given subnet from a set/table[subnet]. -## -## search: the subnet to search for. -## -## t: the set[subnet] or table[subnet]. -## -## Returns: All the keys of the set or table that cover the subnet searched for. -function matching_subnets%(search: subnet, t: any%): subnet_vec - %{ - if ( t->Type()->Tag() != TYPE_TABLE || ! t->Type()->AsTableType()->IsSubNetIndex() ) - { - reporter->Error("matching_subnets needs to be called on a set[subnet]/table[subnet]."); - return nullptr; - } - - return t->AsTableVal()->LookupSubnets(search); - %} - -## For a set[subnet]/table[subnet], create a new table that contains all entries -## that contain a given subnet. -## -## search: the subnet to search for. -## -## t: the set[subnet] or table[subnet]. -## -## Returns: A new table that contains all the entries that cover the subnet searched for. -function filter_subnet_table%(search: subnet, t: any%): any - %{ - if ( t->Type()->Tag() != TYPE_TABLE || ! t->Type()->AsTableType()->IsSubNetIndex() ) - { - reporter->Error("filter_subnet_table needs to be called on a set[subnet]/table[subnet]."); - return nullptr; - } - - return t->AsTableVal()->LookupSubnetValues(search); - %} - -## Checks if a specific subnet is a member of a set/table[subnet]. -## In contrast to the ``in`` operator, this performs an exact match, not -## a longest prefix match. -## -## search: the subnet to search for. 
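The following sketch illustrates the exact-match semantics of check_subnet() (whose description continues below) against the longest-prefix matching done by the ``in`` operator; the example networks are arbitrary::

    global nets: set[subnet] = { 10.0.0.0/8, 192.168.0.0/16 };

    event bro_init()
        {
        # Prefix matching: the address falls inside 192.168.0.0/16.
        print (192.168.1.1 in nets);                  # T

        # Exact matching: only entries literally present in the set count.
        print (check_subnet(192.168.1.0/24, nets));   # F
        print (check_subnet(192.168.0.0/16, nets));   # T

        # All entries of the set that cover the given subnet.
        print matching_subnets(192.168.1.0/24, nets); # here just 192.168.0.0/16
        }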
-## -## t: the set[subnet] or table[subnet]. -## -## Returns: True if the exact subnet is a member, false otherwise. -function check_subnet%(search: subnet, t: any%): bool - %{ - if ( t->Type()->Tag() != TYPE_TABLE || ! t->Type()->AsTableType()->IsSubNetIndex() ) - { - reporter->Error("check_subnet needs to be called on a set[subnet]/table[subnet]."); - return nullptr; - } - - const PrefixTable* pt = t->AsTableVal()->Subnets(); - if ( ! pt ) - { - reporter->Error("check_subnet encountered nonexisting prefix table."); - return nullptr; - } - - void* res = pt->Lookup(search, true); - - return val_mgr->GetBool(res != nullptr); - %} - -## Checks whether two objects reference the same internal object. This function -## uses equality comparison of C++ raw pointer values to determine if the two -## objects are the same. -## -## o1: The first object. -## -## o2: The second object. -## -## Returns: True if *o1* and *o2* are equal. -function same_object%(o1: any, o2: any%): bool - %{ - return val_mgr->GetBool(o1 == o2); - %} - -## Returns the number of bytes that a value occupies in memory. -## -## v: The value -## -## Returns: The number of bytes that *v* occupies. -function val_size%(v: any%): count - %{ - return val_mgr->GetCount(v->MemoryAllocation()); - %} - -## Resizes a vector. -## -## aggr: The vector instance. -## -## newsize: The new size of *aggr*. -## -## Returns: The old size of *aggr*, or 0 if *aggr* is not a :bro:type:`vector`. -function resize%(aggr: any, newsize: count%) : count - %{ - if ( aggr->Type()->Tag() != TYPE_VECTOR ) - { - builtin_error("resize() operates on vectors"); - return 0; - } - - return val_mgr->GetCount(aggr->AsVectorVal()->Resize(newsize)); - %} - -## Tests whether a boolean vector (``vector of bool``) has *any* true -## element. -## -## v: The boolean vector instance. -## -## Returns: True if any element in *v* is true. -## -## .. bro:see:: all_set -function any_set%(v: any%) : bool - %{ - if ( v->Type()->Tag() != TYPE_VECTOR || - v->Type()->YieldType()->Tag() != TYPE_BOOL ) - { - builtin_error("any_set() requires vector of bool"); - return val_mgr->GetFalse(); - } - - VectorVal* vv = v->AsVectorVal(); - for ( unsigned int i = 0; i < vv->Size(); ++i ) - if ( vv->Lookup(i) && vv->Lookup(i)->AsBool() ) - return val_mgr->GetTrue(); - - return val_mgr->GetFalse(); - %} - -## Tests whether *all* elements of a boolean vector (``vector of bool``) are -## true. -## -## v: The boolean vector instance. -## -## Returns: True iff all elements in *v* are true or there are no elements. -## -## .. bro:see:: any_set -## -## .. note:: -## -## Missing elements count as false. -function all_set%(v: any%) : bool - %{ - if ( v->Type()->Tag() != TYPE_VECTOR || - v->Type()->YieldType()->Tag() != TYPE_BOOL ) - { - builtin_error("all_set() requires vector of bool"); - return val_mgr->GetFalse(); - } - - VectorVal* vv = v->AsVectorVal(); - for ( unsigned int i = 0; i < vv->Size(); ++i ) - if ( ! vv->Lookup(i) || ! vv->Lookup(i)->AsBool() ) - return val_mgr->GetFalse(); - - return val_mgr->GetTrue(); - %} - -%%{ -static Func* sort_function_comp = 0; -static Val** index_map = 0; // used for indirect sorting to support order() - -bool sort_function(Val* a, Val* b) - { - // Sort missing values as "high". - if ( ! a ) - return 0; - if ( ! 
b ) - return 1; - - val_list sort_func_args; - sort_func_args.append(a->Ref()); - sort_func_args.append(b->Ref()); - - Val* result = sort_function_comp->Call(&sort_func_args); - int int_result = result->CoerceToInt(); - Unref(result); - - return int_result < 0; - } - -bool indirect_sort_function(size_t a, size_t b) - { - return sort_function(index_map[a], index_map[b]); - } - -bool signed_sort_function (Val* a, Val* b) - { - if ( ! a ) - return 0; - if ( ! b ) - return 1; - - auto ia = a->CoerceToInt(); - auto ib = b->CoerceToInt(); - - return ia < ib; - } - -bool unsigned_sort_function (Val* a, Val* b) - { - if ( ! a ) - return 0; - if ( ! b ) - return 1; - - auto ia = a->CoerceToUnsigned(); - auto ib = b->CoerceToUnsigned(); - - return ia < ib; - } - -bool indirect_signed_sort_function(size_t a, size_t b) - { - return signed_sort_function(index_map[a], index_map[b]); - } - -bool indirect_unsigned_sort_function(size_t a, size_t b) - { - return unsigned_sort_function(index_map[a], index_map[b]); - } -%%} - -## Sorts a vector in place. The second argument is a comparison function that -## takes two arguments: if the vector type is ``vector of T``, then the -## comparison function must be ``function(a: T, b: T): int``, which returns -## a value less than zero if ``a < b`` for some type-specific notion of the -## less-than operator. The comparison function is optional if the type -## is an integral type (int, count, etc.). -## -## v: The vector instance to sort. -## -## Returns: The vector, sorted from minimum to maximum value. If the vector -## could not be sorted, then the original vector is returned instead. -## -## .. bro:see:: order -function sort%(v: any, ...%) : any - %{ - v->Ref(); // we always return v - - if ( v->Type()->Tag() != TYPE_VECTOR ) - { - builtin_error("sort() requires vector"); - return v; - } - - BroType* elt_type = v->Type()->YieldType(); - Func* comp = 0; - - if ( @ARG@.length() > 2 ) - builtin_error("sort() called with extraneous argument"); - - if ( @ARG@.length() == 2 ) - { - Val* comp_val = @ARG@[1]; - if ( ! IsFunc(comp_val->Type()->Tag()) ) - { - builtin_error("second argument to sort() needs to be comparison function"); - return v; - } - - comp = comp_val->AsFunc(); - } - - if ( ! comp && ! IsIntegral(elt_type->Tag()) ) - builtin_error("comparison function required for sort() with non-integral types"); - - vector& vv = *v->AsVector(); - - if ( comp ) - { - FuncType* comp_type = comp->FType()->AsFuncType(); - if ( comp_type->YieldType()->Tag() != TYPE_INT || - ! comp_type->ArgTypes()->AllMatch(elt_type, 0) ) - { - builtin_error("invalid comparison function in call to sort()"); - return v; - } - - sort_function_comp = comp; - - sort(vv.begin(), vv.end(), sort_function); - } - else - { - if ( elt_type->InternalType() == TYPE_INTERNAL_UNSIGNED ) - sort(vv.begin(), vv.end(), unsigned_sort_function); - else - sort(vv.begin(), vv.end(), signed_sort_function); - } - - return v; - %} - -## Returns the order of the elements in a vector according to some -## comparison function. See :bro:id:`sort` for details about the comparison -## function. -## -## v: The vector whose order to compute. -## -## Returns: A ``vector of count`` with the indices of the ordered elements. -## For example, the elements of *v* in order are (assuming ``o`` -## is the vector returned by ``order``): v[o[0]], v[o[1]], etc. -## -## .. 
bro:see:: sort -function order%(v: any, ...%) : index_vec - %{ - VectorVal* result_v = new VectorVal( - internal_type("index_vec")->AsVectorType()); - - if ( v->Type()->Tag() != TYPE_VECTOR ) - { - builtin_error("order() requires vector"); - return result_v; - } - - BroType* elt_type = v->Type()->YieldType(); - Func* comp = 0; - - if ( @ARG@.length() > 2 ) - builtin_error("order() called with extraneous argument"); - - if ( @ARG@.length() == 2 ) - { - Val* comp_val = @ARG@[1]; - if ( ! IsFunc(comp_val->Type()->Tag()) ) - { - builtin_error("second argument to order() needs to be comparison function"); - return v; - } - - comp = comp_val->AsFunc(); - } - - if ( ! comp && ! IsIntegral(elt_type->Tag()) ) - builtin_error("comparison function required for order() with non-integral types"); - - vector& vv = *v->AsVector(); - auto n = vv.size(); - - // Set up initial mapping of indices directly to corresponding - // elements. - vector ind_vv(n); - index_map = new Val*[n]; - size_t i; - for ( i = 0; i < n; ++i ) - { - ind_vv[i] = i; - index_map[i] = vv[i]; - } - - if ( comp ) - { - FuncType* comp_type = comp->FType()->AsFuncType(); - if ( comp_type->YieldType()->Tag() != TYPE_INT || - ! comp_type->ArgTypes()->AllMatch(elt_type, 0) ) - { - builtin_error("invalid comparison function in call to order()"); - return v; - } - - sort_function_comp = comp; - - sort(ind_vv.begin(), ind_vv.end(), indirect_sort_function); - } - else - { - if ( elt_type->InternalType() == TYPE_INTERNAL_UNSIGNED ) - sort(ind_vv.begin(), ind_vv.end(), indirect_unsigned_sort_function); - else - sort(ind_vv.begin(), ind_vv.end(), indirect_signed_sort_function); - } - - delete [] index_map; - index_map = 0; - - // Now spin through ind_vv to read out the rearrangement. - for ( i = 0; i < n; ++i ) - { - int ind = ind_vv[i]; - result_v->Assign(i, val_mgr->GetCount(ind)); - } - - return result_v; - %} - -# =========================================================================== -# -# String Processing -# -# =========================================================================== - -## Returns the concatenation of the string representation of its arguments. The -## arguments can be of any type. For example, ``cat("foo", 3, T)`` returns -## ``"foo3T"``. -## -## Returns: A string concatentation of all arguments. -function cat%(...%): string - %{ - ODesc d; - d.SetStyle(RAW_STYLE); - - loop_over_list(@ARG@, i) - @ARG@[i]->Describe(&d); - - BroString* s = new BroString(1, d.TakeBytes(), d.Len()); - s->SetUseFreeToDelete(true); - - return new StringVal(s); - %} - -## Concatenates all arguments, with a separator placed between each one. This -## function is similar to :bro:id:`cat`, but places a separator between each -## given argument. If any of the variable arguments is an empty string it is -## replaced by a given default string instead. -## -## sep: The separator to place between each argument. -## -## def: The default string to use when an argument is the empty string. -## -## Returns: A concatenation of all arguments with *sep* between each one and -## empty strings replaced with *def*. -## -## .. bro:see:: cat string_cat cat_string_array cat_string_array_n -function cat_sep%(sep: string, def: string, ...%): string - %{ - ODesc d; - d.SetStyle(RAW_STYLE); - - int pre_size = 0; - - loop_over_list(@ARG@, i) - { - // Skip named parameters. - if ( i < 2 ) - continue; - - if ( i > 2 ) - d.Add(sep->CheckString(), 0); - - Val* v = @ARG@[i]; - if ( v->Type()->Tag() == TYPE_STRING && ! 
v->AsString()->Len() ) - v = def; - - v->Describe(&d); - } - - BroString* s = new BroString(1, d.TakeBytes(), d.Len()); - s->SetUseFreeToDelete(true); - - return new StringVal(s); - %} - -## Produces a formatted string à la ``printf``. The first argument is the -## *format string* and specifies how subsequent arguments are converted for -## output. It is composed of zero or more directives: ordinary characters (not -## ``%``), which are copied unchanged to the output, and conversion -## specifications, each of which fetches zero or more subsequent arguments. -## Conversion specifications begin with ``%`` and the arguments must properly -## correspond to the specifier. After the ``%``, the following characters -## may appear in sequence: -## -## - ``%``: Literal ``%`` -## -## - ``-``: Left-align field -## -## - ``[0-9]+``: The field width (< 128) -## -## - ``.``: Precision of floating point specifiers ``[efg]`` (< 128) -## -## - ``[DTdxsefg]``: Format specifier -## -## - ``[DT]``: ISO timestamp with microsecond precision -## -## - ``d``: Signed/Unsigned integer (using C-style ``%lld``/``%llu`` -## for ``int``/``count``) -## -## - ``x``: Unsigned hexadecimal (using C-style ``%llx``); -## addresses/ports are converted to host-byte order -## -## - ``s``: String (byte values less than 32 or greater than 126 -## will be escaped) -## -## - ``[efg]``: Double -## -## Returns: Returns the formatted string. Given no arguments, :bro:id:`fmt` -## returns an empty string. Given no format string or the wrong -## number of additional arguments for the given format specifier, -## :bro:id:`fmt` generates a run-time error. -## -## .. bro:see:: cat cat_sep string_cat cat_string_array cat_string_array_n -function fmt%(...%): string - %{ - if ( @ARGC@ == 0 ) - return val_mgr->GetEmptyString(); - - Val* fmt_v = @ARG@[0]; - - // Type of fmt_v will be string here, check_built_in_call() in Func.cc - // checks that. - - const char* fmt = fmt_v->AsString()->CheckString(); - ODesc d; - d.SetStyle(RAW_STYLE); - - int n = 0; - - while ( next_fmt(fmt, @ARGS@, &d, n) ) - ; - - if ( n < @ARGC@ - 1 ) - { - builtin_error("too many arguments for format", fmt_v); - return val_mgr->GetEmptyString(); - } - - else if ( n >= @ARGC@ ) - { - builtin_error("too few arguments for format", fmt_v); - return val_mgr->GetEmptyString(); - } - - BroString* s = new BroString(1, d.TakeBytes(), d.Len()); - s->SetUseFreeToDelete(true); - - return new StringVal(s); - %} - -# =========================================================================== -# -# Math -# -# =========================================================================== - -## Computes the greatest integer less than the given :bro:type:`double` value. -## For example, ``floor(3.14)`` returns ``3.0``, and ``floor(-3.14)`` -## returns ``-4.0``. -## -## d: The :bro:type:`double` to manipulate. -## -## Returns: The next lowest integer of *d* as :bro:type:`double`. -## -## .. bro:see:: sqrt exp ln log10 -function floor%(d: double%): double - %{ - return new Val(floor(d), TYPE_DOUBLE); - %} - -## Computes the square root of a :bro:type:`double`. -## -## x: The number to compute the square root of. -## -## Returns: The square root of *x*. -## -## .. bro:see:: floor exp ln log10 -function sqrt%(x: double%): double - %{ - if ( x < 0 ) - { - reporter->Error("negative sqrt argument"); - return new Val(-1.0, TYPE_DOUBLE); - } - - return new Val(sqrt(x), TYPE_DOUBLE); - %} - -## Computes the exponential function. -## -## d: The argument to the exponential function. 
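Returning to the fmt() directives listed above, a few hedged examples; the expected output shown in the comments is approximate::

    event bro_init()
        {
        print fmt("%-10s|%5d|%.3f", "left", 42, 3.14159);  # "left      |   42|3.142"
        print fmt("%x", 255);                              # "ff"
        print fmt("%D", network_time());                   # ISO timestamp, microsecond precision
        print fmt("100%% done");                           # literal percent sign
        }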
-## -## Returns: *e* to the power of *d*. -## -## .. bro:see:: floor sqrt ln log10 -function exp%(d: double%): double - %{ - return new Val(exp(d), TYPE_DOUBLE); - %} - -## Computes the natural logarithm of a number. -## -## d: The argument to the logarithm. -## -## Returns: The natural logarithm of *d*. -## -## .. bro:see:: exp floor sqrt log10 -function ln%(d: double%): double - %{ - return new Val(log(d), TYPE_DOUBLE); - %} - -## Computes the common logarithm of a number. -## -## d: The argument to the logarithm. -## -## Returns: The common logarithm of *d*. -## -## .. bro:see:: exp floor sqrt ln -function log10%(d: double%): double - %{ - return new Val(log10(d), TYPE_DOUBLE); - %} - -# =========================================================================== -# -# Introspection -# -# =========================================================================== - -## Determines whether a connection has been received externally. For example, -## Broccoli or the Time Machine can send packets to Bro via a mechanism that is -## one step lower than sending events. This function checks whether the packets -## of a connection stem from one of these external *packet sources*. -## -## c: The connection to test. -## -## Returns: True if *c* has been received externally. -function is_external_connection%(c: connection%) : bool - %{ - return val_mgr->GetBool(c && c->IsExternal()); - %} - -## Returns the ID of the analyzer which raised the current event. -## -## Returns: The ID of the analyzer which raised the current event, or 0 if -## none. -function current_analyzer%(%) : count - %{ - return val_mgr->GetCount(mgr.CurrentAnalyzer()); - %} - -## Returns Bro's process ID. -## -## Returns: Bro's process ID. -function getpid%(%) : count - %{ - return val_mgr->GetCount(getpid()); - %} - -%%{ -extern const char* bro_version(); -%%} - -## Returns the Bro version string. -## -## Returns: Bro's version, e.g., 2.0-beta-47-debug. -function bro_version%(%): string - %{ - return new StringVal(bro_version()); - %} - -## Converts a record type name to a vector of strings, where each element is -## the name of a record field. Nested records are flattened. -## -## rt: The name of the record type. -## -## Returns: A string vector with the field names of *rt*. -function record_type_to_vector%(rt: string%): string_vec - %{ - VectorVal* result = - new VectorVal(internal_type("string_vec")->AsVectorType()); - - RecordType *type = internal_type(rt->CheckString())->AsRecordType(); - - if ( type ) - { - for ( int i = 0; i < type->NumFields(); ++i ) - { - StringVal* val = new StringVal(type->FieldName(i)); - result->Assign(i+1, val); - } - } - - return result; - %} - -## Returns the type name of an arbitrary Bro variable. -## -## t: An arbitrary object. -## -## Returns: The type name of *t*. -function type_name%(t: any%): string - %{ - ODesc d; - t->Type()->Describe(&d); - - BroString* s = new BroString(1, d.TakeBytes(), d.Len()); - s->SetUseFreeToDelete(true); - - return new StringVal(s); - %} - -## Checks whether Bro reads traffic from one or more network interfaces (as -## opposed to from a network trace in a file). Note that this function returns -## true even after Bro has stopped reading network traffic, for example due to -## receiving a termination signal. -## -## Returns: True if reading traffic from a network interface. -## -## .. 
bro:see:: reading_traces -function reading_live_traffic%(%): bool - %{ - return val_mgr->GetBool(reading_live); - %} - -## Checks whether Bro reads traffic from a trace file (as opposed to from a -## network interface). -## -## Returns: True if reading traffic from a network trace. -## -## .. bro:see:: reading_live_traffic -function reading_traces%(%): bool - %{ - return val_mgr->GetBool(reading_traces); - %} - - -## Generates a table of the size of all global variables. The table index is -## the variable name and the value is the variable size in bytes. -## -## Returns: A table that maps variable names to their sizes. -## -## .. bro:see:: global_ids -function global_sizes%(%): var_sizes - %{ - TableVal* sizes = new TableVal(var_sizes); - PDict(ID)* globals = global_scope()->Vars(); - IterCookie* c = globals->InitForIteration(); - - ID* id; - while ( (id = globals->NextEntry(c)) ) - if ( id->HasVal() && ! id->IsInternalGlobal() ) - { - Val* id_name = new StringVal(id->Name()); - Val* id_size = val_mgr->GetCount(id->ID_Val()->MemoryAllocation()); - sizes->Assign(id_name, id_size); - Unref(id_name); - } - - return sizes; - %} - -## Generates a table with information about all global identifiers. The table -## value is a record containing the type name of the identifier, whether it is -## exported, a constant, an enum constant, redefinable, and its value (if it -## has one). -## -## Returns: A table that maps identifier names to information about them. -## -## .. bro:see:: global_sizes -function global_ids%(%): id_table - %{ - TableVal* ids = new TableVal(id_table); - PDict(ID)* globals = global_scope()->Vars(); - IterCookie* c = globals->InitForIteration(); -#ifdef DEBUG - /** - * Explanation time: c needs to be a robust cookie when one is in debug mode, - * otherwise the Zeek process will crash in ~80% of cases when -B all is specified. - * The reason for this are the RecordVals that we create. RecordVal::Assign triggers - * a StateAccess::Log, which in turn (only in debug mode) triggers StateAccess::Describe, - * which creates a UniqueID for the variable, which triggers an insert into global_scope. - * Which invalidates the iteration cookie if it is not robust. - **/ - globals->MakeRobustCookie(c); -#endif - - ID* id; - while ( (id = globals->NextEntry(c)) ) - { - if ( id->IsInternalGlobal() ) - continue; - - RecordVal* rec = new RecordVal(script_id); - rec->Assign(0, new StringVal(type_name(id->Type()->Tag()))); - rec->Assign(1, val_mgr->GetBool(id->IsExport())); - rec->Assign(2, val_mgr->GetBool(id->IsConst())); - rec->Assign(3, val_mgr->GetBool(id->IsEnumConst())); - rec->Assign(4, val_mgr->GetBool(id->IsOption())); - rec->Assign(5, val_mgr->GetBool(id->IsRedefinable())); - - if ( id->HasVal() ) - { - Val* val = id->ID_Val(); - Ref(val); - rec->Assign(6, val); - } - - Val* id_name = new StringVal(id->Name()); - ids->Assign(id_name, rec); - Unref(id_name); - } - - return ids; - %} - -## Returns the value of a global identifier. -## -## id: The global identifier. -## -## Returns: The value of *id*. If *id* does not describe a valid identifier, -## the string ``""`` or ``""`` is returned. -function lookup_ID%(id: string%) : any - %{ - ID* i = global_scope()->Lookup(id->CheckString()); - if ( ! i ) - return new StringVal(""); - - if ( ! i->ID_Val() ) - return new StringVal(""); - - return i->ID_Val()->Ref(); - %} - -## Generates metadata about a record's fields. 
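Before record_fields() is described further below, a short introspection sketch using lookup_ID() and global_ids() from above; ``example_threshold`` is a hypothetical global defined only for the example::

    global example_threshold = 42 &redef;

    event bro_init()
        {
        # Fetch a global's current value by name.
        print lookup_ID("example_threshold");    # 42

        # global_ids() maps each global name to a record describing it
        # (type name, export/const/redef flags, and its value if present).
        local info = global_ids();
        if ( "example_threshold" in info )
            print info["example_threshold"];
        }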
The returned information -## includes the field name, whether it is logged, its value (if it has one), -## and its default value (if specified). -## -## rec: The record value or type to inspect. -## -## Returns: A table that describes the fields of a record. -function record_fields%(rec: any%): record_field_table - %{ - TableVal* fields = new TableVal(record_field_table); - - auto t = rec->Type(); - - if ( t->Tag() != TYPE_RECORD && t->Tag() != TYPE_TYPE ) - { - reporter->Error("non-record value/type passed to record_fields"); - return fields; - } - - RecordType* rt = nullptr; - RecordVal* rv = nullptr; - - if ( t->Tag() == TYPE_RECORD ) - { - rt = t->AsRecordType(); - rv = rec->AsRecordVal(); - } - else - { - t = t->AsTypeType()->Type(); - - if ( t->Tag() != TYPE_RECORD ) - { - reporter->Error("non-record value/type passed to record_fields"); - return fields; - } - - rt = t->AsRecordType(); - } - - for ( int i = 0; i < rt->NumFields(); ++i ) - { - BroType* ft = rt->FieldType(i); - TypeDecl* fd = rt->FieldDecl(i); - Val* fv = nullptr; - - if ( rv ) - fv = rv->Lookup(i); - - if ( fv ) - Ref(fv); - - bool logged = (fd->attrs && fd->FindAttr(ATTR_LOG) != 0); - - RecordVal* nr = new RecordVal(record_field); - - if ( ft->Tag() == TYPE_RECORD ) - nr->Assign(0, new StringVal("record " + ft->GetName())); - else - nr->Assign(0, new StringVal(type_name(ft->Tag()))); - - nr->Assign(1, val_mgr->GetBool(logged)); - nr->Assign(2, fv); - nr->Assign(3, rt->FieldDefault(i)); - - Val* field_name = new StringVal(rt->FieldName(i)); - fields->Assign(field_name, nr); - Unref(field_name); - } - - return fields; - %} - -## Enables detailed collection of profiling statistics. Statistics include -## CPU/memory usage, connections, TCP states/reassembler, DNS lookups, -## timers, and script-level state. The script variable :bro:id:`profiling_file` -## holds the name of the file. -## -## .. bro:see:: get_conn_stats -## get_dns_stats -## get_event_stats -## get_file_analysis_stats -## get_gap_stats -## get_matcher_stats -## get_net_stats -## get_proc_stats -## get_reassembler_stats -## get_thread_stats -## get_timer_stats -function do_profiling%(%) : any - %{ - if ( profiling_logger ) - profiling_logger->Log(); - - return 0; - %} - -## Checks whether a given IP address belongs to a local interface. -## -## ip: The IP address to check. -## -## Returns: True if *ip* belongs to a local interface. -function is_local_interface%(ip: addr%) : bool - %{ - if ( ip->AsAddr().IsLoopback() ) - return val_mgr->GetBool(1); - - list addrs; - - char host[MAXHOSTNAMELEN]; - - strcpy(host, "localhost"); - gethostname(host, MAXHOSTNAMELEN); - host[MAXHOSTNAMELEN-1] = '\0'; - - struct hostent* ent = gethostbyname2(host, AF_INET); - - if ( ent ) - { - for ( unsigned int len = 0; ent->h_addr_list[len]; ++len ) - addrs.push_back(IPAddr(IPv4, (uint32*)ent->h_addr_list[len], - IPAddr::Network)); - } - - ent = gethostbyname2(host, AF_INET6); - - if ( ent ) - { - for ( unsigned int len = 0; ent->h_addr_list[len]; ++len ) - addrs.push_back(IPAddr(IPv6, (uint32*)ent->h_addr_list[len], - IPAddr::Network)); - } - - list::const_iterator it; - for ( it = addrs.begin(); it != addrs.end(); ++it ) - { - if ( *it == ip->AsAddr() ) - return val_mgr->GetBool(1); - } - - return val_mgr->GetBool(0); - %} - -## Write rule matcher statistics (DFA states, transitions, memory usage, cache -## hits/misses) to a file. -## -## f: The file to write to. -## -## Returns: True (unconditionally). -## -## .. 
bro:see:: get_matcher_stats -function dump_rule_stats%(f: file%): bool - %{ - if ( rule_matcher ) - rule_matcher->DumpStats(f); - - return val_mgr->GetBool(1); - %} - -## Checks if Bro is terminating. -## -## Returns: True if Bro is in the process of shutting down. -## -## .. bro:see:: terminate -function bro_is_terminating%(%): bool - %{ - return val_mgr->GetBool(terminating); - %} - -## Returns the hostname of the machine Bro runs on. -## -## Returns: The hostname of the machine Bro runs on. -function gethostname%(%) : string - %{ - char buffer[MAXHOSTNAMELEN]; - if ( gethostname(buffer, MAXHOSTNAMELEN) < 0 ) - strcpy(buffer, ""); - - buffer[MAXHOSTNAMELEN-1] = '\0'; - return new StringVal(buffer); - %} - -## Returns whether an address is IPv4 or not. -## -## a: the address to check. -## -## Returns: true if *a* is an IPv4 address, else false. -function is_v4_addr%(a: addr%): bool - %{ - if ( a->AsAddr().GetFamily() == IPv4 ) - return val_mgr->GetBool(1); - else - return val_mgr->GetBool(0); - %} - -## Returns whether an address is IPv6 or not. -## -## a: the address to check. -## -## Returns: true if *a* is an IPv6 address, else false. -function is_v6_addr%(a: addr%): bool - %{ - if ( a->AsAddr().GetFamily() == IPv6 ) - return val_mgr->GetBool(1); - else - return val_mgr->GetBool(0); - %} - -## Returns whether a subnet specification is IPv4 or not. -## -## s: the subnet to check. -## -## Returns: true if *s* is an IPv4 subnet, else false. -function is_v4_subnet%(s: subnet%): bool - %{ - if ( s->AsSubNet().Prefix().GetFamily() == IPv4 ) - return val_mgr->GetBool(1); - else - return val_mgr->GetBool(0); - %} - -## Returns whether a subnet specification is IPv6 or not. -## -## s: the subnet to check. -## -## Returns: true if *s* is an IPv6 subnet, else false. -function is_v6_subnet%(s: subnet%): bool - %{ - if ( s->AsSubNet().Prefix().GetFamily() == IPv6 ) - return val_mgr->GetBool(1); - else - return val_mgr->GetBool(0); - %} - - -# =========================================================================== -# -# Conversion -# -# =========================================================================== - -## Converts the *data* field of :bro:type:`ip6_routing` records that have -## *rtype* of 0 into a vector of addresses. -## -## s: The *data* field of an :bro:type:`ip6_routing` record that has -## an *rtype* of 0. -## -## Returns: The vector of addresses contained in the routing header data. -function routing0_data_to_addrs%(s: string%): addr_vec - %{ - VectorVal* rval = new VectorVal(internal_type("addr_vec")->AsVectorType()); - - int len = s->Len(); - const u_char* bytes = s->Bytes(); - bytes += 4; // go past 32-bit reserved field - len -= 4; - - if ( ( len % 16 ) != 0 ) - reporter->Warning("Bad ip6_routing data length: %d", s->Len()); - - while ( len > 0 ) - { - IPAddr a(IPv6, (const uint32*) bytes, IPAddr::Network); - rval->Assign(rval->Size(), new AddrVal(a)); - bytes += 16; - len -= 16; - } - - return rval; - %} - -## Converts an :bro:type:`addr` to an :bro:type:`index_vec`. -## -## a: The address to convert into a vector of counts. -## -## Returns: A vector containing the host-order address representation, -## four elements in size for IPv6 addresses, or one element for IPv4. -## -## .. 
bro:see:: counts_to_addr -function addr_to_counts%(a: addr%): index_vec - %{ - VectorVal* rval = new VectorVal(internal_type("index_vec")->AsVectorType()); - const uint32* bytes; - int len = a->AsAddr().GetBytes(&bytes); - - for ( int i = 0; i < len; ++i ) - rval->Assign(i, val_mgr->GetCount(ntohl(bytes[i]))); - - return rval; - %} - -## Converts an :bro:type:`index_vec` to an :bro:type:`addr`. -## -## v: The vector containing host-order IP address representation, -## one element for IPv4 addresses, four elements for IPv6 addresses. -## -## Returns: An IP address. -## -## .. bro:see:: addr_to_counts -function counts_to_addr%(v: index_vec%): addr - %{ - if ( v->AsVector()->size() == 1 ) - { - return new AddrVal(htonl((*v->AsVector())[0]->AsCount())); - } - else if ( v->AsVector()->size() == 4 ) - { - uint32 bytes[4]; - for ( int i = 0; i < 4; ++i ) - bytes[i] = htonl((*v->AsVector())[i]->AsCount()); - return new AddrVal(bytes); - } - else - { - builtin_error("invalid vector size", @ARG@[0]); - uint32 bytes[4]; - memset(bytes, 0, sizeof(bytes)); - return new AddrVal(bytes); - } - %} - -## Converts an :bro:type:`enum` to an :bro:type:`int`. -## -## e: The :bro:type:`enum` to convert. -## -## Returns: The :bro:type:`int` value that corresponds to the :bro:type:`enum`. -function enum_to_int%(e: any%): int - %{ - if ( e->Type()->Tag() != TYPE_ENUM ) - { - builtin_error("enum_to_int() requires enum value"); - return val_mgr->GetInt(-1); - } - - return val_mgr->GetInt(e->AsEnum()); - %} - -## Converts a :bro:type:`string` to an :bro:type:`int`. -## -## str: The :bro:type:`string` to convert. -## -## Returns: The :bro:type:`string` *str* as :bro:type:`int`. -## -## .. bro:see:: to_addr to_port to_subnet -function to_int%(str: string%): int - %{ - const char* s = str->CheckString(); - char* end_s; - - bro_int_t i = strtoll(s, &end_s, 10); - -#if 0 - // Not clear we should complain. For example, is " 205 " - // a legal conversion? - if ( s[0] == '\0' || end_s[0] != '\0' ) - builtin_error("bad conversion to integer", @ARG@[0]); -#endif - - return val_mgr->GetInt(i); - %} - - -## Converts a (positive) :bro:type:`int` to a :bro:type:`count`. -## -## n: The :bro:type:`int` to convert. -## -## Returns: The :bro:type:`int` *n* as unsigned integer, or 0 if *n* < 0. -function int_to_count%(n: int%): count - %{ - if ( n < 0 ) - { - builtin_error("bad conversion to count", @ARG@[0]); - n = 0; - } - return val_mgr->GetCount(n); - %} - -## Converts a :bro:type:`double` to a :bro:type:`count`. -## -## d: The :bro:type:`double` to convert. -## -## Returns: The :bro:type:`double` *d* as unsigned integer, or 0 if *d* < 0.0. -## -## .. bro:see:: double_to_time -function double_to_count%(d: double%): count - %{ - if ( d < 0.0 ) - builtin_error("bad conversion to count", @ARG@[0]); - - return val_mgr->GetCount(bro_uint_t(rint(d))); - %} - -## Converts a :bro:type:`string` to a :bro:type:`count`. -## -## str: The :bro:type:`string` to convert. -## -## Returns: The :bro:type:`string` *str* as unsigned integer, or 0 if *str* has -## an invalid format. -## -## .. bro:see:: to_addr to_int to_port to_subnet -function to_count%(str: string%): count - %{ - const char* s = str->CheckString(); - char* end_s; - - uint64 u = (uint64) strtoull(s, &end_s, 10); - - if ( s[0] == '\0' || end_s[0] != '\0' ) - { - builtin_error("bad conversion to count", @ARG@[0]); - u = 0; - } - - return val_mgr->GetCount(u); - %} - -## Converts an :bro:type:`interval` to a :bro:type:`double`. -## -## i: The :bro:type:`interval` to convert. 
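A quick sketch of the count and address conversions documented above; the addresses and strings are arbitrary::

    event bro_init()
        {
        local a = [2001:db8::1];
        local v = addr_to_counts(a);    # four host-order counts for IPv6
        print counts_to_addr(v);        # back to 2001:db8::1

        print to_count("42");           # 42
        print to_int("-7");             # -7
        print int_to_count(-7);         # 0, with a conversion error reported
        }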
-## -## Returns: The :bro:type:`interval` *i* as :bro:type:`double`. -## -## .. bro:see:: double_to_interval -function interval_to_double%(i: interval%): double - %{ - return new Val(i, TYPE_DOUBLE); - %} - -## Converts a :bro:type:`time` value to a :bro:type:`double`. -## -## t: The :bro:type:`time` to convert. -## -## Returns: The :bro:type:`time` value *t* as :bro:type:`double`. -## -## .. bro:see:: double_to_time -function time_to_double%(t: time%): double - %{ - return new Val(t, TYPE_DOUBLE); - %} - -## Converts a :bro:type:`double` value to a :bro:type:`time`. -## -## d: The :bro:type:`double` to convert. -## -## Returns: The :bro:type:`double` value *d* as :bro:type:`time`. -## -## .. bro:see:: time_to_double double_to_count -function double_to_time%(d: double%): time - %{ - return new Val(d, TYPE_TIME); - %} - -## Converts a :bro:type:`double` to an :bro:type:`interval`. -## -## d: The :bro:type:`double` to convert. -## -## Returns: The :bro:type:`double` *d* as :bro:type:`interval`. -## -## .. bro:see:: interval_to_double -function double_to_interval%(d: double%): interval - %{ - return new Val(d, TYPE_INTERVAL); - %} - -## Converts a :bro:type:`port` to a :bro:type:`count`. -## -## p: The :bro:type:`port` to convert. -## -## Returns: The :bro:type:`port` *p* as :bro:type:`count`. -## -## .. bro:see:: count_to_port -function port_to_count%(p: port%): count - %{ - return val_mgr->GetCount(p->Port()); - %} - -## Converts a :bro:type:`count` and ``transport_proto`` to a :bro:type:`port`. -## -## num: The :bro:type:`port` number. -## -## proto: The transport protocol. -## -## Returns: The :bro:type:`count` *num* as :bro:type:`port`. -## -## .. bro:see:: port_to_count -function count_to_port%(num: count, proto: transport_proto%): port - %{ - return val_mgr->GetPort(num, (TransportProto)proto->AsEnum()); - %} - -## Converts a :bro:type:`string` to an :bro:type:`addr`. -## -## ip: The :bro:type:`string` to convert. -## -## Returns: The :bro:type:`string` *ip* as :bro:type:`addr`, or the unspecified -## address ``::`` if the input string does not parse correctly. -## -## .. bro:see:: to_count to_int to_port count_to_v4_addr raw_bytes_to_v4_addr -## to_subnet -function to_addr%(ip: string%): addr - %{ - char* s = ip->AsString()->Render(); - Val* ret = new AddrVal(s); - delete [] s; - return ret; - %} - -## Converts a :bro:type:`string` to a :bro:type:`subnet`. -## -## sn: The subnet to convert. -## -## Returns: The *sn* string as a :bro:type:`subnet`, or the unspecified subnet -## ``::/0`` if the input string does not parse correctly. -## -## .. bro:see:: to_count to_int to_port count_to_v4_addr raw_bytes_to_v4_addr -## to_addr -function to_subnet%(sn: string%): subnet - %{ - char* s = sn->AsString()->Render(); - Val* ret = new SubNetVal(s); - delete [] s; - return ret; - %} - -## Converts a :bro:type:`addr` to a :bro:type:`subnet`. -## -## a: The address to convert. -## -## Returns: The address as a :bro:type:`subnet`. -## -## .. bro:see:: to_subnet -function addr_to_subnet%(a: addr%): subnet - %{ - int width = (a->AsAddr().GetFamily() == IPv4 ? 32 : 128); - return new SubNetVal(a->AsAddr(), width); - %} - -## Converts a :bro:type:`subnet` to an :bro:type:`addr` by -## extracting the prefix. -## -## sn: The subnet to convert. -## -## Returns: The subnet as an :bro:type:`addr`. -## -## .. bro:see:: to_subnet -function subnet_to_addr%(sn: subnet%): addr - %{ - return new AddrVal(sn->Prefix()); - %} - -## Returns the width of a :bro:type:`subnet`. -## -## sn: The subnet. 
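A similar sketch for the port and subnet helpers; all values are arbitrary::

    event bro_init()
        {
        print port_to_count(443/tcp);         # 443
        print count_to_port(53, udp);         # 53/udp

        print addr_to_subnet(192.168.1.7);    # 192.168.1.7/32
        print subnet_to_addr(10.0.0.0/8);     # 10.0.0.0
        print subnet_width(10.0.0.0/8);       # 8

        print double_to_interval(90.0);       # an interval of 90 seconds
        }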
-## -## Returns: The width of the subnet. -## -## .. bro:see:: to_subnet -function subnet_width%(sn: subnet%): count - %{ - return val_mgr->GetCount(sn->Width()); - %} - -## Converts a :bro:type:`string` to a :bro:type:`double`. -## -## str: The :bro:type:`string` to convert. -## -## Returns: The :bro:type:`string` *str* as double, or 0 if *str* has -## an invalid format. -## -function to_double%(str: string%): double - %{ - const char* s = str->CheckString(); - char* end_s; - - double d = strtod(s, &end_s); - - if ( s[0] == '\0' || end_s[0] != '\0' ) - { - builtin_error("bad conversion to double", @ARG@[0]); - d = 0; - } - - return new Val(d, TYPE_DOUBLE); - %} - -## Converts a :bro:type:`count` to an :bro:type:`addr`. -## -## ip: The :bro:type:`count` to convert. -## -## Returns: The :bro:type:`count` *ip* as :bro:type:`addr`. -## -## .. bro:see:: raw_bytes_to_v4_addr to_addr to_subnet -function count_to_v4_addr%(ip: count%): addr - %{ - if ( ip > 4294967295LU ) - { - builtin_error("conversion of non-IPv4 count to addr", @ARG@[0]); - return new AddrVal(uint32(0)); - } - - return new AddrVal(htonl(uint32(ip))); - %} - -## Converts a :bro:type:`string` of bytes into an IPv4 address. In particular, -## this function interprets the first 4 bytes of the string as an IPv4 address -## in network order. -## -## b: The raw bytes (:bro:type:`string`) to convert. -## -## Returns: The byte :bro:type:`string` *b* as :bro:type:`addr`. -## -## .. bro:see:: raw_bytes_to_v4_addr to_addr to_subnet -function raw_bytes_to_v4_addr%(b: string%): addr - %{ - uint32 a = 0; - - if ( b->Len() < 4 ) - builtin_error("too short a string as input to raw_bytes_to_v4_addr()"); - - else - { - const u_char* bp = b->Bytes(); - a = (bp[0] << 24) | (bp[1] << 16) | (bp[2] << 8) | bp[3]; - } - - return new AddrVal(htonl(a)); - %} - -## Converts a :bro:type:`string` to a :bro:type:`port`. -## -## s: The :bro:type:`string` to convert. -## -## Returns: A :bro:type:`port` converted from *s*. -## -## .. bro:see:: to_addr to_count to_int to_subnet -function to_port%(s: string%): port - %{ - int port = 0; - if ( s->Len() < 10 ) - { - char* slash; - errno = 0; - port = strtol(s->CheckString(), &slash, 10); - if ( ! errno ) - { - ++slash; - if ( streq(slash, "tcp") ) - return val_mgr->GetPort(port, TRANSPORT_TCP); - else if ( streq(slash, "udp") ) - return val_mgr->GetPort(port, TRANSPORT_UDP); - else if ( streq(slash, "icmp") ) - return val_mgr->GetPort(port, TRANSPORT_ICMP); - } - } - - builtin_error("wrong port format, must be /[0-9]{1,5}\\/(tcp|udp|icmp)/"); - return val_mgr->GetPort(port, TRANSPORT_UNKNOWN); - %} - -## Converts a string of bytes (in network byte order) to a :bro:type:`double`. -## -## s: A string of bytes containing the binary representation of a double value. -## -## Returns: The double value contained in *s*, or 0 if the conversion -## failed. -## -function bytestring_to_double%(s: string%): double - %{ - if ( s->Len() != sizeof(double) ) - { - builtin_error("bad conversion to double"); - return new Val(0.0, TYPE_DOUBLE); - } - - // See #908 for a discussion of portability. - double d; - memcpy(&d, s->Bytes(), sizeof(double)); - return new Val(ntohd(d), TYPE_DOUBLE); - %} - -## Converts a string of bytes to a :bro:type:`count`. -## -## s: A string of bytes containing the binary representation of the value. -## -## is_le: If true, *s* is assumed to be in little endian format, else it's big endian. -## -## Returns: The value contained in *s*, or 0 if the conversion failed. 
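The byte-oriented conversions lend themselves to a short sketch as well; the byte strings below are hand-written escape sequences::

    event bro_init()
        {
        print raw_bytes_to_v4_addr("\xc0\xa8\x00\x01");   # 192.168.0.1
        print count_to_v4_addr(3232235521);               # also 192.168.0.1
        print to_port("8080/tcp");                        # 8080/tcp

        # Big-endian interpretation is the default; pass T for little-endian.
        print bytestring_to_count("\x01\x00");            # 256
        print bytestring_to_count("\x01\x00", T);         # 1
        }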
-## -function bytestring_to_count%(s: string, is_le: bool &default=F%): count - %{ -#ifdef HOST_BIGENDIAN - static const bool host_bigendian = true; -#else - static const bool host_bigendian = false; -#endif - const u_char *p = s->Bytes(); - unsigned int i; - - switch ( s->Len() ) { - case sizeof(uint8): - { - uint8 value = 0; - memcpy(&value, p, sizeof(uint8)); - return val_mgr->GetCount(value); - } - - case sizeof(uint16): - { - uint16 value = 0; - - if ( (host_bigendian && is_le) || (! host_bigendian && ! is_le) ) - { - char buf[sizeof(uint16)]; - char *d = &buf[sizeof(uint16)-1]; - - for ( i = 0; i < sizeof(uint16); i++ ) - *d-- = *p++; - - memcpy(&value, buf, sizeof(uint16)); - } - else - memcpy(&value, p, sizeof(uint16)); - - return val_mgr->GetCount(value); - } - - case sizeof(uint32): - { - uint32 value = 0; - - if ( (host_bigendian && is_le) || (! host_bigendian && ! is_le) ) - { - char buf[sizeof(uint32)]; - char *d = &buf[sizeof(uint32)-1]; - - for ( i = 0; i < sizeof(uint32); i++ ) - *d-- = *p++; - - memcpy(&value, buf, sizeof(uint32)); - } - else - memcpy(&value, p, sizeof(uint32)); - - return val_mgr->GetCount(value); - } - - case sizeof(uint64): - { - uint64 value = 0; - - if ( (host_bigendian && is_le) || (! host_bigendian && ! is_le) ) - { - char buf[sizeof(uint64)]; - char *d = &buf[sizeof(uint64)-1]; - - for ( i = 0; i < sizeof(uint64); i++ ) - *d-- = *p++; - - memcpy(&value, buf, sizeof(uint64)); - } - else - memcpy(&value, p, sizeof(uint64)); - - return val_mgr->GetCount(value); - } - } - - builtin_error("unsupported byte length for bytestring_to_count"); - return val_mgr->GetCount(0); - %} - -## Converts a reverse pointer name to an address. For example, -## ``1.0.168.192.in-addr.arpa`` to ``192.168.0.1``. -## -## s: The string with the reverse pointer name. -## -## Returns: The IP address corresponding to *s*. -## -## .. bro:see:: addr_to_ptr_name to_addr -function ptr_name_to_addr%(s: string%): addr - %{ - if ( s->Len() != 72 ) - { - int a[4]; - uint32 addr; - char ss[13]; // this will contain "in-addr.arpa" - - if ( sscanf(s->CheckString(), - "%d.%d.%d.%d.%12s", - a, a+1, a+2, a+3, ss) != 5 - || strcmp(ss, "in-addr.arpa") != 0 ) - { - builtin_error("bad PTR name", @ARG@[0]); - addr = 0; - } - else - addr = (a[3] << 24) | (a[2] << 16) | (a[1] << 8) | a[0]; - - return new AddrVal(htonl(addr)); - } - else - { - uint32 addr6[4]; - uint32 b[32]; - char ss[9]; // this will contain "ip6.arpa" - if ( sscanf(s->CheckString(), - "%1x.%1x.%1x.%1x.%1x.%1x.%1x.%1x." - "%1x.%1x.%1x.%1x.%1x.%1x.%1x.%1x." - "%1x.%1x.%1x.%1x.%1x.%1x.%1x.%1x." - "%1x.%1x.%1x.%1x.%1x.%1x.%1x.%1x.%8s", - b+31, b+30, b+29, b+28, b+27, b+26, b+25, b+24, - b+23, b+22, b+21, b+20, b+19, b+18, b+17, b+16, - b+15, b+14, b+13, b+12, b+11, b+10, b+9, b+8, - b+7, b+6, b+5, b+4, b+3, b+2, b+1, b, ss) != 33 - || strcmp(ss, "ip6.arpa") != 0 ) - { - builtin_error("bad PTR name", @ARG@[0]); - memset(addr6, 0, sizeof addr6); - } - else - { - for ( unsigned int i = 0; i < 4; ++i ) - { - uint32 a = 0; - for ( unsigned int j = 1; j <= 8; ++j ) - a |= b[8*i+j-1] << (32-j*4); - - addr6[i] = htonl(a); - } - } - - return new AddrVal(addr6); - } - %} - -## Converts an IP address to a reverse pointer name. For example, -## ``192.168.0.1`` to ``1.0.168.192.in-addr.arpa``. -## -## a: The IP address to convert to a reverse pointer name. -## -## Returns: The reverse pointer representation of *a*. -## -## .. 
bro:see:: ptr_name_to_addr to_addr -function addr_to_ptr_name%(a: addr%): string - %{ - return new StringVal(a->AsAddr().PtrName().c_str()); - %} - -## Converts a string of bytes into its hexadecimal representation. -## For example, ``"04"`` would be converted to ``"3034"``. -## -## bytestring: The string of bytes. -## -## Returns: The hexadecimal representation of *bytestring*. -## -## .. bro:see:: hexdump hexstr_to_bytestring -function bytestring_to_hexstr%(bytestring: string%): string - %{ - bro_uint_t len = bytestring->AsString()->Len(); - const u_char* bytes = bytestring->AsString()->Bytes(); - char hexstr[(2 * len) + 1]; - - hexstr[0] = 0; - for ( bro_uint_t i = 0; i < len; ++i ) - snprintf(hexstr + (2 * i), 3, "%.2hhx", bytes[i]); - - return new StringVal(hexstr); - %} - -## Converts a hex-string into its binary representation. -## For example, ``"3034"`` would be converted to ``"04"``. -## -## The input string is assumed to contain an even number of hexadecimal digits -## (0-9, a-f, or A-F), otherwise behavior is undefined. -## -## hexstr: The hexadecimal string representation. -## -## Returns: The binary representation of *hexstr*. -## -## .. bro:see:: hexdump bytestring_to_hexstr -function hexstr_to_bytestring%(hexstr: string%): string - %{ - bro_uint_t len = hexstr->AsString()->Len(); - if ( len % 2 != 0 ) - { - reporter->Error("Hex string '%s' has invalid length (not divisible by 2)", hexstr->CheckString()); - return val_mgr->GetEmptyString(); - } - - const char* bytes = hexstr->AsString()->CheckString(); - int outlen = (len/2); - char bytestring[outlen]; - memset(bytestring, 0, outlen); - - for ( bro_uint_t i = 0; i < len/2; ++i ) - { - int res = sscanf(bytes + (2*i), "%2hhx", &bytestring[i]); - - if ( res == EOF ) - { - reporter->Error("Hex string %s contains invalid input: %s", hexstr->CheckString(), strerror(errno)); - return val_mgr->GetEmptyString(); - } - - else if ( res != 1 ) - { - reporter->Error("Could not read hex element from input %s", hexstr->CheckString()); - return val_mgr->GetEmptyString(); - } - - } - - return new StringVal(outlen, bytestring); - %} - -## Encodes a Base64-encoded string. -## -## s: The string to encode. -## -## a: An optional custom alphabet. The empty string indicates the default -## alphabet. If given, the string must consist of 64 unique characters. -## -## Returns: The encoded version of *s*. -## -## .. bro:see:: decode_base64 -function encode_base64%(s: string, a: string &default=""%): string - %{ - BroString* t = encode_base64(s->AsString(), a->AsString()); - if ( t ) - return new StringVal(t); - else - { - reporter->Error("Broker query has an invalid data store"); - return val_mgr->GetEmptyString(); - } - %} - - -## Encodes a Base64-encoded string with a custom alphabet. -## -## s: The string to encode. -## -## a: The custom alphabet. The string must consist of 64 unique -## characters. The empty string indicates the default alphabet. -## -## Returns: The encoded version of *s*. -## -## .. bro:see:: encode_base64 -function encode_base64_custom%(s: string, a: string%): string &deprecated - %{ - BroString* t = encode_base64(s->AsString(), a->AsString()); - if ( t ) - return new StringVal(t); - else - { - reporter->Error("error in encoding string %s", s->CheckString()); - return val_mgr->GetEmptyString(); - } - %} - -## Decodes a Base64-encoded string. -## -## s: The Base64-encoded string. -## -## a: An optional custom alphabet. The empty string indicates the default -## alphabet. If given, the string must consist of 64 unique characters. 
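For the byte-order, hex and reverse-pointer helpers above, a minimal sketch (the hex example mirrors the one in the docs; the other inputs are illustrative)::

    event bro_init()
        {
        # Input is big-endian unless is_le=T.
        print bytestring_to_count("\x01\x02");                # 258
        print bytestring_to_count("\x01\x02", T);             # 513

        # Hex <-> raw-byte round trip.
        print bytestring_to_hexstr("04");                     # 3034
        print hexstr_to_bytestring("3034");                   # 04

        # Reverse-pointer names.
        print addr_to_ptr_name(192.168.0.1);                  # 1.0.168.192.in-addr.arpa
        print ptr_name_to_addr("1.0.168.192.in-addr.arpa");   # 192.168.0.1

        print encode_base64("hello");                         # aGVsbG8=
        }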
-## -## Returns: The decoded version of *s*. -## -## .. bro:see:: decode_base64_conn encode_base64 -function decode_base64%(s: string, a: string &default=""%): string - %{ - BroString* t = decode_base64(s->AsString(), a->AsString()); - if ( t ) - return new StringVal(t); - else - { - reporter->Error("error in decoding string %s", s->CheckString()); - return val_mgr->GetEmptyString(); - } - %} - -## Decodes a Base64-encoded string that was derived from processing a connection. -## If an error is encountered decoding the string, that will be logged to -## ``weird.log`` with the associated connection. -## -## cid: The identifier of the connection that the encoding originates from. -## -## s: The Base64-encoded string. -## -## a: An optional custom alphabet. The empty string indicates the default -## alphabet. If given, the string must consist of 64 unique characters. -## -## Returns: The decoded version of *s*. -## -## .. bro:see:: decode_base64 -function decode_base64_conn%(cid: conn_id, s: string, a: string &default=""%): string - %{ - Connection* conn = sessions->FindConnection(cid); - if ( ! conn ) - { - builtin_error("connection ID not a known connection", cid); - return val_mgr->GetEmptyString(); - } - - BroString* t = decode_base64(s->AsString(), a->AsString(), conn); - if ( t ) - return new StringVal(t); - else - { - reporter->Error("error in decoding string %s", s->CheckString()); - return val_mgr->GetEmptyString(); - } - %} - -## Decodes a Base64-encoded string with a custom alphabet. -## -## s: The Base64-encoded string. -## -## a: The custom alphabet. The string must consist of 64 unique characters. -## The empty string indicates the default alphabet. -## -## Returns: The decoded version of *s*. -## -## .. bro:see:: decode_base64 decode_base64_conn -function decode_base64_custom%(s: string, a: string%): string &deprecated - %{ - BroString* t = decode_base64(s->AsString(), a->AsString()); - if ( t ) - return new StringVal(t); - else - { - reporter->Error("error in decoding string %s", s->CheckString()); - return val_mgr->GetEmptyString(); - } - %} - -%%{ -typedef struct { - uint32 time_low; - uint16 time_mid; - uint16 time_hi_and_version; - uint8 clock_seq_hi_and_reserved; - uint8 clock_seq_low; - uint8 node[6]; -} bro_uuid_t; -%%} - -## Converts a bytes representation of a UUID into its string form. For example, -## given a string of 16 bytes, it produces an output string in this format: -## ``550e8400-e29b-41d4-a716-446655440000``. -## See ``_. -## -## uuid: The 16 bytes of the UUID. -## -## Returns: The string representation of *uuid*. -function uuid_to_string%(uuid: string%): string - %{ - if ( uuid->Len() != 16 ) - return new StringVal(""); - - bro_uuid_t* id = (bro_uuid_t*) uuid->Bytes(); - - static char s[1024]; - char* sp = s; - - sp += snprintf(sp, s + sizeof(s) - sp, - "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", - id->time_low, id->time_mid, id->time_hi_and_version, - id->clock_seq_hi_and_reserved, id->clock_seq_low, - id->node[0], - id->node[1], - id->node[2], - id->node[3], - id->node[4], - id->node[5]); - - return new StringVal(s); - %} - -## Merges and compiles two regular expressions at initialization time. -## -## p1: The first pattern. -## -## p2: The second pattern. -## -## Returns: The compiled pattern of the concatenation of *p1* and *p2*. -## -## .. bro:see:: convert_for_pattern string_to_pattern -## -## .. note:: -## -## This function must be called at Bro startup time, e.g., in the event -## :bro:id:`bro_init`. 
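A usage sketch for the Base64 and UUID helpers documented in this hunk; the connection_established handler and the literal inputs are purely illustrative::

    event connection_established(c: connection)
        {
        print decode_base64("aGVsbG8=");                      # hello

        # Decoding errors end up in weird.log, tied to this connection.
        print decode_base64_conn(c$id, "aGVsbG8=");           # hello

        # 16 raw bytes rendered in the dashed 8-4-4-4-12 form.
        print uuid_to_string(hexstr_to_bytestring("550e8400e29b41d4a716446655440000"));
        }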
-function merge_pattern%(p1: pattern, p2: pattern%): pattern &deprecated - %{ - RE_Matcher* re = new RE_Matcher(); - re->AddPat(p1->PatternText()); - re->AddPat(p2->PatternText()); - re->Compile(); - return new PatternVal(re); - %} - -%%{ -char* to_pat_str(int sn, const char* ss) - { - const char special_re_char[] = "^$-:\"\\/|*+?.(){}[]"; - - char* pat = new char[sn * 4 + 1]; - int pat_len = 0; - - for ( int i = 0; i < sn; ++i ) - { - if ( ! strchr(special_re_char, ss[i]) ) - pat[pat_len++] = ss[i]; - else - { - pat[pat_len++] = '\\'; - pat[pat_len++] = ss[i]; - } - } - pat[pat_len] = '\0'; - return pat; - } -%%} - -## Escapes a string so that it becomes a valid :bro:type:`pattern` and can be -## used with the :bro:id:`string_to_pattern`. Any character from the set -## ``^$-:"\/|*+?.(){}[]`` is prefixed with a ``\``. -## -## s: The string to escape. -## -## Returns: An escaped version of *s* that has the structure of a valid -## :bro:type:`pattern`. -## -## .. bro:see:: merge_pattern string_to_pattern -## -function convert_for_pattern%(s: string%): string - %{ - char* t = to_pat_str(s->Len(), (const char*)(s->Bytes())); - StringVal* ret = new StringVal(t); - delete [] t; - return ret; - %} - -## Converts a :bro:type:`string` into a :bro:type:`pattern`. -## -## s: The string to convert. -## -## convert: If true, *s* is first passed through the function -## :bro:id:`convert_for_pattern` to escape special characters of -## patterns. -## -## Returns: *s* as :bro:type:`pattern`. -## -## .. bro:see:: convert_for_pattern merge_pattern -## -## .. note:: -## -## This function must be called at Bro startup time, e.g., in the event -## :bro:id:`bro_init`. -function string_to_pattern%(s: string, convert: bool%): pattern - %{ - const char* ss = (const char*) (s->Bytes()); - int sn = s->Len(); - char* pat; - - if ( convert ) - pat = to_pat_str(sn, ss); - else - { - pat = new char[sn+1]; - memcpy(pat, ss, sn); - pat[sn] = '\0'; - } - - RE_Matcher* re = new RE_Matcher(pat); - delete [] pat; - re->Compile(); - return new PatternVal(re); - %} - -## Formats a given time value according to a format string. -## -## fmt: The format string. See ``man strftime`` for the syntax. -## -## d: The time value. -## -## Returns: The time *d* formatted according to *fmt*. -function strftime%(fmt: string, d: time%) : string - %{ - static char buffer[128]; - - time_t timeval = time_t(d); - struct tm t; - - if ( ! localtime_r(&timeval, &t) || - ! strftime(buffer, 128, fmt->CheckString(), &t) ) - return new StringVal(""); - - return new StringVal(buffer); - %} - - -## Parse a textual representation of a date/time value into a ``time`` type value. -## -## fmt: The format string used to parse the following *d* argument. See ``man strftime`` -## for the syntax. -## -## d: The string representing the time. -## -## Returns: The time value calculated from parsing *d* with *fmt*. -function strptime%(fmt: string, d: string%) : time - %{ - const time_t timeval = time_t(); - struct tm t; - - if ( ! localtime_r(&timeval, &t) || - ! 
strptime(d->CheckString(), fmt->CheckString(), &t) ) - { - reporter->Warning("strptime conversion failed: fmt:%s d:%s", fmt->CheckString(), d->CheckString()); - return new Val(0.0, TYPE_TIME); - } - - double ret = mktime(&t); - return new Val(ret, TYPE_TIME); - %} - - -# =========================================================================== -# -# Network Type Processing -# -# =========================================================================== - -## Masks an address down to the number of given upper bits. For example, -## ``mask_addr(1.2.3.4, 18)`` returns ``1.2.0.0``. -## -## a: The address to mask. -## -## top_bits_to_keep: The number of top bits to keep in *a*; must be greater -## than 0 and less than 33 for IPv4, or 129 for IPv6. -## -## Returns: The address *a* masked down to *top_bits_to_keep* bits. -## -## .. bro:see:: remask_addr -function mask_addr%(a: addr, top_bits_to_keep: count%): subnet - %{ - return new SubNetVal(a->AsAddr(), top_bits_to_keep); - %} - -## Takes some top bits (such as a subnet address) from one address and the other -## bits (intra-subnet part) from a second address and merges them to get a new -## address. This is useful for anonymizing at subnet level while preserving -## serial scans. -## -## a1: The address to mask with *top_bits_from_a1*. -## -## a2: The address to take the remaining bits from. -## -## top_bits_from_a1: The number of top bits to keep in *a1*; must be greater -## than 0 and less than 129. This value is always interpreted -## relative to the IPv6 bit width (v4-mapped addresses start -## at bit number 96). -## -## Returns: The address *a* masked down to *top_bits_to_keep* bits. -## -## .. bro:see:: mask_addr -function remask_addr%(a1: addr, a2: addr, top_bits_from_a1: count%): addr - %{ - IPAddr addr1(a1->AsAddr()); - addr1.Mask(top_bits_from_a1); - IPAddr addr2(a2->AsAddr()); - addr2.ReverseMask(top_bits_from_a1); - return new AddrVal(addr1|addr2); - %} - -## Checks whether a given :bro:type:`port` has TCP as transport protocol. -## -## p: The :bro:type:`port` to check. -## -## Returns: True iff *p* is a TCP port. -## -## .. bro:see:: is_udp_port is_icmp_port -function is_tcp_port%(p: port%): bool - %{ - return val_mgr->GetBool(p->IsTCP()); - %} - -## Checks whether a given :bro:type:`port` has UDP as transport protocol. -## -## p: The :bro:type:`port` to check. -## -## Returns: True iff *p* is a UDP port. -## -## .. bro:see:: is_icmp_port is_tcp_port -function is_udp_port%(p: port%): bool - %{ - return val_mgr->GetBool(p->IsUDP()); - %} - -## Checks whether a given :bro:type:`port` has ICMP as transport protocol. -## -## p: The :bro:type:`port` to check. -## -## Returns: True iff *p* is an ICMP port. -## -## .. bro:see:: is_tcp_port is_udp_port -function is_icmp_port%(p: port%): bool - %{ - return val_mgr->GetBool(p->IsICMP()); - %} - -%%{ -EnumVal* map_conn_type(TransportProto tp) - { - switch ( tp ) { - case TRANSPORT_UNKNOWN: - return transport_proto->GetVal(0); - break; - - case TRANSPORT_TCP: - return transport_proto->GetVal(1); - break; - - case TRANSPORT_UDP: - return transport_proto->GetVal(2); - break; - - case TRANSPORT_ICMP: - return transport_proto->GetVal(3); - break; - - default: - reporter->InternalError("bad connection type in map_conn_type()"); - } - - // Cannot be reached; - assert(false); - return 0; // Make compiler happy. - } -%%} - -## Extracts the transport protocol from a connection. -## -## cid: The connection identifier. -## -## Returns: The transport protocol of the connection identified by *cid*. 
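The pattern, time-formatting and address/port helpers above combine as in this sketch; string_to_pattern() must run at startup, and the format strings and prefix length are illustrative::

    event bro_init()
        {
        # convert=T escapes "." via convert_for_pattern(), so only a literal "a.b" matches.
        local p: pattern = string_to_pattern("a.b", T);
        print (p in "xxa.bxx");                               # T

        print strftime("%Y-%m-%d %H:%M:%S", current_time());
        print strptime("%Y-%m-%d", "2019-06-25");             # a time value

        print mask_addr(1.2.3.4, 18);                         # 1.2.0.0/18
        print is_tcp_port(80/tcp), is_udp_port(80/tcp);       # T, F
        print get_port_transport_proto(53/udp);               # udp
        }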
-## -## .. bro:see:: get_port_transport_proto -## get_orig_seq get_resp_seq -function get_conn_transport_proto%(cid: conn_id%): transport_proto - %{ - Connection* c = sessions->FindConnection(cid); - if ( ! c ) - { - builtin_error("unknown connection id in get_conn_transport_proto()", cid); - return transport_proto->GetVal(0); - } - - return map_conn_type(c->ConnTransport()); - %} - -## Extracts the transport protocol from a :bro:type:`port`. -## -## p: The port. -## -## Returns: The transport protocol of the port *p*. -## -## .. bro:see:: get_conn_transport_proto -## get_orig_seq get_resp_seq -function get_port_transport_proto%(p: port%): transport_proto - %{ - return map_conn_type(p->PortType()); - %} - -## Checks whether a connection is (still) active. -## -## c: The connection id to check. -## -## Returns: True if the connection identified by *c* exists. -## -## .. bro:see:: lookup_connection -function connection_exists%(c: conn_id%): bool - %{ - if ( sessions->FindConnection(c) ) - return val_mgr->GetBool(1); - else - return val_mgr->GetBool(0); - %} - -## Returns the :bro:type:`connection` record for a given connection identifier. -## -## cid: The connection ID. -## -## Returns: The :bro:type:`connection` record for *cid*. If *cid* does not point -## to an existing connection, the function generates a run-time error -## and returns a dummy value. -## -## .. bro:see:: connection_exists -function lookup_connection%(cid: conn_id%): connection - %{ - Connection* conn = sessions->FindConnection(cid); - if ( conn ) - return conn->BuildConnVal(); - - builtin_error("connection ID not a known connection", cid); - - // Return a dummy connection record. - RecordVal* c = new RecordVal(connection_type); - - RecordVal* id_val = new RecordVal(conn_id); - id_val->Assign(0, new AddrVal((unsigned int) 0)); - id_val->Assign(1, val_mgr->GetPort(ntohs(0), TRANSPORT_UDP)); - id_val->Assign(2, new AddrVal((unsigned int) 0)); - id_val->Assign(3, val_mgr->GetPort(ntohs(0), TRANSPORT_UDP)); - c->Assign(0, id_val); - - RecordVal* orig_endp = new RecordVal(endpoint); - orig_endp->Assign(0, val_mgr->GetCount(0)); - orig_endp->Assign(1, val_mgr->GetCount(int(0))); - - RecordVal* resp_endp = new RecordVal(endpoint); - resp_endp->Assign(0, val_mgr->GetCount(0)); - resp_endp->Assign(1, val_mgr->GetCount(int(0))); - - c->Assign(1, orig_endp); - c->Assign(2, resp_endp); - - c->Assign(3, new Val(network_time, TYPE_TIME)); - c->Assign(4, new Val(0.0, TYPE_INTERVAL)); - c->Assign(5, new TableVal(string_set)); // service - c->Assign(6, val_mgr->GetEmptyString()); // history - - return c; - %} - -%%{ -const char* conn_id_string(Val* c) - { - Val* id = (*(c->AsRecord()))[0]; - const val_list* vl = id->AsRecord(); - - const IPAddr& orig_h = (*vl)[0]->AsAddr(); - uint32 orig_p = (*vl)[1]->AsPortVal()->Port(); - const IPAddr& resp_h = (*vl)[2]->AsAddr(); - uint32 resp_p = (*vl)[3]->AsPortVal()->Port(); - - return fmt("%s/%u -> %s/%u\n", orig_h.AsString().c_str(), orig_p, - resp_h.AsString().c_str(), resp_p); - } -%%} - -## Writes the current packet to a file. -## -## file_name: The name of the file to write the packet to. -## -## Returns: True on success. -## -## .. bro:see:: dump_packet get_current_packet send_current_packet -function dump_current_packet%(file_name: string%) : bool - %{ - const Packet* pkt; - - if ( ! current_pktsrc || - ! 
current_pktsrc->GetCurrentPacket(&pkt) ) - return val_mgr->GetBool(0); - - if ( addl_pkt_dumper && addl_pkt_dumper->Path() != file_name->CheckString()) - { - addl_pkt_dumper->Close(); - addl_pkt_dumper = nullptr; - } - - if ( ! addl_pkt_dumper ) - addl_pkt_dumper = iosource_mgr->OpenPktDumper(file_name->CheckString(), true); - - if ( addl_pkt_dumper ) - { - addl_pkt_dumper->Dump(pkt); - } - - return val_mgr->GetBool( addl_pkt_dumper && ! addl_pkt_dumper->IsError()); - %} - -## Returns the currently processed PCAP packet. -## -## Returns: The currently processed packet, which is a record -## containing the timestamp, ``snaplen``, and packet data. -## -## .. bro:see:: dump_current_packet dump_packet send_current_packet -function get_current_packet%(%) : pcap_packet - %{ - const Packet* p; - RecordVal* pkt = new RecordVal(pcap_packet); - - if ( ! current_pktsrc || - ! current_pktsrc->GetCurrentPacket(&p) ) - { - pkt->Assign(0, val_mgr->GetCount(0)); - pkt->Assign(1, val_mgr->GetCount(0)); - pkt->Assign(2, val_mgr->GetCount(0)); - pkt->Assign(3, val_mgr->GetCount(0)); - pkt->Assign(4, val_mgr->GetEmptyString()); - pkt->Assign(5, BifType::Enum::link_encap->GetVal(BifEnum::LINK_UNKNOWN)); - return pkt; - } - - pkt->Assign(0, val_mgr->GetCount(uint32(p->ts.tv_sec))); - pkt->Assign(1, val_mgr->GetCount(uint32(p->ts.tv_usec))); - pkt->Assign(2, val_mgr->GetCount(p->cap_len)); - pkt->Assign(3, val_mgr->GetCount(p->len)); - pkt->Assign(4, new StringVal(p->cap_len, (const char*)p->data)); - pkt->Assign(5, BifType::Enum::link_encap->GetVal(p->link_type)); - - return pkt; - %} - -## Function to get the raw headers of the currently processed packet. -## -## Returns: The :bro:type:`raw_pkt_hdr` record containing the Layer 2, 3 and -## 4 headers of the currently processed packet. -## -## .. bro:see:: raw_pkt_hdr get_current_packet -function get_current_packet_header%(%) : raw_pkt_hdr - %{ - const Packet* p; - - if ( current_pktsrc && - current_pktsrc->GetCurrentPacket(&p) ) - { - return p->BuildPktHdrVal(); - } - - RecordVal* hdr = new RecordVal(raw_pkt_hdr_type); - return hdr; - %} - -## Writes a given packet to a file. -## -## pkt: The PCAP packet. -## -## file_name: The name of the file to write *pkt* to. -## -## Returns: True on success -## -## .. bro:see:: get_current_packet dump_current_packet send_current_packet -function dump_packet%(pkt: pcap_packet, file_name: string%) : bool - %{ - if ( addl_pkt_dumper && addl_pkt_dumper->Path() != file_name->CheckString()) - { - addl_pkt_dumper->Close(); - addl_pkt_dumper = nullptr; - } - - if ( ! addl_pkt_dumper ) - addl_pkt_dumper = iosource_mgr->OpenPktDumper(file_name->CheckString(), true); - - if ( ! addl_pkt_dumper->IsError() ) - { - pkt_timeval ts; - uint32 caplen, len, link_type; - u_char *data; - - const val_list* pkt_vl = pkt->AsRecord(); - - ts.tv_sec = (*pkt_vl)[0]->AsCount(); - ts.tv_usec = (*pkt_vl)[1]->AsCount(); - caplen = (*pkt_vl)[2]->AsCount(); - len = (*pkt_vl)[3]->AsCount(); - data = (*pkt_vl)[4]->AsString()->Bytes(); - link_type = (*pkt_vl)[5]->AsEnum(); - Packet p(link_type, &ts, caplen, len, data, true); - - addl_pkt_dumper->Dump(&p); - } - - return val_mgr->GetBool(addl_pkt_dumper && ! 
addl_pkt_dumper->IsError()); - %} - -%%{ -#include "DNS_Mgr.h" -#include "Trigger.h" - -class LookupHostCallback : public DNS_Mgr::LookupCallback { -public: - LookupHostCallback(Trigger* arg_trigger, const CallExpr* arg_call, - bool arg_lookup_name) - { - Ref(arg_trigger); - trigger = arg_trigger; - call = arg_call; - lookup_name = arg_lookup_name; - } - - ~LookupHostCallback() - { - Unref(trigger); - } - - // Overridden from DNS_Mgr:Lookup:Callback. - virtual void Resolved(const char* name) - { - Val* result = new StringVal(name); - trigger->Cache(call, result); - Unref(result); - trigger->Release(); - } - - virtual void Resolved(TableVal* addrs) - { - // No Ref() for addrs. - trigger->Cache(call, addrs); - trigger->Release(); - } - - virtual void Timeout() - { - if ( lookup_name ) - { - Val* result = new StringVal("<\?\?\?>"); - trigger->Cache(call, result); - Unref(result); - } - - else - { - ListVal* lv = new ListVal(TYPE_ADDR); - lv->Append(new AddrVal("0.0.0.0")); - Val* result = lv->ConvertToSet(); - trigger->Cache(call, result); - Unref(result); - Unref(lv); - } - - trigger->Release(); - } - -private: - Trigger* trigger; - const CallExpr* call; - bool lookup_name; -}; -%%} - -## Issues an asynchronous reverse DNS lookup and delays the function result. -## This function can therefore only be called inside a ``when`` condition, -## e.g., ``when ( local host = lookup_addr(10.0.0.1) ) { f(host); }``. -## -## host: The IP address to lookup. -## -## Returns: The DNS name of *host*. -## -## .. bro:see:: lookup_hostname -function lookup_addr%(host: addr%) : string - %{ - // FIXME: It should be easy to adapt the function to synchronous - // lookups if we're reading a trace. - Trigger* trigger = frame->GetTrigger(); - - if ( ! trigger) - { - builtin_error("lookup_addr() can only be called inside a when-condition"); - return new StringVal(""); - } - - frame->SetDelayed(); - trigger->Hold(); - - dns_mgr->AsyncLookupAddr(host->AsAddr(), - new LookupHostCallback(trigger, frame->GetCall(), true)); - return 0; - %} - -## Issues an asynchronous TEXT DNS lookup and delays the function result. -## This function can therefore only be called inside a ``when`` condition, -## e.g., ``when ( local h = lookup_hostname_txt("www.bro.org") ) { f(h); }``. -## -## host: The hostname to lookup. -## -## Returns: The DNS TXT record associated with *host*. -## -## .. bro:see:: lookup_hostname -function lookup_hostname_txt%(host: string%) : string - %{ - // FIXME: Is should be easy to adapt the function to synchronous - // lookups if we're reading a trace. - Trigger* trigger = frame->GetTrigger(); - - if ( ! trigger) - { - builtin_error("lookup_hostname_txt() can only be called inside a when-condition"); - return new StringVal(""); - } - - frame->SetDelayed(); - trigger->Hold(); - - dns_mgr->AsyncLookupNameText(host->CheckString(), - new LookupHostCallback(trigger, frame->GetCall(), true)); - return 0; - %} - -## Issues an asynchronous DNS lookup and delays the function result. -## This function can therefore only be called inside a ``when`` condition, -## e.g., ``when ( local h = lookup_hostname("www.bro.org") ) { f(h); }``. -## -## host: The hostname to lookup. -## -## Returns: A set of DNS A and AAAA records associated with *host*. -## -## .. bro:see:: lookup_addr -function lookup_hostname%(host: string%) : addr_set - %{ - // FIXME: Is should be easy to adapt the function to synchronous - // lookups if we're reading a trace. - Trigger* trigger = frame->GetTrigger(); - - if ( ! 
trigger) - { - builtin_error("lookup_hostname() can only be called inside a when-condition"); - return new StringVal(""); - } - - frame->SetDelayed(); - trigger->Hold(); - - dns_mgr->AsyncLookupName(host->CheckString(), - new LookupHostCallback(trigger, frame->GetCall(), false)); - return 0; - %} - -%%{ -#ifdef USE_GEOIP -#include - -extern "C" { -#include -#include -#include -#include -#include -} - -class MMDB { -public: - MMDB(const char* filename, struct stat info); - - ~MMDB(); - - MMDB_lookup_result_s Lookup(const struct sockaddr* const sa); - bool StaleDB(); - const char* Filename(); - -private: - MMDB_s mmdb; - struct stat file_info; - bool lookup_error; - std::chrono::time_point last_check; -}; - -MMDB::MMDB(const char* filename, struct stat info) - : file_info(info), lookup_error{false}, - last_check{std::chrono::steady_clock::now()} - { - int status = MMDB_open(filename, MMDB_MODE_MMAP, &mmdb); - - if ( MMDB_SUCCESS != status ) - { - throw std::runtime_error(MMDB_strerror(status)); - } - } - -MMDB::~MMDB() - { - MMDB_close(&mmdb); - } - -MMDB_lookup_result_s MMDB::Lookup(const struct sockaddr* const sa) - { - int mmdb_error; - MMDB_lookup_result_s result = MMDB_lookup_sockaddr(&mmdb, sa, &mmdb_error); - - if ( MMDB_SUCCESS != mmdb_error ) - { - lookup_error = true; - throw std::runtime_error(MMDB_strerror(mmdb_error)); - } - - return result; - } - -// Check to see if the Maxmind DB should be closed and reopened. This will -// happen if there was a lookup error or if the mmap'd file has been replaced -// by an external process. -bool MMDB::StaleDB() - { - struct stat buf; - using Clock = std::chrono::steady_clock; - std::chrono::time_point now = Clock::now(); - - if ( lookup_error ) - return true; - - // Only perform stat once per 5 minutes. 
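 // Throttling the stat() keeps per-lookup overhead low. Once the check does
 // run, a failed stat(), a changed inode, or a changed mtime all report the
 // database as stale, so mmdb_check_loc()/mmdb_check_asn() can drop the handle
 // and a later lookup can reopen the replaced file.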
- using Min = std::chrono::minutes; - if ( std::chrono::duration_cast(now - last_check).count() < 5 ) - return false; - - last_check = now; - - if ( 0 != stat(mmdb.filename, &buf) ) - return true; - - if ( buf.st_ino != file_info.st_ino || buf.st_mtime != file_info.st_mtime ) - { - reporter->Info("Inode change detected for MaxMind DB [%s]", - mmdb.filename); - return true; - } - - return false; - } - -const char* MMDB::Filename() - { - return mmdb.filename; - } - -std::unique_ptr mmdb_loc; -std::unique_ptr mmdb_asn; -static bool did_mmdb_loc_db_error = false; -static bool did_mmdb_asn_db_error = false; - -static bool mmdb_open(const char* filename, bool asn) - { - struct stat buf; - - if ( 0 != stat(filename, &buf) ) - { - return false; - } - - try - { - if ( asn ) - { - mmdb_asn.reset(new MMDB(filename, buf)); - } - else - { - mmdb_loc.reset(new MMDB(filename, buf)); - } - } - - catch ( const std::exception& e ) - { - if ( asn ) - did_mmdb_asn_db_error = false; - else - did_mmdb_loc_db_error = false; - - reporter->Info("Failed to open MaxMind DB: %s [%s]", filename, - e.what()); - return false; - } - - return true; - } - -static bool mmdb_open_loc(const char* filename) - { - return mmdb_open(filename, false); - } - -static bool mmdb_open_asn(const char* filename) - { - return mmdb_open(filename, true); - } - -static void mmdb_check_loc() - { - if ( mmdb_loc && mmdb_loc->StaleDB() ) - { - reporter->Info("Closing stale MaxMind DB [%s]", mmdb_loc->Filename()); - did_mmdb_loc_db_error = false; - mmdb_loc.release(); - } - } - -static void mmdb_check_asn() - { - if ( mmdb_asn && mmdb_asn->StaleDB() ) - { - reporter->Info("Closing stale MaxMind DB [%s]", mmdb_asn->Filename()); - did_mmdb_asn_db_error = false; - mmdb_asn.release(); - } - } - -static bool mmdb_lookup(const IPAddr& addr, MMDB_lookup_result_s& result, - bool asn) - { - struct sockaddr_storage ss = {0}; - - if ( IPv4 == addr.GetFamily() ) - { - struct sockaddr_in* sa = (struct sockaddr_in*)&ss; - sa->sin_family = AF_INET; - addr.CopyIPv4(&sa->sin_addr); - } - - else - { - struct sockaddr_in6* sa = (struct sockaddr_in6*)&ss; - sa->sin6_family = AF_INET6; - addr.CopyIPv6(&sa->sin6_addr); - } - - try - { - result = asn ? 
mmdb_asn->Lookup((struct sockaddr*)&ss) - : mmdb_loc->Lookup((struct sockaddr*)&ss); - } - - catch ( const std::exception& e ) - { - reporter->Info("MaxMind DB lookup location error [%s]", - e.what()); - return false; - } - - return result.found_entry; - } - -static bool mmdb_lookup_loc(const IPAddr& addr, MMDB_lookup_result_s& result) - { - return mmdb_lookup(addr, result, false); - } - -static bool mmdb_lookup_asn(const IPAddr& addr, MMDB_lookup_result_s& result) - { - return mmdb_lookup(addr, result, true); - } - -static Val* mmdb_getvalue(MMDB_entry_data_s* entry_data, int status, - int data_type ) - { - switch (status) - { - case MMDB_SUCCESS: - if ( entry_data->has_data ) - { - switch (data_type) - { - case MMDB_DATA_TYPE_UTF8_STRING: - return new StringVal(entry_data->data_size, - entry_data->utf8_string); - break; - - case MMDB_DATA_TYPE_DOUBLE: - return new Val(entry_data->double_value, TYPE_DOUBLE); - break; - - case MMDB_DATA_TYPE_UINT32: - return val_mgr->GetCount(entry_data->uint32); - - default: - break; - } - } - break; - - case MMDB_LOOKUP_PATH_DOES_NOT_MATCH_DATA_ERROR: - // key doesn't exist, nothing to do - break; - - default: - reporter->Info("MaxMind DB error [%s]", MMDB_strerror(status)); - break; - } - - return nullptr; - } - -static bool mmdb_try_open_loc () - { - // City database is always preferred over Country database. - auto mmdb_dir_val = global_scope()->Lookup("mmdb_dir")->ID_Val(); - std::string mmdb_dir = mmdb_dir_val->AsString()->CheckString(); - - if ( ! mmdb_dir.empty() ) - { - auto d = mmdb_dir + "/GeoLite2-City.mmdb"; - - if ( mmdb_open_loc(d.data()) ) - return true; - - d = mmdb_dir + "/GeoLite2-Country.mmdb"; - - if ( mmdb_open_loc(d.data()) ) - return true;; - } - - return mmdb_open_loc("/usr/share/GeoIP/GeoLite2-City.mmdb") - || mmdb_open_loc("/var/lib/GeoIP/GeoLite2-City.mmdb") - || mmdb_open_loc("/usr/local/share/GeoIP/GeoLite2-City.mmdb") - || mmdb_open_loc("/usr/local/var/GeoIP/GeoLite2-City.mmdb") - || mmdb_open_loc("/usr/share/GeoIP/GeoLite2-Country.mmdb") - || mmdb_open_loc("/var/lib/GeoIP/GeoLite2-Country.mmdb") - || mmdb_open_loc("/usr/local/share/GeoIP/GeoLite2-Country.mmdb") - || mmdb_open_loc("/usr/local/var/GeoIP/GeoLite2-Country.mmdb"); - } - -static bool mmdb_try_open_asn () - { - auto mmdb_dir_val = global_scope()->Lookup("mmdb_dir")->ID_Val(); - std::string mmdb_dir = mmdb_dir_val->AsString()->CheckString(); - - if ( ! mmdb_dir.empty() ) - { - auto d = mmdb_dir + "/GeoLite2-ASN.mmdb"; - - if ( mmdb_open_asn(d.data()) ) - return true; - } - - return mmdb_open_asn("/usr/share/GeoIP/GeoLite2-ASN.mmdb") - || mmdb_open_asn("/var/lib/GeoIP/GeoLite2-ASN.mmdb") - || mmdb_open_asn("/usr/local/share/GeoIP/GeoLite2-ASN.mmdb") - || mmdb_open_asn("/usr/local/var/GeoIP/GeoLite2-ASN.mmdb"); - } - -#endif -%%} - -## Initializes MMDB for later use of lookup_location. -## Requires Bro to be built with ``libmaxminddb``. -## -## f: The filename of the MaxMind City or Country DB. -## -## Returns: A boolean indicating whether the db was successfully opened. -## -## .. bro:see:: lookup_asn -function mmdb_open_location_db%(f: string%) : bool - %{ -#ifdef USE_GEOIP - return val_mgr->GetBool(mmdb_open_loc(f->CheckString())); -#else - return val_mgr->GetBool(0); -#endif - %} - -## Initializes MMDB for later use of lookup_asn. -## Requires Bro to be built with ``libmaxminddb``. -## -## f: The filename of the MaxMind ASN DB. -## -## Returns: A boolean indicating whether the db was successfully opened. -## -## .. 
bro:see:: lookup_asn -function mmdb_open_asn_db%(f: string%) : bool - %{ -#ifdef USE_GEOIP - return val_mgr->GetBool(mmdb_open_asn(f->CheckString())); -#else - return val_mgr->GetBool(0); -#endif - %} - -## Performs a geo-lookup of an IP address. -## Requires Bro to be built with ``libmaxminddb``. -## -## a: The IP address to lookup. -## -## Returns: A record with country, region, city, latitude, and longitude. -## -## .. bro:see:: lookup_asn -function lookup_location%(a: addr%) : geo_location - %{ - RecordVal* location = new RecordVal(geo_location); - -#ifdef USE_GEOIP - mmdb_check_loc(); - if ( ! mmdb_loc ) - { - if ( ! mmdb_try_open_loc() ) - { - if ( ! did_mmdb_loc_db_error ) - { - did_mmdb_loc_db_error = true; - builtin_error("Failed to open GeoIP location database"); - } - - return location; - } - } - - MMDB_lookup_result_s result; - - if ( mmdb_lookup_loc(a->AsAddr(), result) ) - { - MMDB_entry_data_s entry_data; - int status; - - // Get Country ISO Code - status = MMDB_get_value(&result.entry, &entry_data, - "country", "iso_code", nullptr); - location->Assign(0, mmdb_getvalue(&entry_data, status, - MMDB_DATA_TYPE_UTF8_STRING)); - - // Get Major Subdivision ISO Code - status = MMDB_get_value(&result.entry, &entry_data, - "subdivisions", "0", "iso_code", nullptr); - location->Assign(1, mmdb_getvalue(&entry_data, status, - MMDB_DATA_TYPE_UTF8_STRING)); - - // Get City English Name - status = MMDB_get_value(&result.entry, &entry_data, - "city", "names", "en", nullptr); - location->Assign(2, mmdb_getvalue(&entry_data, status, - MMDB_DATA_TYPE_UTF8_STRING)); - - // Get Location Latitude - status = MMDB_get_value(&result.entry, &entry_data, - "location", "latitude", nullptr); - location->Assign(3, mmdb_getvalue(&entry_data, status, - MMDB_DATA_TYPE_DOUBLE)); - - // Get Location Longitude - status = MMDB_get_value(&result.entry, &entry_data, - "location", "longitude", nullptr); - location->Assign(4, mmdb_getvalue(&entry_data, status, - MMDB_DATA_TYPE_DOUBLE)); - - return location; - } - -#else // not USE_GEOIP - static int missing_geoip_reported = 0; - - if ( ! missing_geoip_reported ) - { - builtin_error("Bro was not configured for GeoIP support"); - missing_geoip_reported = 1; - } -#endif - - // We can get here even if we have MMDB support if we weren't - // able to initialize it or it didn't return any information for - // the address. - - return location; - %} - -## Performs an ASN lookup of an IP address. -## Requires Bro to be built with ``libmaxminddb``. -## -## a: The IP address to lookup. -## -## Returns: The number of the ASN that contains *a*. -## -## .. bro:see:: lookup_location -function lookup_asn%(a: addr%) : count - %{ -#ifdef USE_GEOIP - mmdb_check_asn(); - if ( ! mmdb_asn ) - { - if ( ! mmdb_try_open_asn() ) - { - if ( ! did_mmdb_asn_db_error ) - { - did_mmdb_asn_db_error = true; - builtin_error("Failed to open GeoIP ASN database"); - } - - return val_mgr->GetCount(0); - } - } - - MMDB_lookup_result_s result; - - if ( mmdb_lookup_asn(a->AsAddr(), result) ) - { - MMDB_entry_data_s entry_data; - int status; - - // Get Autonomous System Number - status = MMDB_get_value(&result.entry, &entry_data, - "autonomous_system_number", nullptr); - Val* asn = mmdb_getvalue(&entry_data, status, MMDB_DATA_TYPE_UINT32); - return asn == nullptr ? val_mgr->GetCount(0) : asn; - } - -#else // not USE_GEOIP - static int missing_geoip_reported = 0; - - if ( ! 
missing_geoip_reported ) - { - builtin_error("Bro was not configured for GeoIP ASN support"); - missing_geoip_reported = 1; - } -#endif - - // We can get here even if we have GeoIP support, if we weren't - // able to initialize it or it didn't return any information for - // the address. - return val_mgr->GetCount(0); - %} - -## Calculates distance between two geographic locations using the haversine -## formula. Latitudes and longitudes must be given in degrees, where southern -## hemispere latitudes are negative and western hemisphere longitudes are -## negative. -## -## lat1: Latitude (in degrees) of location 1. -## -## long1: Longitude (in degrees) of location 1. -## -## lat2: Latitude (in degrees) of location 2. -## -## long2: Longitude (in degrees) of location 2. -## -## Returns: Distance in miles. -## -## .. bro:see:: haversine_distance_ip -function haversine_distance%(lat1: double, long1: double, lat2: double, long2: double%): double - %{ - const double PI = 3.14159; - const double RADIUS = 3958.8; // Earth's radius in miles. - - double s1 = sin((lat2 - lat1) * PI/360); - double s2 = sin((long2 - long1) * PI/360); - double a = s1 * s1 + cos(lat1 * PI/180) * cos(lat2 * PI/180) * s2 * s2; - double distance = 2 * RADIUS * asin(sqrt(a)); - - return new Val(distance, TYPE_DOUBLE); - %} - -## Converts UNIX file permissions given by a mode to an ASCII string. -## -## mode: The permissions (an octal number like 0644 converted to decimal). -## -## Returns: A string representation of *mode* in the format -## ``rw[xsS]rw[xsS]rw[xtT]``. -function file_mode%(mode: count%): string - %{ - char str[12]; - char *p = str; - - /* usr */ - if (mode & S_IRUSR) - *p++ = 'r'; - else - *p++ = '-'; - - if (mode & S_IWUSR) - *p++ = 'w'; - else - *p++ = '-'; - - switch (mode & (S_IXUSR | S_ISUID)) { - case 0: - *p++ = '-'; - break; - case S_IXUSR: - *p++ = 'x'; - break; - case S_ISUID: - *p++ = 'S'; - break; - case S_IXUSR | S_ISUID: - *p++ = 's'; - break; - } - - /* group */ - if (mode & S_IRGRP) - *p++ = 'r'; - else - *p++ = '-'; - if (mode & S_IWGRP) - *p++ = 'w'; - else - *p++ = '-'; - - switch (mode & (S_IXGRP | S_ISGID)) { - case 0: - *p++ = '-'; - break; - case S_IXGRP: - *p++ = 'x'; - break; - case S_ISGID: - *p++ = 'S'; - break; - case S_IXGRP | S_ISGID: - *p++ = 's'; - break; - } - - /* other */ - if (mode & S_IROTH) - *p++ = 'r'; - else - *p++ = '-'; - if (mode & S_IWOTH) - *p++ = 'w'; - else - *p++ = '-'; - - switch (mode & (S_IXOTH | S_ISVTX)) { - case 0: - *p++ = '-'; - break; - case S_IXOTH: - *p++ = 'x'; - break; - case S_ISVTX: - *p++ = 'T'; - break; - case S_IXOTH | S_ISVTX: - *p++ = 't'; - break; - } - - *p = '\0'; - - return new StringVal(str); - %} - -# =========================================================================== -# -# Controlling Analyzer Behavior -# -# =========================================================================== - -%%{ -#include "analyzer/Manager.h" -%%} - -## Disables the analyzer which raised the current event (if the analyzer -## belongs to the given connection). -## -## cid: The connection identifier. -## -## aid: The analyzer ID. -## -## Returns: True if the connection identified by *cid* exists and has analyzer -## *aid*. -## -## .. bro:see:: Analyzer::schedule_analyzer Analyzer::name -function disable_analyzer%(cid: conn_id, aid: count, err_if_no_conn: bool &default=T%) : bool - %{ - Connection* c = sessions->FindConnection(cid); - if ( ! 
c ) - { - reporter->Error("cannot find connection"); - return val_mgr->GetBool(0); - } - - analyzer::Analyzer* a = c->FindAnalyzer(aid); - if ( ! a ) - { - if ( err_if_no_conn ) - reporter->Error("connection does not have analyzer specified to disable"); - return val_mgr->GetBool(0); - } - - a->Remove(); - return val_mgr->GetBool(1); - %} - -## Informs Bro that it should skip any further processing of the contents of -## a given connection. In particular, Bro will refrain from reassembling the -## TCP byte stream and from generating events relating to any analyzers that -## have been processing the connection. -## -## cid: The connection ID. -## -## Returns: False if *cid* does not point to an active connection, and true -## otherwise. -## -## .. note:: -## -## Bro will still generate connection-oriented events such as -## :bro:id:`connection_finished`. -function skip_further_processing%(cid: conn_id%): bool - %{ - Connection* c = sessions->FindConnection(cid); - if ( ! c ) - return val_mgr->GetBool(0); - - c->SetSkip(1); - return val_mgr->GetBool(1); - %} - -## Controls whether packet contents belonging to a connection should be -## recorded (when ``-w`` option is provided on the command line). -## -## cid: The connection identifier. -## -## do_record: True to enable packet contents, and false to disable for the -## connection identified by *cid*. -## -## Returns: False if *cid* does not point to an active connection, and true -## otherwise. -## -## .. bro:see:: skip_further_processing -## -## .. note:: -## -## This is independent of whether Bro processes the packets of this -## connection, which is controlled separately by -## :bro:id:`skip_further_processing`. -## -## .. bro:see:: get_contents_file set_contents_file -function set_record_packets%(cid: conn_id, do_record: bool%): bool - %{ - Connection* c = sessions->FindConnection(cid); - if ( ! c ) - return val_mgr->GetBool(0); - - c->SetRecordPackets(do_record); - return val_mgr->GetBool(1); - %} - -## Sets an individual inactivity timeout for a connection and thus -## overrides the global inactivity timeout. -## -## cid: The connection ID. -## -## t: The new inactivity timeout for the connection identified by *cid*. -## -## Returns: The previous timeout interval. -function set_inactivity_timeout%(cid: conn_id, t: interval%): interval - %{ - Connection* c = sessions->FindConnection(cid); - if ( ! c ) - return new Val(0.0, TYPE_INTERVAL); - - double old_timeout = c->InactivityTimeout(); - c->SetInactivityTimeout(t); - - return new Val(old_timeout, TYPE_INTERVAL); - %} - -# =========================================================================== -# -# Files and Directories -# -# =========================================================================== - -## Opens a file for writing. If a file with the same name already exists, this -## function overwrites it (as opposed to :bro:id:`open_for_append`). -## -## f: The path to the file. -## -## Returns: A :bro:type:`file` handle for subsequent operations. -## -## .. bro:see:: active_file open_for_append close write_file -## get_file_name set_buf flush_all mkdir enable_raw_output -## rmdir unlink rename -function open%(f: string%): file - %{ - const char* file = f->CheckString(); - - if ( streq(file, "-") ) - return new Val(new BroFile(stdout, "-", "w")); - else - return new Val(new BroFile(file, "w")); - %} - -## Opens a file for writing or appending. If a file with the same name already -## exists, this function appends to it (as opposed to :bro:id:`open`). 
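A sketch of the per-connection control BIFs documented above, driven from a protocol_confirmation handler; the analyzer choice and the 5 min timeout are arbitrary illustrative picks::

    event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count)
        {
        if ( atype == Analyzer::ANALYZER_HTTP )
            {
            # Detach just this analyzer from this connection.
            disable_analyzer(c$id, aid);

            # Stop content analysis, stop recording its packets (-w),
            # and shorten its inactivity timeout.
            skip_further_processing(c$id);
            set_record_packets(c$id, F);
            set_inactivity_timeout(c$id, 5 min);
            }
        }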
-## -## f: The path to the file. -## -## Returns: A :bro:type:`file` handle for subsequent operations. -## -## .. bro:see:: active_file open close write_file -## get_file_name set_buf flush_all mkdir enable_raw_output -## rmdir unlink rename -function open_for_append%(f: string%): file - %{ - return new Val(new BroFile(f->CheckString(), "a")); - %} - -## Closes an open file and flushes any buffered content. -## -## f: A :bro:type:`file` handle to an open file. -## -## Returns: True on success. -## -## .. bro:see:: active_file open open_for_append write_file -## get_file_name set_buf flush_all mkdir enable_raw_output -## rmdir unlink rename -function close%(f: file%): bool - %{ - return val_mgr->GetBool(f->Close()); - %} - -## Writes data to an open file. -## -## f: A :bro:type:`file` handle to an open file. -## -## data: The data to write to *f*. -## -## Returns: True on success. -## -## .. bro:see:: active_file open open_for_append close -## get_file_name set_buf flush_all mkdir enable_raw_output -## rmdir unlink rename -function write_file%(f: file, data: string%): bool - %{ - if ( ! f ) - return val_mgr->GetBool(0); - - return val_mgr->GetBool(f->Write((const char*) data->Bytes(), data->Len())); - %} - -## Alters the buffering behavior of a file. -## -## f: A :bro:type:`file` handle to an open file. -## -## buffered: When true, *f* is fully buffered, i.e., bytes are saved in a -## buffer until the block size has been reached. When -## false, *f* is line buffered, i.e., bytes are saved up until a -## newline occurs. -## -## .. bro:see:: active_file open open_for_append close -## get_file_name write_file flush_all mkdir enable_raw_output -## rmdir unlink rename -function set_buf%(f: file, buffered: bool%): any - %{ - f->SetBuf(buffered); - return val_mgr->GetTrue(); - %} - -## Flushes all open files to disk. -## -## Returns: True on success. -## -## .. bro:see:: active_file open open_for_append close -## get_file_name write_file set_buf mkdir enable_raw_output -## rmdir unlink rename -function flush_all%(%): bool - %{ - return val_mgr->GetBool(fflush(0) == 0); - %} - -## Creates a new directory. -## -## f: The directory name. -## -## Returns: True if the operation succeeds or if *f* already exists, -## and false if the file creation fails. -## -## .. bro:see:: active_file open_for_append close write_file -## get_file_name set_buf flush_all enable_raw_output -## rmdir unlink rename -function mkdir%(f: string%): bool - %{ - const char* filename = f->CheckString(); - - if ( mkdir(filename, 0777) < 0 ) - { - int error = errno; - struct stat filestat; - // check if already exists and is directory. - if ( errno == EEXIST && stat(filename, &filestat) == 0 - && S_ISDIR(filestat.st_mode) ) - return val_mgr->GetBool(1); - - builtin_error(fmt("cannot create directory '%s': %s", filename, - strerror(error))); - return val_mgr->GetBool(0); - } - else - return val_mgr->GetBool(1); - %} - - -## Removes a directory. -## -## d: The directory name. -## -## Returns: True if the operation succeeds, and false if the -## directory delete operation fails. -## -## .. bro:see:: active_file open_for_append close write_file -## get_file_name set_buf flush_all enable_raw_output -## mkdir unlink rename -function rmdir%(d: string%): bool - %{ - const char* dirname = d->CheckString(); - - if ( rmdir(dirname) < 0 ) - { - builtin_error(fmt("cannot remove directory '%s': %s", dirname, - strerror(errno))); - return val_mgr->GetBool(0); - } - else - return val_mgr->GetBool(1); - %} - -## Removes a file from a directory. 
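The file BIFs above compose as in this sketch; the paths and payload are illustrative::

    event bro_init()
        {
        local f: file = open_for_append("/tmp/example.log");  # open() would truncate instead
        set_buf(f, F);                        # line-buffered
        write_file(f, "one line\n");
        close(f);

        flush_all();                          # flush every open file

        if ( mkdir("/tmp/example-dir") )      # T if created or already a directory
            rmdir("/tmp/example-dir");
        }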
-## -## f: the file to delete. -## -## Returns: True if the operation succeeds and the file was deleted, -## and false if the deletion fails. -## -## .. bro:see:: active_file open_for_append close write_file -## get_file_name set_buf flush_all enable_raw_output -## mkdir rmdir rename -function unlink%(f: string%): bool - %{ - const char* filename = f->CheckString(); - - if ( unlink(filename) < 0 ) - { - builtin_error(fmt("cannot unlink file '%s': %s", filename, - strerror(errno))); - return val_mgr->GetBool(0); - } - else - return val_mgr->GetBool(1); - %} - -## Renames a file from src_f to dst_f. -## -## src_f: the name of the file to rename. -## -## dest_f: the name of the file after the rename operation. -## -## Returns: True if the rename succeeds and false otherwise. -## -## .. bro:see:: active_file open_for_append close write_file -## get_file_name set_buf flush_all enable_raw_output -## mkdir rmdir unlink -function rename%(src_f: string, dst_f: string%): bool - %{ - const char* src_filename = src_f->CheckString(); - const char* dst_filename = dst_f->CheckString(); - - if ( rename(src_filename, dst_filename) < 0 ) - { - builtin_error(fmt("cannot rename file '%s' to '%s': %s", src_filename, - dst_filename, strerror(errno))); - return val_mgr->GetBool(0); - } - else - return val_mgr->GetBool(1); - %} - -## Checks whether a given file is open. -## -## f: The file to check. -## -## Returns: True if *f* is an open :bro:type:`file`. -## -## .. todo:: Rename to ``is_open``. -function active_file%(f: file%): bool - %{ - return val_mgr->GetBool(f->IsOpen()); - %} - -## Gets the filename associated with a file handle. -## -## f: The file handle to inquire the name for. -## -## Returns: The filename associated with *f*. -## -## .. bro:see:: open -function get_file_name%(f: file%): string - %{ - if ( ! f ) - return val_mgr->GetEmptyString(); - - return new StringVal(f->Name()); - %} - -## Rotates a file. -## -## f: An open file handle. -## -## Returns: Rotation statistics which include the original file name, the name -## after the rotation, and the time when *f* was opened/closed. -## -## .. bro:see:: rotate_file_by_name calc_next_rotate -function rotate_file%(f: file%): rotate_info - %{ - RecordVal* info = f->Rotate(); - if ( info ) - return info; - - // Record indicating error. - info = new RecordVal(rotate_info); - info->Assign(0, val_mgr->GetEmptyString()); - info->Assign(1, val_mgr->GetEmptyString()); - info->Assign(2, new Val(0.0, TYPE_TIME)); - info->Assign(3, new Val(0.0, TYPE_TIME)); - - return info; - %} - -## Rotates a file identified by its name. -## -## f: The name of the file to rotate -## -## Returns: Rotation statistics which include the original file name, the name -## after the rotation, and the time when *f* was opened/closed. -## -## .. bro:see:: rotate_file calc_next_rotate -function rotate_file_by_name%(f: string%): rotate_info - %{ - RecordVal* info = new RecordVal(rotate_info); - - bool is_pkt_dumper = false; - bool is_addl_pkt_dumper = false; - - // Special case: one of current dump files. - if ( pkt_dumper && streq(pkt_dumper->Path().c_str(), f->CheckString()) ) - { - is_pkt_dumper = true; - pkt_dumper->Close(); - } - - if ( addl_pkt_dumper && - streq(addl_pkt_dumper->Path().c_str(), f->CheckString()) ) - { - is_addl_pkt_dumper = true; - addl_pkt_dumper->Close(); - } - - FILE* file = rotate_file(f->CheckString(), info); - if ( ! file ) - { - // Record indicating error. 
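 // Script-land sees the failure as a rotate_info with empty file names and
 // zero open/close timestamps, matching rotate_file()'s error result above.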
- info->Assign(0, val_mgr->GetEmptyString()); - info->Assign(1, val_mgr->GetEmptyString()); - info->Assign(2, new Val(0.0, TYPE_TIME)); - info->Assign(3, new Val(0.0, TYPE_TIME)); - return info; - } - - fclose(file); - - if ( is_pkt_dumper ) - { - info->Assign(2, new Val(pkt_dumper->OpenTime(), TYPE_TIME)); - pkt_dumper->Open(); - } - - if ( is_addl_pkt_dumper ) - info->Assign(2, new Val(addl_pkt_dumper->OpenTime(), TYPE_TIME)); - - return info; - %} - -## Calculates the duration until the next time a file is to be rotated, based -## on a given rotate interval. -## -## i: The rotate interval to base the calculation on. -## -## Returns: The duration until the next file rotation time. -## -## .. bro:see:: rotate_file rotate_file_by_name -function calc_next_rotate%(i: interval%) : interval - %{ - const char* base_time = log_rotate_base_time ? - log_rotate_base_time->AsString()->CheckString() : 0; - - double base = parse_rotate_base_time(base_time); - return new Val(calc_next_rotate(network_time, i, base), TYPE_INTERVAL); - %} - -## Returns the size of a given file. -## -## f: The name of the file whose size to lookup. -## -## Returns: The size of *f* in bytes. -function file_size%(f: string%) : double - %{ - struct stat s; - - if ( stat(f->CheckString(), &s) < 0 ) - return new Val(-1.0, TYPE_DOUBLE); - - return new Val(double(s.st_size), TYPE_DOUBLE); - %} - -## Disables sending :bro:id:`print_hook` events to remote peers for a given -## file. In a -## distributed setup, communicating Bro instances generate the event -## :bro:id:`print_hook` for each print statement and send it to the remote -## side. When disabled for a particular file, these events will not be -## propagated to other peers. -## -## f: The file to disable :bro:id:`print_hook` events for. -## -## .. bro:see:: enable_raw_output -function disable_print_hook%(f: file%): any - %{ - f->DisablePrintHook(); - return 0; - %} - -## Prevents escaping of non-ASCII characters when writing to a file. -## This function is equivalent to :bro:attr:`&raw_output`. -## -## f: The file to disable raw output for. -## -## .. bro:see:: disable_print_hook -function enable_raw_output%(f: file%): any - %{ - f->EnableRawOutput(); - return 0; - %} - -# =========================================================================== -# -# Packet Filtering -# -# =========================================================================== - -## Installs a filter to drop packets from a given IP source address with -## a certain probability if none of a given set of TCP flags are set. -## Note that for IPv6 packets with a Destination options header that has -## the Home Address option, this filters out against that home address. -## -## ip: The IP address to drop. -## -## tcp_flags: If none of these TCP flags are set, drop packets from *ip* with -## probability *prob*. -## -## prob: The probability [0.0, 1.0] used to drop packets from *ip*. -## -## Returns: True (unconditionally). -## -## .. bro:see:: Pcap::precompile_pcap_filter -## Pcap::install_pcap_filter -## install_src_net_filter -## uninstall_src_addr_filter -## uninstall_src_net_filter -## install_dst_addr_filter -## install_dst_net_filter -## uninstall_dst_addr_filter -## uninstall_dst_net_filter -## Pcap::error -## -## .. todo:: The return value should be changed to any. 
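For the file-metadata and rotation helpers above, a minimal sketch (paths and the 1 hr rotation interval are illustrative)::

    event bro_init()
        {
        print file_size("/etc/passwd");           # size in bytes as a double, -1.0 on error
        print calc_next_rotate(1 hr);             # time left until the next rotation boundary

        local f: file = open("/tmp/example.log");
        print active_file(f), get_file_name(f);   # T, /tmp/example.log
        close(f);

        rename("/tmp/example.log", "/tmp/example.old");
        unlink("/tmp/example.old");
        }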
-function install_src_addr_filter%(ip: addr, tcp_flags: count, prob: double%) : bool - %{ - sessions->GetPacketFilter()->AddSrc(ip->AsAddr(), tcp_flags, prob); - return val_mgr->GetBool(1); - %} - -## Installs a filter to drop packets originating from a given subnet with -## a certain probability if none of a given set of TCP flags are set. -## -## snet: The subnet to drop packets from. -## -## tcp_flags: If none of these TCP flags are set, drop packets from *snet* with -## probability *prob*. -## -## prob: The probability [0.0, 1.0] used to drop packets from *snet*. -## -## Returns: True (unconditionally). -## -## .. bro:see:: Pcap::precompile_pcap_filter -## Pcap::install_pcap_filter -## install_src_addr_filter -## uninstall_src_addr_filter -## uninstall_src_net_filter -## install_dst_addr_filter -## install_dst_net_filter -## uninstall_dst_addr_filter -## uninstall_dst_net_filter -## Pcap::error -## -## .. todo:: The return value should be changed to any. -function install_src_net_filter%(snet: subnet, tcp_flags: count, prob: double%) : bool - %{ - sessions->GetPacketFilter()->AddSrc(snet, tcp_flags, prob); - return val_mgr->GetBool(1); - %} - -## Removes a source address filter. -## -## ip: The IP address for which a source filter was previously installed. -## -## Returns: True on success. -## -## .. bro:see:: Pcap::precompile_pcap_filter -## Pcap::install_pcap_filter -## install_src_addr_filter -## install_src_net_filter -## uninstall_src_net_filter -## install_dst_addr_filter -## install_dst_net_filter -## uninstall_dst_addr_filter -## uninstall_dst_net_filter -## Pcap::error -function uninstall_src_addr_filter%(ip: addr%) : bool - %{ - return val_mgr->GetBool(sessions->GetPacketFilter()->RemoveSrc(ip->AsAddr())); - %} - -## Removes a source subnet filter. -## -## snet: The subnet for which a source filter was previously installed. -## -## Returns: True on success. -## -## .. bro:see:: Pcap::precompile_pcap_filter -## Pcap::install_pcap_filter -## install_src_addr_filter -## install_src_net_filter -## uninstall_src_addr_filter -## install_dst_addr_filter -## install_dst_net_filter -## uninstall_dst_addr_filter -## uninstall_dst_net_filter -## Pcap::error -function uninstall_src_net_filter%(snet: subnet%) : bool - %{ - return val_mgr->GetBool(sessions->GetPacketFilter()->RemoveSrc(snet)); - %} - -## Installs a filter to drop packets destined to a given IP address with -## a certain probability if none of a given set of TCP flags are set. -## Note that for IPv6 packets with a routing type header and non-zero -## segments left, this filters out against the final destination of the -## packet according to the routing extension header. -## -## ip: Drop packets to this IP address. -## -## tcp_flags: If none of these TCP flags are set, drop packets to *ip* with -## probability *prob*. -## -## prob: The probability [0.0, 1.0] used to drop packets to *ip*. -## -## Returns: True (unconditionally). -## -## .. bro:see:: Pcap::precompile_pcap_filter -## Pcap::install_pcap_filter -## install_src_addr_filter -## install_src_net_filter -## uninstall_src_addr_filter -## uninstall_src_net_filter -## install_dst_net_filter -## uninstall_dst_addr_filter -## uninstall_dst_net_filter -## Pcap::error -## -## .. todo:: The return value should be changed to any. 
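The probabilistic drop filters above pair an install call with a matching uninstall; a sketch with illustrative addresses (a tcp_flags mask of 0 means no flag exempts a packet from dropping)::

    event bro_init()
        {
        # Drop 90% of packets from one host and all packets from a subnet.
        install_src_addr_filter(192.0.2.1, 0, 0.9);
        install_src_net_filter(198.51.100.0/24, 0, 1.0);
        }

    event bro_done()
        {
        uninstall_src_addr_filter(192.0.2.1);
        uninstall_src_net_filter(198.51.100.0/24);
        }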
-function install_dst_addr_filter%(ip: addr, tcp_flags: count, prob: double%) : bool - %{ - sessions->GetPacketFilter()->AddDst(ip->AsAddr(), tcp_flags, prob); - return val_mgr->GetBool(1); - %} - -## Installs a filter to drop packets destined to a given subnet with -## a certain probability if none of a given set of TCP flags are set. -## -## snet: Drop packets to this subnet. -## -## tcp_flags: If none of these TCP flags are set, drop packets to *snet* with -## probability *prob*. -## -## prob: The probability [0.0, 1.0] used to drop packets to *snet*. -## -## Returns: True (unconditionally). -## -## .. bro:see:: Pcap::precompile_pcap_filter -## Pcap::install_pcap_filter -## install_src_addr_filter -## install_src_net_filter -## uninstall_src_addr_filter -## uninstall_src_net_filter -## install_dst_addr_filter -## uninstall_dst_addr_filter -## uninstall_dst_net_filter -## Pcap::error -## -## .. todo:: The return value should be changed to any. -function install_dst_net_filter%(snet: subnet, tcp_flags: count, prob: double%) : bool - %{ - sessions->GetPacketFilter()->AddDst(snet, tcp_flags, prob); - return val_mgr->GetBool(1); - %} - -## Removes a destination address filter. -## -## ip: The IP address for which a destination filter was previously installed. -## -## Returns: True on success. -## -## .. bro:see:: Pcap::precompile_pcap_filter -## Pcap::install_pcap_filter -## install_src_addr_filter -## install_src_net_filter -## uninstall_src_addr_filter -## uninstall_src_net_filter -## install_dst_addr_filter -## install_dst_net_filter -## uninstall_dst_net_filter -## Pcap::error -function uninstall_dst_addr_filter%(ip: addr%) : bool - %{ - return val_mgr->GetBool(sessions->GetPacketFilter()->RemoveDst(ip->AsAddr())); - %} - -## Removes a destination subnet filter. -## -## snet: The subnet for which a destination filter was previously installed. -## -## Returns: True on success. -## -## .. bro:see:: Pcap::precompile_pcap_filter -## Pcap::install_pcap_filter -## install_src_addr_filter -## install_src_net_filter -## uninstall_src_addr_filter -## uninstall_src_net_filter -## install_dst_addr_filter -## install_dst_net_filter -## uninstall_dst_addr_filter -## Pcap::error -function uninstall_dst_net_filter%(snet: subnet%) : bool - %{ - return val_mgr->GetBool(sessions->GetPacketFilter()->RemoveDst(snet)); - %} - -# =========================================================================== -# -# Communication -# -# =========================================================================== - -## Enables the communication system. By default, the communication is off until -## explicitly enabled, and all other calls to communication-related functions -## will be ignored until done so. -function enable_communication%(%): any &deprecated - %{ - if ( bro_start_network_time != 0.0 ) - { - builtin_error("communication must be enabled in bro_init"); - return 0; - } - - if ( using_communication ) - // Ignore duplicate calls. - return 0; - - using_communication = 1; - remote_serializer->Enable(); - return 0; - %} - -## Flushes in-memory state tagged with the :bro:attr:`&persistent` attribute -## to disk. The function writes the state to the file ``.state/state.bst`` in -## the directory where Bro was started. -## -## Returns: True on success. -## -## .. bro:see:: rescan_state -function checkpoint_state%(%) : bool - %{ - return val_mgr->GetBool(persistence_serializer->WriteState(true)); - %} - -## Reads persistent state and populates the in-memory data structures -## accordingly. 
Persistent state is read from the ``.state`` directory. -## This function is the dual to :bro:id:`checkpoint_state`. -## -## Returns: True on success. -## -## .. bro:see:: checkpoint_state -function rescan_state%(%) : bool - %{ - return val_mgr->GetBool(persistence_serializer->ReadAll(false, true)); - %} - -## Writes the binary event stream generated by the core to a given file. -## Use the ``-x `` command line switch to replay saved events. -## -## filename: The name of the file which stores the events. -## -## Returns: True if opening the target file succeeds. -## -## .. bro:see:: capture_state_updates -function capture_events%(filename: string%) : bool - %{ - if ( ! event_serializer ) - event_serializer = new FileSerializer(); - else - event_serializer->Close(); - - return val_mgr->GetBool(event_serializer->Open( - (const char*) filename->CheckString())); - %} - -## Writes state updates generated by :bro:attr:`&synchronized` variables to a -## file. -## -## filename: The name of the file which stores the state updates. -## -## Returns: True if opening the target file succeeds. -## -## .. bro:see:: capture_events -function capture_state_updates%(filename: string%) : bool - %{ - if ( ! state_serializer ) - state_serializer = new FileSerializer(); - else - state_serializer->Close(); - - return val_mgr->GetBool(state_serializer->Open( - (const char*) filename->CheckString())); - %} - -## Establishes a connection to a remote Bro or Broccoli instance. -## -## ip: The IP address of the remote peer. -## -## zone_id: If *ip* is a non-global IPv6 address, a particular :rfc:`4007` -## ``zone_id`` can given here. An empty string, ``""``, means -## not to add any ``zone_id``. -## -## p: The port of the remote peer. -## -## our_class: If a non-empty string, then the remote (listening) peer checks it -## against its class name in its peer table and terminates the -## connection if they don't match. -## -## retry: If the connection fails, try to reconnect with the peer after this -## time interval. -## -## ssl: If true, use SSL to encrypt the session. -## -## Returns: A locally unique ID of the new peer. -## -## .. bro:see:: disconnect -## listen -## request_remote_events -## request_remote_sync -## request_remote_logs -## request_remote_events -## set_accept_state -## set_compression_level -## send_state -## send_id -function connect%(ip: addr, zone_id: string, p: port, our_class: string, retry: interval, ssl: bool%) : count &deprecated - %{ - return val_mgr->GetCount(uint32(remote_serializer->Connect(ip->AsAddr(), - zone_id->CheckString(), p->Port(), our_class->CheckString(), - retry, ssl))); - %} - -## Terminate the connection with a peer. -## -## p: The peer ID returned from :bro:id:`connect`. -## -## Returns: True on success. -## -## .. bro:see:: connect listen -function disconnect%(p: event_peer%) : bool &deprecated - %{ - RemoteSerializer::PeerID id = p->AsRecordVal()->Lookup(0)->AsCount(); - return val_mgr->GetBool(remote_serializer->CloseConnection(id)); - %} - -## Subscribes to all events from a remote peer whose names match a given -## pattern. -## -## p: The peer ID returned from :bro:id:`connect`. -## -## handlers: The pattern describing the events to request from peer *p*. -## -## Returns: True on success. -## -## .. 
bro:see:: request_remote_sync -## request_remote_logs -## set_accept_state -function request_remote_events%(p: event_peer, handlers: pattern%) : bool &deprecated - %{ - RemoteSerializer::PeerID id = p->AsRecordVal()->Lookup(0)->AsCount(); - return val_mgr->GetBool(remote_serializer->RequestEvents(id, handlers)); - %} - -## Requests synchronization of IDs with a remote peer. -## -## p: The peer ID returned from :bro:id:`connect`. -## -## auth: If true, the local instance considers its current state authoritative -## and sends it to *p* right after the handshake. -## -## Returns: True on success. -## -## .. bro:see:: request_remote_events -## request_remote_logs -## set_accept_state -function request_remote_sync%(p: event_peer, auth: bool%) : bool &deprecated - %{ - RemoteSerializer::PeerID id = p->AsRecordVal()->Lookup(0)->AsCount(); - return val_mgr->GetBool(remote_serializer->RequestSync(id, auth)); - %} - -## Requests logs from a remote peer. -## -## p: The peer ID returned from :bro:id:`connect`. -## -## Returns: True on success. -## -## .. bro:see:: request_remote_events -## request_remote_sync -function request_remote_logs%(p: event_peer%) : bool &deprecated - %{ - RemoteSerializer::PeerID id = p->AsRecordVal()->Lookup(0)->AsCount(); - return val_mgr->GetBool(remote_serializer->RequestLogs(id)); - %} - -## Sets a boolean flag indicating whether Bro accepts state from a remote peer. -## -## p: The peer ID returned from :bro:id:`connect`. -## -## accept: True if Bro accepts state from peer *p*, or false otherwise. -## -## Returns: True on success. -## -## .. bro:see:: request_remote_events -## request_remote_sync -## set_compression_level -function set_accept_state%(p: event_peer, accept: bool%) : bool &deprecated - %{ - RemoteSerializer::PeerID id = p->AsRecordVal()->Lookup(0)->AsCount(); - return val_mgr->GetBool(remote_serializer->SetAcceptState(id, accept)); - %} - -## Sets the compression level of the session with a remote peer. -## -## p: The peer ID returned from :bro:id:`connect`. -## -## level: Allowed values are in the range *[0, 9]*, where 0 is the default and -## means no compression. -## -## Returns: True on success. -## -## .. bro:see:: set_accept_state -function set_compression_level%(p: event_peer, level: count%) : bool &deprecated - %{ - RemoteSerializer::PeerID id = p->AsRecordVal()->Lookup(0)->AsCount(); - return val_mgr->GetBool(remote_serializer->SetCompressionLevel(id, level)); - %} - -## Listens on a given IP address and port for remote connections. -## -## ip: The IP address to bind to. -## -## p: The TCP port to listen on. -## -## ssl: If true, Bro uses SSL to encrypt the session. -## -## ipv6: If true, enable listening on IPv6 addresses. -## -## zone_id: If *ip* is a non-global IPv6 address, a particular :rfc:`4007` -## ``zone_id`` can given here. An empty string, ``""``, means -## not to add any ``zone_id``. -## -## retry_interval: If address *ip* is found to be already in use, this is -## the interval at which to automatically retry binding. -## -## Returns: True on success. -## -## .. bro:see:: connect disconnect -function listen%(ip: addr, p: port, ssl: bool, ipv6: bool, zone_id: string, retry_interval: interval%) : bool &deprecated - %{ - return val_mgr->GetBool(remote_serializer->Listen(ip->AsAddr(), p->Port(), ssl, ipv6, zone_id->CheckString(), retry_interval)); - %} - -## Checks whether the last raised event came from a remote peer. -## -## Returns: True if the last raised event came from a remote peer. 
-function is_remote_event%(%) : bool - %{ - return val_mgr->GetBool(mgr.CurrentSource() != SOURCE_LOCAL); - %} - -## Sends all persistent state to a remote peer. -## -## p: The peer ID returned from :bro:id:`connect`. -## -## Returns: True on success. -## -## .. bro:see:: send_id send_ping send_current_packet send_capture_filter -function send_state%(p: event_peer%) : bool - %{ - RemoteSerializer::PeerID id = p->AsRecordVal()->Lookup(0)->AsCount(); - return val_mgr->GetBool(persistence_serializer->SendState(id, true)); - %} - -## Sends a global identifier to a remote peer, which then might install it -## locally. -## -## p: The peer ID returned from :bro:id:`connect`. -## -## id: The identifier to send. -## -## Returns: True on success. -## -## .. bro:see:: send_state send_ping send_current_packet send_capture_filter -function send_id%(p: event_peer, id: string%) : bool &deprecated - %{ - RemoteSerializer::PeerID pid = p->AsRecordVal()->Lookup(0)->AsCount(); - - ID* i = global_scope()->Lookup(id->CheckString()); - if ( ! i ) - { - reporter->Error("send_id: no global id %s", id->CheckString()); - return val_mgr->GetBool(0); - } - - SerialInfo info(remote_serializer); - return val_mgr->GetBool(remote_serializer->SendID(&info, pid, *i)); - %} - -## Gracefully finishes communication by first making sure that all remaining -## data from parent and child has been sent out. -## -## Returns: True if the termination process has been started successfully. -function terminate_communication%(%) : bool &deprecated - %{ - return val_mgr->GetBool(remote_serializer->Terminate()); - %} - -## Signals a remote peer that the local Bro instance finished the initial -## handshake. -## -## p: The peer ID returned from :bro:id:`connect`. -## -## Returns: True on success. -function complete_handshake%(p: event_peer%) : bool &deprecated - %{ - RemoteSerializer::PeerID id = p->AsRecordVal()->Lookup(0)->AsCount(); - return val_mgr->GetBool(remote_serializer->CompleteHandshake(id)); - %} - -## Sends a ping event to a remote peer. In combination with an event handler -## for :bro:id:`remote_pong`, this function can be used to measure latency -## between two peers. -## -## p: The peer ID returned from :bro:id:`connect`. -## -## seq: A sequence number (also included by :bro:id:`remote_pong`). -## -## Returns: True if sending the ping succeeds. -## -## .. bro:see:: send_state send_id send_current_packet send_capture_filter -function send_ping%(p: event_peer, seq: count%) : bool &deprecated - %{ - RemoteSerializer::PeerID id = p->AsRecordVal()->Lookup(0)->AsCount(); - return val_mgr->GetBool(remote_serializer->SendPing(id, seq)); - %} - -## Sends the currently processed packet to a remote peer. -## -## p: The peer ID returned from :bro:id:`connect`. -## -## Returns: True if sending the packet succeeds. -## -## .. bro:see:: send_id send_state send_ping send_capture_filter -## dump_packet dump_current_packet get_current_packet -function send_current_packet%(p: event_peer%) : bool &deprecated - %{ - const Packet* pkt; - - if ( ! current_pktsrc || - ! current_pktsrc->GetCurrentPacket(&pkt) ) - return val_mgr->GetBool(0); - - RemoteSerializer::PeerID id = p->AsRecordVal()->Lookup(0)->AsCount(); - - SerialInfo info(remote_serializer); - return val_mgr->GetBool(remote_serializer->SendPacket(&info, id, *pkt)); - %} - -## Returns the peer who generated the last event. -## -## Note, this function is deprecated. It works correctly only for local events and -## events received through the legacy communication system. 
It does *not* work for -## events received through Broker and will report an error in that case. -## -## Returns: The ID of the peer who generated the last event. -## -## .. bro:see:: get_local_event_peer -function get_event_peer%(%) : event_peer &deprecated - %{ - SourceID src = mgr.CurrentSource(); - - if ( src == SOURCE_LOCAL ) - { - RecordVal* p = mgr.GetLocalPeerVal(); - Ref(p); - return p; - } - - if ( src == SOURCE_BROKER ) - { - reporter->Error("get_event_peer() does not support Broker events"); - RecordVal* p = mgr.GetLocalPeerVal(); - Ref(p); - return p; - } - - if ( ! remote_serializer ) - reporter->InternalError("remote_serializer not initialized"); - - Val* v = remote_serializer->GetPeerVal(src); - if ( ! v ) - { - reporter->Error("peer %d does not exist anymore", int(src)); - RecordVal* p = mgr.GetLocalPeerVal(); - Ref(p); - return p; - } - - return v; - %} - -## Returns the local peer ID. -## -## Returns: The peer ID of the local Bro instance. -## -## .. bro:see:: get_event_peer -function get_local_event_peer%(%) : event_peer &deprecated - %{ - RecordVal* p = mgr.GetLocalPeerVal(); - Ref(p); - return p; - %} - -## Sends a capture filter to a remote peer. -## -## p: The peer ID returned from :bro:id:`connect`. -## -## s: The capture filter. -## -## Returns: True if sending the packet succeeds. -## -## .. bro:see:: send_id send_state send_ping send_current_packet -function send_capture_filter%(p: event_peer, s: string%) : bool &deprecated - %{ - RemoteSerializer::PeerID id = p->AsRecordVal()->Lookup(0)->AsCount(); - return val_mgr->GetBool(remote_serializer->SendCaptureFilter(id, s->CheckString())); - %} - -## Stops Bro's packet processing. This function is used to synchronize -## distributed trace processing with communication enabled -## (*pseudo-realtime* mode). -## -## .. bro:see:: continue_processing suspend_state_updates resume_state_updates -function suspend_processing%(%) : any - %{ - net_suspend_processing(); - return 0; - %} - -## Resumes Bro's packet processing. -## -## .. bro:see:: suspend_processing suspend_state_updates resume_state_updates -function continue_processing%(%) : any - %{ - net_continue_processing(); - return 0; - %} - -## Stops propagating :bro:attr:`&synchronized` accesses. -## -## .. bro:see:: suspend_processing continue_processing resume_state_updates -function suspend_state_updates%(%) : any &deprecated - %{ - if ( remote_serializer ) - remote_serializer->SuspendStateUpdates(); - return 0; - %} - -## Resumes propagating :bro:attr:`&synchronized` accesses. -## -## .. bro:see:: suspend_processing continue_processing suspend_state_updates -function resume_state_updates%(%) : any &deprecated - %{ - if ( remote_serializer ) - remote_serializer->ResumeStateUpdates(); - return 0; - %} - -# =========================================================================== -# -# Internal Functions -# -# =========================================================================== - -## Manually triggers the signature engine for a given connection. -## This is an internal function. -function match_signatures%(c: connection, pattern_type: int, s: string, - bol: bool, eol: bool, - from_orig: bool, clear: bool%) : bool - %{ - if ( ! 
rule_matcher ) - return val_mgr->GetBool(0); - - c->Match((Rule::PatternType) pattern_type, s->Bytes(), s->Len(), - from_orig, bol, eol, clear); - - return val_mgr->GetBool(1); - %} - -# =========================================================================== -# -# Deprecated Functions -# -# =========================================================================== - - - -%%{ -#include "Anon.h" -%%} - -## Preserves the prefix of an IP address in anonymization. -## -## a: The address to preserve. -## -## width: The number of bits from the top that should remain intact. -## -## .. bro:see:: preserve_subnet anonymize_addr -## -## .. todo:: Currently dysfunctional. -function preserve_prefix%(a: addr, width: count%): any - %{ - AnonymizeIPAddr* ip_anon = ip_anonymizer[PREFIX_PRESERVING_A50]; - if ( ip_anon ) - { - if ( a->AsAddr().GetFamily() == IPv6 ) - builtin_error("preserve_prefix() not supported for IPv6 addresses"); - else - { - const uint32* bytes; - a->AsAddr().GetBytes(&bytes); - ip_anon->PreservePrefix(*bytes, width); - } - } - - - return 0; - %} - -## Preserves the prefix of a subnet in anonymization. -## -## a: The subnet to preserve. -## -## .. bro:see:: preserve_prefix anonymize_addr -## -## .. todo:: Currently dysfunctional. -function preserve_subnet%(a: subnet%): any - %{ - DEBUG_MSG("%s/%d\n", a->Prefix().AsString().c_str(), a->Width()); - AnonymizeIPAddr* ip_anon = ip_anonymizer[PREFIX_PRESERVING_A50]; - if ( ip_anon ) - { - if ( a->AsSubNet().Prefix().GetFamily() == IPv6 ) - builtin_error("preserve_subnet() not supported for IPv6 addresses"); - else - { - const uint32* bytes; - a->AsSubNet().Prefix().GetBytes(&bytes); - ip_anon->PreservePrefix(*bytes, a->AsSubNet().Length()); - } - } - - return 0; - %} - -## Anonymizes an IP address. -## -## a: The address to anonymize. -## -## cl: The anonymization class, which can take on three different values: -## -## - ``ORIG_ADDR``: Tag *a* as an originator address. -## -## - ``RESP_ADDR``: Tag *a* as an responder address. -## -## - ``OTHER_ADDR``: Tag *a* as an arbitrary address. -## -## Returns: An anonymized version of *a*. -## -## .. bro:see:: preserve_prefix preserve_subnet -## -## .. todo:: Currently dysfunctional. 
-function anonymize_addr%(a: addr, cl: IPAddrAnonymizationClass%): addr - %{ - int anon_class = cl->InternalInt(); - if ( anon_class < 0 || anon_class >= NUM_ADDR_ANONYMIZATION_CLASSES ) - builtin_error("anonymize_addr(): invalid ip addr anonymization class"); - - if ( a->AsAddr().GetFamily() == IPv6 ) - { - builtin_error("anonymize_addr() not supported for IPv6 addresses"); - return 0; - } - else - { - const uint32* bytes; - a->AsAddr().GetBytes(&bytes); - return new AddrVal(anonymize_ip(*bytes, - (enum ip_addr_anonymization_class_t) anon_class)); - } - %} diff --git a/src/broker/CMakeLists.txt b/src/broker/CMakeLists.txt index e3a3a73661..08e02597e7 100644 --- a/src/broker/CMakeLists.txt +++ b/src/broker/CMakeLists.txt @@ -1,4 +1,4 @@ -include(BroSubdir) +include(ZeekSubdir) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} diff --git a/src/broker/Data.cc b/src/broker/Data.cc index 754a51390b..d2e53fe45b 100644 --- a/src/broker/Data.cc +++ b/src/broker/Data.cc @@ -1,5 +1,9 @@ #include "Data.h" +#include "File.h" #include "broker/data.bif.h" + +#include + #include #include #include @@ -125,15 +129,6 @@ struct val_converter { return rval->Ref(); } - case TYPE_OPAQUE: - { - SerializationFormat* form = new BinarySerializationFormat(); - form->StartRead(a.data(), a.size()); - CloneSerializer ss(form); - UnserialInfo uinfo(&ss); - uinfo.cache = false; - return Val::Unserialize(&uinfo, type->Tag()); - } default: return nullptr; } @@ -433,6 +428,8 @@ struct val_converter { auto rval = new PatternVal(re); return rval; } + else if ( type->Tag() == TYPE_OPAQUE ) + return OpaqueVal::Unserialize(a); return nullptr; } @@ -507,16 +504,6 @@ struct type_checker { return true; } - case TYPE_OPAQUE: - { - // TODO - SerializationFormat* form = new BinarySerializationFormat(); - form->StartRead(a.data(), a.size()); - CloneSerializer ss(form); - UnserialInfo uinfo(&ss); - uinfo.cache = false; - return Val::Unserialize(&uinfo, type->Tag()); - } default: return false; } @@ -763,6 +750,15 @@ struct type_checker { return true; } + else if ( type->Tag() == TYPE_OPAQUE ) + { + // TODO: Could avoid doing the full unserialization here + // and just check if the type is a correct match. + auto ov = OpaqueVal::Unserialize(a); + auto rval = ov != nullptr; + Unref(ov); + return rval; + } return false; } @@ -979,21 +975,14 @@ broker::expected bro_broker::val_to_data(Val* v) } case TYPE_OPAQUE: { - SerializationFormat* form = new BinarySerializationFormat(); - form->StartWrite(); - CloneSerializer ss(form); - SerialInfo sinfo(&ss); - sinfo.cache = false; - sinfo.include_locations = false; + auto c = v->AsOpaqueVal()->Serialize(); + if ( ! c ) + { + reporter->Error("unsupported opaque type for serialization"); + break; + } - if ( ! v->Serialize(&sinfo) ) - return broker::ec::invalid_data; - - char* data; - uint32 len = form->EndWrite(&data); - string rval(data, len); - free(data); - return {std::move(rval)}; + return {c}; } default: reporter->Error("unsupported Broker::Data type: %s", @@ -1130,39 +1119,114 @@ Val* bro_broker::DataVal::castTo(BroType* t) return data_to_val(data, t); } -IMPLEMENT_SERIAL(bro_broker::DataVal, SER_COMM_DATA_VAL); +IMPLEMENT_OPAQUE_VALUE(bro_broker::DataVal) -bool bro_broker::DataVal::DoSerialize(SerialInfo* info) const +broker::expected bro_broker::DataVal::DoSerialize() const { - DO_SERIALIZE(SER_COMM_DATA_VAL, OpaqueVal); - - std::string buffer; - caf::containerbuf sb{buffer}; - caf::stream_serializer&> serializer{sb}; - serializer << data; - - if ( ! 
SERIALIZE_STR(buffer.data(), buffer.size()) ) - return false; + return data; + } +bool bro_broker::DataVal::DoUnserialize(const broker::data& data_) + { + data = data_; return true; } -bool bro_broker::DataVal::DoUnserialize(UnserialInfo* info) +IMPLEMENT_OPAQUE_VALUE(bro_broker::SetIterator) + +broker::expected bro_broker::SetIterator::DoSerialize() const { - DO_UNSERIALIZE(OpaqueVal); + return broker::vector{dat, *it}; + } - const char* serial; - int len; - - if ( ! UNSERIALIZE_STR(&serial, &len) ) +bool bro_broker::SetIterator::DoUnserialize(const broker::data& data) + { + auto v = caf::get_if(&data); + if ( ! (v && v->size() == 2) ) return false; - caf::arraybuf sb{const_cast(serial), // will not write - static_cast(len)}; - caf::stream_deserializer&> deserializer{sb}; - deserializer >> data; + auto x = caf::get_if(&(*v)[0]); - delete [] serial; + // We set the iterator by finding the element it used to point to. + // This is not perfect, as there's no guarantee that the restored + // container will list the elements in the same order. But it's as + // good as we can do, and it should generally work out. + if( x->find((*v)[1]) == x->end() ) + return false; + + dat = *x; + it = dat.find((*v)[1]); + return true; + } + +IMPLEMENT_OPAQUE_VALUE(bro_broker::TableIterator) + +broker::expected bro_broker::TableIterator::DoSerialize() const + { + return broker::vector{dat, it->first}; + } + +bool bro_broker::TableIterator::DoUnserialize(const broker::data& data) + { + auto v = caf::get_if(&data); + if ( ! (v && v->size() == 2) ) + return false; + + auto x = caf::get_if(&(*v)[0]); + + // We set the iterator by finding the element it used to point to. + // This is not perfect, as there's no guarantee that the restored + // container will list the elements in the same order. But it's as + // good as we can do, and it should generally work out. + if( x->find((*v)[1]) == x->end() ) + return false; + + dat = *x; + it = dat.find((*v)[1]); + return true; + } + +IMPLEMENT_OPAQUE_VALUE(bro_broker::VectorIterator) + +broker::expected bro_broker::VectorIterator::DoSerialize() const + { + broker::integer difference = it - dat.begin(); + return broker::vector{dat, difference}; + } + +bool bro_broker::VectorIterator::DoUnserialize(const broker::data& data) + { + auto v = caf::get_if(&data); + if ( ! (v && v->size() == 2) ) + return false; + + auto x = caf::get_if(&(*v)[0]); + auto y = caf::get_if(&(*v)[1]); + + dat = *x; + it = dat.begin() + *y; + return true; + } + +IMPLEMENT_OPAQUE_VALUE(bro_broker::RecordIterator) + +broker::expected bro_broker::RecordIterator::DoSerialize() const + { + broker::integer difference = it - dat.begin(); + return broker::vector{dat, difference}; + } + +bool bro_broker::RecordIterator::DoUnserialize(const broker::data& data) + { + auto v = caf::get_if(&data); + if ( ! 
(v && v->size() == 2) ) + return false; + + auto x = caf::get_if(&(*v)[0]); + auto y = caf::get_if(&(*v)[1]); + + dat = *x; + it = dat.begin() + *y; return true; } diff --git a/src/broker/Data.h b/src/broker/Data.h index e2a5968a82..b134656123 100644 --- a/src/broker/Data.h +++ b/src/broker/Data.h @@ -1,8 +1,10 @@ #ifndef BRO_COMM_DATA_H #define BRO_COMM_DATA_H -#include -#include "Val.h" +#include +#include + +#include "OpaqueVal.h" #include "Reporter.h" #include "Frame.h" #include "Expr.h" @@ -120,14 +122,14 @@ public: return script_data_type; } - DECLARE_SERIAL(DataVal); - broker::data data; protected: DataVal() {} + DECLARE_OPAQUE_VALUE(bro_broker::DataVal) + static BroType* script_data_type; }; @@ -242,6 +244,10 @@ public: broker::set dat; broker::set::iterator it; + +protected: + SetIterator() {} + DECLARE_OPAQUE_VALUE(bro_broker::SetIterator) }; class TableIterator : public OpaqueVal { @@ -255,6 +261,10 @@ public: broker::table dat; broker::table::iterator it; + +protected: + TableIterator() {} + DECLARE_OPAQUE_VALUE(bro_broker::TableIterator) }; class VectorIterator : public OpaqueVal { @@ -268,6 +278,10 @@ public: broker::vector dat; broker::vector::iterator it; + +protected: + VectorIterator() {} + DECLARE_OPAQUE_VALUE(bro_broker::VectorIterator) }; class RecordIterator : public OpaqueVal { @@ -281,6 +295,10 @@ public: broker::vector dat; broker::vector::iterator it; + +protected: + RecordIterator() {} + DECLARE_OPAQUE_VALUE(bro_broker::RecordIterator) }; } // namespace bro_broker diff --git a/src/broker/Manager.cc b/src/broker/Manager.cc index d31198ced7..e4ef3d570c 100644 --- a/src/broker/Manager.cc +++ b/src/broker/Manager.cc @@ -1,6 +1,6 @@ #include -#include +#include #include #include #include @@ -18,19 +18,12 @@ #include "logging/Manager.h" #include "DebugLogger.h" #include "iosource/Manager.h" +#include "SerializationFormat.h" using namespace std; namespace bro_broker { -// Max number of log messages buffered per stream before we send them out as -// a batch. -static const int LOG_BATCH_SIZE = 400; - -// Max secs to buffer log messages before sending the current set out as a -// batch. -static const double LOG_BUFFER_INTERVAL = 1.0; - static inline Val* get_option(const char* option) { auto id = global_scope()->Lookup(option); @@ -91,17 +84,17 @@ struct scoped_reporter_location { }; #ifdef DEBUG -static std::string RenderMessage(std::string topic, broker::data x) +static std::string RenderMessage(std::string topic, const broker::data& x) { return fmt("%s -> %s", broker::to_string(x).c_str(), topic.c_str()); } -static std::string RenderEvent(std::string topic, std::string name, broker::data args) +static std::string RenderEvent(std::string topic, std::string name, const broker::data& args) { return fmt("%s(%s) -> %s", name.c_str(), broker::to_string(args).c_str(), topic.c_str()); } -static std::string RenderMessage(broker::store::response x) +static std::string RenderMessage(const broker::store::response& x) { return fmt("%s [id %" PRIu64 "]", (x.answer ? 
broker::to_string(*x.answer).c_str() : ""), x.id); } @@ -138,8 +131,11 @@ Manager::Manager(bool arg_reading_pcaps) { bound_port = 0; reading_pcaps = arg_reading_pcaps; - after_bro_init = false; + after_zeek_init = false; peer_count = 0; + times_processed_without_idle = 0; + log_batch_size = 0; + log_batch_interval = 0; log_topic_func = nullptr; vector_of_data_type = nullptr; log_id_type = nullptr; @@ -156,6 +152,8 @@ void Manager::InitPostScript() { DBG_LOG(DBG_BROKER, "Initializing"); + log_batch_size = get_option("Broker::log_batch_size")->AsCount(); + log_batch_interval = get_option("Broker::log_batch_interval")->AsInterval(); default_log_topic_prefix = get_option("Broker::default_log_topic_prefix")->AsString()->CheckString(); log_topic_func = get_option("Broker::log_topic")->AsFunc(); @@ -180,7 +178,7 @@ void Manager::InitPostScript() BrokerConfig config{std::move(options)}; - auto max_threads_env = getenv("BRO_BROKER_MAX_THREADS"); + auto max_threads_env = zeekenv("ZEEK_BROKER_MAX_THREADS"); if ( max_threads_env ) config.set("scheduler.max-threads", atoi(max_threads_env)); @@ -306,7 +304,7 @@ void Manager::Peer(const string& addr, uint16_t port, double retry) DBG_LOG(DBG_BROKER, "Starting to peer with %s:%" PRIu16, addr.c_str(), port); - auto e = getenv("BRO_DEFAULT_CONNECT_RETRY"); + auto e = zeekenv("ZEEK_DEFAULT_CONNECT_RETRY"); if ( e ) retry = atoi(e); @@ -360,8 +358,8 @@ bool Manager::PublishEvent(string topic, std::string name, broker::vector args) DBG_LOG(DBG_BROKER, "Publishing event: %s", RenderEvent(topic, name, args).c_str()); - broker::bro::Event ev(std::move(name), std::move(args)); - bstate->endpoint.publish(move(topic), std::move(ev)); + broker::zeek::Event ev(std::move(name), std::move(args)); + bstate->endpoint.publish(move(topic), ev.move_data()); ++statistics.num_events_outgoing; return true; } @@ -421,10 +419,10 @@ bool Manager::PublishIdentifier(std::string topic, std::string id) return false; } - broker::bro::IdentifierUpdate msg(move(id), move(*data)); + broker::zeek::IdentifierUpdate msg(move(id), move(*data)); DBG_LOG(DBG_BROKER, "Publishing id-update: %s", - RenderMessage(topic, msg).c_str()); - bstate->endpoint.publish(move(topic), move(msg)); + RenderMessage(topic, msg.as_data()).c_str()); + bstate->endpoint.publish(move(topic), msg.move_data()); ++statistics.num_ids_outgoing; return true; } @@ -472,16 +470,16 @@ bool Manager::PublishLogCreate(EnumVal* stream, EnumVal* writer, std::string topic = default_log_topic_prefix + stream_id; auto bstream_id = broker::enum_value(move(stream_id)); auto bwriter_id = broker::enum_value(move(writer_id)); - broker::bro::LogCreate msg(move(bstream_id), move(bwriter_id), move(writer_info), move(fields_data)); + broker::zeek::LogCreate msg(move(bstream_id), move(bwriter_id), move(writer_info), move(fields_data)); - DBG_LOG(DBG_BROKER, "Publishing log creation: %s", RenderMessage(topic, msg).c_str()); + DBG_LOG(DBG_BROKER, "Publishing log creation: %s", RenderMessage(topic, msg.as_data()).c_str()); if ( peer.node != NoPeer.node ) // Direct message. - bstate->endpoint.publish(peer, move(topic), move(msg)); + bstate->endpoint.publish(peer, move(topic), msg.move_data()); else // Broadcast. 
- bstate->endpoint.publish(move(topic), move(msg)); + bstate->endpoint.publish(move(topic), msg.move_data()); return true; } @@ -540,9 +538,11 @@ bool Manager::PublishLogWrite(EnumVal* stream, EnumVal* writer, string path, int std::string serial_data(data, len); free(data); - val_list vl(2); - vl.append(stream->Ref()); - vl.append(new StringVal(path)); + val_list vl{ + stream->Ref(), + new StringVal(path), + }; + Val* v = log_topic_func->Call(&vl); if ( ! v ) @@ -558,10 +558,10 @@ bool Manager::PublishLogWrite(EnumVal* stream, EnumVal* writer, string path, int auto bstream_id = broker::enum_value(move(stream_id)); auto bwriter_id = broker::enum_value(move(writer_id)); - broker::bro::LogWrite msg(move(bstream_id), move(bwriter_id), move(path), + broker::zeek::LogWrite msg(move(bstream_id), move(bwriter_id), move(path), move(serial_data)); - DBG_LOG(DBG_BROKER, "Buffering log record: %s", RenderMessage(topic, msg).c_str()); + DBG_LOG(DBG_BROKER, "Buffering log record: %s", RenderMessage(topic, msg.as_data()).c_str()); if ( log_buffers.size() <= (unsigned int)stream_id_num ) log_buffers.resize(stream_id_num + 1); @@ -569,16 +569,16 @@ bool Manager::PublishLogWrite(EnumVal* stream, EnumVal* writer, string path, int auto& lb = log_buffers[stream_id_num]; ++lb.message_count; auto& pending_batch = lb.msgs[topic]; - pending_batch.emplace_back(std::move(msg)); + pending_batch.emplace_back(msg.move_data()); - if ( lb.message_count >= LOG_BATCH_SIZE || - (network_time - lb.last_flush >= LOG_BUFFER_INTERVAL) ) - statistics.num_logs_outgoing += lb.Flush(bstate->endpoint); + if ( lb.message_count >= log_batch_size || + (network_time - lb.last_flush >= log_batch_interval ) ) + statistics.num_logs_outgoing += lb.Flush(bstate->endpoint, log_batch_size); return true; } -size_t Manager::LogBuffer::Flush(broker::endpoint& endpoint) +size_t Manager::LogBuffer::Flush(broker::endpoint& endpoint, size_t log_batch_size) { if ( endpoint.is_shutdown() ) return 0; @@ -592,10 +592,10 @@ size_t Manager::LogBuffer::Flush(broker::endpoint& endpoint) auto& topic = kv.first; auto& pending_batch = kv.second; broker::vector batch; - batch.reserve(LOG_BATCH_SIZE + 1); + batch.reserve(log_batch_size + 1); pending_batch.swap(batch); - broker::bro::Batch msg(std::move(batch)); - endpoint.publish(topic, move(msg)); + broker::zeek::Batch msg(std::move(batch)); + endpoint.publish(topic, msg.move_data()); } auto rval = message_count; @@ -610,7 +610,7 @@ size_t Manager::FlushLogBuffers() auto rval = 0u; for ( auto& lb : log_buffers ) - rval += lb.Flush(bstate->endpoint); + rval += lb.Flush(bstate->endpoint, log_batch_interval); return rval; } @@ -772,7 +772,16 @@ RecordVal* Manager::MakeEvent(val_list* args, Frame* frame) bool Manager::Subscribe(const string& topic_prefix) { DBG_LOG(DBG_BROKER, "Subscribing to topic prefix %s", topic_prefix.c_str()); - bstate->subscriber.add_topic(topic_prefix, ! after_bro_init); + bstate->subscriber.add_topic(topic_prefix, ! after_zeek_init); + + // For backward compatibility, we also may receive messages on + // "bro/" topic prefixes in addition to "zeek/". + if ( strncmp(topic_prefix.data(), "zeek/", 5) == 0 ) + { + std::string alt_topic = "bro/" + topic_prefix.substr(5); + bstate->subscriber.add_topic(std::move(alt_topic), ! after_zeek_init); + } + return true; } @@ -799,22 +808,15 @@ bool Manager::Unsubscribe(const string& topic_prefix) } DBG_LOG(DBG_BROKER, "Unsubscribing from topic prefix %s", topic_prefix.c_str()); - bstate->subscriber.remove_topic(topic_prefix, ! 
after_bro_init); + bstate->subscriber.remove_topic(topic_prefix, ! after_zeek_init); return true; } void Manager::GetFds(iosource::FD_Set* read, iosource::FD_Set* write, iosource::FD_Set* except) { - if ( bstate->status_subscriber.available() || bstate->subscriber.available() ) - SetIdle(false); - read->Insert(bstate->subscriber.fd()); read->Insert(bstate->status_subscriber.fd()); - write->Insert(bstate->subscriber.fd()); - write->Insert(bstate->status_subscriber.fd()); - except->Insert(bstate->subscriber.fd()); - except->Insert(bstate->status_subscriber.fd()); for ( auto& x : data_stores ) read->Insert(x.second->proxy.mailbox().descriptor()); @@ -822,48 +824,39 @@ void Manager::GetFds(iosource::FD_Set* read, iosource::FD_Set* write, double Manager::NextTimestamp(double* local_network_time) { - if ( ! IsIdle() ) - return timer_mgr->Time(); - - if ( bstate->status_subscriber.available() || bstate->subscriber.available() ) - return timer_mgr->Time(); - - for ( auto& s : data_stores ) - { - if ( ! s.second->proxy.mailbox().empty() ) - return timer_mgr->Time(); - } - - return -1; + // We're only asked for a timestamp if either (1) a FD was ready + // or (2) we're not idle (and we go idle if when Process is no-op), + // so there's no case where returning -1 to signify a skip will help. + return timer_mgr->Time(); } void Manager::DispatchMessage(const broker::topic& topic, broker::data msg) { - switch ( broker::bro::Message::type(msg) ) { - case broker::bro::Message::Type::Invalid: + switch ( broker::zeek::Message::type(msg) ) { + case broker::zeek::Message::Type::Invalid: reporter->Warning("received invalid broker message: %s", broker::to_string(msg).data()); break; - case broker::bro::Message::Type::Event: + case broker::zeek::Message::Type::Event: ProcessEvent(topic, std::move(msg)); break; - case broker::bro::Message::Type::LogCreate: + case broker::zeek::Message::Type::LogCreate: ProcessLogCreate(std::move(msg)); break; - case broker::bro::Message::Type::LogWrite: + case broker::zeek::Message::Type::LogWrite: ProcessLogWrite(std::move(msg)); break; - case broker::bro::Message::Type::IdentifierUpdate: + case broker::zeek::Message::Type::IdentifierUpdate: ProcessIdentifierUpdate(std::move(msg)); break; - case broker::bro::Message::Type::Batch: + case broker::zeek::Message::Type::Batch: { - broker::bro::Batch batch(std::move(msg)); + broker::zeek::Batch batch(std::move(msg)); if ( ! batch.valid() ) { @@ -934,24 +927,53 @@ void Manager::Process() for ( auto& s : data_stores ) { - while ( ! s.second->proxy.mailbox().empty() ) + auto num_available = s.second->proxy.mailbox().size(); + + if ( num_available > 0 ) { had_input = true; - auto response = s.second->proxy.receive(); - ProcessStoreResponse(s.second, move(response)); + auto responses = s.second->proxy.receive(num_available); + + for ( auto& r : responses ) + ProcessStoreResponse(s.second, move(r)); } } - SetIdle(! had_input); + if ( had_input ) + { + ++times_processed_without_idle; + + // The max number of Process calls allowed to happen in a row without + // idling is chosen a bit arbitrarily, except 12 is around half of the + // SELECT_FREQUENCY (25). + // + // But probably the general idea should be for it to have some relation + // to the SELECT_FREQUENCY: less than it so other busy IOSources can + // fit several Process loops in before the next poll event (e.g. the + // select() call ), but still large enough such that we don't have to + // wait long before the next poll ourselves after being forced to idle. 
+ if ( times_processed_without_idle > 12 ) + { + times_processed_without_idle = 0; + SetIdle(true); + } + else + SetIdle(false); + } + else + { + times_processed_without_idle = 0; + SetIdle(true); + } } -void Manager::ProcessEvent(const broker::topic& topic, broker::bro::Event ev) +void Manager::ProcessEvent(const broker::topic& topic, broker::zeek::Event ev) { if ( ! ev.valid() ) { reporter->Warning("received invalid broker Event: %s", - broker::to_string(ev).data()); + broker::to_string(ev.as_data()).data()); return; } @@ -993,7 +1015,7 @@ void Manager::ProcessEvent(const broker::topic& topic, broker::bro::Event ev) return; } - auto vl = new val_list; + val_list vl(args.size()); for ( auto i = 0u; i < args.size(); ++i ) { @@ -1002,7 +1024,7 @@ void Manager::ProcessEvent(const broker::topic& topic, broker::bro::Event ev) auto val = data_to_val(std::move(args[i]), expected_type); if ( val ) - vl->append(val); + vl.append(val); else { reporter->Warning("failed to convert remote event '%s' arg #%d," @@ -1013,15 +1035,18 @@ void Manager::ProcessEvent(const broker::topic& topic, broker::bro::Event ev) } } - if ( static_cast(vl->length()) == args.size() ) - mgr.QueueEvent(handler, vl, SOURCE_BROKER); + if ( static_cast(vl.length()) == args.size() ) + mgr.QueueEventFast(handler, std::move(vl), SOURCE_BROKER); else - delete_vals(vl); + { + loop_over_list(vl, i) + Unref(vl[i]); + } } -bool bro_broker::Manager::ProcessLogCreate(broker::bro::LogCreate lc) +bool bro_broker::Manager::ProcessLogCreate(broker::zeek::LogCreate lc) { - DBG_LOG(DBG_BROKER, "Received log-create: %s", RenderMessage(lc).c_str()); + DBG_LOG(DBG_BROKER, "Received log-create: %s", RenderMessage(lc.as_data()).c_str()); if ( ! lc.valid() ) { reporter->Warning("received invalid broker LogCreate: %s", @@ -1089,9 +1114,9 @@ bool bro_broker::Manager::ProcessLogCreate(broker::bro::LogCreate lc) return true; } -bool bro_broker::Manager::ProcessLogWrite(broker::bro::LogWrite lw) +bool bro_broker::Manager::ProcessLogWrite(broker::zeek::LogWrite lw) { - DBG_LOG(DBG_BROKER, "Received log-write: %s", RenderMessage(lw).c_str()); + DBG_LOG(DBG_BROKER, "Received log-write: %s", RenderMessage(lw.as_data()).c_str()); if ( ! lw.valid() ) { @@ -1176,9 +1201,9 @@ bool bro_broker::Manager::ProcessLogWrite(broker::bro::LogWrite lw) return true; } -bool Manager::ProcessIdentifierUpdate(broker::bro::IdentifierUpdate iu) +bool Manager::ProcessIdentifierUpdate(broker::zeek::IdentifierUpdate iu) { - DBG_LOG(DBG_BROKER, "Received id-update: %s", RenderMessage(iu).c_str()); + DBG_LOG(DBG_BROKER, "Received id-update: %s", RenderMessage(iu.as_data()).c_str()); if ( ! iu.valid() ) { @@ -1242,6 +1267,9 @@ void Manager::ProcessStatus(broker::status stat) break; } + if ( ! event ) + return; + auto ei = internal_type("Broker::EndpointInfo")->AsRecordType(); auto endpoint_info = new RecordVal(ei); @@ -1270,11 +1298,7 @@ void Manager::ProcessStatus(broker::status stat) auto str = stat.message(); auto msg = new StringVal(str ? 
*str : ""); - auto vl = new val_list; - vl->append(endpoint_info); - vl->append(msg); - - mgr.QueueEvent(event, vl); + mgr.QueueEventFast(event, {endpoint_info, msg}); } void Manager::ProcessError(broker::error err) @@ -1351,10 +1375,10 @@ void Manager::ProcessError(broker::error err) msg = fmt("[%s] %s", caf::to_string(err.category()).c_str(), caf::to_string(err.context()).c_str()); } - auto vl = new val_list; - vl->append(BifType::Enum::Broker::ErrorCode->GetVal(ec)); - vl->append(new StringVal(msg)); - mgr.QueueEvent(Broker::error, vl); + mgr.QueueEventFast(Broker::error, { + BifType::Enum::Broker::ErrorCode->GetVal(ec), + new StringVal(msg), + }); } void Manager::ProcessStoreResponse(StoreHandleVal* s, broker::store::response response) diff --git a/src/broker/Manager.h b/src/broker/Manager.h index 87aba80058..569355b533 100644 --- a/src/broker/Manager.h +++ b/src/broker/Manager.h @@ -1,8 +1,18 @@ #ifndef BRO_COMM_MANAGER_H #define BRO_COMM_MANAGER_H -#include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include #include @@ -13,6 +23,7 @@ #include "Reporter.h" #include "iosource/IOSource.h" #include "Val.h" +#include "logging/WriterBackend.h" namespace bro_broker { @@ -66,8 +77,8 @@ public: */ void InitPostScript(); - void BroInitDone() - { after_bro_init = true; } + void ZeekInitDone() + { after_zeek_init = true; } /** * Shuts Broker down at termination. @@ -101,7 +112,7 @@ public: * @param addr an address to connect to, e.g. "localhost" or "127.0.0.1". * @param port the TCP port on which the remote side is listening. * @param retry If non-zero, the time after which to retry if - * connection cannot be established, or breaks. BRO_DEFAULT_CONNECT_RETRY + * connection cannot be established, or breaks. ZEEK_DEFAULT_CONNECT_RETRY * environment variable overrides this value. 
*/ void Peer(const std::string& addr, uint16_t port, double retry = 10.0); @@ -323,10 +334,10 @@ public: private: void DispatchMessage(const broker::topic& topic, broker::data msg); - void ProcessEvent(const broker::topic& topic, broker::bro::Event ev); - bool ProcessLogCreate(broker::bro::LogCreate lc); - bool ProcessLogWrite(broker::bro::LogWrite lw); - bool ProcessIdentifierUpdate(broker::bro::IdentifierUpdate iu); + void ProcessEvent(const broker::topic& topic, broker::zeek::Event ev); + bool ProcessLogCreate(broker::zeek::LogCreate lc); + bool ProcessLogWrite(broker::zeek::LogWrite lw); + bool ProcessIdentifierUpdate(broker::zeek::IdentifierUpdate iu); void ProcessStatus(broker::status stat); void ProcessError(broker::error err); void ProcessStoreResponse(StoreHandleVal*, broker::store::response response); @@ -352,7 +363,7 @@ private: double last_flush; size_t message_count; - size_t Flush(broker::endpoint& endpoint); + size_t Flush(broker::endpoint& endpoint, size_t batch_size); }; // Data stores @@ -380,9 +391,12 @@ private: uint16_t bound_port; bool reading_pcaps; - bool after_bro_init; + bool after_zeek_init; int peer_count; + int times_processed_without_idle; + size_t log_batch_size; + double log_batch_interval; Func* log_topic_func; VectorType* vector_of_data_type; EnumType* log_id_type; diff --git a/src/broker/Store.cc b/src/broker/Store.cc index 200e1b6abf..2f61b14d37 100644 --- a/src/broker/Store.cc +++ b/src/broker/Store.cc @@ -49,46 +49,18 @@ void StoreHandleVal::ValDescribe(ODesc* d) const d->Add("}"); } -IMPLEMENT_SERIAL(StoreHandleVal, SER_COMM_STORE_HANDLE_VAL); +IMPLEMENT_OPAQUE_VALUE(StoreHandleVal) -bool StoreHandleVal::DoSerialize(SerialInfo* info) const +broker::expected StoreHandleVal::DoSerialize() const { - DO_SERIALIZE(SER_COMM_STORE_HANDLE_VAL, OpaqueVal); - - auto name = store.name(); - if ( ! SERIALIZE_STR(name.data(), name.size()) ) - return false; - - return true; + // Cannot serialize. + return broker::ec::invalid_data; } -bool StoreHandleVal::DoUnserialize(UnserialInfo* info) +bool StoreHandleVal::DoUnserialize(const broker::data& data) { - DO_UNSERIALIZE(OpaqueVal); - - const char* name_str; - int len; - - if ( ! UNSERIALIZE_STR(&name_str, &len) ) - return false; - - std::string name(name_str, len); - delete [] name_str; - - auto handle = broker_mgr->LookupStore(name); - if ( ! handle ) - { - // Passing serialized version of store handles to other Bro processes - // doesn't make sense, only allow local clones of the handle val. - reporter->Error("failed to look up unserialized store handle %s", - name.c_str()); - return false; - } - - store = handle->store; - proxy = broker::store::proxy{store}; - - return true; + // Cannot unserialize. + return false; } broker::backend to_backend_type(BifEnum::Broker::BackendType type) diff --git a/src/broker/Store.h b/src/broker/Store.h index 1df60584fd..46d19ee923 100644 --- a/src/broker/Store.h +++ b/src/broker/Store.h @@ -5,10 +5,12 @@ #include "broker/data.bif.h" #include "Reporter.h" #include "Type.h" -#include "Val.h" +#include "OpaqueVal.h" #include "Trigger.h" -#include +#include +#include +#include namespace bro_broker { @@ -116,13 +118,13 @@ public: void ValDescribe(ODesc* d) const override; - DECLARE_SERIAL(StoreHandleVal); - broker::store store; broker::store::proxy proxy; protected: StoreHandleVal() = default; + + DECLARE_OPAQUE_VALUE(StoreHandleVal) }; // Helper function to construct a broker backend type from script land. 
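The Data.{h,cc} and Store.{h,cc} hunks above all follow the same recipe for the reworked OpaqueVal serialization: DECLARE_OPAQUE_VALUE() inside the class declaration, IMPLEMENT_OPAQUE_VALUE() in the implementation file, and a DoSerialize()/DoUnserialize() pair that produces and consumes broker::data directly. Below is a minimal sketch of that recipe for a hypothetical value type; the class name MyCounter, its constructor shape, and the exact headers pulled in are assumptions made for illustration and are not part of this patch.

// Hypothetical example type -- not part of this patch. Assumes "OpaqueVal.h"
// provides the macros and that <broker/data.hh> supplies broker::data and
// broker::count, matching their use elsewhere in this diff.
#include "OpaqueVal.h"

#include <broker/data.hh>
#include <cstdint>

class MyCounter : public OpaqueVal {
public:
	// The OpaqueType* argument is assumed; it mirrors how other OpaqueVal
	// subtypes register their script-land opaque type.
	explicit MyCounter(OpaqueType* t, uint64_t n) : OpaqueVal(t), count(n) {}

	uint64_t count;

protected:
	MyCounter()	{}	// default ctor so Unserialize() can re-create the value
	DECLARE_OPAQUE_VALUE(MyCounter)
};

// In the implementation file:
IMPLEMENT_OPAQUE_VALUE(MyCounter)

broker::expected<broker::data> MyCounter::DoSerialize() const
	{
	// Serialization now yields a broker::data value directly,
	// with no binary formatter in between.
	return broker::data{broker::count(count)};
	}

bool MyCounter::DoUnserialize(const broker::data& data)
	{
	// Same caf::get_if access pattern as the iterator types above.
	auto n = caf::get_if<broker::count>(&data);

	if ( ! n )
		return false;

	count = *n;
	return true;
	}

With this in place, cloning and sending such values through Broker both go through the same two hooks, which is why the patch can drop the per-type binary (un)serializers entirely.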
diff --git a/src/broker/comm.bif b/src/broker/comm.bif index 19be90befc..660701e058 100644 --- a/src/broker/comm.bif +++ b/src/broker/comm.bif @@ -1,5 +1,5 @@ -##! Functions and events regarding Bro's broker communication mechanisms. +##! Functions and events regarding broker communication mechanisms. %%{ #include "broker/Manager.h" diff --git a/src/broker/data.bif b/src/broker/data.bif index 2f6dc2cd77..f0862c0f66 100644 --- a/src/broker/data.bif +++ b/src/broker/data.bif @@ -7,8 +7,8 @@ module Broker; -## Enumerates the possible types that :bro:see:`Broker::Data` may be in -## terms of Bro data types. +## Enumerates the possible types that :zeek:see:`Broker::Data` may be in +## terms of Zeek data types. enum DataType %{ NONE, BOOL, @@ -41,6 +41,20 @@ function Broker::__data_type%(d: Broker::Data%): Broker::DataType return bro_broker::get_data_type(d->AsRecordVal(), frame); %} +# For testing only. +function Broker::__opaque_clone_through_serialization%(d: any%): any + %{ + auto x = d->AsOpaqueVal()->Serialize(); + + if ( ! x ) + { + builtin_error("cannot serialize object to clone"); + return val_mgr->GetFalse(); + } + + return OpaqueVal::Unserialize(std::move(*x)); + %} + function Broker::__set_create%(%): Broker::Data %{ return bro_broker::make_data_val(broker::set()); diff --git a/src/broker/messaging.bif b/src/broker/messaging.bif index ec7696c752..6c873c863a 100644 --- a/src/broker/messaging.bif +++ b/src/broker/messaging.bif @@ -74,7 +74,7 @@ module Broker; type Broker::Event: record; ## Create a data structure that may be used to send a remote event via -## :bro:see:`Broker::publish`. +## :zeek:see:`Broker::publish`. ## ## args: an event, followed by a list of argument values that may be used ## to call it. @@ -93,7 +93,7 @@ function Broker::make_event%(...%): Broker::Event ## topic: a topic associated with the event message. ## ## args: Either the event arguments as already made by -## :bro:see:`Broker::make_event` or the argument list to pass along +## :zeek:see:`Broker::make_event` or the argument list to pass along ## to it. ## ## Returns: true if the message is sent. @@ -172,7 +172,7 @@ type Cluster::Pool: record; ## script like "Intel::cluster_rr_key". ## ## args: Either the event arguments as already made by -## :bro:see:`Broker::make_event` or the argument list to pass along +## :zeek:see:`Broker::make_event` or the argument list to pass along ## to it. ## ## Returns: true if the message is sent. @@ -183,9 +183,7 @@ function Cluster::publish_rr%(pool: Pool, key: string, ...%): bool if ( ! topic_func ) topic_func = global_scope()->Lookup("Cluster::rr_topic")->ID_Val()->AsFunc(); - val_list vl(2); - vl.append(pool->Ref()); - vl.append(key->Ref()); + val_list vl{pool->Ref(), key->Ref()}; auto topic = topic_func->Call(&vl); if ( ! topic->AsString()->Len() ) @@ -215,7 +213,7 @@ function Cluster::publish_rr%(pool: Pool, key: string, ...%): bool ## distribute keys among available nodes. ## ## args: Either the event arguments as already made by -## :bro:see:`Broker::make_event` or the argument list to pass along +## :zeek:see:`Broker::make_event` or the argument list to pass along ## to it. ## ## Returns: true if the message is sent. @@ -226,9 +224,7 @@ function Cluster::publish_hrw%(pool: Pool, key: any, ...%): bool if ( ! topic_func ) topic_func = global_scope()->Lookup("Cluster::hrw_topic")->ID_Val()->AsFunc(); - val_list vl(2); - vl.append(pool->Ref()); - vl.append(key->Ref()); + val_list vl{pool->Ref(), key->Ref()}; auto topic = topic_func->Call(&vl); if ( ! 
topic->AsString()->Len() ) diff --git a/src/broxygen/CMakeLists.txt b/src/broxygen/CMakeLists.txt deleted file mode 100644 index f41cd68ff5..0000000000 --- a/src/broxygen/CMakeLists.txt +++ /dev/null @@ -1,25 +0,0 @@ -# See the file "COPYING" in the main distribution directory for copyright. - -include(BroSubdir) - -include_directories(BEFORE - ${CMAKE_CURRENT_SOURCE_DIR} - ${CMAKE_CURRENT_BINARY_DIR} -) - -set(broxygen_SRCS - Manager.cc - Info.h - PackageInfo.cc - ScriptInfo.cc - IdentifierInfo.cc - Target.cc - Configuration.cc - ReStructuredTextTable.cc - utils.cc -) - -bif_target(broxygen.bif) -bro_add_subdir_library(broxygen ${broxygen_SRCS}) - -add_dependencies(bro_broxygen generate_outputs) diff --git a/src/broxygen/IdentifierInfo.cc b/src/broxygen/IdentifierInfo.cc deleted file mode 100644 index afc0cf751a..0000000000 --- a/src/broxygen/IdentifierInfo.cc +++ /dev/null @@ -1,148 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. - -#include "IdentifierInfo.h" -#include "utils.h" - -#include "Desc.h" -#include "Val.h" - -using namespace std; -using namespace broxygen; - -IdentifierInfo::IdentifierInfo(ID* arg_id, ScriptInfo* script) - : Info(), - comments(), id(arg_id), initial_val_desc(), redefs(), fields(), - last_field_seen(), declaring_script(script) - { - Ref(id); - - if ( id->ID_Val() ) - { - ODesc d; - id->ID_Val()->Describe(&d); - initial_val_desc = d.Description(); - } - } - -IdentifierInfo::~IdentifierInfo() - { - Unref(id); - - for ( redef_list::const_iterator it = redefs.begin(); it != redefs.end(); - ++it ) - delete *it; - - for ( record_field_map::const_iterator it = fields.begin(); - it != fields.end(); ++it ) - delete it->second; - } - -void IdentifierInfo::AddRedef(const string& script, - const vector& comments) - { - Redefinition* redef = new Redefinition(); - redef->from_script = script; - - if ( id->ID_Val() ) - { - ODesc d; - id->ID_Val()->Describe(&d); - redef->new_val_desc = d.Description(); - } - - redef->comments = comments; - redefs.push_back(redef); - } - -void IdentifierInfo::AddRecordField(const TypeDecl* field, - const string& script, - vector& comments) - { - RecordField* rf = new RecordField(); - rf->field = new TypeDecl(*field); - rf->from_script = script; - rf->comments = comments; - fields[rf->field->id] = rf; - last_field_seen = rf; - } - -vector IdentifierInfo::GetComments() const - { - return comments; - } - -vector IdentifierInfo::GetFieldComments(const string& field) const - { - record_field_map::const_iterator it = fields.find(field); - - if ( it == fields.end() ) - return vector(); - - return it->second->comments; - } - -list -IdentifierInfo::GetRedefs(const string& from_script) const - { - list rval; - - for ( redef_list::const_iterator it = redefs.begin(); it != redefs.end(); - ++it ) - { - if ( from_script == (*it)->from_script ) - rval.push_back(*(*it)); - } - - return rval; - } - -string IdentifierInfo::GetDeclaringScriptForField(const string& field) const - { - record_field_map::const_iterator it = fields.find(field); - - if ( it == fields.end() ) - return ""; - - return it->second->from_script; - } - -string IdentifierInfo::DoReStructuredText(bool roles_only) const - { - ODesc d; - d.SetIndentSpaces(3); - d.SetQuotes(true); - id->DescribeReST(&d, roles_only); - - if ( comments.empty() ) - return d.Description(); - - d.ClearIndentLevel(); - d.PushIndent(); - - for ( size_t i = 0; i < comments.size(); ++i ) - { - if ( i > 0 ) - d.NL(); - - if ( IsFunc(id->Type()->Tag()) ) - { - string s = comments[i]; - - 
if ( broxygen::prettify_params(s) ) - d.NL(); - - d.Add(s.c_str()); - } - else - d.Add(comments[i].c_str()); - } - - return d.Description(); - } - -time_t IdentifierInfo::DoGetModificationTime() const - { - // Could probably get away with just checking the set of scripts that - // contributed to the ID declaration/redefinitions, but this is easier... - return declaring_script->GetModificationTime(); - } diff --git a/src/broxygen/PackageInfo.h b/src/broxygen/PackageInfo.h deleted file mode 100644 index 967bbe3443..0000000000 --- a/src/broxygen/PackageInfo.h +++ /dev/null @@ -1,50 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. - -#ifndef BROXYGEN_PACKAGEINFO_H -#define BROXYGEN_PACKAGEINFO_H - -#include "Info.h" - -#include -#include - -namespace broxygen { - -/** - * Information about a Bro script package. - */ -class PackageInfo : public Info { - -public: - - /** - * Ctor. - * @param name The name of the Bro script package (relative path from a - * component within BROPATH. - */ - explicit PackageInfo(const std::string& name); - - /** - * @return The content of the package's README file, each line being - * an element in the returned vector. If the package has no README, the - * vector is empty. - */ - std::vector GetReadme() const - { return readme; } - -private: - - time_t DoGetModificationTime() const override; - - std::string DoName() const override - { return pkg_name; } - - std::string DoReStructuredText(bool roles_only) const override; - - std::string pkg_name; - std::vector readme; -}; - -} // namespace broxygen - -#endif diff --git a/src/broxygen/broxygen.bif b/src/broxygen/broxygen.bif deleted file mode 100644 index d1b3028edc..0000000000 --- a/src/broxygen/broxygen.bif +++ /dev/null @@ -1,97 +0,0 @@ -# See the file "COPYING" in the main distribution directory for copyright. - -##! Functions for querying script, package, or variable documentation. - -%%{ -#include "broxygen/Manager.h" -#include "util.h" - -static StringVal* comments_to_val(const vector& comments) - { - return new StringVal(implode_string_vector(comments)); - } -%%} - -## Retrieve the Broxygen-style comments (``##``) associated with an identifier -## (e.g. a variable or type). -## -## name: a script-level identifier for which to retrieve comments. -## -## Returns: comments associated with *name*. If *name* is not a known -## identifier, an empty string is returned. -function get_identifier_comments%(name: string%): string - %{ - using namespace broxygen; - IdentifierInfo* d = broxygen_mgr->GetIdentifierInfo(name->CheckString()); - - if ( ! d ) - return val_mgr->GetEmptyString(); - - return comments_to_val(d->GetComments()); - %} - -## Retrieve the Broxygen-style summary comments (``##!``) associated with -## a Bro script. -## -## name: the name of a Bro script. It must be a relative path to where -## it is located within a particular component of BROPATH and use -## the same file name extension/suffix as the actual file (e.g. ".bro"). -## -## Returns: summary comments associated with script with *name*. If -## *name* is not a known script, an empty string is returned. -function get_script_comments%(name: string%): string - %{ - using namespace broxygen; - ScriptInfo* d = broxygen_mgr->GetScriptInfo(name->CheckString()); - - if ( ! d ) - return val_mgr->GetEmptyString(); - - return comments_to_val(d->GetComments()); - %} - -## Retrieve the contents of a Bro script package's README file. -## -## name: the name of a Bro script package. 
It must be a relative path -## to where it is located within a particular component of BROPATH. -## -## Returns: contents of the package's README file. If *name* is not a known -## package, an empty string is returned. -function get_package_readme%(name: string%): string - %{ - using namespace broxygen; - PackageInfo* d = broxygen_mgr->GetPackageInfo(name->CheckString()); - - if ( ! d ) - return val_mgr->GetEmptyString(); - - return comments_to_val(d->GetReadme()); - %} - -## Retrieve the Broxygen-style comments (``##``) associated with a record field. -## -## name: the name of a record type and a field within it formatted like -## a typical record field access: "$". -## -## Returns: comments associated with the record field. If *name* does -## not point to a known record type or a known field within a record -## type, an empty string is returned. -function get_record_field_comments%(name: string%): string - %{ - using namespace broxygen; - string accessor = name->CheckString(); - size_t i = accessor.find('$'); - - if ( i > accessor.size() - 2 ) - return val_mgr->GetEmptyString(); - - string id = accessor.substr(0, i); - - IdentifierInfo* d = broxygen_mgr->GetIdentifierInfo(id); - - if ( ! d ) - return val_mgr->GetEmptyString(); - - string field = accessor.substr(i + 1); - return comments_to_val(d->GetFieldComments(field)); - %} diff --git a/src/bsd-getopt-long.c b/src/bsd-getopt-long.c index 65a3d94093..dc880f87dd 100644 --- a/src/bsd-getopt-long.c +++ b/src/bsd-getopt-long.c @@ -54,7 +54,7 @@ #define IN_GETOPT_LONG_C 1 -#include +#include #include #include #include diff --git a/src/const.bif b/src/const.bif index 6d60ac707b..c20615892d 100644 --- a/src/const.bif +++ b/src/const.bif @@ -1,6 +1,6 @@ -##! Declaration of various scripting-layer constants that the Bro core uses +##! Declaration of various scripting-layer constants that the Zeek core uses ##! internally. Documentation and default values for the scripting-layer -##! variables themselves are found in :doc:`/scripts/base/init-bare.bro`. +##! variables themselves are found in :doc:`/scripts/base/init-bare.zeek`. const ignore_keep_alive_rexmit: bool; const skip_http_data: bool; diff --git a/src/event.bif b/src/event.bif index ae00c9b653..92f3532ef0 100644 --- a/src/event.bif +++ b/src/event.bif @@ -1,4 +1,4 @@ -##! The protocol-independent events that the C/C++ core of Bro can generate. +##! The protocol-independent events that the C/C++ core of Zeek can generate. ##! ##! This is mostly events not related to a specific transport- or ##! application-layer protocol, but also includes a few that may be generated @@ -24,51 +24,57 @@ # # - Parameters # -# - .. bro:see:: +# - .. zeek:see:: # # - .. note:: # # - .. todo:: -## Generated at Bro initialization time. The event engine generates this +## Generated at Zeek initialization time. The event engine generates this ## event just before normal input processing begins. It can be used to execute -## one-time initialization code at startup. At the time a handler runs, Bro will +## one-time initialization code at startup. At the time a handler runs, Zeek will ## have executed any global initializations and statements. ## -## .. bro:see:: bro_done +## .. zeek:see:: zeek_done ## ## .. note:: ## -## When a ``bro_init`` handler executes, Bro has not yet seen any input -## packets and therefore :bro:id:`network_time` is not initialized yet. 
An -## artifact of that is that any timer installed in a ``bro_init`` handler +## When a ``zeek_init`` handler executes, Zeek has not yet seen any input +## packets and therefore :zeek:id:`network_time` is not initialized yet. An +## artifact of that is that any timer installed in a ``zeek_init`` handler ## will fire immediately with the first packet. The standard way to work ## around that is to ignore the first time the timer fires and immediately ## reschedule. ## -event bro_init%(%); +event zeek_init%(%); -## Generated at Bro termination time. The event engine generates this event when -## Bro is about to terminate, either due to having exhausted reading its input -## trace file(s), receiving a termination signal, or because Bro was run without +## Deprecated synonym for :zeek:see:`zeek_init`. +event bro_init%(%) &deprecated; + +## Generated at Zeek termination time. The event engine generates this event when +## Zeek is about to terminate, either due to having exhausted reading its input +## trace file(s), receiving a termination signal, or because Zeek was run without ## a network input source and has finished executing any global statements. ## -## .. bro:see:: bro_init +## .. zeek:see:: zeek_init ## ## .. note:: ## -## If Bro terminates due to an invocation of :bro:id:`exit`, then this event +## If Zeek terminates due to an invocation of :zeek:id:`exit`, then this event ## is not generated. -event bro_done%(%); +event zeek_done%(%); + +## Deprecated synonym for :zeek:see:`zeek_done`. +event bro_done%(%) &deprecated; ## Generated for every new connection. This event is raised with the first -## packet of a previously unknown connection. Bro uses a flow-based definition +## packet of a previously unknown connection. Zeek uses a flow-based definition ## of "connection" here that includes not only TCP sessions but also UDP and ## ICMP flows. ## ## c: The connection. ## -## .. bro:see:: connection_EOF connection_SYN_packet connection_attempt +## .. zeek:see:: connection_EOF connection_SYN_packet connection_attempt ## connection_established connection_external connection_finished ## connection_first_ACK connection_half_finished connection_partial_close ## connection_pending connection_rejected connection_reset connection_reused @@ -88,7 +94,7 @@ event new_connection%(c: connection%); ## *tunnel* field is NOT automatically/internally assigned to the new ## encapsulation value of *e* after this event is raised. If the desired ## behavior is to track the latest tunnel encapsulation per-connection, -## then a handler of this event should assign *e* to ``c$tunnel`` (which Bro's +## then a handler of this event should assign *e* to ``c$tunnel`` (which Zeek's ## default scripts are doing). ## ## c: The connection whose tunnel/encapsulation changed. @@ -98,12 +104,12 @@ event tunnel_changed%(c: connection, e: EncapsulatingConnVector%); ## Generated when a TCP connection timed out. This event is raised when ## no activity was seen for an interval of at least -## :bro:id:`tcp_connection_linger`, and either one endpoint has already +## :zeek:id:`tcp_connection_linger`, and either one endpoint has already ## closed the connection or one side never became active. ## ## c: The connection. ## -## .. bro:see:: connection_EOF connection_SYN_packet connection_attempt +## .. 
zeek:see:: connection_EOF connection_SYN_packet connection_attempt ## connection_established connection_external connection_finished ## connection_first_ACK connection_half_finished connection_partial_close ## connection_pending connection_rejected connection_reset connection_reused @@ -115,14 +121,14 @@ event tunnel_changed%(c: connection, e: EncapsulatingConnVector%); ## ## The precise semantics of this event can be unintuitive as it only ## covers a subset of cases where a connection times out. Often, handling -## :bro:id:`connection_state_remove` is the better option. That one will be +## :zeek:id:`connection_state_remove` is the better option. That one will be ## generated reliably when an interval of ``tcp_inactivity_timeout`` has ## passed without any activity seen (but also for all other ways a ## connection may terminate). event connection_timeout%(c: connection%); ## Generated when a connection's internal state is about to be removed from -## memory. Bro generates this event reliably once for every connection when it +## memory. Zeek generates this event reliably once for every connection when it ## is about to delete the internal state. As such, the event is well-suited for ## script-level cleanup that needs to be performed for every connection. This ## event is generated not only for TCP sessions but also for UDP and ICMP @@ -130,7 +136,7 @@ event connection_timeout%(c: connection%); ## ## c: The connection. ## -## .. bro:see:: connection_EOF connection_SYN_packet connection_attempt +## .. zeek:see:: connection_EOF connection_SYN_packet connection_attempt ## connection_established connection_external connection_finished ## connection_first_ACK connection_half_finished connection_partial_close ## connection_pending connection_rejected connection_reset connection_reused @@ -139,13 +145,13 @@ event connection_timeout%(c: connection%); ## tcp_inactivity_timeout icmp_inactivity_timeout conn_stats event connection_state_remove%(c: connection%); -## Generated when a connection 4-tuple is reused. This event is raised when Bro +## Generated when a connection 4-tuple is reused. This event is raised when Zeek ## sees a new TCP session or UDP flow using a 4-tuple matching that of an ## earlier connection it still considers active. ## ## c: The connection. ## -## .. bro:see:: connection_EOF connection_SYN_packet connection_attempt +## .. zeek:see:: connection_EOF connection_SYN_packet connection_attempt ## connection_established connection_external connection_finished ## connection_first_ACK connection_half_finished connection_partial_close ## connection_pending connection_rejected connection_reset connection_state_remove @@ -159,7 +165,7 @@ event connection_reused%(c: connection%); ## ## c: The connection. ## -## .. bro:see:: connection_EOF connection_SYN_packet connection_attempt +## .. zeek:see:: connection_EOF connection_SYN_packet connection_attempt ## connection_established connection_external connection_finished ## connection_first_ACK connection_half_finished connection_partial_close ## connection_pending connection_rejected connection_reset connection_reused @@ -178,11 +184,11 @@ event connection_status_update%(c: connection%); ## ## new_label: The new flow label that the endpoint is using. ## -## .. bro:see:: connection_established new_connection +## .. zeek:see:: connection_established new_connection event connection_flow_label_changed%(c: connection, is_orig: bool, old_label: count, new_label: count%); ## Generated for a new connection received from the communication subsystem. 
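# Illustrative sketch, not part of the patch: a handler for the renamed zeek_init
# event together with the timer workaround described in its note above. A scheduled
# event installed in zeek_init fires immediately with the first packet, so the
# handler ignores that first firing and simply reschedules. The event name, flag,
# and 5-minute interval are hypothetical.
global housekeeping_started = F;
global do_housekeeping: event();

event do_housekeeping()
	{
	if ( housekeeping_started )
		print "housekeeping at", network_time();
	else
		housekeeping_started = T;

	schedule 5 min { do_housekeeping() };
	}

event zeek_init()
	{
	schedule 5 min { do_housekeeping() };
	}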
-## Remote peers can inject packets into Bro's packet loop, for example via +## Remote peers can inject packets into Zeek's packet loop, for example via ## Broccoli. The communication system ## raises this event with the first packet of a connection coming in this way. ## @@ -192,17 +198,17 @@ event connection_flow_label_changed%(c: connection, is_orig: bool, old_label: co event connection_external%(c: connection, tag: string%); ## Generated when a UDP session for a supported protocol has finished. Some of -## Bro's application-layer UDP analyzers flag the end of a session by raising +## Zeek's application-layer UDP analyzers flag the end of a session by raising ## this event. Currently, the analyzers for DNS, NTP, Netbios, Syslog, AYIYA, ## Teredo, and GTPv1 support this. ## ## u: The connection record for the corresponding UDP flow. ## -## .. bro:see:: udp_contents udp_reply udp_request +## .. zeek:see:: udp_contents udp_reply udp_request event udp_session_done%(u: connection%); ## Generated when a connection is seen that is marked as being expected. -## The function :bro:id:`Analyzer::schedule_analyzer` tells Bro to expect a +## The function :zeek:id:`Analyzer::schedule_analyzer` tells Zeek to expect a ## particular connection to come up, and which analyzer to associate with it. ## Once the first packet of such a connection is indeed seen, this event is ## raised. @@ -210,11 +216,11 @@ event udp_session_done%(u: connection%); ## c: The connection. ## ## a: The analyzer that was scheduled for the connection with the -## :bro:id:`Analyzer::schedule_analyzer` call. When the event is raised, that +## :zeek:id:`Analyzer::schedule_analyzer` call. When the event is raised, that ## analyzer will already have been activated to process the connection. The ## ``count`` is one of the ``ANALYZER_*`` constants, e.g., ``ANALYZER_HTTP``. ## -## .. bro:see:: connection_EOF connection_SYN_packet connection_attempt +## .. zeek:see:: connection_EOF connection_SYN_packet connection_attempt ## connection_established connection_external connection_finished ## connection_first_ACK connection_half_finished connection_partial_close ## connection_pending connection_rejected connection_reset connection_reused @@ -225,7 +231,7 @@ event udp_session_done%(u: connection%); ## ``ANALYZER_*`` constants right now. event scheduled_analyzer_applied%(c: connection, a: Analyzer::Tag%); -## Generated for every packet Bro sees that have a valid link-layer header. This +## Generated for every packet Zeek sees that have a valid link-layer header. This ## is a very very low-level and expensive event that should be avoided when at all ## possible. It's usually infeasible to handle when processing even medium volumes ## of traffic in real-time. That said, if you work from a trace and want to do some @@ -233,11 +239,11 @@ event scheduled_analyzer_applied%(c: connection, a: Analyzer::Tag%); ## ## p: Information from the header of the packet that triggered the event. ## -## .. bro:see:: new_packet packet_contents +## .. zeek:see:: new_packet packet_contents event raw_packet%(p: raw_pkt_hdr%); -## Generated for all packets that make it into Bro's connection processing. In -## contrast to :bro:id:`raw_packet` this filters out some more packets that don't +## Generated for all packets that make it into Zeek's connection processing. In +## contrast to :zeek:id:`raw_packet` this filters out some more packets that don't ## pass certain sanity checks. 
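# Illustrative sketch, not part of the patch: a minimal raw_packet handler of the
# kind the documentation above suggests reserving for offline trace analysis,
# given the event's cost. It only prints IPv4 endpoints when an IPv4 header is
# present; the output format is arbitrary.
event raw_packet(p: raw_pkt_hdr)
	{
	if ( p?$ip )
		print p$ip$src, p$ip$dst;
	}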
## ## This is a very low-level and expensive event that should be avoided when at all @@ -249,7 +255,7 @@ event raw_packet%(p: raw_pkt_hdr%); ## ## p: Information from the header of the packet that triggered the event. ## -## .. bro:see:: tcp_packet packet_contents raw_packet +## .. zeek:see:: tcp_packet packet_contents raw_packet event new_packet%(c: connection, p: pkt_hdr%); ## Generated for every IPv6 packet that contains extension headers. @@ -260,7 +266,7 @@ event new_packet%(c: connection, p: pkt_hdr%); ## ## p: Information from the header of the packet that triggered the event. ## -## .. bro:see:: new_packet tcp_packet packet_contents esp_packet +## .. zeek:see:: new_packet tcp_packet packet_contents esp_packet event ipv6_ext_headers%(c: connection, p: pkt_hdr%); ## Generated for any packets using the IPv6 Encapsulating Security Payload (ESP) @@ -268,35 +274,35 @@ event ipv6_ext_headers%(c: connection, p: pkt_hdr%); ## ## p: Information from the header of the packet that triggered the event. ## -## .. bro:see:: new_packet tcp_packet ipv6_ext_headers +## .. zeek:see:: new_packet tcp_packet ipv6_ext_headers event esp_packet%(p: pkt_hdr%); ## Generated for any packet using a Mobile IPv6 Mobility Header. ## ## p: Information from the header of the packet that triggered the event. ## -## .. bro:see:: new_packet tcp_packet ipv6_ext_headers +## .. zeek:see:: new_packet tcp_packet ipv6_ext_headers event mobile_ipv6_message%(p: pkt_hdr%); ## Generated for every packet that has a non-empty transport-layer payload. ## This is a very low-level and expensive event that should be avoided when ## at all possible. It's usually infeasible to handle when processing even ## medium volumes of traffic in real-time. It's even worse than -## :bro:id:`new_packet`. That said, if you work from a trace and want to +## :zeek:id:`new_packet`. That said, if you work from a trace and want to ## do some packet-level analysis, it may come in handy. ## ## c: The connection the packet is part of. ## ## contents: The raw transport-layer payload. ## -## .. bro:see:: new_packet tcp_packet +## .. zeek:see:: new_packet tcp_packet event packet_contents%(c: connection, contents: string%); -## Generated when Bro detects a TCP retransmission inconsistency. When -## reassembling a TCP stream, Bro buffers all payload until it sees the +## Generated when Zeek detects a TCP retransmission inconsistency. When +## reassembling a TCP stream, Zeek buffers all payload until it sees the ## responder acking it. If during that time, the sender resends a chunk of ## payload but with different content than originally, this event will be -## raised. In addition, if :bro:id:`tcp_max_old_segments` is larger than zero, +## raised. In addition, if :zeek:id:`tcp_max_old_segments` is larger than zero, ## mismatches with that older still-buffered data will likewise trigger the event. ## ## c: The connection showing the inconsistency. @@ -311,13 +317,13 @@ event packet_contents%(c: connection, contents: string%); ## ``A`` -> ACK; ``P`` -> PUSH. This string will not always be set, ## only if the information is available; it's "best effort". ## -## .. bro:see:: tcp_rexmit tcp_contents +## .. zeek:see:: tcp_rexmit tcp_contents event rexmit_inconsistency%(c: connection, t1: string, t2: string, tcp_flags: string%); -## Generated when Bro detects a gap in a reassembled TCP payload stream. This -## event is raised when Bro, while reassembling a payload stream, determines +## Generated when Zeek detects a gap in a reassembled TCP payload stream. 
This +## event is raised when Zeek, while reassembling a payload stream, determines ## that a chunk of payload is missing (e.g., because the responder has already -## acknowledged it, even though Bro didn't see it). +## acknowledged it, even though Zeek didn't see it). ## ## c: The connection. ## @@ -337,7 +343,7 @@ event rexmit_inconsistency%(c: connection, t1: string, t2: string, tcp_flags: st event content_gap%(c: connection, is_orig: bool, seq: count, length: count%); ## Generated when a protocol analyzer confirms that a connection is indeed -## using that protocol. Bro's dynamic protocol detection heuristically activates +## using that protocol. Zeek's dynamic protocol detection heuristically activates ## analyzers as soon as it believes a connection *could* be using a particular ## protocol. It is then left to the corresponding analyzer to verify whether ## that is indeed the case; if so, this event will be generated. @@ -352,19 +358,19 @@ event content_gap%(c: connection, is_orig: bool, seq: count, length: count%); ## aid: A unique integer ID identifying the specific *instance* of the ## analyzer *atype* that is analyzing the connection ``c``. The ID can ## be used to reference the analyzer when using builtin functions like -## :bro:id:`disable_analyzer`. +## :zeek:id:`disable_analyzer`. ## -## .. bro:see:: protocol_violation +## .. zeek:see:: protocol_violation ## ## .. note:: ## -## Bro's default scripts use this event to determine the ``service`` column -## of :bro:type:`Conn::Info`: once confirmed, the protocol will be listed +## Zeek's default scripts use this event to determine the ``service`` column +## of :zeek:type:`Conn::Info`: once confirmed, the protocol will be listed ## there (and thus in ``conn.log``). event protocol_confirmation%(c: connection, atype: Analyzer::Tag, aid: count%); ## Generated when a protocol analyzer determines that a connection it is parsing -## is not conforming to the protocol it expects. Bro's dynamic protocol +## is not conforming to the protocol it expects. Zeek's dynamic protocol ## detection heuristically activates analyzers as soon as it believes a ## connection *could* be using a particular protocol. It is then left to the ## corresponding analyzer to verify whether that is indeed the case; if not, @@ -380,22 +386,22 @@ event protocol_confirmation%(c: connection, atype: Analyzer::Tag, aid: count%); ## aid: A unique integer ID identifying the specific *instance* of the ## analyzer *atype* that is analyzing the connection ``c``. The ID can ## be used to reference the analyzer when using builtin functions like -## :bro:id:`disable_analyzer`. +## :zeek:id:`disable_analyzer`. ## ## reason: TODO. ## -## .. bro:see:: protocol_confirmation +## .. zeek:see:: protocol_confirmation ## ## .. note:: ## -## Bro's default scripts use this event to disable an analyzer via -## :bro:id:`disable_analyzer` if it's parsing the wrong protocol. That's +## Zeek's default scripts use this event to disable an analyzer via +## :zeek:id:`disable_analyzer` if it's parsing the wrong protocol. That's ## however a script-level decision and not done automatically by the event ## engine. event protocol_violation%(c: connection, atype: Analyzer::Tag, aid: count, reason: string%); ## Generated when a TCP connection terminated, passing on statistics about the -## two endpoints. This event is always generated when Bro flushes the internal +## two endpoints. 
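# Illustrative sketch, not part of the patch: the script-level pattern the
# protocol_violation note above describes -- disabling the offending analyzer
# instance once a violation is reported. The printed message is arbitrary.
event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, reason: string)
	{
	print fmt("%s: analyzer %s violated its protocol (%s)", c$uid, atype, reason);
	disable_analyzer(c$id, aid);
	}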
This event is always generated when Zeek flushes the internal ## connection state, independent of how a connection terminates. ## ## c: The connection. @@ -404,16 +410,16 @@ event protocol_violation%(c: connection, atype: Analyzer::Tag, aid: count, reaso ## ## rs: Statistics for the responder endpoint. ## -## .. bro:see:: connection_state_remove +## .. zeek:see:: connection_state_remove event conn_stats%(c: connection, os: endpoint_stats, rs: endpoint_stats%); ## Generated for unexpected activity related to a specific connection. When -## Bro's packet analysis encounters activity that does not conform to a +## Zeek's packet analysis encounters activity that does not conform to a ## protocol's specification, it raises one of the ``*_weird`` events to report ## that. This event is raised if the activity is tied directly to a specific ## connection. ## -## name: A unique name for the specific type of "weird" situation. Bro's default +## name: A unique name for the specific type of "weird" situation. Zeek's default ## scripts use this name in filtering policies that specify which ## "weirds" are worth reporting. ## @@ -421,7 +427,7 @@ event conn_stats%(c: connection, os: endpoint_stats, rs: endpoint_stats%); ## ## addl: Optional additional context further describing the situation. ## -## .. bro:see:: flow_weird net_weird file_weird +## .. zeek:see:: flow_weird net_weird file_weird ## ## .. note:: "Weird" activity is much more common in real-world network traffic ## than one would intuitively expect. While in principle, any protocol @@ -430,13 +436,13 @@ event conn_stats%(c: connection, os: endpoint_stats, rs: endpoint_stats%); event conn_weird%(name: string, c: connection, addl: string%); ## Generated for unexpected activity related to a pair of hosts, but independent -## of a specific connection. When Bro's packet analysis encounters activity +## of a specific connection. When Zeek's packet analysis encounters activity ## that does not conform to a protocol's specification, it raises one of ## the ``*_weird`` events to report that. This event is raised if the activity ## is related to a pair of hosts, yet not to a specific connection between ## them. ## -## name: A unique name for the specific type of "weird" situation. Bro's default +## name: A unique name for the specific type of "weird" situation. Zeek's default ## scripts use this name in filtering policies that specify which ## "weirds" are worth reporting. ## @@ -444,7 +450,7 @@ event conn_weird%(name: string, c: connection, addl: string%); ## ## dst: The destination address corresponding to the activity. ## -## .. bro:see:: conn_weird net_weird file_weird +## .. zeek:see:: conn_weird net_weird file_weird ## ## .. note:: "Weird" activity is much more common in real-world network traffic ## than one would intuitively expect. While in principle, any protocol @@ -453,16 +459,16 @@ event conn_weird%(name: string, c: connection, addl: string%); event flow_weird%(name: string, src: addr, dst: addr%); ## Generated for unexpected activity that is not tied to a specific connection -## or pair of hosts. When Bro's packet analysis encounters activity that +## or pair of hosts. When Zeek's packet analysis encounters activity that ## does not conform to a protocol's specification, it raises one of the ## ``*_weird`` events to report that. This event is raised if the activity is ## not tied directly to a specific connection or pair of hosts. ## -## name: A unique name for the specific type of "weird" situation. 
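# Illustrative sketch, not part of the patch: conn_weird handlers typically filter
# on the weird's name, as the documentation above notes for Zeek's default scripts.
# "active_connection_reuse" is only an example name.
event conn_weird(name: string, c: connection, addl: string)
	{
	if ( name == "active_connection_reuse" )
		print fmt("connection reuse on %s (%s)", c$uid, addl);
	}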
Bro's default +## name: A unique name for the specific type of "weird" situation. Zeek's default ## scripts use this name in filtering policies that specify which ## "weirds" are worth reporting. ## -## .. bro:see:: flow_weird file_weird +## .. zeek:see:: flow_weird file_weird ## ## .. note:: "Weird" activity is much more common in real-world network traffic ## than one would intuitively expect. While in principle, any protocol @@ -471,11 +477,11 @@ event flow_weird%(name: string, src: addr, dst: addr%); event net_weird%(name: string%); ## Generated for unexpected activity that is tied to a file. -## When Bro's packet analysis encounters activity that +## When Zeek's packet analysis encounters activity that ## does not conform to a protocol's specification, it raises one of the ## ``*_weird`` events to report that. ## -## name: A unique name for the specific type of "weird" situation. Bro's default +## name: A unique name for the specific type of "weird" situation. Zeek's default ## scripts use this name in filtering policies that specify which ## "weirds" are worth reporting. ## @@ -483,7 +489,7 @@ event net_weird%(name: string%); ## ## addl: Additional information related to the weird. ## -## .. bro:see:: flow_weird net_weird conn_weird +## .. zeek:see:: flow_weird net_weird conn_weird ## ## .. note:: "Weird" activity is much more common in real-world network traffic ## than one would intuitively expect. While in principle, any protocol @@ -491,11 +497,11 @@ event net_weird%(name: string%); ## endpoint's implementation interprets an RFC quite liberally. event file_weird%(name: string, f: fa_file, addl: string%); -## Generated regularly for the purpose of profiling Bro's processing. This event -## is raised for every :bro:id:`load_sample_freq` packet. For these packets, -## Bro records script-level functions executed during their processing as well +## Generated regularly for the purpose of profiling Zeek's processing. This event +## is raised for every :zeek:id:`load_sample_freq` packet. For these packets, +## Zeek records script-level functions executed during their processing as well ## as further internal locations. By sampling the processing in this form, one -## can understand where Bro spends its time. +## can understand where Zeek spends its time. ## ## samples: A set with functions and locations seen during the processing of ## the sampled packet. @@ -505,13 +511,13 @@ event file_weird%(name: string, f: fa_file, addl: string%); ## dmem: The difference in memory usage caused by processing the sampled packet. event load_sample%(samples: load_sample_info, CPU: interval, dmem: int%); -## Generated when a signature matches. Bro's signature engine provides +## Generated when a signature matches. Zeek's signature engine provides ## high-performance pattern matching separately from the normal script ## processing. If a signature with an ``event`` action matches, this event is ## raised. ## ## See the :doc:`user manual ` for more information -## about Bro's signature engine. +## about Zeek's signature engine. ## ## state: Context about the match, including which signatures triggered the ## event and the connection for which the match was found. @@ -519,7 +525,7 @@ event load_sample%(samples: load_sample_info, CPU: interval, dmem: int%); ## msg: The message passed to the ``event`` signature action. ## ## data: The last chunk of input that triggered the match. Note that the -## specifics here are not well-defined as Bro does not buffer any input. 
+## specifics here are not well-defined as Zeek does not buffer any input. ## If a match is split across packet boundaries, only the last chunk ## triggering the match will be passed on to the event. event signature_match%(state: signature_state, msg: string, data: string%); @@ -528,7 +534,7 @@ event signature_match%(state: signature_state, msg: string, data: string%); ## used on a system. This is a protocol-independent event that is fed by ## different analyzers. For example, the HTTP analyzer reports user-agent and ## server software by raising this event, assuming it can parse it (if not, -## :bro:id:`software_parse_error` will be generated instead). +## :zeek:id:`software_parse_error` will be generated instead). ## ## c: The connection. ## @@ -539,7 +545,7 @@ event signature_match%(state: signature_state, msg: string, data: string%); ## descr: The raw (unparsed) software identification string as extracted from ## the protocol. ## -## .. bro:see:: software_parse_error software_unparsed_version_found OS_version_found +## .. zeek:see:: software_parse_error software_unparsed_version_found event software_version_found%(c: connection, host: addr, s: software, descr: string%); @@ -547,7 +553,7 @@ event software_version_found%(c: connection, host: addr, ## used on a system but cannot parse it. This is a protocol-independent event ## that is fed by different analyzers. For example, the HTTP analyzer reports ## user-agent and server software by raising this event if it cannot parse them -## directly (if it can :bro:id:`software_version_found` will be generated +## directly (if it can :zeek:id:`software_version_found` will be generated ## instead). ## ## c: The connection. @@ -557,16 +563,15 @@ event software_version_found%(c: connection, host: addr, ## descr: The raw (unparsed) software identification string as extracted from ## the protocol. ## -## .. bro:see:: software_version_found software_unparsed_version_found -## OS_version_found +## .. zeek:see:: software_version_found software_unparsed_version_found event software_parse_error%(c: connection, host: addr, descr: string%); ## Generated when a protocol analyzer finds an identification of a software ## used on a system. This is a protocol-independent event that is fed by ## different analyzers. For example, the HTTP analyzer reports user-agent and ## server software by raising this event. Different from -## :bro:id:`software_version_found` and :bro:id:`software_parse_error`, this -## event is always raised, independent of whether Bro can parse the version +## :zeek:id:`software_version_found` and :zeek:id:`software_parse_error`, this +## event is always raised, independent of whether Zeek can parse the version ## string. ## ## c: The connection. @@ -575,235 +580,24 @@ event software_parse_error%(c: connection, host: addr, descr: string%); ## ## str: The software identification string as extracted from the protocol. ## -## .. bro:see:: software_parse_error software_version_found OS_version_found +## .. zeek:see:: software_parse_error software_version_found event software_unparsed_version_found%(c: connection, host: addr, str: string%); -## Generated when an operating system has been fingerprinted. Bro uses `p0f -## `__ to fingerprint endpoints passively, -## and it raises this event for each system identified. The p0f fingerprints are -## defined by :bro:id:`passive_fingerprint_file`. -## -## c: The connection. -## -## host: The host running the reported OS. -## -## OS: The OS version string. -## -## .. 
bro:see:: passive_fingerprint_file software_parse_error -## software_version_found software_unparsed_version_found -## generate_OS_version_event -event OS_version_found%(c: connection, host: addr, OS: OS_version%); - -## Generated when a connection to a remote Bro has been established. This event -## is intended primarily for use by Bro's communication framework, but it can -## also trigger additional code if helpful. -## -## p: A record describing the peer. -## -## .. bro:see:: remote_capture_filter remote_connection_closed remote_connection_error -## remote_connection_handshake_done remote_event_registered remote_log remote_pong -## remote_state_access_performed remote_state_inconsistency print_hook -event remote_connection_established%(p: event_peer%); - -## Generated when a connection to a remote Bro has been closed. This event is -## intended primarily for use by Bro's communication framework, but it can -## also trigger additional code if helpful. -## -## p: A record describing the peer. -## -## .. bro:see:: remote_capture_filter remote_connection_error -## remote_connection_established remote_connection_handshake_done -## remote_event_registered remote_log remote_pong remote_state_access_performed -## remote_state_inconsistency print_hook -event remote_connection_closed%(p: event_peer%); - -## Generated when a remote connection's initial handshake has been completed. -## This event is intended primarily for use by Bro's communication framework, -## but it can also trigger additional code if helpful. -## -## p: A record describing the peer. -## -## .. bro:see:: remote_capture_filter remote_connection_closed remote_connection_error -## remote_connection_established remote_event_registered remote_log remote_pong -## remote_state_access_performed remote_state_inconsistency print_hook -event remote_connection_handshake_done%(p: event_peer%); - -## Generated for each event registered by a remote peer. This event is intended -## primarily for use by Bro's communication framework, but it can also trigger -## additional code if helpful. -## -## p: A record describing the peer. -## -## name: TODO. -## -## .. bro:see:: remote_capture_filter remote_connection_closed -## remote_connection_error remote_connection_established -## remote_connection_handshake_done remote_log remote_pong -## remote_state_access_performed remote_state_inconsistency print_hook -event remote_event_registered%(p: event_peer, name: string%); - -## Generated when a connection to a remote Bro encountered an error. This event -## is intended primarily for use by Bro's communication framework, but it can -## also trigger additional code if helpful. -## -## p: A record describing the peer. -## -## reason: A textual description of the error. -## -## .. bro:see:: remote_capture_filter remote_connection_closed -## remote_connection_established remote_connection_handshake_done -## remote_event_registered remote_log remote_pong remote_state_access_performed -## remote_state_inconsistency print_hook -event remote_connection_error%(p: event_peer, reason: string%); - -## Generated when a remote peer sent us a capture filter. While this event is -## intended primarily for use by Bro's communication framework, it can also -## trigger additional code if helpful. -## -## p: A record describing the peer. -## -## filter: The filter string sent by the peer. -## -## .. 
bro:see:: remote_connection_closed remote_connection_error -## remote_connection_established remote_connection_handshake_done -## remote_event_registered remote_log remote_pong remote_state_access_performed -## remote_state_inconsistency print_hook -event remote_capture_filter%(p: event_peer, filter: string%); - -## Generated after a call to :bro:id:`send_state` when all data has been -## successfully sent to the remote side. While this event is -## intended primarily for use by Bro's communication framework, it can also -## trigger additional code if helpful. -## -## p: A record describing the remote peer. -## -## .. bro:see:: remote_capture_filter remote_connection_closed -## remote_connection_error remote_connection_established -## remote_connection_handshake_done remote_event_registered remote_log remote_pong -## remote_state_access_performed remote_state_inconsistency print_hook -event finished_send_state%(p: event_peer%); - -## Generated if state synchronization detects an inconsistency. While this -## event is intended primarily for use by Bro's communication framework, it can -## also trigger additional code if helpful. This event is only raised if -## :bro:id:`remote_check_sync_consistency` is false. -## -## operation: The textual description of the state operation performed. -## -## id: The name of the Bro script identifier that was operated on. -## -## expected_old: A textual representation of the value of *id* that was -## expected to be found before the operation was carried out. -## -## real_old: A textual representation of the value of *id* that was actually -## found before the operation was carried out. The difference between -## *real_old* and *expected_old* is the inconsistency being reported. -## -## .. bro:see:: remote_capture_filter remote_connection_closed -## remote_connection_error remote_connection_established -## remote_connection_handshake_done remote_event_registered remote_log remote_pong -## remote_state_access_performed print_hook remote_check_sync_consistency -event remote_state_inconsistency%(operation: string, id: string, - expected_old: string, real_old: string%); - -## Generated for communication log messages. While this event is -## intended primarily for use by Bro's communication framework, it can also -## trigger additional code if helpful. -## -## level: The log level, which is either :bro:id:`REMOTE_LOG_INFO` or -## :bro:id:`REMOTE_LOG_ERROR`. -## -## src: The component of the communication system that logged the message. -## Currently, this will be one of :bro:id:`REMOTE_SRC_CHILD` (Bro's -## child process), :bro:id:`REMOTE_SRC_PARENT` (Bro's main process), or -## :bro:id:`REMOTE_SRC_SCRIPT` (the script level). -## -## msg: The message logged. -## -## .. bro:see:: remote_capture_filter remote_connection_closed remote_connection_error -## remote_connection_established remote_connection_handshake_done -## remote_event_registered remote_pong remote_state_access_performed -## remote_state_inconsistency print_hook remote_log_peer -event remote_log%(level: count, src: count, msg: string%); - -## Generated for communication log messages. While this event is -## intended primarily for use by Bro's communication framework, it can also -## trigger additional code if helpful. This event is equivalent to -## :bro:see:`remote_log` except the message is with respect to a certain peer. -## -## p: A record describing the remote peer. -## -## level: The log level, which is either :bro:id:`REMOTE_LOG_INFO` or -## :bro:id:`REMOTE_LOG_ERROR`. 
-## -## src: The component of the communication system that logged the message. -## Currently, this will be one of :bro:id:`REMOTE_SRC_CHILD` (Bro's -## child process), :bro:id:`REMOTE_SRC_PARENT` (Bro's main process), or -## :bro:id:`REMOTE_SRC_SCRIPT` (the script level). -## -## msg: The message logged. -## -## .. bro:see:: remote_capture_filter remote_connection_closed remote_connection_error -## remote_connection_established remote_connection_handshake_done -## remote_event_registered remote_pong remote_state_access_performed -## remote_state_inconsistency print_hook remote_log -event remote_log_peer%(p: event_peer, level: count, src: count, msg: string%); - -## Generated when a remote peer has answered to our ping. This event is part of -## Bro's infrastructure for measuring communication latency. One can send a ping -## by calling :bro:id:`send_ping` and when a corresponding reply is received, -## this event will be raised. -## -## p: The peer sending us the pong. -## -## seq: The sequence number passed to the original :bro:id:`send_ping` call. -## The number is sent back by the peer in its response. -## -## d1: The time interval between sending the ping and receiving the pong. This -## is the latency of the complete path. -## -## d2: The time interval between sending out the ping to the network and its -## reception at the peer. This is the network latency. -## -## d3: The time interval between when the peer's child process received the -## ping and when its parent process sent the pong. This is the -## processing latency at the peer. -## -## .. bro:see:: remote_capture_filter remote_connection_closed remote_connection_error -## remote_connection_established remote_connection_handshake_done -## remote_event_registered remote_log remote_state_access_performed -## remote_state_inconsistency print_hook -event remote_pong%(p: event_peer, seq: count, - d1: interval, d2: interval, d3: interval%); - -## Generated each time a remote state access has been replayed locally. This -## event is primarily intended for debugging. -## -## id: The name of the Bro script variable that's being operated on. -## -## v: The new value of the variable. -## -## .. bro:see:: remote_capture_filter remote_connection_closed remote_connection_error -## remote_connection_established remote_connection_handshake_done -## remote_event_registered remote_log remote_pong remote_state_inconsistency -## print_hook -event remote_state_access_performed%(id: string, v: any%); - -## Generated each time Bro's internal profiling log is updated. The file is -## defined by :bro:id:`profiling_file`, and its update frequency by -## :bro:id:`profiling_interval` and :bro:id:`expensive_profiling_multiple`. +## Generated each time Zeek's internal profiling log is updated. The file is +## defined by :zeek:id:`profiling_file`, and its update frequency by +## :zeek:id:`profiling_interval` and :zeek:id:`expensive_profiling_multiple`. ## ## f: The profiling file. ## ## expensive: True if this event corresponds to heavier-weight profiling as -## indicated by the :bro:id:`expensive_profiling_multiple` variable. +## indicated by the :zeek:id:`expensive_profiling_multiple` variable. ## -## .. bro:see:: profiling_interval expensive_profiling_multiple +## .. zeek:see:: profiling_interval expensive_profiling_multiple event profiling_update%(f: file, expensive: bool%); -## Raised for informational messages reported via Bro's reporter framework. Such +## Raised for informational messages reported via Zeek's reporter framework. 
Such ## messages may be generated internally by the event engine and also by other -## scripts calling :bro:id:`Reporter::info`. +## scripts calling :zeek:id:`Reporter::info`. ## ## t: The time the message was passed to the reporter. ## @@ -812,17 +606,17 @@ event profiling_update%(f: file, expensive: bool%); ## location: A (potentially empty) string describing a location associated with ## the message. ## -## .. bro:see:: reporter_warning reporter_error Reporter::info Reporter::warning +## .. zeek:see:: reporter_warning reporter_error Reporter::info Reporter::warning ## Reporter::error ## -## .. note:: Bro will not call reporter events recursively. If the handler of +## .. note:: Zeek will not call reporter events recursively. If the handler of ## any reporter event triggers a new reporter message itself, the output ## will go to ``stderr`` instead. event reporter_info%(t: time, msg: string, location: string%) &error_handler; -## Raised for warnings reported via Bro's reporter framework. Such messages may +## Raised for warnings reported via Zeek's reporter framework. Such messages may ## be generated internally by the event engine and also by other scripts calling -## :bro:id:`Reporter::warning`. +## :zeek:id:`Reporter::warning`. ## ## t: The time the warning was passed to the reporter. ## @@ -831,17 +625,17 @@ event reporter_info%(t: time, msg: string, location: string%) &error_handler; ## location: A (potentially empty) string describing a location associated with ## the warning. ## -## .. bro:see:: reporter_info reporter_error Reporter::info Reporter::warning +## .. zeek:see:: reporter_info reporter_error Reporter::info Reporter::warning ## Reporter::error ## -## .. note:: Bro will not call reporter events recursively. If the handler of +## .. note:: Zeek will not call reporter events recursively. If the handler of ## any reporter event triggers a new reporter message itself, the output ## will go to ``stderr`` instead. event reporter_warning%(t: time, msg: string, location: string%) &error_handler; -## Raised for errors reported via Bro's reporter framework. Such messages may +## Raised for errors reported via Zeek's reporter framework. Such messages may ## be generated internally by the event engine and also by other scripts calling -## :bro:id:`Reporter::error`. +## :zeek:id:`Reporter::error`. ## ## t: The time the error was passed to the reporter. ## @@ -850,10 +644,10 @@ event reporter_warning%(t: time, msg: string, location: string%) &error_handler; ## location: A (potentially empty) string describing a location associated with ## the error. ## -## .. bro:see:: reporter_info reporter_warning Reporter::info Reporter::warning +## .. zeek:see:: reporter_info reporter_warning Reporter::info Reporter::warning ## Reporter::error ## -## .. note:: Bro will not call reporter events recursively. If the handler of +## .. note:: Zeek will not call reporter events recursively. If the handler of ## any reporter event triggers a new reporter message itself, the output ## will go to ``stderr`` instead. event reporter_error%(t: time, msg: string, location: string%) &error_handler; @@ -862,12 +656,15 @@ event reporter_error%(t: time, msg: string, location: string%) &error_handler; ## ## path: The full path to the script loaded. ## -## level: The "nesting level": zero for a top-level Bro script and incremented +## level: The "nesting level": zero for a top-level Zeek script and incremented ## recursively for each ``@load``. 
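# Illustrative sketch, not part of the patch: handling one of the reporter events
# documented above. The formatting is arbitrary; note the caveat that a handler
# which itself triggers a reporter message sends that output to stderr instead.
event reporter_error(t: time, msg: string, location: string)
	{
	print fmt("reporter error at %s: %s (%s)",
	          strftime("%Y-%m-%d %H:%M:%S", t), msg, location);
	}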
-event bro_script_loaded%(path: string, level: count%); +event zeek_script_loaded%(path: string, level: count%); -## Generated each time Bro's script interpreter opens a file. This event is -## triggered only for files opened via :bro:id:`open`, and in particular not for +## Deprecated synonym for :zeek:see:`zeek_script_loaded`. +event bro_script_loaded%(path: string, level: count%) &deprecated; + +## Generated each time Zeek's script interpreter opens a file. This event is +## triggered only for files opened via :zeek:id:`open`, and in particular not for ## normal log files as created by log writers. ## ## f: The opened file. @@ -881,7 +678,7 @@ event event_queue_flush_point%(%); ## belongs. All incoming data to the framework is buffered, and depends ## on a handler for this event to return a string value that uniquely ## identifies a file. Among all handlers of this event, the last one to -## call :bro:see:`set_file_handle` will "win". +## call :zeek:see:`set_file_handle` will "win". ## ## tag: The analyzer which is carrying the file data. ## @@ -889,15 +686,15 @@ event event_queue_flush_point%(%); ## ## is_orig: The direction the file data is flowing over the connection. ## -## .. bro:see:: set_file_handle +## .. zeek:see:: set_file_handle event get_file_handle%(tag: Analyzer::Tag, c: connection, is_orig: bool%); ## Indicates that an analysis of a new file has begun. The analysis can be -## augmented at this time via :bro:see:`Files::add_analyzer`. +## augmented at this time via :zeek:see:`Files::add_analyzer`. ## ## f: The file. ## -## .. bro:see:: file_over_new_connection file_timeout file_gap +## .. zeek:see:: file_over_new_connection file_timeout file_gap ## file_sniff file_state_remove event file_new%(f: fa_file%); @@ -910,16 +707,16 @@ event file_new%(f: fa_file%); ## ## is_orig: true if the originator of *c* is the one sending the file. ## -## .. bro:see:: file_new file_timeout file_gap file_sniff +## .. zeek:see:: file_new file_timeout file_gap file_sniff ## file_state_remove event file_over_new_connection%(f: fa_file, c: connection, is_orig: bool%); ## Provide all metadata that has been inferred about a particular file ## from inspection of the initial content that been seen at the beginning ## of the file. The analysis can be augmented at this time via -## :bro:see:`Files::add_analyzer`. The amount of data fed into the file +## :zeek:see:`Files::add_analyzer`. The amount of data fed into the file ## sniffing can be increased or decreased by changing either -## :bro:see:`default_file_bof_buffer_size` or the `bof_buffer_size` field +## :zeek:see:`default_file_bof_buffer_size` or the `bof_buffer_size` field ## in an `fa_file` record. The event will be raised even if content inspection ## has been unable to infer any metadata, in which case the fields in *meta* ## will be left all unset. @@ -928,7 +725,7 @@ event file_over_new_connection%(f: fa_file, c: connection, is_orig: bool%); ## ## meta: Metadata that's been discovered about the file. ## -## .. bro:see:: file_over_new_connection file_timeout file_gap +## .. zeek:see:: file_over_new_connection file_timeout file_gap ## file_state_remove event file_sniff%(f: fa_file, meta: fa_metadata%); @@ -937,7 +734,7 @@ event file_sniff%(f: fa_file, meta: fa_metadata%); ## ## f: The file. ## -## .. bro:see:: file_new file_over_new_connection file_gap +## .. 
zeek:see:: file_new file_over_new_connection file_gap ## file_sniff file_state_remove default_file_timeout_interval ## Files::set_timeout_interval event file_timeout%(f: fa_file%); @@ -950,12 +747,12 @@ event file_timeout%(f: fa_file%); ## ## len: The number of missing bytes. ## -## .. bro:see:: file_new file_over_new_connection file_timeout +## .. zeek:see:: file_new file_over_new_connection file_timeout ## file_sniff file_state_remove file_reassembly_overflow event file_gap%(f: fa_file, offset: count, len: count%); ## Indicates that the file had an overflow of the reassembly buffer. -## This is a specialization of the :bro:id:`file_gap` event. +## This is a specialization of the :zeek:id:`file_gap` event. ## ## f: The file. ## @@ -966,7 +763,7 @@ event file_gap%(f: fa_file, offset: count, len: count%); ## file data and get back under the reassembly buffer size limit. ## This value will also be represented as a gap. ## -## .. bro:see:: file_new file_over_new_connection file_timeout +## .. zeek:see:: file_new file_over_new_connection file_timeout ## file_sniff file_state_remove file_gap ## Files::enable_reassembler Files::reassembly_buffer_size ## Files::enable_reassembly Files::disable_reassembly @@ -977,58 +774,58 @@ event file_reassembly_overflow%(f: fa_file, offset: count, skipped: count%); ## ## f: The file. ## -## .. bro:see:: file_new file_over_new_connection file_timeout file_gap +## .. zeek:see:: file_new file_over_new_connection file_timeout file_gap ## file_sniff event file_state_remove%(f: fa_file%); ## Generated when an internal DNS lookup produces the same result as last time. -## Bro keeps an internal DNS cache for host names and IP addresses it has +## Zeek keeps an internal DNS cache for host names and IP addresses it has ## already resolved. This event is generated when a subsequent lookup returns ## the same result as stored in the cache. ## ## dm: A record describing the new resolver result (which matches the old one). ## -## .. bro:see:: dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name +## .. zeek:see:: dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name ## dns_mapping_unverified event dns_mapping_valid%(dm: dns_mapping%); ## Generated when an internal DNS lookup got no answer even though it had -## succeeded in the past. Bro keeps an internal DNS cache for host names and IP +## succeeded in the past. Zeek keeps an internal DNS cache for host names and IP ## addresses it has already resolved. This event is generated when a ## subsequent lookup does not produce an answer even though we have ## already stored a result in the cache. ## ## dm: A record describing the old resolver result. ## -## .. bro:see:: dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name +## .. zeek:see:: dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name ## dns_mapping_valid event dns_mapping_unverified%(dm: dns_mapping%); ## Generated when an internal DNS lookup succeeded but an earlier attempt -## did not. Bro keeps an internal DNS cache for host names and IP +## did not. Zeek keeps an internal DNS cache for host names and IP ## addresses it has already resolved. This event is generated when a subsequent ## lookup produces an answer for a query that was marked as failed in the cache. ## ## dm: A record describing the new resolver result. ## -## .. bro:see:: dns_mapping_altered dns_mapping_lost_name dns_mapping_unverified +## .. 
zeek:see:: dns_mapping_altered dns_mapping_lost_name dns_mapping_unverified ## dns_mapping_valid event dns_mapping_new_name%(dm: dns_mapping%); ## Generated when an internal DNS lookup returned zero answers even though it -## had succeeded in the past. Bro keeps an internal DNS cache for host names +## had succeeded in the past. Zeek keeps an internal DNS cache for host names ## and IP addresses it has already resolved. This event is generated when ## on a subsequent lookup we receive an answer that is empty even ## though we have already stored a result in the cache. ## ## dm: A record describing the old resolver result. ## -## .. bro:see:: dns_mapping_altered dns_mapping_new_name dns_mapping_unverified +## .. zeek:see:: dns_mapping_altered dns_mapping_new_name dns_mapping_unverified ## dns_mapping_valid event dns_mapping_lost_name%(dm: dns_mapping%); ## Generated when an internal DNS lookup produced a different result than in -## the past. Bro keeps an internal DNS cache for host names and IP addresses +## the past. Zeek keeps an internal DNS cache for host names and IP addresses ## it has already resolved. This event is generated when a subsequent lookup ## returns a different answer than we have stored in the cache. ## @@ -1040,11 +837,11 @@ event dns_mapping_lost_name%(dm: dns_mapping%); ## new_addrs: Addresses that were not part of the returned set for the query ## described by *dm*, but now are. ## -## .. bro:see:: dns_mapping_lost_name dns_mapping_new_name dns_mapping_unverified +## .. zeek:see:: dns_mapping_lost_name dns_mapping_new_name dns_mapping_unverified ## dns_mapping_valid event dns_mapping_altered%(dm: dns_mapping, old_addrs: addr_set, new_addrs: addr_set%); -## A meta event generated for events that Bro raises. This will report all +## A meta event generated for events that Zeek raises. This will report all ## events for which at least one handler is defined. ## ## Note that handling this meta event is expensive and should be limited to @@ -1070,11 +867,5 @@ event gaobot_signature_found%(c: connection%); ## Deprecated. Will be removed. event anonymization_mapping%(orig: addr, mapped: addr%); -## Deprecated. Will be removed. -event rotate_interval%(f: file%); - -## Deprecated. Will be removed. -event rotate_size%(f: file%); - ## Deprecated. Will be removed. 
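# Illustrative sketch, not part of the patch: the file-analysis life-cycle events
# documented above are typically used to attach analyzers as soon as a file is
# seen and to act when its state is removed. SHA1 is only an example analyzer tag,
# and the default base scripts are assumed to be loaded.
event file_new(f: fa_file)
	{
	Files::add_analyzer(f, Files::ANALYZER_SHA1);
	}

event file_state_remove(f: fa_file)
	{
	print fmt("finished analyzing file %s", f$id);
	}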
event print_hook%(f:file, s: string%); diff --git a/src/file_analysis/CMakeLists.txt b/src/file_analysis/CMakeLists.txt index 34dc8d5387..f9a2758920 100644 --- a/src/file_analysis/CMakeLists.txt +++ b/src/file_analysis/CMakeLists.txt @@ -1,4 +1,4 @@ -include(BroSubdir) +include(ZeekSubdir) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} diff --git a/src/file_analysis/Component.h b/src/file_analysis/Component.h index b4bcbb9552..85e53a5cde 100644 --- a/src/file_analysis/Component.h +++ b/src/file_analysis/Component.h @@ -9,7 +9,7 @@ #include "Val.h" -#include "../bro-config.h" +#include "../zeek-config.h" #include "../util.h" namespace file_analysis { diff --git a/src/file_analysis/File.cc b/src/file_analysis/File.cc index 641943909e..b3680c2a2c 100644 --- a/src/file_analysis/File.cc +++ b/src/file_analysis/File.cc @@ -154,11 +154,11 @@ void File::RaiseFileOverNewConnection(Connection* conn, bool is_orig) { if ( conn && FileEventAvailable(file_over_new_connection) ) { - val_list* vl = new val_list(); - vl->append(val->Ref()); - vl->append(conn->BuildConnVal()); - vl->append(val_mgr->GetBool(is_orig)); - FileEvent(file_over_new_connection, vl); + FileEvent(file_over_new_connection, { + val->Ref(), + conn->BuildConnVal(), + val_mgr->GetBool(is_orig), + }); } } @@ -303,13 +303,11 @@ bool File::SetMime(const string& mime_type) if ( ! FileEventAvailable(file_sniff) ) return false; - val_list* vl = new val_list(); - vl->append(val->Ref()); RecordVal* meta = new RecordVal(fa_metadata_type); - vl->append(meta); meta->Assign(meta_mime_type_idx, new StringVal(mime_type)); meta->Assign(meta_inferred_idx, val_mgr->GetBool(0)); - FileEvent(file_sniff, vl); + + FileEvent(file_sniff, {val->Ref(), meta}); return true; } @@ -338,10 +336,7 @@ void File::InferMetadata() len = min(len, LookupFieldDefaultCount(bof_buffer_size_idx)); file_mgr->DetectMIME(data, len, &matches); - val_list* vl = new val_list(); - vl->append(val->Ref()); RecordVal* meta = new RecordVal(fa_metadata_type); - vl->append(meta); if ( ! matches.empty() ) { @@ -351,7 +346,7 @@ void File::InferMetadata() file_analysis::GenMIMEMatchesVal(matches)); } - FileEvent(file_sniff, vl); + FileEvent(file_sniff, {val->Ref(), meta}); return; } @@ -463,11 +458,11 @@ void File::DeliverChunk(const u_char* data, uint64 len, uint64 offset) if ( FileEventAvailable(file_reassembly_overflow) ) { - val_list* vl = new val_list(); - vl->append(val->Ref()); - vl->append(val_mgr->GetCount(current_offset)); - vl->append(val_mgr->GetCount(gap_bytes)); - FileEvent(file_reassembly_overflow, vl); + FileEvent(file_reassembly_overflow, { + val->Ref(), + val_mgr->GetCount(current_offset), + val_mgr->GetCount(gap_bytes), + }); } } @@ -608,11 +603,11 @@ void File::Gap(uint64 offset, uint64 len) if ( FileEventAvailable(file_gap) ) { - val_list* vl = new val_list(); - vl->append(val->Ref()); - vl->append(val_mgr->GetCount(offset)); - vl->append(val_mgr->GetCount(len)); - FileEvent(file_gap, vl); + FileEvent(file_gap, { + val->Ref(), + val_mgr->GetCount(offset), + val_mgr->GetCount(len), + }); } analyzers.DrainModifications(); @@ -631,14 +626,18 @@ void File::FileEvent(EventHandlerPtr h) if ( ! 
FileEventAvailable(h) ) return; - val_list* vl = new val_list(); - vl->append(val->Ref()); - FileEvent(h, vl); + FileEvent(h, {val->Ref()}); } void File::FileEvent(EventHandlerPtr h, val_list* vl) { - mgr.QueueEvent(h, vl); + FileEvent(h, std::move(*vl)); + delete vl; + } + +void File::FileEvent(EventHandlerPtr h, val_list vl) + { + mgr.QueueEventFast(h, std::move(vl)); if ( h == file_new || h == file_over_new_connection || h == file_sniff || diff --git a/src/file_analysis/File.h b/src/file_analysis/File.h index 0c4c313f06..54517b53ba 100644 --- a/src/file_analysis/File.h +++ b/src/file_analysis/File.h @@ -172,6 +172,12 @@ public: */ void FileEvent(EventHandlerPtr h, val_list* vl); + /** + * Raises an event related to the file's life-cycle. + * @param h pointer to an event handler. + * @param vl list of argument values to pass to event call. + */ + void FileEvent(EventHandlerPtr h, val_list vl); /** * Sets the MIME type for a file to a specific value. diff --git a/src/file_analysis/FileReassembler.cc b/src/file_analysis/FileReassembler.cc index ba15086320..41a37c52fd 100644 --- a/src/file_analysis/FileReassembler.cc +++ b/src/file_analysis/FileReassembler.cc @@ -110,19 +110,4 @@ void FileReassembler::Overlap(const u_char* b1, const u_char* b2, uint64 n) { // Not doing anything here yet. } - -IMPLEMENT_SERIAL(FileReassembler, SER_FILE_REASSEMBLER); - -bool FileReassembler::DoSerialize(SerialInfo* info) const - { - reporter->InternalError("FileReassembler::DoSerialize not implemented"); - return false; // Cannot be reached. - } - -bool FileReassembler::DoUnserialize(UnserialInfo* info) - { - reporter->InternalError("FileReassembler::DoUnserialize not implemented"); - return false; // Cannot be reached. - } - } // end file_analysis diff --git a/src/file_analysis/FileReassembler.h b/src/file_analysis/FileReassembler.h index c6143a5565..79aff34829 100644 --- a/src/file_analysis/FileReassembler.h +++ b/src/file_analysis/FileReassembler.h @@ -50,8 +50,6 @@ public: protected: FileReassembler(); - DECLARE_SERIAL(FileReassembler); - void Undelivered(uint64 up_to_seq) override; void BlockInserted(DataBlock* b) override; void Overlap(const u_char* b1, const u_char* b2, uint64 n) override; diff --git a/src/file_analysis/Manager.cc b/src/file_analysis/Manager.cc index ab4b1ed261..da6099b1fe 100644 --- a/src/file_analysis/Manager.cc +++ b/src/file_analysis/Manager.cc @@ -443,12 +443,11 @@ string Manager::GetFileID(analyzer::Tag tag, Connection* c, bool is_orig) EnumVal* tagval = tag.AsEnumVal(); Ref(tagval); - val_list* vl = new val_list(); - vl->append(tagval); - vl->append(c->BuildConnVal()); - vl->append(val_mgr->GetBool(is_orig)); - - mgr.QueueEvent(get_file_handle, vl); + mgr.QueueEventFast(get_file_handle, { + tagval, + c->BuildConnVal(), + val_mgr->GetBool(is_orig), + }); mgr.Drain(); // need file handle immediately so we don't have to buffer data return current_file_id; } diff --git a/src/file_analysis/Tag.h b/src/file_analysis/Tag.h index 9d131fa808..a0f6634f64 100644 --- a/src/file_analysis/Tag.h +++ b/src/file_analysis/Tag.h @@ -3,7 +3,7 @@ #ifndef FILE_ANALYZER_TAG_H #define FILE_ANALYZER_TAG_H -#include "bro-config.h" +#include "zeek-config.h" #include "util.h" #include "../Tag.h" #include "plugin/TaggedComponent.h" diff --git a/src/file_analysis/analyzer/data_event/CMakeLists.txt b/src/file_analysis/analyzer/data_event/CMakeLists.txt index 49e23d49a0..0a62b1d666 100644 --- a/src/file_analysis/analyzer/data_event/CMakeLists.txt +++ b/src/file_analysis/analyzer/data_event/CMakeLists.txt @@ 
-1,8 +1,8 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro FileDataEvent) -bro_plugin_cc(DataEvent.cc Plugin.cc ../../Analyzer.cc) -bro_plugin_end() +zeek_plugin_begin(Zeek FileDataEvent) +zeek_plugin_cc(DataEvent.cc Plugin.cc ../../Analyzer.cc) +zeek_plugin_end() diff --git a/src/file_analysis/analyzer/data_event/DataEvent.cc b/src/file_analysis/analyzer/data_event/DataEvent.cc index 15462e8e92..5d692383e1 100644 --- a/src/file_analysis/analyzer/data_event/DataEvent.cc +++ b/src/file_analysis/analyzer/data_event/DataEvent.cc @@ -41,12 +41,11 @@ bool DataEvent::DeliverChunk(const u_char* data, uint64 len, uint64 offset) { if ( ! chunk_event ) return true; - val_list* args = new val_list; - args->append(GetFile()->GetVal()->Ref()); - args->append(new StringVal(new BroString(data, len, 0))); - args->append(val_mgr->GetCount(offset)); - - mgr.QueueEvent(chunk_event, args); + mgr.QueueEventFast(chunk_event, { + GetFile()->GetVal()->Ref(), + new StringVal(new BroString(data, len, 0)), + val_mgr->GetCount(offset), + }); return true; } @@ -55,11 +54,10 @@ bool DataEvent::DeliverStream(const u_char* data, uint64 len) { if ( ! stream_event ) return true; - val_list* args = new val_list; - args->append(GetFile()->GetVal()->Ref()); - args->append(new StringVal(new BroString(data, len, 0))); - - mgr.QueueEvent(stream_event, args); + mgr.QueueEventFast(stream_event, { + GetFile()->GetVal()->Ref(), + new StringVal(new BroString(data, len, 0)), + }); return true; } diff --git a/src/file_analysis/analyzer/data_event/Plugin.cc b/src/file_analysis/analyzer/data_event/Plugin.cc index d39120cfe6..b41d2356a7 100644 --- a/src/file_analysis/analyzer/data_event/Plugin.cc +++ b/src/file_analysis/analyzer/data_event/Plugin.cc @@ -5,7 +5,7 @@ #include "DataEvent.h" namespace plugin { -namespace Bro_FileDataEvent { +namespace Zeek_FileDataEvent { class Plugin : public plugin::Plugin { public: @@ -14,7 +14,7 @@ public: AddComponent(new ::file_analysis::Component("DATA_EVENT", ::file_analysis::DataEvent::Instantiate)); plugin::Configuration config; - config.name = "Bro::FileDataEvent"; + config.name = "Zeek::FileDataEvent"; config.description = "Delivers file content"; return config; } diff --git a/src/file_analysis/analyzer/entropy/CMakeLists.txt b/src/file_analysis/analyzer/entropy/CMakeLists.txt index 38db5e726a..7841f27f94 100644 --- a/src/file_analysis/analyzer/entropy/CMakeLists.txt +++ b/src/file_analysis/analyzer/entropy/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro FileEntropy) -bro_plugin_cc(Entropy.cc Plugin.cc ../../Analyzer.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek FileEntropy) +zeek_plugin_cc(Entropy.cc Plugin.cc ../../Analyzer.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_end() diff --git a/src/file_analysis/analyzer/entropy/Entropy.cc b/src/file_analysis/analyzer/entropy/Entropy.cc index 4802224950..a0a561a1cc 100644 --- a/src/file_analysis/analyzer/entropy/Entropy.cc +++ b/src/file_analysis/analyzer/entropy/Entropy.cc @@ -53,8 +53,8 @@ void Entropy::Finalize() if ( ! fed ) return; - val_list* vl = new val_list(); - vl->append(GetFile()->GetVal()->Ref()); + if ( ! 
file_entropy ) + return; double montepi, scc, ent, mean, chisq; montepi = scc = ent = mean = chisq = 0.0; @@ -67,6 +67,8 @@ void Entropy::Finalize() ent_result->Assign(3, new Val(montepi, TYPE_DOUBLE)); ent_result->Assign(4, new Val(scc, TYPE_DOUBLE)); - vl->append(ent_result); - mgr.QueueEvent(file_entropy, vl); + mgr.QueueEventFast(file_entropy, { + GetFile()->GetVal()->Ref(), + ent_result, + }); } diff --git a/src/file_analysis/analyzer/entropy/Plugin.cc b/src/file_analysis/analyzer/entropy/Plugin.cc index f1dd954cba..a4ae3416cd 100644 --- a/src/file_analysis/analyzer/entropy/Plugin.cc +++ b/src/file_analysis/analyzer/entropy/Plugin.cc @@ -5,7 +5,7 @@ #include "Entropy.h" namespace plugin { -namespace Bro_FileEntropy { +namespace Zeek_FileEntropy { class Plugin : public plugin::Plugin { public: @@ -14,7 +14,7 @@ public: AddComponent(new ::file_analysis::Component("ENTROPY", ::file_analysis::Entropy::Instantiate)); plugin::Configuration config; - config.name = "Bro::FileEntropy"; + config.name = "Zeek::FileEntropy"; config.description = "Entropy test file content"; return config; } diff --git a/src/file_analysis/analyzer/extract/CMakeLists.txt b/src/file_analysis/analyzer/extract/CMakeLists.txt index 5f96f4f01b..7df895af38 100644 --- a/src/file_analysis/analyzer/extract/CMakeLists.txt +++ b/src/file_analysis/analyzer/extract/CMakeLists.txt @@ -1,10 +1,10 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro FileExtract) -bro_plugin_cc(Extract.cc Plugin.cc ../../Analyzer.cc) -bro_plugin_bif(events.bif) -bro_plugin_bif(functions.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek FileExtract) +zeek_plugin_cc(Extract.cc Plugin.cc ../../Analyzer.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_bif(functions.bif) +zeek_plugin_end() diff --git a/src/file_analysis/analyzer/extract/Extract.cc b/src/file_analysis/analyzer/extract/Extract.cc index dc05fba367..8761c8493c 100644 --- a/src/file_analysis/analyzer/extract/Extract.cc +++ b/src/file_analysis/analyzer/extract/Extract.cc @@ -1,6 +1,7 @@ // See the file "COPYING" in the main distribution directory for copyright. #include +#include #include "Extract.h" #include "util.h" @@ -90,12 +91,12 @@ bool Extract::DeliverStream(const u_char* data, uint64 len) if ( limit_exceeded && file_extraction_limit ) { File* f = GetFile(); - val_list* vl = new val_list(); - vl->append(f->GetVal()->Ref()); - vl->append(Args()->Ref()); - vl->append(val_mgr->GetCount(limit)); - vl->append(val_mgr->GetCount(len)); - f->FileEvent(file_extraction_limit, vl); + f->FileEvent(file_extraction_limit, { + f->GetVal()->Ref(), + Args()->Ref(), + val_mgr->GetCount(limit), + val_mgr->GetCount(len), + }); // Limit may have been modified by a BIF, re-check it. 
limit_exceeded = check_limit_exceeded(limit, depth, len, &towrite); diff --git a/src/file_analysis/analyzer/extract/Plugin.cc b/src/file_analysis/analyzer/extract/Plugin.cc index f4e234ef11..be8c44eaac 100644 --- a/src/file_analysis/analyzer/extract/Plugin.cc +++ b/src/file_analysis/analyzer/extract/Plugin.cc @@ -5,7 +5,7 @@ #include "Extract.h" namespace plugin { -namespace Bro_FileExtract { +namespace Zeek_FileExtract { class Plugin : public plugin::Plugin { public: @@ -14,7 +14,7 @@ public: AddComponent(new ::file_analysis::Component("EXTRACT", ::file_analysis::Extract::Instantiate)); plugin::Configuration config; - config.name = "Bro::FileExtract"; + config.name = "Zeek::FileExtract"; config.description = "Extract file content"; return config; } diff --git a/src/file_analysis/analyzer/extract/events.bif b/src/file_analysis/analyzer/extract/events.bif index d1dfe0c654..2324294b88 100644 --- a/src/file_analysis/analyzer/extract/events.bif +++ b/src/file_analysis/analyzer/extract/events.bif @@ -1,17 +1,17 @@ ## This event is generated when a file extraction analyzer is about ## to exceed the maximum permitted file size allowed by the -## *extract_limit* field of :bro:see:`Files::AnalyzerArgs`. +## *extract_limit* field of :zeek:see:`Files::AnalyzerArgs`. ## The analyzer is automatically removed from file *f*. ## ## f: The file. ## ## args: Arguments that identify a particular file extraction analyzer. ## This is only provided to be able to pass along to -## :bro:see:`FileExtract::set_limit`. +## :zeek:see:`FileExtract::set_limit`. ## ## limit: The limit, in bytes, the extracted file is about to breach. ## ## len: The length of the file chunk about to be written. ## -## .. bro:see:: Files::add_analyzer Files::ANALYZER_EXTRACT +## .. zeek:see:: Files::add_analyzer Files::ANALYZER_EXTRACT event file_extraction_limit%(f: fa_file, args: Files::AnalyzerArgs, limit: count, len: count%); diff --git a/src/file_analysis/analyzer/extract/functions.bif b/src/file_analysis/analyzer/extract/functions.bif index 18e9dde171..c91f0590bd 100644 --- a/src/file_analysis/analyzer/extract/functions.bif +++ b/src/file_analysis/analyzer/extract/functions.bif @@ -6,7 +6,7 @@ module FileExtract; #include "file_analysis/Manager.h" %%} -## :bro:see:`FileExtract::set_limit`. +## :zeek:see:`FileExtract::set_limit`. function FileExtract::__set_limit%(file_id: string, args: any, n: count%): bool %{ using BifType::Record::Files::AnalyzerArgs; diff --git a/src/file_analysis/analyzer/hash/CMakeLists.txt b/src/file_analysis/analyzer/hash/CMakeLists.txt index 0e3143ee05..46d557fd4b 100644 --- a/src/file_analysis/analyzer/hash/CMakeLists.txt +++ b/src/file_analysis/analyzer/hash/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro FileHash) -bro_plugin_cc(Hash.cc Plugin.cc ../../Analyzer.cc) -bro_plugin_bif(events.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek FileHash) +zeek_plugin_cc(Hash.cc Plugin.cc ../../Analyzer.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_end() diff --git a/src/file_analysis/analyzer/hash/Hash.cc b/src/file_analysis/analyzer/hash/Hash.cc index 9829934301..7b2ecb5799 100644 --- a/src/file_analysis/analyzer/hash/Hash.cc +++ b/src/file_analysis/analyzer/hash/Hash.cc @@ -48,10 +48,12 @@ void Hash::Finalize() if ( ! hash->IsValid() || ! 
fed ) return; - val_list* vl = new val_list(); - vl->append(GetFile()->GetVal()->Ref()); - vl->append(new StringVal(kind)); - vl->append(hash->Get()); + if ( ! file_hash ) + return; - mgr.QueueEvent(file_hash, vl); + mgr.QueueEventFast(file_hash, { + GetFile()->GetVal()->Ref(), + new StringVal(kind), + hash->Get(), + }); } diff --git a/src/file_analysis/analyzer/hash/Plugin.cc b/src/file_analysis/analyzer/hash/Plugin.cc index 8bb0f0fab3..774e51511e 100644 --- a/src/file_analysis/analyzer/hash/Plugin.cc +++ b/src/file_analysis/analyzer/hash/Plugin.cc @@ -5,7 +5,7 @@ #include "Hash.h" namespace plugin { -namespace Bro_FileHash { +namespace Zeek_FileHash { class Plugin : public plugin::Plugin { public: @@ -16,7 +16,7 @@ public: AddComponent(new ::file_analysis::Component("SHA256", ::file_analysis::SHA256::Instantiate)); plugin::Configuration config; - config.name = "Bro::FileHash"; + config.name = "Zeek::FileHash"; config.description = "Hash file content"; return config; } diff --git a/src/file_analysis/analyzer/hash/events.bif b/src/file_analysis/analyzer/hash/events.bif index e03cbf359a..814c4741e6 100644 --- a/src/file_analysis/analyzer/hash/events.bif +++ b/src/file_analysis/analyzer/hash/events.bif @@ -7,6 +7,6 @@ ## ## hash: The result of the hashing. ## -## .. bro:see:: Files::add_analyzer Files::ANALYZER_MD5 +## .. zeek:see:: Files::add_analyzer Files::ANALYZER_MD5 ## Files::ANALYZER_SHA1 Files::ANALYZER_SHA256 event file_hash%(f: fa_file, kind: string, hash: string%); diff --git a/src/file_analysis/analyzer/pe/CMakeLists.txt b/src/file_analysis/analyzer/pe/CMakeLists.txt index 7fc89bfd51..c6439ce54d 100644 --- a/src/file_analysis/analyzer/pe/CMakeLists.txt +++ b/src/file_analysis/analyzer/pe/CMakeLists.txt @@ -1,10 +1,17 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro PE) -bro_plugin_cc(PE.cc Plugin.cc) -bro_plugin_bif(events.bif) -bro_plugin_pac(pe.pac pe-file.pac pe-analyzer.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek PE) +zeek_plugin_cc(PE.cc Plugin.cc) +zeek_plugin_bif(events.bif) +zeek_plugin_pac( + pe.pac + pe-analyzer.pac + pe-file-headers.pac + pe-file-idata.pac + pe-file.pac + pe-file-types.pac +) +zeek_plugin_end() diff --git a/src/file_analysis/analyzer/pe/PE.cc b/src/file_analysis/analyzer/pe/PE.cc index 9db13291b0..070aff32dd 100644 --- a/src/file_analysis/analyzer/pe/PE.cc +++ b/src/file_analysis/analyzer/pe/PE.cc @@ -20,7 +20,8 @@ PE::~PE() bool PE::DeliverStream(const u_char* data, uint64 len) { if ( conn->is_done() ) - return true; + return false; + try { interp->NewData(data, data + len); @@ -30,7 +31,7 @@ bool PE::DeliverStream(const u_char* data, uint64 len) return false; } - return true; + return ! 
conn->is_done(); } bool PE::EndOfFile() diff --git a/src/file_analysis/analyzer/pe/Plugin.cc b/src/file_analysis/analyzer/pe/Plugin.cc index 8601dedb67..08a255785e 100644 --- a/src/file_analysis/analyzer/pe/Plugin.cc +++ b/src/file_analysis/analyzer/pe/Plugin.cc @@ -5,7 +5,7 @@ #include "PE.h" namespace plugin { -namespace Bro_PE { +namespace Zeek_PE { class Plugin : public plugin::Plugin { public: @@ -14,7 +14,7 @@ public: AddComponent(new ::file_analysis::Component("PE", ::file_analysis::PE::Instantiate)); plugin::Configuration config; - config.name = "Bro::PE"; + config.name = "Zeek::PE"; config.description = "Portable Executable analyzer"; return config; } diff --git a/src/file_analysis/analyzer/pe/events.bif b/src/file_analysis/analyzer/pe/events.bif index c804937c49..1d25936a65 100644 --- a/src/file_analysis/analyzer/pe/events.bif +++ b/src/file_analysis/analyzer/pe/events.bif @@ -6,7 +6,7 @@ ## ## h: The parsed DOS header information. ## -## .. bro:see:: pe_dos_code pe_file_header pe_optional_header pe_section_header +## .. zeek:see:: pe_dos_code pe_file_header pe_optional_header pe_section_header event pe_dos_header%(f: fa_file, h: PE::DOSHeader%); ## A :abbr:`PE (Portable Executable)` file DOS stub was parsed. @@ -17,7 +17,7 @@ event pe_dos_header%(f: fa_file, h: PE::DOSHeader%); ## ## code: The DOS stub ## -## .. bro:see:: pe_dos_header pe_file_header pe_optional_header pe_section_header +## .. zeek:see:: pe_dos_header pe_file_header pe_optional_header pe_section_header event pe_dos_code%(f: fa_file, code: string%); ## A :abbr:`PE (Portable Executable)` file file header was parsed. @@ -29,7 +29,7 @@ event pe_dos_code%(f: fa_file, code: string%); ## ## h: The parsed file header information. ## -## .. bro:see:: pe_dos_header pe_dos_code pe_optional_header pe_section_header +## .. zeek:see:: pe_dos_header pe_dos_code pe_optional_header pe_section_header event pe_file_header%(f: fa_file, h: PE::FileHeader%); ## A :abbr:`PE (Portable Executable)` file optional header was parsed. @@ -42,7 +42,7 @@ event pe_file_header%(f: fa_file, h: PE::FileHeader%); ## ## h: The parsed optional header information. ## -## .. bro:see:: pe_dos_header pe_dos_code pe_file_header pe_section_header +## .. zeek:see:: pe_dos_header pe_dos_code pe_file_header pe_section_header event pe_optional_header%(f: fa_file, h: PE::OptionalHeader%); ## A :abbr:`PE (Portable Executable)` file section header was parsed. @@ -53,5 +53,5 @@ event pe_optional_header%(f: fa_file, h: PE::OptionalHeader%); ## ## h: The parsed section header information. ## -## .. bro:see:: pe_dos_header pe_dos_code pe_file_header pe_optional_header +## .. zeek:see:: pe_dos_header pe_dos_code pe_file_header pe_optional_header event pe_section_header%(f: fa_file, h: PE::SectionHeader%); diff --git a/src/file_analysis/analyzer/pe/pe-file-headers.pac b/src/file_analysis/analyzer/pe/pe-file-headers.pac index f12d76e035..9eee6e03da 100644 --- a/src/file_analysis/analyzer/pe/pe-file-headers.pac +++ b/src/file_analysis/analyzer/pe/pe-file-headers.pac @@ -1,3 +1,8 @@ +# Do not try parsing if the DOS stub program seems larger than 4mb. +# DOS stub programs are not expected to be much more than on the order of +# hundreds of bytes even though the format allows a full 32-bit range. +let MAX_DOS_CODE_LENGTH = 4 * 1024 * 1024; + type Headers = record { dos_header : DOS_Header; dos_code : DOS_Code(dos_code_len); @@ -6,6 +11,9 @@ type Headers = record { } &let { dos_code_len: uint32 = dos_header.AddressOfNewExeHeader > 64 ? 
dos_header.AddressOfNewExeHeader - 64 : 0; length: uint64 = 64 + dos_code_len + pe_header.length + section_headers.length; + + # Do not care about parsing rest of the file so mark done now ... + proc: bool = $context.connection.mark_done(); }; # The DOS header gives us the offset of the NT headers @@ -28,7 +36,7 @@ type DOS_Header = record { OEMid : uint16; OEMinfo : uint16; Reserved2 : uint16[10]; - AddressOfNewExeHeader : uint32; + AddressOfNewExeHeader : uint32 &enforce(AddressOfNewExeHeader >= 64 && (AddressOfNewExeHeader - 64) < MAX_DOS_CODE_LENGTH); } &length=64; type DOS_Code(len: uint32) = record { diff --git a/src/file_analysis/analyzer/unified2/CMakeLists.txt b/src/file_analysis/analyzer/unified2/CMakeLists.txt index 4a9b11ef92..bd1537c8ef 100644 --- a/src/file_analysis/analyzer/unified2/CMakeLists.txt +++ b/src/file_analysis/analyzer/unified2/CMakeLists.txt @@ -1,11 +1,11 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro Unified2) -bro_plugin_cc(Unified2.cc Plugin.cc ../../Analyzer.cc) -bro_plugin_bif(events.bif types.bif) -bro_plugin_pac(unified2.pac unified2-file.pac unified2-analyzer.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek Unified2) +zeek_plugin_cc(Unified2.cc Plugin.cc ../../Analyzer.cc) +zeek_plugin_bif(events.bif types.bif) +zeek_plugin_pac(unified2.pac unified2-file.pac unified2-analyzer.pac) +zeek_plugin_end() diff --git a/src/file_analysis/analyzer/unified2/Plugin.cc b/src/file_analysis/analyzer/unified2/Plugin.cc index a0f885b7cb..2fef6e5dfa 100644 --- a/src/file_analysis/analyzer/unified2/Plugin.cc +++ b/src/file_analysis/analyzer/unified2/Plugin.cc @@ -7,7 +7,7 @@ #include "Unified2.h" namespace plugin { -namespace Bro_Unified2 { +namespace Zeek_Unified2 { class Plugin : public plugin::Plugin { public: @@ -16,7 +16,7 @@ public: AddComponent(new ::file_analysis::Component("UNIFIED2", ::file_analysis::Unified2::Instantiate)); plugin::Configuration config; - config.name = "Bro::Unified2"; + config.name = "Zeek::Unified2"; config.description = "Analyze Unified2 alert files."; return config; } diff --git a/src/file_analysis/analyzer/unified2/unified2-analyzer.pac b/src/file_analysis/analyzer/unified2/unified2-analyzer.pac index 00229184a2..a4a7da5081 100644 --- a/src/file_analysis/analyzer/unified2/unified2-analyzer.pac +++ b/src/file_analysis/analyzer/unified2/unified2-analyzer.pac @@ -81,10 +81,11 @@ refine flow Flow += { ids_event->Assign(11, to_port(${ev.dst_p}, ${ev.protocol})); ids_event->Assign(17, val_mgr->GetCount(${ev.packet_action})); - val_list* vl = new val_list(); - vl->append(connection()->bro_analyzer()->GetFile()->GetVal()->Ref()); - vl->append(ids_event); - mgr.QueueEvent(::unified2_event, vl, SOURCE_LOCAL); + mgr.QueueEventFast(::unified2_event, { + connection()->bro_analyzer()->GetFile()->GetVal()->Ref(), + ids_event, + }, + SOURCE_LOCAL); } return true; %} @@ -112,10 +113,11 @@ refine flow Flow += { ids_event->Assign(15, val_mgr->GetCount(${ev.mpls_label})); ids_event->Assign(16, val_mgr->GetCount(${ev.vlan_id})); - val_list* vl = new val_list(); - vl->append(connection()->bro_analyzer()->GetFile()->GetVal()->Ref()); - vl->append(ids_event); - mgr.QueueEvent(::unified2_event, vl, SOURCE_LOCAL); + mgr.QueueEventFast(::unified2_event, { + connection()->bro_analyzer()->GetFile()->GetVal()->Ref(), + ids_event, + }, + SOURCE_LOCAL); } return true; @@ -133,10 +135,11 @@ refine flow Flow += { packet->Assign(4, val_mgr->GetCount(${pkt.link_type})); 
packet->Assign(5, bytestring_to_val(${pkt.packet_data})); - val_list* vl = new val_list(); - vl->append(connection()->bro_analyzer()->GetFile()->GetVal()->Ref()); - vl->append(packet); - mgr.QueueEvent(::unified2_packet, vl, SOURCE_LOCAL); + mgr.QueueEventFast(::unified2_packet, { + connection()->bro_analyzer()->GetFile()->GetVal()->Ref(), + packet, + }, + SOURCE_LOCAL); } return true; diff --git a/src/file_analysis/analyzer/x509/CMakeLists.txt b/src/file_analysis/analyzer/x509/CMakeLists.txt index a4c5767e56..d8ef11fe17 100644 --- a/src/file_analysis/analyzer/x509/CMakeLists.txt +++ b/src/file_analysis/analyzer/x509/CMakeLists.txt @@ -1,11 +1,11 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro X509) -bro_plugin_cc(X509Common.cc X509.cc OCSP.cc Plugin.cc) -bro_plugin_bif(events.bif types.bif functions.bif ocsp_events.bif) -bro_plugin_pac(x509-extension.pac x509-signed_certificate_timestamp.pac) -bro_plugin_end() +zeek_plugin_begin(Zeek X509) +zeek_plugin_cc(X509Common.cc X509.cc OCSP.cc Plugin.cc) +zeek_plugin_bif(events.bif types.bif functions.bif ocsp_events.bif) +zeek_plugin_pac(x509-extension.pac x509-signed_certificate_timestamp.pac) +zeek_plugin_end() diff --git a/src/file_analysis/analyzer/x509/OCSP.cc b/src/file_analysis/analyzer/x509/OCSP.cc index c49481c23a..6833d5d8de 100644 --- a/src/file_analysis/analyzer/x509/OCSP.cc +++ b/src/file_analysis/analyzer/x509/OCSP.cc @@ -28,8 +28,6 @@ X509* helper_sk_X509_value(const STACK_OF(X509)* certs, int i) using namespace file_analysis; -IMPLEMENT_SERIAL(OCSP_RESPVal, SER_OCSP_RESP_VAL); - #define OCSP_STRING_BUF_SIZE 2048 static Val* get_ocsp_type(RecordVal* args, const char* name) @@ -177,9 +175,8 @@ bool file_analysis::OCSP::EndOfFile() return false; } - OCSP_RESPVal* resp_val = new OCSP_RESPVal(resp); // resp_val takes ownership - ParseResponse(resp_val); - Unref(resp_val); + ParseResponse(resp); + OCSP_RESPONSE_free(resp); } return true; @@ -417,10 +414,6 @@ void file_analysis::OCSP::ParseRequest(OCSP_REQUEST* req) char buf[OCSP_STRING_BUF_SIZE]; // we need a buffer for some of the openssl functions memset(buf, 0, sizeof(buf)); - // build up our response as we go along... - val_list* vl = new val_list(); - vl->append(GetFile()->GetVal()->Ref()); - uint64 version = 0; #if ( OPENSSL_VERSION_NUMBER < 0x10100000L ) || defined(LIBRESSL_VERSION_NUMBER) @@ -431,31 +424,32 @@ void file_analysis::OCSP::ParseRequest(OCSP_REQUEST* req) // TODO: try to parse out general name ? 
#endif - vl->append(val_mgr->GetCount(version)); + if ( ocsp_request ) + mgr.QueueEventFast(ocsp_request, { + GetFile()->GetVal()->Ref(), + val_mgr->GetCount(version), + }); BIO *bio = BIO_new(BIO_s_mem()); - mgr.QueueEvent(ocsp_request, vl); - int req_count = OCSP_request_onereq_count(req); for ( int i=0; iappend(GetFile()->GetVal()->Ref()); + val_list rvl(5); + rvl.append(GetFile()->GetVal()->Ref()); OCSP_ONEREQ *one_req = OCSP_request_onereq_get0(req, i); OCSP_CERTID *cert_id = OCSP_onereq_get0_id(one_req); - ocsp_add_cert_id(cert_id, rvl, bio); - mgr.QueueEvent(ocsp_request_certificate, rvl); + ocsp_add_cert_id(cert_id, &rvl, bio); + mgr.QueueEvent(ocsp_request_certificate, std::move(rvl)); } BIO_free(bio); } -void file_analysis::OCSP::ParseResponse(OCSP_RESPVal *resp_val) +void file_analysis::OCSP::ParseResponse(OCSP_RESPONSE *resp) { - OCSP_RESPONSE *resp = resp_val->GetResp(); //OCSP_RESPBYTES *resp_bytes = resp->responseBytes; OCSP_BASICRESP *basic_resp = nullptr; OCSP_RESPDATA *resp_data = nullptr; @@ -470,14 +464,14 @@ void file_analysis::OCSP::ParseResponse(OCSP_RESPVal *resp_val) char buf[OCSP_STRING_BUF_SIZE]; memset(buf, 0, sizeof(buf)); - val_list* vl = new val_list(); - vl->append(GetFile()->GetVal()->Ref()); - const char *status_str = OCSP_response_status_str(OCSP_response_status(resp)); StringVal* status_val = new StringVal(strlen(status_str), status_str); - vl->append(status_val->Ref()); - mgr.QueueEvent(ocsp_response_status, vl); - vl = nullptr; + + if ( ocsp_response_status ) + mgr.QueueEventFast(ocsp_response_status, { + GetFile()->GetVal()->Ref(), + status_val->Ref(), + }); //if (!resp_bytes) // { @@ -490,39 +484,45 @@ void file_analysis::OCSP::ParseResponse(OCSP_RESPVal *resp_val) //int len = BIO_read(bio, buf, sizeof(buf)); //BIO_reset(bio); + val_list vl(8); + // get the basic response basic_resp = OCSP_response_get1_basic(resp); if ( !basic_resp ) + { + Unref(status_val); goto clean_up; + } #if ( OPENSSL_VERSION_NUMBER < 0x10100000L ) || defined(LIBRESSL_VERSION_NUMBER) resp_data = basic_resp->tbsResponseData; if ( !resp_data ) + { + Unref(status_val); goto clean_up; + } #endif - vl = new val_list(); - vl->append(GetFile()->GetVal()->Ref()); - vl->append(resp_val->Ref()); - vl->append(status_val); + vl.append(GetFile()->GetVal()->Ref()); + vl.append(status_val); #if ( OPENSSL_VERSION_NUMBER < 0x10100000L ) || defined(LIBRESSL_VERSION_NUMBER) - vl->append(val_mgr->GetCount((uint64)ASN1_INTEGER_get(resp_data->version))); + vl.append(val_mgr->GetCount((uint64)ASN1_INTEGER_get(resp_data->version))); #else - vl->append(parse_basic_resp_data_version(basic_resp)); + vl.append(parse_basic_resp_data_version(basic_resp)); #endif // responderID if ( OCSP_RESPID_bio(basic_resp, bio) ) { len = BIO_read(bio, buf, sizeof(buf)); - vl->append(new StringVal(len, buf)); + vl.append(new StringVal(len, buf)); BIO_reset(bio); } else { reporter->Weird("OpenSSL failed to get OCSP responder id"); - vl->append(val_mgr->GetEmptyString()); + vl.append(val_mgr->GetEmptyString()); } // producedAt @@ -532,7 +532,7 @@ void file_analysis::OCSP::ParseResponse(OCSP_RESPVal *resp_val) produced_at = OCSP_resp_get0_produced_at(basic_resp); #endif - vl->append(new Val(GetTimeFromAsn1(produced_at, GetFile(), reporter), TYPE_TIME)); + vl.append(new Val(GetTimeFromAsn1(produced_at, GetFile(), reporter), TYPE_TIME)); // responses @@ -545,8 +545,8 @@ void file_analysis::OCSP::ParseResponse(OCSP_RESPVal *resp_val) if ( !single_resp ) continue; - val_list* rvl = new val_list(); - 
rvl->append(GetFile()->GetVal()->Ref()); + val_list rvl(10); + rvl.append(GetFile()->GetVal()->Ref()); // cert id const OCSP_CERTID* cert_id = nullptr; @@ -557,7 +557,7 @@ void file_analysis::OCSP::ParseResponse(OCSP_RESPVal *resp_val) cert_id = OCSP_SINGLERESP_get0_id(single_resp); #endif - ocsp_add_cert_id(cert_id, rvl, bio); + ocsp_add_cert_id(cert_id, &rvl, bio); BIO_reset(bio); // certStatus @@ -574,38 +574,38 @@ void file_analysis::OCSP::ParseResponse(OCSP_RESPVal *resp_val) reporter->Weird("OpenSSL failed to find status of OCSP response"); const char* cert_status_str = OCSP_cert_status_str(status); - rvl->append(new StringVal(strlen(cert_status_str), cert_status_str)); + rvl.append(new StringVal(strlen(cert_status_str), cert_status_str)); // revocation time and reason if revoked if ( status == V_OCSP_CERTSTATUS_REVOKED ) { - rvl->append(new Val(GetTimeFromAsn1(revoke_time, GetFile(), reporter), TYPE_TIME)); + rvl.append(new Val(GetTimeFromAsn1(revoke_time, GetFile(), reporter), TYPE_TIME)); if ( reason != OCSP_REVOKED_STATUS_NOSTATUS ) { const char* revoke_reason = OCSP_crl_reason_str(reason); - rvl->append(new StringVal(strlen(revoke_reason), revoke_reason)); + rvl.append(new StringVal(strlen(revoke_reason), revoke_reason)); } else - rvl->append(new StringVal(0, "")); + rvl.append(new StringVal(0, "")); } else { - rvl->append(new Val(0.0, TYPE_TIME)); - rvl->append(new StringVal(0, "")); + rvl.append(new Val(0.0, TYPE_TIME)); + rvl.append(new StringVal(0, "")); } if ( this_update ) - rvl->append(new Val(GetTimeFromAsn1(this_update, GetFile(), reporter), TYPE_TIME)); + rvl.append(new Val(GetTimeFromAsn1(this_update, GetFile(), reporter), TYPE_TIME)); else - rvl->append(new Val(0.0, TYPE_TIME)); + rvl.append(new Val(0.0, TYPE_TIME)); if ( next_update ) - rvl->append(new Val(GetTimeFromAsn1(next_update, GetFile(), reporter), TYPE_TIME)); + rvl.append(new Val(GetTimeFromAsn1(next_update, GetFile(), reporter), TYPE_TIME)); else - rvl->append(new Val(0.0, TYPE_TIME)); + rvl.append(new Val(0.0, TYPE_TIME)); - mgr.QueueEvent(ocsp_response_certificate, rvl); + mgr.QueueEvent(ocsp_response_certificate, std::move(rvl)); num_ext = OCSP_SINGLERESP_get_ext_count(single_resp); for ( int k = 0; k < num_ext; ++k ) @@ -621,10 +621,10 @@ void file_analysis::OCSP::ParseResponse(OCSP_RESPVal *resp_val) #if ( OPENSSL_VERSION_NUMBER < 0x10100000L ) || defined(LIBRESSL_VERSION_NUMBER) i2a_ASN1_OBJECT(bio, basic_resp->signatureAlgorithm->algorithm); len = BIO_read(bio, buf, sizeof(buf)); - vl->append(new StringVal(len, buf)); + vl.append(new StringVal(len, buf)); BIO_reset(bio); #else - vl->append(parse_basic_resp_sig_alg(basic_resp, bio, buf, sizeof(buf))); + vl.append(parse_basic_resp_sig_alg(basic_resp, bio, buf, sizeof(buf))); #endif //i2a_ASN1_OBJECT(bio, basic_resp->signature); @@ -633,7 +633,7 @@ void file_analysis::OCSP::ParseResponse(OCSP_RESPVal *resp_val) //BIO_reset(bio); certs_vector = new VectorVal(internal_type("x509_opaque_vector")->AsVectorType()); - vl->append(certs_vector); + vl.append(certs_vector); #if ( OPENSSL_VERSION_NUMBER < 0x10100000L ) || defined(LIBRESSL_VERSION_NUMBER) certs = basic_resp->certs; @@ -654,7 +654,8 @@ void file_analysis::OCSP::ParseResponse(OCSP_RESPVal *resp_val) reporter->Weird("OpenSSL returned null certificate"); } } - mgr.QueueEvent(ocsp_response_bytes, vl); + + mgr.QueueEvent(ocsp_response_bytes, std::move(vl)); // ok, now that we are done with the actual certificate - let's parse extensions :) num_ext = OCSP_BASICRESP_get_ext_count(basic_resp); @@ -686,52 
+687,3 @@ void file_analysis::OCSP::ParseExtensionsSpecific(X509_EXTENSION* ex, bool globa ParseSignedCertificateTimestamps(ex); } -OCSP_RESPVal::OCSP_RESPVal(OCSP_RESPONSE* arg_ocsp_resp) : OpaqueVal(ocsp_resp_opaque_type) - { - ocsp_resp = arg_ocsp_resp; - } - -OCSP_RESPVal::OCSP_RESPVal() : OpaqueVal(ocsp_resp_opaque_type) - { - ocsp_resp = nullptr; - } - -OCSP_RESPVal::~OCSP_RESPVal() - { - if (ocsp_resp) - OCSP_RESPONSE_free(ocsp_resp); - } - -OCSP_RESPONSE* OCSP_RESPVal::GetResp() const - { - return ocsp_resp; - } - -bool OCSP_RESPVal::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_OCSP_RESP_VAL, OpaqueVal); - unsigned char *buf = nullptr; - int length = i2d_OCSP_RESPONSE(ocsp_resp, &buf); - if ( length < 0 ) - return false; - bool res = SERIALIZE_STR(reinterpret_cast(buf), length); - OPENSSL_free(buf); - return res; - } - -bool OCSP_RESPVal::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(OpaqueVal) - - int length; - unsigned char *ocsp_resp_buf, *opensslbuf; - - if ( ! UNSERIALIZE_STR(reinterpret_cast(&ocsp_resp_buf), &length) ) - return false; - opensslbuf = ocsp_resp_buf; // OpenSSL likes to shift pointers around. really. - ocsp_resp = d2i_OCSP_RESPONSE(nullptr, const_cast(&opensslbuf), length); - delete [] ocsp_resp_buf; - if ( ! ocsp_resp ) - return false; - return true; - } diff --git a/src/file_analysis/analyzer/x509/OCSP.h b/src/file_analysis/analyzer/x509/OCSP.h index eb6499794c..c2cc0f6e5d 100644 --- a/src/file_analysis/analyzer/x509/OCSP.h +++ b/src/file_analysis/analyzer/x509/OCSP.h @@ -5,7 +5,6 @@ #include -#include "Val.h" #include "../File.h" #include "Analyzer.h" #include "X509Common.h" @@ -14,8 +13,6 @@ namespace file_analysis { -class OCSP_RESPVal; - class OCSP : public file_analysis::X509Common { public: bool DeliverStream(const u_char* data, uint64 len) override; @@ -29,7 +26,7 @@ protected: OCSP(RecordVal* args, File* file, bool request); private: - void ParseResponse(OCSP_RESPVal*); + void ParseResponse(OCSP_RESPONSE*); void ParseRequest(OCSP_REQUEST*); void ParseExtensionsSpecific(X509_EXTENSION* ex, bool, ASN1_OBJECT*, const char*) override; @@ -37,18 +34,6 @@ private: bool request = false; // true if ocsp request, false if reply }; -class OCSP_RESPVal: public OpaqueVal { -public: - explicit OCSP_RESPVal(OCSP_RESPONSE *); - ~OCSP_RESPVal() override; - OCSP_RESPONSE *GetResp() const; -protected: - OCSP_RESPVal(); -private: - OCSP_RESPONSE *ocsp_resp; - DECLARE_SERIAL(OCSP_RESPVal); -}; - } #endif diff --git a/src/file_analysis/analyzer/x509/Plugin.cc b/src/file_analysis/analyzer/x509/Plugin.cc index 31dbe346a8..9de6648893 100644 --- a/src/file_analysis/analyzer/x509/Plugin.cc +++ b/src/file_analysis/analyzer/x509/Plugin.cc @@ -7,7 +7,7 @@ #include "OCSP.h" namespace plugin { -namespace Bro_X509 { +namespace Zeek_X509 { class Plugin : public plugin::Plugin { public: @@ -18,7 +18,7 @@ public: AddComponent(new ::file_analysis::Component("OCSP_REPLY", ::file_analysis::OCSP::InstantiateReply)); plugin::Configuration config; - config.name = "Bro::X509"; + config.name = "Zeek::X509"; config.description = "X509 and OCSP analyzer"; return config; } diff --git a/src/file_analysis/analyzer/x509/X509.cc b/src/file_analysis/analyzer/x509/X509.cc index 38422897db..33f2cb4d07 100644 --- a/src/file_analysis/analyzer/x509/X509.cc +++ b/src/file_analysis/analyzer/x509/X509.cc @@ -10,6 +10,8 @@ #include "file_analysis/Manager.h" +#include + #include #include #include @@ -18,8 +20,6 @@ using namespace file_analysis; -IMPLEMENT_SERIAL(X509Val, SER_X509_VAL); - 
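
Reviewer note: the file-analysis hunks above (and several below) all apply the same event-queuing refactor, so a condensed before/after sketch may help while scanning the rest of this patch. The names my_event, file_val and n are placeholders for illustration only; nothing in this sketch is introduced by the change itself.

	// Old pattern: heap-allocate the argument list, append values one by
	// one, and let QueueEvent() take ownership.
	val_list* vl = new val_list();
	vl->append(file_val->Ref());
	vl->append(val_mgr->GetCount(n));
	mgr.QueueEvent(my_event, vl);

	// New pattern: check for a handler up front, then pass a
	// brace-initialized val_list by value to QueueEventFast(), which
	// relies on the caller having done that check (hence the guards
	// added in Entropy.cc and Hash.cc).
	if ( my_event )
		mgr.QueueEventFast(my_event, {
			file_val->Ref(),
			val_mgr->GetCount(n),
			});
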
file_analysis::X509::X509(RecordVal* args, file_analysis::File* file) : file_analysis::X509Common::X509Common(file_mgr->GetComponentTag("X509"), args, file) { @@ -57,11 +57,11 @@ bool file_analysis::X509::EndOfFile() RecordVal* cert_record = ParseCertificate(cert_val, GetFile()); // and send the record on to scriptland - val_list* vl = new val_list(); - vl->append(GetFile()->GetVal()->Ref()); - vl->append(cert_val->Ref()); - vl->append(cert_record->Ref()); // we Ref it here, because we want to keep a copy around for now... - mgr.QueueEvent(x509_certificate, vl); + mgr.QueueEvent(x509_certificate, { + GetFile()->GetVal()->Ref(), + cert_val->Ref(), + cert_record->Ref(), // we Ref it here, because we want to keep a copy around for now... + }); // after parsing the certificate - parse the extensions... @@ -221,17 +221,20 @@ void file_analysis::X509::ParseBasicConstraints(X509_EXTENSION* ex) if ( constr ) { - RecordVal* pBasicConstraint = new RecordVal(BifType::Record::X509::BasicConstraints); - pBasicConstraint->Assign(0, val_mgr->GetBool(constr->ca ? 1 : 0)); + if ( x509_ext_basic_constraints ) + { + RecordVal* pBasicConstraint = new RecordVal(BifType::Record::X509::BasicConstraints); + pBasicConstraint->Assign(0, val_mgr->GetBool(constr->ca ? 1 : 0)); - if ( constr->pathlen ) - pBasicConstraint->Assign(1, val_mgr->GetCount((int32_t) ASN1_INTEGER_get(constr->pathlen))); + if ( constr->pathlen ) + pBasicConstraint->Assign(1, val_mgr->GetCount((int32_t) ASN1_INTEGER_get(constr->pathlen))); - val_list* vl = new val_list(); - vl->append(GetFile()->GetVal()->Ref()); - vl->append(pBasicConstraint); + mgr.QueueEventFast(x509_ext_basic_constraints, { + GetFile()->GetVal()->Ref(), + pBasicConstraint, + }); + } - mgr.QueueEvent(x509_ext_basic_constraints, vl); BASIC_CONSTRAINTS_free(constr); } @@ -367,10 +370,10 @@ void file_analysis::X509::ParseSAN(X509_EXTENSION* ext) sanExt->Assign(4, val_mgr->GetBool(otherfields)); - val_list* vl = new val_list(); - vl->append(GetFile()->GetVal()->Ref()); - vl->append(sanExt); - mgr.QueueEvent(x509_ext_subject_alternative_name, vl); + mgr.QueueEvent(x509_ext_subject_alternative_name, { + GetFile()->GetVal()->Ref(), + sanExt, + }); GENERAL_NAMES_free(altname); } @@ -474,44 +477,43 @@ X509Val::~X509Val() X509_free(certificate); } +Val* X509Val::DoClone(CloneState* state) + { + auto copy = new X509Val(); + if ( certificate ) + copy->certificate = X509_dup(certificate); + + return state->NewClone(this, copy); + } + ::X509* X509Val::GetCertificate() const { return certificate; } -bool X509Val::DoSerialize(SerialInfo* info) const +IMPLEMENT_OPAQUE_VALUE(X509Val) + +broker::expected X509Val::DoSerialize() const { - DO_SERIALIZE(SER_X509_VAL, OpaqueVal); - unsigned char *buf = NULL; - int length = i2d_X509(certificate, &buf); if ( length < 0 ) - return false; - - bool res = SERIALIZE_STR(reinterpret_cast(buf), length); + return broker::ec::invalid_data; + auto d = std::string(reinterpret_cast(buf), length); OPENSSL_free(buf); - return res; + + return {std::move(d)}; } -bool X509Val::DoUnserialize(UnserialInfo* info) +bool X509Val::DoUnserialize(const broker::data& data) { - DO_UNSERIALIZE(OpaqueVal) - - int length; - unsigned char *certbuf, *opensslbuf; - - if ( ! UNSERIALIZE_STR(reinterpret_cast(&certbuf), &length) ) + auto s = caf::get_if(&data); + if ( ! s ) return false; - opensslbuf = certbuf; // OpenSSL likes to shift pointers around. really. 
- certificate = d2i_X509(NULL, const_cast(&opensslbuf), length); - delete[] certbuf; - - if ( !certificate ) - return false; - - return true; + auto opensslbuf = reinterpret_cast(s->data()); + certificate = d2i_X509(NULL, &opensslbuf, s->size()); + return (certificate != nullptr); } diff --git a/src/file_analysis/analyzer/x509/X509.h b/src/file_analysis/analyzer/x509/X509.h index a3dc62e533..f20712cab2 100644 --- a/src/file_analysis/analyzer/x509/X509.h +++ b/src/file_analysis/analyzer/x509/X509.h @@ -5,7 +5,7 @@ #include -#include "Val.h" +#include "OpaqueVal.h" #include "X509Common.h" #if ( OPENSSL_VERSION_NUMBER < 0x10002000L ) || defined(LIBRESSL_VERSION_NUMBER) @@ -123,6 +123,15 @@ public: */ explicit X509Val(::X509* certificate); + /** + * Clone an X509Val + * + * @param state certifies the state of the clone operation (duplicate tracking) + * + * @return A cloned X509Val. + */ + Val* DoClone(CloneState* state) override; + /** * Destructor. */ @@ -142,10 +151,9 @@ protected: */ X509Val(); + DECLARE_OPAQUE_VALUE(X509Val) private: ::X509* certificate; // the wrapped certificate - - DECLARE_SERIAL(X509Val); }; } diff --git a/src/file_analysis/analyzer/x509/X509Common.cc b/src/file_analysis/analyzer/x509/X509Common.cc index b6c16fc1dc..7fb3100e97 100644 --- a/src/file_analysis/analyzer/x509/X509Common.cc +++ b/src/file_analysis/analyzer/x509/X509Common.cc @@ -277,13 +277,18 @@ void file_analysis::X509Common::ParseExtension(X509_EXTENSION* ex, EventHandlerP // parsed. And if we have it, we send the specialized event on top of the // generic event that we just had. I know, that is... kind of not nice, // but I am not sure if there is a better way to do it... - val_list* vl = new val_list(); - vl->append(GetFile()->GetVal()->Ref()); - vl->append(pX509Ext); - if ( h == ocsp_extension ) - vl->append(val_mgr->GetBool(global ? 1 : 0)); - mgr.QueueEvent(h, vl); + if ( h == ocsp_extension ) + mgr.QueueEvent(h, { + GetFile()->GetVal()->Ref(), + pX509Ext, + val_mgr->GetBool(global ? 1 : 0), + }); + else + mgr.QueueEvent(h, { + GetFile()->GetVal()->Ref(), + pX509Ext, + }); // let individual analyzers parse more. ParseExtensionsSpecific(ex, global, ext_asn, oid); diff --git a/src/file_analysis/analyzer/x509/events.bif b/src/file_analysis/analyzer/x509/events.bif index 68afe5340a..fd4f9fadfe 100644 --- a/src/file_analysis/analyzer/x509/events.bif +++ b/src/file_analysis/analyzer/x509/events.bif @@ -11,7 +11,7 @@ ## ## cert: The parsed certificate information. ## -## .. bro:see:: x509_extension x509_ext_basic_constraints +## .. zeek:see:: x509_extension x509_ext_basic_constraints ## x509_ext_subject_alternative_name x509_parse x509_verify ## x509_get_certificate_string x509_ocsp_ext_signed_certificate_timestamp event x509_certificate%(f: fa_file, cert_ref: opaque of x509, cert: X509::Certificate%); @@ -25,7 +25,7 @@ event x509_certificate%(f: fa_file, cert_ref: opaque of x509, cert: X509::Certif ## ## ext: The parsed extension. ## -## .. bro:see:: x509_certificate x509_ext_basic_constraints +## .. zeek:see:: x509_certificate x509_ext_basic_constraints ## x509_ext_subject_alternative_name x509_parse x509_verify ## x509_get_certificate_string x509_ocsp_ext_signed_certificate_timestamp event x509_extension%(f: fa_file, ext: X509::Extension%); @@ -37,7 +37,7 @@ event x509_extension%(f: fa_file, ext: X509::Extension%); ## ## ext: The parsed basic constraints extension. ## -## .. bro:see:: x509_certificate x509_extension +## .. 
zeek:see:: x509_certificate x509_extension ## x509_ext_subject_alternative_name x509_parse x509_verify ## x509_get_certificate_string x509_ocsp_ext_signed_certificate_timestamp event x509_ext_basic_constraints%(f: fa_file, ext: X509::BasicConstraints%); @@ -51,7 +51,7 @@ event x509_ext_basic_constraints%(f: fa_file, ext: X509::BasicConstraints%); ## ## ext: The parsed subject alternative name extension. ## -## .. bro:see:: x509_certificate x509_extension x509_ext_basic_constraints +## .. zeek:see:: x509_certificate x509_extension x509_ext_basic_constraints ## x509_parse x509_verify x509_ocsp_ext_signed_certificate_timestamp ## x509_get_certificate_string event x509_ext_subject_alternative_name%(f: fa_file, ext: X509::SubjectAlternativeName%); @@ -76,7 +76,7 @@ event x509_ext_subject_alternative_name%(f: fa_file, ext: X509::SubjectAlternati ## ## signature: signature part of the digitally_signed struct ## -## .. bro:see:: ssl_extension_signed_certificate_timestamp x509_extension x509_ext_basic_constraints +## .. zeek:see:: ssl_extension_signed_certificate_timestamp x509_extension x509_ext_basic_constraints ## x509_parse x509_verify x509_ext_subject_alternative_name ## x509_get_certificate_string ssl_extension_signed_certificate_timestamp ## sct_verify ocsp_request ocsp_request_certificate ocsp_response_status diff --git a/src/file_analysis/analyzer/x509/functions.bif b/src/file_analysis/analyzer/x509/functions.bif index e4e263fd35..b2521141c4 100644 --- a/src/file_analysis/analyzer/x509/functions.bif +++ b/src/file_analysis/analyzer/x509/functions.bif @@ -13,20 +13,6 @@ // This is the indexed map of X509 certificate stores. static map x509_stores; -// ### NOTE: while d2i_X509 does not take a const u_char** pointer, -// here we assume d2i_X509 does not write to , so it is safe to -// convert data to a non-const pointer. Could some X509 guru verify -// this? - -X509* d2i_X509_(X509** px, const u_char** in, int len) - { -#ifdef OPENSSL_D2I_X509_USES_CONST_CHAR - return d2i_X509(px, in, len); -#else - return d2i_X509(px, (u_char**)in, len); -#endif - } - // construct an error record RecordVal* x509_result_record(uint64_t num, const char* reason, Val* chainVector = 0) { @@ -56,7 +42,7 @@ X509_STORE* x509_get_root_store(TableVal* root_certs) StringVal *sv = root_certs->Lookup(key)->AsStringVal(); assert(sv); const uint8* data = sv->Bytes(); - X509* x = d2i_X509_(NULL, &data, sv->Len()); + X509* x = d2i_X509(NULL, &data, sv->Len()); if ( ! x ) { builtin_error(fmt("Root CA error: %s", ERR_error_string(ERR_get_error(),NULL))); @@ -192,7 +178,7 @@ const EVP_MD* hash_to_evp(int hash) ## ## Returns: A X509::Certificate structure. ## -## .. bro:see:: x509_certificate x509_extension x509_ext_basic_constraints +## .. zeek:see:: x509_certificate x509_extension x509_ext_basic_constraints ## x509_ext_subject_alternative_name x509_verify ## x509_get_certificate_string function x509_parse%(cert: opaque of x509%): X509::Certificate @@ -203,6 +189,19 @@ function x509_parse%(cert: opaque of x509%): X509::Certificate return file_analysis::X509::ParseCertificate(h); %} +## Constructs an opaque of X509 from a der-formatted string. +## +## Note: this function is mostly meant for testing purposes +## +## .. 
zeek:see:: x509_certificate x509_extension x509_ext_basic_constraints +## x509_ext_subject_alternative_name x509_verify +## x509_get_certificate_string x509_parse +function x509_from_der%(der: string%): opaque of x509 + %{ + const u_char* data = der->Bytes(); + return new file_analysis::X509Val(d2i_X509(nullptr, &data, der->Len())); + %} + ## Returns the string form of a certificate. ## ## cert: The X509 certificate opaque handle. @@ -213,7 +212,7 @@ function x509_parse%(cert: opaque of x509%): X509::Certificate ## ## Returns: X509 certificate as a string. ## -## .. bro:see:: x509_certificate x509_extension x509_ext_basic_constraints +## .. zeek:see:: x509_certificate x509_extension x509_ext_basic_constraints ## x509_ext_subject_alternative_name x509_parse x509_verify function x509_get_certificate_string%(cert: opaque of x509, pem: bool &default=F%): string %{ @@ -249,7 +248,7 @@ function x509_get_certificate_string%(cert: opaque of x509, pem: bool &default=F ## Returns: A record of type X509::Result containing the result code of the ## verify operation. ## -## .. bro:see:: x509_certificate x509_extension x509_ext_basic_constraints +## .. zeek:see:: x509_certificate x509_extension x509_ext_basic_constraints ## x509_ext_subject_alternative_name x509_parse ## x509_get_certificate_string x509_verify function x509_ocsp_verify%(certs: x509_opaque_vector, ocsp_reply: string, root_certs: table_string_of_string, verify_time: time &default=network_time()%): X509::Result @@ -536,7 +535,7 @@ x509_ocsp_cleanup: ## verify operation. In case of success also returns the full ## certificate chain. ## -## .. bro:see:: x509_certificate x509_extension x509_ext_basic_constraints +## .. zeek:see:: x509_certificate x509_extension x509_ext_basic_constraints ## x509_ext_subject_alternative_name x509_parse ## x509_get_certificate_string x509_ocsp_verify sct_verify function x509_verify%(certs: x509_opaque_vector, root_certs: table_string_of_string, verify_time: time &default=network_time()%): X509::Result @@ -646,7 +645,7 @@ x509_verify_chainerror: ## ## Returns: T if the validation could be performed succesfully, F otherwhise. ## -## .. bro:see:: ssl_extension_signed_certificate_timestamp +## .. zeek:see:: ssl_extension_signed_certificate_timestamp ## x509_ocsp_ext_signed_certificate_timestamp ## x509_verify function sct_verify%(cert: opaque of x509, logid: string, log_key: string, signature: string, timestamp: count, hash_algorithm: count, issuer_key_hash: string &default=""%): bool @@ -876,7 +875,7 @@ StringVal* x509_entity_hash(file_analysis::X509Val *cert_handle, unsigned int ha ## ## Returns: The hash as a string. ## -## .. bro:see:: x509_issuer_name_hash x509_spki_hash +## .. zeek:see:: x509_issuer_name_hash x509_spki_hash ## x509_verify sct_verify function x509_subject_name_hash%(cert: opaque of x509, hash_alg: count%): string %{ @@ -894,7 +893,7 @@ function x509_subject_name_hash%(cert: opaque of x509, hash_alg: count%): string ## ## Returns: The hash as a string. ## -## .. bro:see:: x509_subject_name_hash x509_spki_hash +## .. zeek:see:: x509_subject_name_hash x509_spki_hash ## x509_verify sct_verify function x509_issuer_name_hash%(cert: opaque of x509, hash_alg: count%): string %{ @@ -912,7 +911,7 @@ function x509_issuer_name_hash%(cert: opaque of x509, hash_alg: count%): string ## ## Returns: The hash as a string. ## -## .. bro:see:: x509_subject_name_hash x509_issuer_name_hash +## .. 
zeek:see:: x509_subject_name_hash x509_issuer_name_hash ## x509_verify sct_verify function x509_spki_hash%(cert: opaque of x509, hash_alg: count%): string %{ diff --git a/src/file_analysis/analyzer/x509/ocsp_events.bif b/src/file_analysis/analyzer/x509/ocsp_events.bif index f49208d238..fe17344490 100644 --- a/src/file_analysis/analyzer/x509/ocsp_events.bif +++ b/src/file_analysis/analyzer/x509/ocsp_events.bif @@ -7,7 +7,7 @@ ## ## req: version: the version of the OCSP request. Typically 0 (Version 1). ## -## .. bro:see:: ocsp_request_certificate ocsp_response_status +## .. zeek:see:: ocsp_request_certificate ocsp_response_status ## ocsp_response_bytes ocsp_response_certificate ocsp_extension ## x509_ocsp_ext_signed_certificate_timestamp event ocsp_request%(f: fa_file, version: count%); @@ -27,7 +27,7 @@ event ocsp_request%(f: fa_file, version: count%); ## ## serialNumber: Serial number of the certificate for which the status is requested. ## -## .. bro:see:: ocsp_request ocsp_response_status +## .. zeek:see:: ocsp_request ocsp_response_status ## ocsp_response_bytes ocsp_response_certificate ocsp_extension ## x509_ocsp_ext_signed_certificate_timestamp event ocsp_request_certificate%(f: fa_file, hashAlgorithm: string, issuerNameHash: string, issuerKeyHash: string, serialNumber: string%); @@ -41,7 +41,7 @@ event ocsp_request_certificate%(f: fa_file, hashAlgorithm: string, issuerNameHas ## ## status: The status of the OCSP response (e.g. succesful, malformedRequest, tryLater). ## -## .. bro:see:: ocsp_request ocsp_request_certificate +## .. zeek:see:: ocsp_request ocsp_request_certificate ## ocsp_response_bytes ocsp_response_certificate ocsp_extension ## x509_ocsp_ext_signed_certificate_timestamp event ocsp_response_status%(f: fa_file, status: string%); @@ -52,9 +52,6 @@ event ocsp_response_status%(f: fa_file, status: string%); ## ## f: The file. ## -## req_ref: An opaque pointer to the underlying OpenSSL data structure of the -## OCSP response. -## ## status: The status of the OCSP response (e.g. succesful, malformedRequest, tryLater). ## ## version: Version of the OCSP response (typically - for version 1). @@ -68,10 +65,10 @@ event ocsp_response_status%(f: fa_file, status: string%); ## certs: Optional list of certificates that are sent with the OCSP response; these typically ## are needed to perform validation of the reply. ## -## .. bro:see:: ocsp_request ocsp_request_certificate ocsp_response_status +## .. zeek:see:: ocsp_request ocsp_request_certificate ocsp_response_status ## ocsp_response_certificate ocsp_extension ## x509_ocsp_ext_signed_certificate_timestamp -event ocsp_response_bytes%(f: fa_file, resp_ref: opaque of ocsp_resp, status: string, version: count, responderId: string, producedAt: time, signatureAlgorithm: string, certs: x509_opaque_vector%); +event ocsp_response_bytes%(f: fa_file, status: string, version: count, responderId: string, producedAt: time, signatureAlgorithm: string, certs: x509_opaque_vector%); ## This event is raised for each SingleResponse contained in an OCSP response. ## See :rfc:`6960` for more details on OCSP. @@ -96,7 +93,7 @@ event ocsp_response_bytes%(f: fa_file, resp_ref: opaque of ocsp_resp, status: st ## ## nextUpdate: Time next response will be ready; 0 if not supploed. ## -## .. bro:see:: ocsp_request ocsp_request_certificate ocsp_response_status +## .. 
zeek:see:: ocsp_request ocsp_request_certificate ocsp_response_status ## ocsp_response_bytes ocsp_extension ## x509_ocsp_ext_signed_certificate_timestamp event ocsp_response_certificate%(f: fa_file, hashAlgorithm: string, issuerNameHash: string, issuerKeyHash: string, serialNumber: string, certStatus: string, revokeTime: time, revokeReason: string, thisUpdate: time, nextUpdate: time%); @@ -111,7 +108,7 @@ event ocsp_response_certificate%(f: fa_file, hashAlgorithm: string, issuerNameHa ## global_resp: T if extension encountered in the global response (in ResponseData), ## F when encountered in a SingleResponse. ## -## .. bro:see:: ocsp_request ocsp_request_certificate ocsp_response_status +## .. zeek:see:: ocsp_request ocsp_request_certificate ocsp_response_status ## ocsp_response_bytes ocsp_response_certificate ## x509_ocsp_ext_signed_certificate_timestamp event ocsp_extension%(f: fa_file, ext: X509::Extension, global_resp: bool%); diff --git a/src/file_analysis/analyzer/x509/x509-extension.pac b/src/file_analysis/analyzer/x509/x509-extension.pac index 396debbbbe..b6a6611d3c 100644 --- a/src/file_analysis/analyzer/x509/x509-extension.pac +++ b/src/file_analysis/analyzer/x509/x509-extension.pac @@ -35,6 +35,9 @@ refine connection MockConnection += { function proc_signedcertificatetimestamp(rec: HandshakeRecord, version: uint8, logid: const_bytestring, timestamp: uint64, digitally_signed_algorithms: SignatureAndHashAlgorithm, digitally_signed_signature: const_bytestring) : bool %{ + if ( ! x509_ocsp_ext_signed_certificate_timestamp ) + return true; + BifEvent::generate_x509_ocsp_ext_signed_certificate_timestamp((analyzer::Analyzer *) bro_analyzer(), bro_analyzer()->GetFile()->GetVal()->Ref(), version, diff --git a/src/file_analysis/file_analysis.bif b/src/file_analysis/file_analysis.bif index 81435bc3b5..f3086041b0 100644 --- a/src/file_analysis/file_analysis.bif +++ b/src/file_analysis/file_analysis.bif @@ -8,35 +8,35 @@ module Files; type AnalyzerArgs: record; -## :bro:see:`Files::set_timeout_interval`. +## :zeek:see:`Files::set_timeout_interval`. function Files::__set_timeout_interval%(file_id: string, t: interval%): bool %{ bool result = file_mgr->SetTimeoutInterval(file_id->CheckString(), t); return val_mgr->GetBool(result); %} -## :bro:see:`Files::enable_reassembly`. +## :zeek:see:`Files::enable_reassembly`. function Files::__enable_reassembly%(file_id: string%): bool %{ bool result = file_mgr->EnableReassembly(file_id->CheckString()); return val_mgr->GetBool(result); %} -## :bro:see:`Files::disable_reassembly`. +## :zeek:see:`Files::disable_reassembly`. function Files::__disable_reassembly%(file_id: string%): bool %{ bool result = file_mgr->DisableReassembly(file_id->CheckString()); return val_mgr->GetBool(result); %} -## :bro:see:`Files::set_reassembly_buffer_size`. +## :zeek:see:`Files::set_reassembly_buffer_size`. function Files::__set_reassembly_buffer%(file_id: string, max: count%): bool %{ bool result = file_mgr->SetReassemblyBuffer(file_id->CheckString(), max); return val_mgr->GetBool(result); %} -## :bro:see:`Files::add_analyzer`. +## :zeek:see:`Files::add_analyzer`. function Files::__add_analyzer%(file_id: string, tag: Files::Tag, args: any%): bool %{ using BifType::Record::Files::AnalyzerArgs; @@ -47,7 +47,7 @@ function Files::__add_analyzer%(file_id: string, tag: Files::Tag, args: any%): b return val_mgr->GetBool(result); %} -## :bro:see:`Files::remove_analyzer`. +## :zeek:see:`Files::remove_analyzer`. 
function Files::__remove_analyzer%(file_id: string, tag: Files::Tag, args: any%): bool %{ using BifType::Record::Files::AnalyzerArgs; @@ -58,20 +58,20 @@ function Files::__remove_analyzer%(file_id: string, tag: Files::Tag, args: any%) return val_mgr->GetBool(result); %} -## :bro:see:`Files::stop`. +## :zeek:see:`Files::stop`. function Files::__stop%(file_id: string%): bool %{ bool result = file_mgr->IgnoreFile(file_id->CheckString()); return val_mgr->GetBool(result); %} -## :bro:see:`Files::analyzer_name`. +## :zeek:see:`Files::analyzer_name`. function Files::__analyzer_name%(tag: Files::Tag%) : string %{ return new StringVal(file_mgr->GetComponentName(tag)); %} -## :bro:see:`Files::file_exists`. +## :zeek:see:`Files::file_exists`. function Files::__file_exists%(fuid: string%): bool %{ if ( file_mgr->LookupFile(fuid->CheckString()) != nullptr ) @@ -80,7 +80,7 @@ function Files::__file_exists%(fuid: string%): bool return val_mgr->GetFalse(); %} -## :bro:see:`Files::lookup_file`. +## :zeek:see:`Files::lookup_file`. function Files::__lookup_file%(fuid: string%): fa_file %{ auto f = file_mgr->LookupFile(fuid->CheckString()); @@ -95,14 +95,14 @@ function Files::__lookup_file%(fuid: string%): fa_file module GLOBAL; -## For use within a :bro:see:`get_file_handle` handler to set a unique +## For use within a :zeek:see:`get_file_handle` handler to set a unique ## identifier to associate with the current input to the file analysis ## framework. Using an empty string for the handle signifies that the ## input will be ignored/discarded. ## ## handle: A string that uniquely identifies a file. ## -## .. bro:see:: get_file_handle +## .. zeek:see:: get_file_handle function set_file_handle%(handle: string%): any %{ auto bytes = reinterpret_cast(handle->Bytes()); diff --git a/src/input/CMakeLists.txt b/src/input/CMakeLists.txt index b1c79d2bd0..c928451cb3 100644 --- a/src/input/CMakeLists.txt +++ b/src/input/CMakeLists.txt @@ -1,5 +1,5 @@ -include(BroSubdir) +include(ZeekSubdir) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} diff --git a/src/input/Manager.cc b/src/input/Manager.cc index aaf84a99b2..34e8960193 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -224,7 +224,7 @@ ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, EnumVal* tag) return backend; } -// Create a new input reader object to be used at whomevers leisure lateron. +// Create a new input reader object to be used at whomevers leisure later on. bool Manager::CreateStream(Stream* info, RecordVal* description) { RecordType* rtype = description->Type()->AsRecordType(); @@ -232,7 +232,7 @@ bool Manager::CreateStream(Stream* info, RecordVal* description) || same_type(rtype, BifType::Record::Input::EventDescription, 0) || same_type(rtype, BifType::Record::Input::AnalysisDescription, 0) ) ) { - reporter->Error("Streamdescription argument not of right type for new input stream"); + reporter->Error("Stream description argument not of right type for new input stream"); return false; } @@ -547,6 +547,7 @@ bool Manager::CreateTableStream(RecordVal* fval) Val *want_record = fval->Lookup("want_record", true); + if ( val ) { const BroType* table_yield = dst->Type()->AsTableType()->YieldType(); const BroType* compare_type = val; @@ -565,6 +566,17 @@ bool Manager::CreateTableStream(RecordVal* fval) return false; } } + else + { + if ( ! 
dst->Type()->IsSet() ) + { + reporter->Error("Input stream %s: 'destination' field is a table," + " but 'val' field is not provided" + " (did you mean to use a set instead of a table?)", + stream_name.c_str()); + return false; + } + } Val* event_val = fval->Lookup("ev", true); Func* event = event_val ? event_val->AsFunc() : 0; @@ -812,6 +824,7 @@ bool Manager::IsCompatibleType(BroType* t, bool atomic_only) case TYPE_INTERVAL: case TYPE_ENUM: case TYPE_STRING: + case TYPE_PATTERN: return true; case TYPE_RECORD: @@ -1865,11 +1878,12 @@ bool Manager::SendEvent(ReaderFrontend* reader, const string& name, const int nu bool convert_error = false; - val_list* vl = new val_list; + val_list vl(num_vals); + for ( int j = 0; j < num_vals; j++) { Val* v = ValueToVal(i, vals[j], convert_error); - vl->append(v); + vl.append(v); if ( v && ! convert_error && ! same_type(type->FieldType(j), v->Type()) ) { convert_error = true; @@ -1881,18 +1895,20 @@ bool Manager::SendEvent(ReaderFrontend* reader, const string& name, const int nu if ( convert_error ) { - delete_vals(vl); + loop_over_list(vl, i) + Unref(vl[i]); + return false; } else - mgr.QueueEvent(handler, vl, SOURCE_LOCAL); + mgr.QueueEvent(handler, std::move(vl), SOURCE_LOCAL); return true; } void Manager::SendEvent(EventHandlerPtr ev, const int numvals, ...) const { - val_list* vl = new val_list; + val_list vl(numvals); #ifdef DEBUG DBG_LOG(DBG_INPUT, "SendEvent with %d vals", @@ -1902,16 +1918,16 @@ void Manager::SendEvent(EventHandlerPtr ev, const int numvals, ...) const va_list lP; va_start(lP, numvals); for ( int i = 0; i < numvals; i++ ) - vl->append( va_arg(lP, Val*) ); + vl.append( va_arg(lP, Val*) ); va_end(lP); - mgr.QueueEvent(ev, vl, SOURCE_LOCAL); + mgr.QueueEvent(ev, std::move(vl), SOURCE_LOCAL); } void Manager::SendEvent(EventHandlerPtr ev, list events) const { - val_list* vl = new val_list; + val_list vl(events.size()); #ifdef DEBUG DBG_LOG(DBG_INPUT, "SendEvent with %" PRIuPTR " vals (list)", @@ -1919,11 +1935,9 @@ void Manager::SendEvent(EventHandlerPtr ev, list events) const #endif for ( list::iterator i = events.begin(); i != events.end(); i++ ) - { - vl->append( *i ); - } + vl.append( *i ); - mgr.QueueEvent(ev, vl, SOURCE_LOCAL); + mgr.QueueEvent(ev, std::move(vl), SOURCE_LOCAL); } // Convert a bro list value to a bro record value. @@ -2061,6 +2075,12 @@ int Manager::GetValueLength(const Value* val) const } break; + case TYPE_PATTERN: + { + length += strlen(val->val.pattern_text_val) + 1; + break; + } + case TYPE_TABLE: { for ( int i = 0; i < val->val.set_val.size; i++ ) @@ -2180,6 +2200,14 @@ int Manager::CopyValue(char *data, const int startpos, const Value* val) const return length; } + case TYPE_PATTERN: + { + // include null-terminator + int length = strlen(val->val.pattern_text_val) + 1; + memcpy(data + startpos, val->val.pattern_text_val, length); + return length; + } + case TYPE_TABLE: { int length = 0; @@ -2337,6 +2365,13 @@ Val* Manager::ValueToVal(const Stream* i, const Value* val, BroType* request_typ return subnetval; } + case TYPE_PATTERN: + { + RE_Matcher* re = new RE_Matcher(val->val.pattern_text_val); + re->Compile(); + return new PatternVal(re); + } + case TYPE_TABLE: { // all entries have to have the same type... 
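The input Manager hunks above add TYPE_PATTERN support: the wire form is just the null-terminated pattern text (see the GetValueLength and CopyValue additions), and conversion back to a script value compiles it into an RE_Matcher wrapped in a PatternVal. A minimal sketch of that conversion, using a hypothetical pattern string in place of the reader-supplied text:

    // Sketch of the TYPE_PATTERN conversion added in Manager::ValueToVal above.
    const char* pattern_text = "^(GET|POST) ";   // hypothetical; normally val->val.pattern_text_val
    RE_Matcher* re = new RE_Matcher(pattern_text);
    re->Compile();
    Val* v = new PatternVal(re);                 // behaves as an ordinary 'pattern' value in scripts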
@@ -2479,6 +2514,13 @@ Val* Manager::ValueToVal(const Stream* i, const Value* val, bool& have_error) co return subnetval; } + case TYPE_PATTERN: + { + RE_Matcher* re = new RE_Matcher(val->val.pattern_text_val); + re->Compile(); + return new PatternVal(re); + } + case TYPE_TABLE: { TypeList* set_index; diff --git a/src/input/Manager.h b/src/input/Manager.h index abbf8793b5..6b48f69ee4 100644 --- a/src/input/Manager.h +++ b/src/input/Manager.h @@ -7,7 +7,6 @@ #include "BroString.h" #include "EventHandler.h" -#include "RemoteSerializer.h" #include "Val.h" #include "Component.h" diff --git a/src/input/Tag.h b/src/input/Tag.h index 91d7539a39..1d4bcc2f9f 100644 --- a/src/input/Tag.h +++ b/src/input/Tag.h @@ -3,7 +3,7 @@ #ifndef INPUT_TAG_H #define INPUT_TAG_H -#include "bro-config.h" +#include "zeek-config.h" #include "util.h" #include "../Tag.h" #include "plugin/TaggedComponent.h" diff --git a/src/input/readers/ascii/Ascii.cc b/src/input/readers/ascii/Ascii.cc index 5ea11cf4d8..7003c519a0 100644 --- a/src/input/readers/ascii/Ascii.cc +++ b/src/input/readers/ascii/Ascii.cc @@ -243,8 +243,8 @@ bool Ascii::ReadHeader(bool useCached) map::iterator fit2 = ifields.find(field->secondary_name); if ( fit2 == ifields.end() ) { - FailWarn(fail_on_file_problem, Fmt("Could not find requested port type field %s in input data file.", - field->secondary_name), true); + FailWarn(fail_on_file_problem, Fmt("Could not find requested port type field %s in input data file %s.", + field->secondary_name, fname.c_str()), true); return false; } @@ -305,11 +305,15 @@ bool Ascii::DoUpdate() // no change return true; + // Warn again in case of trouble if the file changes. The comparison to 0 + // is to suppress an extra warning that we'd otherwise get on the initial + // inode assignment. + if ( ino != 0 ) + suppress_warnings = false; + mtime = sb.st_mtime; ino = sb.st_ino; - // file changed. reread. - - // fallthrough + // File changed. Fall through to re-read. } case MODE_MANUAL: @@ -387,8 +391,8 @@ bool Ascii::DoUpdate() if ( (*fit).position > pos || (*fit).secondary_position > pos ) { - FailWarn(fail_on_invalid_lines, Fmt("Not enough fields in line %s. Found %d fields, want positions %d and %d", - line.c_str(), pos, (*fit).position, (*fit).secondary_position)); + FailWarn(fail_on_invalid_lines, Fmt("Not enough fields in line '%s' of %s. Found %d fields, want positions %d and %d", + line.c_str(), fname.c_str(), pos, (*fit).position, (*fit).secondary_position)); if ( fail_on_invalid_lines ) { @@ -410,7 +414,7 @@ bool Ascii::DoUpdate() if ( val == 0 ) { - Warning(Fmt("Could not convert line '%s' to Val. Ignoring line.", line.c_str())); + Warning(Fmt("Could not convert line '%s' of %s to Val. Ignoring line.", line.c_str(), fname.c_str())); error = true; break; } @@ -470,8 +474,8 @@ bool Ascii::DoHeartbeat(double network_time, double current_time) case MODE_REREAD: case MODE_STREAM: - Update(); // call update and not DoUpdate, because update - // checks disabled. + Update(); // Call Update, not DoUpdate, because Update + // checks the "disabled" flag. 
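The Ascii reader hunk above re-arms warnings whenever the watched file actually changes, while skipping the very first inode assignment to avoid a spurious warning at startup. A condensed sketch of that update path; the unchanged-file comparison is an assumption, since the hunk only shows the early "no change" return:

    // Condensed sketch of the MODE_REREAD path in Ascii::DoUpdate.
    if ( sb.st_ino == ino && sb.st_mtime == mtime )   // assumed unchanged-file check
        return true;                                  // no change, nothing to re-read

    if ( ino != 0 )                  // skip on the initial inode assignment
        suppress_warnings = false;   // warn again if the changed file has problems

    mtime = sb.st_mtime;
    ino = sb.st_ino;
    // File changed; fall through to re-read it.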
break; default: diff --git a/src/input/readers/ascii/CMakeLists.txt b/src/input/readers/ascii/CMakeLists.txt index 267bb9a7ab..fe5c9f01a4 100644 --- a/src/input/readers/ascii/CMakeLists.txt +++ b/src/input/readers/ascii/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro AsciiReader) -bro_plugin_cc(Ascii.cc Plugin.cc) -bro_plugin_bif(ascii.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek AsciiReader) +zeek_plugin_cc(Ascii.cc Plugin.cc) +zeek_plugin_bif(ascii.bif) +zeek_plugin_end() diff --git a/src/input/readers/ascii/Plugin.cc b/src/input/readers/ascii/Plugin.cc index b389cb8602..79738ccba5 100644 --- a/src/input/readers/ascii/Plugin.cc +++ b/src/input/readers/ascii/Plugin.cc @@ -5,7 +5,7 @@ #include "Ascii.h" namespace plugin { -namespace Bro_AsciiReader { +namespace Zeek_AsciiReader { class Plugin : public plugin::Plugin { public: @@ -14,7 +14,7 @@ public: AddComponent(new ::input::Component("Ascii", ::input::reader::Ascii::Instantiate)); plugin::Configuration config; - config.name = "Bro::AsciiReader"; + config.name = "Zeek::AsciiReader"; config.description = "ASCII input reader"; return config; } diff --git a/src/input/readers/benchmark/CMakeLists.txt b/src/input/readers/benchmark/CMakeLists.txt index 3b3a34ae47..1595af8f6c 100644 --- a/src/input/readers/benchmark/CMakeLists.txt +++ b/src/input/readers/benchmark/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro BenchmarkReader) -bro_plugin_cc(Benchmark.cc Plugin.cc) -bro_plugin_bif(benchmark.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek BenchmarkReader) +zeek_plugin_cc(Benchmark.cc Plugin.cc) +zeek_plugin_bif(benchmark.bif) +zeek_plugin_end() diff --git a/src/input/readers/benchmark/Plugin.cc b/src/input/readers/benchmark/Plugin.cc index d5e0975a80..8da8b24148 100644 --- a/src/input/readers/benchmark/Plugin.cc +++ b/src/input/readers/benchmark/Plugin.cc @@ -5,7 +5,7 @@ #include "Benchmark.h" namespace plugin { -namespace Bro_BenchmarkReader { +namespace Zeek_BenchmarkReader { class Plugin : public plugin::Plugin { public: @@ -14,7 +14,7 @@ public: AddComponent(new ::input::Component("Benchmark", ::input::reader::Benchmark::Instantiate)); plugin::Configuration config; - config.name = "Bro::BenchmarkReader"; + config.name = "Zeek::BenchmarkReader"; config.description = "Benchmark input reader"; return config; } diff --git a/src/input/readers/binary/CMakeLists.txt b/src/input/readers/binary/CMakeLists.txt index 800c3b7567..32dd2059e0 100644 --- a/src/input/readers/binary/CMakeLists.txt +++ b/src/input/readers/binary/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro BinaryReader) -bro_plugin_cc(Binary.cc Plugin.cc) -bro_plugin_bif(binary.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek BinaryReader) +zeek_plugin_cc(Binary.cc Plugin.cc) +zeek_plugin_bif(binary.bif) +zeek_plugin_end() diff --git a/src/input/readers/binary/Plugin.cc b/src/input/readers/binary/Plugin.cc index 7c5dc16b8b..a84260eb67 100644 --- a/src/input/readers/binary/Plugin.cc +++ b/src/input/readers/binary/Plugin.cc @@ -5,7 +5,7 @@ #include "Binary.h" namespace plugin { -namespace Bro_BinaryReader { +namespace Zeek_BinaryReader { class Plugin : public plugin::Plugin { public: @@ -14,7 +14,7 @@ public: 
AddComponent(new ::input::Component("Binary", ::input::reader::Binary::Instantiate)); plugin::Configuration config; - config.name = "Bro::BinaryReader"; + config.name = "Zeek::BinaryReader"; config.description = "Binary input reader"; return config; } diff --git a/src/input/readers/config/CMakeLists.txt b/src/input/readers/config/CMakeLists.txt index 8e4c1aa5aa..8f3553db2c 100644 --- a/src/input/readers/config/CMakeLists.txt +++ b/src/input/readers/config/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro ConfigReader) -bro_plugin_cc(Config.cc Plugin.cc) -bro_plugin_bif(config.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek ConfigReader) +zeek_plugin_cc(Config.cc Plugin.cc) +zeek_plugin_bif(config.bif) +zeek_plugin_end() diff --git a/src/input/readers/config/Config.cc b/src/input/readers/config/Config.cc index eca276281c..8f0447cf66 100644 --- a/src/input/readers/config/Config.cc +++ b/src/input/readers/config/Config.cc @@ -33,7 +33,7 @@ Config::Config(ReaderFrontend *frontend) : ReaderBackend(frontend) while ( auto id = globals->NextEntry(c) ) { - if ( id->IsInternalGlobal() || ! id->IsOption() ) + if ( ! id->IsOption() ) continue; if ( id->Type()->Tag() == TYPE_RECORD || @@ -151,11 +151,15 @@ bool Config::DoUpdate() // no change return true; + // Warn again in case of trouble if the file changes. The comparison to 0 + // is to suppress an extra warning that we'd otherwise get on the initial + // inode assignment. + if ( ino != 0 ) + suppress_warnings = false; + mtime = sb.st_mtime; ino = sb.st_ino; - // file changed. reread. - - // fallthrough + // File changed. Fall through to re-read. } case MODE_MANUAL: @@ -309,8 +313,8 @@ bool Config::DoHeartbeat(double network_time, double current_time) case MODE_REREAD: case MODE_STREAM: - Update(); // call update and not DoUpdate, because update - // checks disabled. + Update(); // Call Update, not DoUpdate, because Update + // checks the "disabled" flag. 
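The Config reader hunk above simplifies option discovery: the constructor walks the global scope and now keeps every identifier declared as an option, without the earlier internal-global special case. A sketch of that loop, with the record/table filtering elided:

    // Sketch of the option-discovery loop in the Config reader's constructor;
    // 'globals' and the iteration cookie 'c' come from the surrounding code.
    while ( auto id = globals->NextEntry(c) )
        {
        if ( ! id->IsOption() )
            continue;   // only option identifiers are interesting to the reader

        // record- and table-typed options are then filtered out (see the hunk above)
        }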
break; default: diff --git a/src/input/readers/config/Plugin.cc b/src/input/readers/config/Plugin.cc index 77c8a97091..810acc2370 100644 --- a/src/input/readers/config/Plugin.cc +++ b/src/input/readers/config/Plugin.cc @@ -5,7 +5,7 @@ #include "Config.h" namespace plugin { -namespace Bro_ConfigReader { +namespace Zeek_ConfigReader { class Plugin : public plugin::Plugin { public: @@ -14,7 +14,7 @@ public: AddComponent(new ::input::Component("Config", ::input::reader::Config::Instantiate)); plugin::Configuration config; - config.name = "Bro::ConfigReader"; + config.name = "Zeek::ConfigReader"; config.description = "Configuration file input reader"; return config; } diff --git a/src/input/readers/raw/CMakeLists.txt b/src/input/readers/raw/CMakeLists.txt index 5540d70202..2b197d5a4e 100644 --- a/src/input/readers/raw/CMakeLists.txt +++ b/src/input/readers/raw/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro RawReader) -bro_plugin_cc(Raw.cc Plugin.cc) -bro_plugin_bif(raw.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek RawReader) +zeek_plugin_cc(Raw.cc Plugin.cc) +zeek_plugin_bif(raw.bif) +zeek_plugin_end() diff --git a/src/input/readers/raw/Plugin.cc b/src/input/readers/raw/Plugin.cc index e16a233fe6..5791b836a1 100644 --- a/src/input/readers/raw/Plugin.cc +++ b/src/input/readers/raw/Plugin.cc @@ -2,9 +2,9 @@ #include "Plugin.h" -namespace plugin { namespace Bro_RawReader { Plugin plugin; } } +namespace plugin { namespace Zeek_RawReader { Plugin plugin; } } -using namespace plugin::Bro_RawReader; +using namespace plugin::Zeek_RawReader; Plugin::Plugin() { @@ -15,7 +15,7 @@ plugin::Configuration Plugin::Configure() AddComponent(new ::input::Component("Raw", ::input::reader::Raw::Instantiate)); plugin::Configuration config; - config.name = "Bro::RawReader"; + config.name = "Zeek::RawReader"; config.description = "Raw input reader"; return config; } diff --git a/src/input/readers/raw/Plugin.h b/src/input/readers/raw/Plugin.h index 31fa611130..7dcd5e1b13 100644 --- a/src/input/readers/raw/Plugin.h +++ b/src/input/readers/raw/Plugin.h @@ -7,7 +7,7 @@ #include "Raw.h" namespace plugin { -namespace Bro_RawReader { +namespace Zeek_RawReader { class Plugin : public plugin::Plugin { public: diff --git a/src/input/readers/raw/Raw.cc b/src/input/readers/raw/Raw.cc index 51b041744c..81627ac169 100644 --- a/src/input/readers/raw/Raw.cc +++ b/src/input/readers/raw/Raw.cc @@ -99,7 +99,7 @@ bool Raw::SetFDFlags(int fd, int cmd, int flags) std::unique_lock Raw::AcquireForkMutex() { - auto lock = plugin::Bro_RawReader::plugin.ForkMutex(); + auto lock = plugin::Zeek_RawReader::plugin.ForkMutex(); try { diff --git a/src/input/readers/sqlite/CMakeLists.txt b/src/input/readers/sqlite/CMakeLists.txt index 3c513127dc..868a6c704b 100644 --- a/src/input/readers/sqlite/CMakeLists.txt +++ b/src/input/readers/sqlite/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro SQLiteReader) -bro_plugin_cc(SQLite.cc Plugin.cc) -bro_plugin_bif(sqlite.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek SQLiteReader) +zeek_plugin_cc(SQLite.cc Plugin.cc) +zeek_plugin_bif(sqlite.bif) +zeek_plugin_end() diff --git a/src/input/readers/sqlite/Plugin.cc b/src/input/readers/sqlite/Plugin.cc index db75d6dc22..6217d3bf93 100644 --- a/src/input/readers/sqlite/Plugin.cc +++ 
b/src/input/readers/sqlite/Plugin.cc @@ -5,7 +5,7 @@ #include "SQLite.h" namespace plugin { -namespace Bro_SQLiteReader { +namespace Zeek_SQLiteReader { class Plugin : public plugin::Plugin { public: @@ -14,7 +14,7 @@ public: AddComponent(new ::input::Component("SQLite", ::input::reader::SQLite::Instantiate)); plugin::Configuration config; - config.name = "Bro::SQLiteReader"; + config.name = "Zeek::SQLiteReader"; config.description = "SQLite input reader"; return config; } diff --git a/src/input/readers/sqlite/SQLite.cc b/src/input/readers/sqlite/SQLite.cc index 40c0f8a063..8dcaed61c0 100644 --- a/src/input/readers/sqlite/SQLite.cc +++ b/src/input/readers/sqlite/SQLite.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include #include @@ -71,7 +71,7 @@ bool SQLite::DoInit(const ReaderInfo& info, int arg_num_fields, const threading: { if ( sqlite3_threadsafe() == 0 ) { - Error("SQLite reports that it is not threadsafe. Bro needs a threadsafe version of SQLite. Aborting"); + Error("SQLite reports that it is not threadsafe. Zeek needs a threadsafe version of SQLite. Aborting"); return false; } diff --git a/src/input/readers/sqlite/SQLite.h b/src/input/readers/sqlite/SQLite.h index 2aa01017e1..4255a2841f 100644 --- a/src/input/readers/sqlite/SQLite.h +++ b/src/input/readers/sqlite/SQLite.h @@ -3,7 +3,7 @@ #ifndef INPUT_READERS_SQLITE_H #define INPUT_READERS_SQLITE_H -#include "bro-config.h" +#include "zeek-config.h" #include #include diff --git a/src/iosource/BPF_Program.cc b/src/iosource/BPF_Program.cc index ca5a6eef54..901010e9bc 100644 --- a/src/iosource/BPF_Program.cc +++ b/src/iosource/BPF_Program.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include "util.h" #include "BPF_Program.h" diff --git a/src/iosource/CMakeLists.txt b/src/iosource/CMakeLists.txt index 27c42e9a40..f7497c7fe6 100644 --- a/src/iosource/CMakeLists.txt +++ b/src/iosource/CMakeLists.txt @@ -1,5 +1,5 @@ -include(BroSubdir) +include(ZeekSubdir) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} diff --git a/src/iosource/Packet.cc b/src/iosource/Packet.cc index 3bb6e34e50..e2bbe99c30 100644 --- a/src/iosource/Packet.cc +++ b/src/iosource/Packet.cc @@ -671,66 +671,3 @@ void Packet::Describe(ODesc* d) const d->Add(ip.DstAddr()); } -bool Packet::Serialize(SerialInfo* info) const - { - return SERIALIZE(uint32(ts.tv_sec)) && - SERIALIZE(uint32(ts.tv_usec)) && - SERIALIZE(uint32(len)) && - SERIALIZE(link_type) && - info->s->Write(tag.c_str(), tag.length(), "tag") && - info->s->Write((const char*)data, cap_len, "data"); - } - -#ifdef DEBUG -static iosource::PktDumper* dump = 0; -#endif - -Packet* Packet::Unserialize(UnserialInfo* info) - { - pkt_timeval ts; - uint32 len, link_type; - - if ( ! (UNSERIALIZE((uint32 *)&ts.tv_sec) && - UNSERIALIZE((uint32 *)&ts.tv_usec) && - UNSERIALIZE(&len) && - UNSERIALIZE(&link_type)) ) - return 0; - - char* tag; - if ( ! info->s->Read((char**) &tag, 0, "tag") ) - return 0; - - const u_char* pkt; - int caplen; - if ( ! info->s->Read((char**) &pkt, &caplen, "data") ) - { - delete [] tag; - return 0; - } - - Packet *p = new Packet(link_type, &ts, caplen, len, pkt, true, - std::string(tag)); - delete [] tag; - - // For the global timer manager, we take the global network_time as the - // packet's timestamp for feeding it into our packet loop. 
- if ( p->tag == "" ) - p->time = timer_mgr->Time(); - else - p->time = p->ts.tv_sec + double(p->ts.tv_usec) / 1e6; - -#ifdef DEBUG - if ( debug_logger.IsEnabled(DBG_TM) ) - { - if ( ! dump ) - dump = iosource_mgr->OpenPktDumper("tm.pcap", true); - - if ( dump ) - { - dump->Dump(p); - } - } -#endif - - return p; - } diff --git a/src/iosource/Packet.h b/src/iosource/Packet.h index ec29f39ff5..3ca24cb737 100644 --- a/src/iosource/Packet.h +++ b/src/iosource/Packet.h @@ -24,12 +24,6 @@ enum Layer3Proto { /** * A link-layer packet. - * - * Note that for serialization we don't use much of the support provided by - * the serialization framework. Serialize/Unserialize do all the work by - * themselves. In particular, Packets aren't derived from SerialObj. They are - * completely seperate and self-contained entities, and we don't need any of - * the sophisticated features like object caching. */ class Packet { public: @@ -144,16 +138,6 @@ public: */ void Describe(ODesc* d) const; - /** - * Serializes the packet, with standard signature. - */ - bool Serialize(SerialInfo* info) const; - - /** - * Unserializes the packet, with standard signature. - */ - static Packet* Unserialize(UnserialInfo* info); - /** * Maximal length of a layer 2 address. */ diff --git a/src/iosource/PktDumper.cc b/src/iosource/PktDumper.cc index 10c95e8021..863c46ec81 100644 --- a/src/iosource/PktDumper.cc +++ b/src/iosource/PktDumper.cc @@ -4,7 +4,7 @@ #include #include -#include "bro-config.h" +#include "zeek-config.h" #include "PktDumper.h" diff --git a/src/iosource/PktSrc.cc b/src/iosource/PktSrc.cc index 343801ab7d..8b1ab90ebb 100644 --- a/src/iosource/PktSrc.cc +++ b/src/iosource/PktSrc.cc @@ -3,7 +3,7 @@ #include #include -#include "bro-config.h" +#include "zeek-config.h" #include "util.h" #include "PktSrc.h" @@ -160,21 +160,6 @@ double PktSrc::CheckPseudoTime() if ( ! ExtractNextPacketInternal() ) return 0; - if ( remote_trace_sync_interval ) - { - if ( next_sync_point == 0 || current_packet.time >= next_sync_point ) - { - int n = remote_serializer->SendSyncPoint(); - next_sync_point = first_timestamp + - n * remote_trace_sync_interval; - remote_serializer->Log(RemoteSerializer::LogInfo, - fmt("stopping at packet %.6f, next sync-point at %.6f", - current_packet.time, next_sync_point)); - - return 0; - } - } - double pseudo_time = current_packet.time - first_timestamp; double ct = (current_time(true) - first_wallclock) * pseudo_realtime; @@ -308,15 +293,6 @@ bool PktSrc::ExtractNextPacketInternal() if ( pseudo_realtime && ! IsOpen() ) { - if ( using_communication ) - { - // Source has gone dry, we're done. 
- if ( remote_trace_sync_interval ) - remote_serializer->SendFinalSyncPoint(); - else - remote_serializer->Terminate(); - } - if ( broker_mgr->Active() ) iosource_mgr->Terminate(); } diff --git a/src/iosource/pcap/CMakeLists.txt b/src/iosource/pcap/CMakeLists.txt index fbfffff051..f829a96a19 100644 --- a/src/iosource/pcap/CMakeLists.txt +++ b/src/iosource/pcap/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro Pcap) -bro_plugin_cc(Source.cc Dumper.cc Plugin.cc) +zeek_plugin_begin(Zeek Pcap) +zeek_plugin_cc(Source.cc Dumper.cc Plugin.cc) bif_target(pcap.bif) -bro_plugin_end() +zeek_plugin_end() diff --git a/src/iosource/pcap/Plugin.cc b/src/iosource/pcap/Plugin.cc index af74b16ead..75f8f54a2c 100644 --- a/src/iosource/pcap/Plugin.cc +++ b/src/iosource/pcap/Plugin.cc @@ -6,7 +6,7 @@ #include "Dumper.h" namespace plugin { -namespace Bro_Pcap { +namespace Zeek_Pcap { class Plugin : public plugin::Plugin { public: @@ -16,7 +16,7 @@ public: AddComponent(new ::iosource::PktDumperComponent("PcapWriter", "pcap", ::iosource::pcap::PcapDumper::Instantiate)); plugin::Configuration config; - config.name = "Bro::Pcap"; + config.name = "Zeek::Pcap"; config.description = "Packet acquisition via libpcap"; return config; } diff --git a/src/iosource/pcap/Source.cc b/src/iosource/pcap/Source.cc index fb9954981c..119280f1e5 100644 --- a/src/iosource/pcap/Source.cc +++ b/src/iosource/pcap/Source.cc @@ -2,7 +2,7 @@ #include -#include "bro-config.h" +#include "zeek-config.h" #include "Source.h" #include "iosource/Packet.h" diff --git a/src/iosource/pcap/pcap.bif b/src/iosource/pcap/pcap.bif index 1e7ca8a844..9e6e0238ba 100644 --- a/src/iosource/pcap/pcap.bif +++ b/src/iosource/pcap/pcap.bif @@ -12,7 +12,7 @@ const bufsize: count; ## ## Returns: True if *s* is valid and precompiles successfully. ## -## .. bro:see:: Pcap::install_pcap_filter +## .. zeek:see:: Pcap::install_pcap_filter ## install_src_addr_filter ## install_src_net_filter ## uninstall_src_addr_filter @@ -51,14 +51,14 @@ function precompile_pcap_filter%(id: PcapFilterID, s: string%): bool %} ## Installs a PCAP filter that has been precompiled with -## :bro:id:`Pcap::precompile_pcap_filter`. +## :zeek:id:`Pcap::precompile_pcap_filter`. ## ## id: The PCAP filter id of a precompiled filter. ## ## Returns: True if the filter associated with *id* has been installed ## successfully. ## -## .. bro:see:: Pcap::precompile_pcap_filter +## .. zeek:see:: Pcap::precompile_pcap_filter ## install_src_addr_filter ## install_src_net_filter ## uninstall_src_addr_filter @@ -90,7 +90,7 @@ function Pcap::install_pcap_filter%(id: PcapFilterID%): bool ## ## Returns: A descriptive error message of the PCAP function that failed. ## -## .. bro:see:: Pcap::precompile_pcap_filter +## .. 
zeek:see:: Pcap::precompile_pcap_filter ## Pcap::install_pcap_filter ## install_src_addr_filter ## install_src_net_filter diff --git a/src/logging/CMakeLists.txt b/src/logging/CMakeLists.txt index 6553e2170f..05478f240b 100644 --- a/src/logging/CMakeLists.txt +++ b/src/logging/CMakeLists.txt @@ -1,5 +1,5 @@ -include(BroSubdir) +include(ZeekSubdir) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index f1b459811f..0fe75b91db 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -2,11 +2,12 @@ #include -#include "../Event.h" -#include "../EventHandler.h" -#include "../NetVar.h" -#include "../Net.h" -#include "../Type.h" +#include "Event.h" +#include "EventHandler.h" +#include "NetVar.h" +#include "Net.h" +#include "Type.h" +#include "File.h" #include "broker/Manager.h" #include "threading/Manager.h" @@ -16,8 +17,8 @@ #include "WriterFrontend.h" #include "WriterBackend.h" #include "logging.bif.h" -#include "../plugin/Plugin.h" -#include "../plugin/Manager.h" +#include "plugin/Plugin.h" +#include "plugin/Manager.h" using namespace logging; @@ -715,11 +716,7 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) // Raise the log event. if ( stream->event ) - { - val_list* vl = new val_list(1); - vl->append(columns->Ref()); - mgr.QueueEvent(stream->event, vl, SOURCE_LOCAL); - } + mgr.QueueEventFast(stream->event, {columns->Ref()}, SOURCE_LOCAL); // Send to each of our filters. for ( list::iterator i = stream->filters.begin(); @@ -732,8 +729,7 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) { // See whether the predicates indicates that we want // to log this record. - val_list vl(1); - vl.append(columns->Ref()); + val_list vl{columns->Ref()}; int result = 1; @@ -750,17 +746,12 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) if ( filter->path_func ) { - val_list vl(3); - vl.append(id->Ref()); - Val* path_arg; if ( filter->path_val ) path_arg = filter->path_val->Ref(); else path_arg = val_mgr->GetEmptyString(); - vl.append(path_arg); - Val* rec_arg; BroType* rt = filter->path_func->FType()->Args()->FieldType("rec"); @@ -770,7 +761,11 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) // Can be TYPE_ANY here. rec_arg = columns->Ref(); - vl.append(rec_arg); + val_list vl{ + id->Ref(), + path_arg, + rec_arg, + }; Val* v = 0; @@ -1087,8 +1082,7 @@ threading::Value** Manager::RecordToFilterVals(Stream* stream, Filter* filter, RecordVal* ext_rec = nullptr; if ( filter->num_ext_fields > 0 ) { - val_list vl(1); - vl.append(filter->path_val->Ref()); + val_list vl{filter->path_val->Ref()}; Val* res = filter->ext_func->Call(&vl); if ( res ) ext_rec = res->AsRecordVal(); @@ -1307,32 +1301,6 @@ bool Manager::WriteFromRemote(EnumVal* id, EnumVal* writer, string path, int num return true; } -void Manager::SendAllWritersTo(RemoteSerializer::PeerID peer) - { - auto et = internal_type("Log::Writer")->AsEnumType(); - - for ( vector::iterator s = streams.begin(); s != streams.end(); ++s ) - { - Stream* stream = (*s); - - if ( ! 
(stream && stream->enable_remote) ) - continue; - - for ( Stream::WriterMap::iterator i = stream->writers.begin(); - i != stream->writers.end(); i++ ) - { - WriterFrontend* writer = i->second->writer; - auto writer_val = et->GetVal(i->first.first); - remote_serializer->SendLogCreateWriter(peer, (*s)->id, - writer_val, - *i->second->info, - writer->NumFields(), - writer->Fields()); - Unref(writer_val); - } - } - } - void Manager::SendAllWritersTo(const broker::endpoint_info& ei) { auto et = internal_type("Log::Writer")->AsEnumType(); @@ -1593,8 +1561,7 @@ bool Manager::FinishedRotation(WriterFrontend* writer, const char* new_name, con assert(func); // Call the postprocessor function. - val_list vl(1); - vl.append(info); + val_list vl{info}; int result = 0; diff --git a/src/logging/Manager.h b/src/logging/Manager.h index d04def7938..96ff2ea0c9 100644 --- a/src/logging/Manager.h +++ b/src/logging/Manager.h @@ -10,14 +10,12 @@ #include "../Val.h" #include "../Tag.h" #include "../EventHandler.h" -#include "../RemoteSerializer.h" #include "../plugin/ComponentManager.h" #include "Component.h" #include "WriterBackend.h" class SerializationFormat; -class RemoteSerializer; class RotationTimer; namespace logging { @@ -234,7 +232,6 @@ protected: friend class WriterFrontend; friend class RotationFinishedMessage; friend class RotationFailedMessage; - friend class ::RemoteSerializer; friend class ::RotationTimer; // Instantiates a new WriterBackend of the given type (note that @@ -248,9 +245,6 @@ protected: int num_fields, const threading::Field* const* fields, bool local, bool remote, bool from_remote, const string& instantiating_filter=""); - // Announces all instantiated writers to peer. - void SendAllWritersTo(RemoteSerializer::PeerID peer); - // Signals that a file has been rotated. bool FinishedRotation(WriterFrontend* writer, const char* new_name, const char* old_name, double open, double close, bool success, bool terminating); diff --git a/src/logging/Tag.h b/src/logging/Tag.h index ab0a702d47..07c45826b8 100644 --- a/src/logging/Tag.h +++ b/src/logging/Tag.h @@ -3,7 +3,7 @@ #ifndef LOGGING_TAG_H #define LOGGING_TAG_H -#include "bro-config.h" +#include "zeek-config.h" #include "util.h" #include "../Tag.h" #include "plugin/TaggedComponent.h" diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index 4416e41d17..162b19d26a 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -69,58 +69,6 @@ public: using namespace logging; -bool WriterBackend::WriterInfo::Read(SerializationFormat* fmt) - { - int size; - - string tmp_path; - - if ( ! (fmt->Read(&tmp_path, "path") && - fmt->Read(&rotation_base, "rotation_base") && - fmt->Read(&rotation_interval, "rotation_interval") && - fmt->Read(&network_time, "network_time") && - fmt->Read(&size, "config_size")) ) - return false; - - path = copy_string(tmp_path.c_str()); - - config.clear(); - - while ( size-- ) - { - string value; - string key; - - if ( ! (fmt->Read(&value, "config-value") && fmt->Read(&key, "config-key")) ) - return false; - - config.insert(std::make_pair(copy_string(value.c_str()), copy_string(key.c_str()))); - } - - return true; - } - - -bool WriterBackend::WriterInfo::Write(SerializationFormat* fmt) const - { - int size = config.size(); - - if ( ! 
(fmt->Write(path, "path") && - fmt->Write(rotation_base, "rotation_base") && - fmt->Write(rotation_interval, "rotation_interval") && - fmt->Write(network_time, "network_time") && - fmt->Write(size, "config_size")) ) - return false; - - for ( config_map::const_iterator i = config.begin(); i != config.end(); ++i ) - { - if ( ! (fmt->Write(i->first, "config-value") && fmt->Write(i->second, "config-key")) ) - return false; - } - - return true; - } - broker::data WriterBackend::WriterInfo::ToBroker() const { auto t = broker::table(); diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index 74541d8586..35cf401199 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -9,8 +9,6 @@ #include "Component.h" -class RemoteSerializer; - namespace broker { class data; } namespace logging { @@ -114,8 +112,6 @@ public: // Note, these need to be adapted when changing the struct's // fields. They serialize/deserialize the struct. - bool Read(SerializationFormat* fmt); - bool Write(SerializationFormat* fmt) const; broker::data ToBroker() const; bool FromBroker(broker::data d); diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index 56bbf68161..fdc4a7a97b 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -169,12 +169,6 @@ void WriterFrontend::Init(int arg_num_fields, const Field* const * arg_fields) if ( remote ) { - remote_serializer->SendLogCreateWriter(stream, - writer, - *info, - arg_num_fields, - arg_fields); - broker_mgr->PublishLogCreate(stream, writer, *info, @@ -201,12 +195,6 @@ void WriterFrontend::Write(int arg_num_fields, Value** vals) if ( remote ) { - remote_serializer->SendLogWrite(stream, - writer, - info->path, - num_fields, - vals); - broker_mgr->PublishLogWrite(stream, writer, info->path, diff --git a/src/logging/writers/ascii/Ascii.cc b/src/logging/writers/ascii/Ascii.cc index baaba22665..f84bde5488 100644 --- a/src/logging/writers/ascii/Ascii.cc +++ b/src/logging/writers/ascii/Ascii.cc @@ -444,7 +444,8 @@ bool Ascii::DoHeartbeat(double network_time, double current_time) string Ascii::LogExt() { - const char* ext = getenv("BRO_LOG_SUFFIX"); + const char* ext = zeekenv("ZEEK_LOG_SUFFIX"); + if ( ! 
ext ) ext = "log"; diff --git a/src/logging/writers/ascii/CMakeLists.txt b/src/logging/writers/ascii/CMakeLists.txt index 0cb0357a0d..430631f997 100644 --- a/src/logging/writers/ascii/CMakeLists.txt +++ b/src/logging/writers/ascii/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro AsciiWriter) -bro_plugin_cc(Ascii.cc Plugin.cc) -bro_plugin_bif(ascii.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek AsciiWriter) +zeek_plugin_cc(Ascii.cc Plugin.cc) +zeek_plugin_bif(ascii.bif) +zeek_plugin_end() diff --git a/src/logging/writers/ascii/Plugin.cc b/src/logging/writers/ascii/Plugin.cc index 4dcefda47b..cc258c4236 100644 --- a/src/logging/writers/ascii/Plugin.cc +++ b/src/logging/writers/ascii/Plugin.cc @@ -6,7 +6,7 @@ #include "Ascii.h" namespace plugin { -namespace Bro_AsciiWriter { +namespace Zeek_AsciiWriter { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::logging::Component("Ascii", ::logging::writer::Ascii::Instantiate)); plugin::Configuration config; - config.name = "Bro::AsciiWriter"; + config.name = "Zeek::AsciiWriter"; config.description = "ASCII log writer"; return config; } diff --git a/src/logging/writers/none/CMakeLists.txt b/src/logging/writers/none/CMakeLists.txt index f6e1265772..af386e3aee 100644 --- a/src/logging/writers/none/CMakeLists.txt +++ b/src/logging/writers/none/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro NoneWriter) -bro_plugin_cc(None.cc Plugin.cc) -bro_plugin_bif(none.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek NoneWriter) +zeek_plugin_cc(None.cc Plugin.cc) +zeek_plugin_bif(none.bif) +zeek_plugin_end() diff --git a/src/logging/writers/none/Plugin.cc b/src/logging/writers/none/Plugin.cc index f712e7408c..3c86a238a1 100644 --- a/src/logging/writers/none/Plugin.cc +++ b/src/logging/writers/none/Plugin.cc @@ -6,7 +6,7 @@ #include "None.h" namespace plugin { -namespace Bro_NoneWriter { +namespace Zeek_NoneWriter { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new ::logging::Component("None", ::logging::writer::None::Instantiate)); plugin::Configuration config; - config.name = "Bro::NoneWriter"; + config.name = "Zeek::NoneWriter"; config.description = "None log writer (primarily for debugging)"; return config; } diff --git a/src/logging/writers/sqlite/CMakeLists.txt b/src/logging/writers/sqlite/CMakeLists.txt index ce25251679..41c2f01c9e 100644 --- a/src/logging/writers/sqlite/CMakeLists.txt +++ b/src/logging/writers/sqlite/CMakeLists.txt @@ -1,9 +1,9 @@ -include(BroPlugin) +include(ZeekPlugin) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -bro_plugin_begin(Bro SQLiteWriter) -bro_plugin_cc(SQLite.cc Plugin.cc) -bro_plugin_bif(sqlite.bif) -bro_plugin_end() +zeek_plugin_begin(Zeek SQLiteWriter) +zeek_plugin_cc(SQLite.cc Plugin.cc) +zeek_plugin_bif(sqlite.bif) +zeek_plugin_end() diff --git a/src/logging/writers/sqlite/Plugin.cc b/src/logging/writers/sqlite/Plugin.cc index f48ec838f1..a7ddc95472 100644 --- a/src/logging/writers/sqlite/Plugin.cc +++ b/src/logging/writers/sqlite/Plugin.cc @@ -6,7 +6,7 @@ #include "SQLite.h" namespace plugin { -namespace Bro_SQLiteWriter { +namespace Zeek_SQLiteWriter { class Plugin : public plugin::Plugin { public: @@ -15,7 +15,7 @@ public: AddComponent(new 
::logging::Component("SQLite", ::logging::writer::SQLite::Instantiate)); plugin::Configuration config; - config.name = "Bro::SQLiteWriter"; + config.name = "Zeek::SQLiteWriter"; config.description = "SQLite log writer"; return config; } diff --git a/src/logging/writers/sqlite/SQLite.cc b/src/logging/writers/sqlite/SQLite.cc index 977a0c6089..48930a6225 100644 --- a/src/logging/writers/sqlite/SQLite.cc +++ b/src/logging/writers/sqlite/SQLite.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include #include @@ -116,7 +116,7 @@ bool SQLite::DoInit(const WriterInfo& info, int arg_num_fields, { if ( sqlite3_threadsafe() == 0 ) { - Error("SQLite reports that it is not threadsafe. Bro needs a threadsafe version of SQLite. Aborting"); + Error("SQLite reports that it is not threadsafe. Zeek needs a threadsafe version of SQLite. Aborting"); return false; } diff --git a/src/logging/writers/sqlite/SQLite.h b/src/logging/writers/sqlite/SQLite.h index 3ad535e543..7e8ff739b3 100644 --- a/src/logging/writers/sqlite/SQLite.h +++ b/src/logging/writers/sqlite/SQLite.h @@ -5,7 +5,7 @@ #ifndef LOGGING_WRITER_SQLITE_H #define LOGGING_WRITER_SQLITE_H -#include "bro-config.h" +#include "zeek-config.h" #include "logging/WriterBackend.h" #include "threading/formatters/Ascii.h" diff --git a/src/main.cc b/src/main.cc index 473f3a72e7..3556a0e99d 100644 --- a/src/main.cc +++ b/src/main.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include #include @@ -38,9 +38,6 @@ extern "C" { #include "DFA.h" #include "RuleMatcher.h" #include "Anon.h" -#include "Serializer.h" -#include "RemoteSerializer.h" -#include "PersistenceSerializer.h" #include "EventRegistry.h" #include "Stats.h" #include "Brofiler.h" @@ -55,7 +52,7 @@ extern "C" { #include "analyzer/Tag.h" #include "plugin/Manager.h" #include "file_analysis/Manager.h" -#include "broxygen/Manager.h" +#include "zeekygen/Manager.h" #include "iosource/Manager.h" #include "broker/Manager.h" @@ -91,7 +88,7 @@ input::Manager* input_mgr = 0; plugin::Manager* plugin_mgr = 0; analyzer::Manager* analyzer_mgr = 0; file_analysis::Manager* file_mgr = 0; -broxygen::Manager* broxygen_mgr = 0; +zeekygen::Manager* zeekygen_mgr = 0; iosource::Manager* iosource_mgr = 0; bro_broker::Manager* broker_mgr = 0; @@ -101,11 +98,6 @@ name_list prefixes; Stmt* stmts; EventHandlerPtr net_done = 0; RuleMatcher* rule_matcher = 0; -PersistenceSerializer* persistence_serializer = 0; -FileSerializer* event_serializer = 0; -FileSerializer* state_serializer = 0; -RemoteSerializer* remote_serializer = 0; -EventPlayer* event_player = 0; EventRegistry* event_registry = 0; ProfileLogger* profiling_logger = 0; ProfileLogger* segment_logger = 0; @@ -116,7 +108,6 @@ char* command_line_policy = 0; vector params; set requested_plugins; char* proc_status_file = 0; -int old_comm_usage_count = 0; OpaqueType* md5_type = 0; OpaqueType* sha1_type = 0; @@ -127,12 +118,13 @@ OpaqueType* topk_type = 0; OpaqueType* bloomfilter_type = 0; OpaqueType* x509_opaque_type = 0; OpaqueType* ocsp_resp_opaque_type = 0; +OpaqueType* paraglob_type = 0; // Keep copy of command line int bro_argc; char** bro_argv; -const char* bro_version() +const char* zeek_version() { #ifdef DEBUG static char* debug_version = 0; @@ -150,25 +142,21 @@ const char* bro_version() #endif } -const char* bro_dns_fake() +bool bro_dns_fake() { - if ( ! 
getenv("BRO_DNS_FAKE") ) - return "off"; - else - return "on"; + return zeekenv("ZEEK_DNS_FAKE"); } void usage(int code = 1) { - fprintf(stderr, "bro version %s\n", bro_version()); + fprintf(stderr, "zeek version %s\n", zeek_version()); fprintf(stderr, "usage: %s [options] [file ...]\n", prog); fprintf(stderr, " | policy file, or read stdin\n"); fprintf(stderr, " -a|--parse-only | exit immediately after parsing scripts\n"); fprintf(stderr, " -b|--bare-mode | don't load scripts from the base/ directory\n"); fprintf(stderr, " -d|--debug-policy | activate policy file debugging\n"); - fprintf(stderr, " -e|--exec | augment loaded policies by given code\n"); + fprintf(stderr, " -e|--exec | augment loaded policies by given code\n"); fprintf(stderr, " -f|--filter | tcpdump filter\n"); - fprintf(stderr, " -g|--dump-config | dump current config into .state dir\n"); fprintf(stderr, " -h|--help | command line help\n"); fprintf(stderr, " -i|--iface | read from given interface\n"); fprintf(stderr, " -p|--prefix | add given prefix to policy file resolution\n"); @@ -177,7 +165,6 @@ void usage(int code = 1) fprintf(stderr, " -t|--tracefile | activate execution tracing\n"); fprintf(stderr, " -v|--version | print version and exit\n"); fprintf(stderr, " -w|--writefile | write to given tcpdump file\n"); - fprintf(stderr, " -x|--print-state | print contents of state file\n"); #ifdef DEBUG fprintf(stderr, " -B|--debug | Enable debugging output for selected streams ('-B help' for help)\n"); #endif @@ -189,12 +176,11 @@ void usage(int code = 1) fprintf(stderr, " -N|--print-plugins | print available plugins and exit (-NN for verbose)\n"); fprintf(stderr, " -P|--prime-dns | prime DNS\n"); fprintf(stderr, " -Q|--time | print execution time summary to stderr\n"); - fprintf(stderr, " -R|--replay | replay events\n"); fprintf(stderr, " -S|--debug-rules | enable rule debugging\n"); fprintf(stderr, " -T|--re-level | set 'RE_level' for rules\n"); fprintf(stderr, " -U|--status-file | Record process status in file\n"); fprintf(stderr, " -W|--watchdog | activate watchdog timer\n"); - fprintf(stderr, " -X|--broxygen | generate documentation based on config file\n"); + fprintf(stderr, " -X|--zeekygen | generate documentation based on config file\n"); #ifdef USE_PERFTOOLS_DEBUG fprintf(stderr, " -m|--mem-leaks | show leaks [perftools]\n"); @@ -206,15 +192,16 @@ void usage(int code = 1) fprintf(stderr, " -n|--idmef-dtd | specify path to IDMEF DTD file\n"); #endif - fprintf(stderr, " $BROPATH | file search path (%s)\n", bro_path().c_str()); - fprintf(stderr, " $BRO_PLUGIN_PATH | plugin search path (%s)\n", bro_plugin_path()); - fprintf(stderr, " $BRO_PLUGIN_ACTIVATE | plugins to always activate (%s)\n", bro_plugin_activate()); - fprintf(stderr, " $BRO_PREFIXES | prefix list (%s)\n", bro_prefixes().c_str()); - fprintf(stderr, " $BRO_DNS_FAKE | disable DNS lookups (%s)\n", bro_dns_fake()); - fprintf(stderr, " $BRO_SEED_FILE | file to load seeds from (not set)\n"); - fprintf(stderr, " $BRO_LOG_SUFFIX | ASCII log file extension (.%s)\n", logging::writer::Ascii::LogExt().c_str()); - fprintf(stderr, " $BRO_PROFILER_FILE | Output file for script execution statistics (not set)\n"); - fprintf(stderr, " $BRO_DISABLE_BROXYGEN | Disable Broxygen documentation support (%s)\n", getenv("BRO_DISABLE_BROXYGEN") ? 
"set" : "not set"); + fprintf(stderr, " $ZEEKPATH | file search path (%s)\n", bro_path().c_str()); + fprintf(stderr, " $ZEEK_PLUGIN_PATH | plugin search path (%s)\n", bro_plugin_path()); + fprintf(stderr, " $ZEEK_PLUGIN_ACTIVATE | plugins to always activate (%s)\n", bro_plugin_activate()); + fprintf(stderr, " $ZEEK_PREFIXES | prefix list (%s)\n", bro_prefixes().c_str()); + fprintf(stderr, " $ZEEK_DNS_FAKE | disable DNS lookups (%s)\n", bro_dns_fake() ? "on" : "off"); + fprintf(stderr, " $ZEEK_SEED_FILE | file to load seeds from (not set)\n"); + fprintf(stderr, " $ZEEK_LOG_SUFFIX | ASCII log file extension (.%s)\n", logging::writer::Ascii::LogExt().c_str()); + fprintf(stderr, " $ZEEK_PROFILER_FILE | Output file for script execution statistics (not set)\n"); + fprintf(stderr, " $ZEEK_DISABLE_ZEEKYGEN | Disable Zeekygen documentation support (%s)\n", zeekenv("ZEEK_DISABLE_ZEEKYGEN") ? "set" : "not set"); + fprintf(stderr, " $ZEEK_DNS_RESOLVER | IPv4/IPv6 address of DNS resolver to use (%s)\n", zeekenv("ZEEK_DNS_RESOLVER") ? zeekenv("ZEEK_DNS_RESOLVER") : "not set, will use first IPv4 address from /etc/resolv.conf"); fprintf(stderr, "\n"); @@ -275,26 +262,18 @@ void done_with_network() { set_processing_status("TERMINATING", "done_with_network"); - // Release the port, which is important for checkpointing Bro. - if ( remote_serializer ) - remote_serializer->StopListening(); - // Cancel any pending alarms (watchdog, in particular). (void) alarm(0); if ( net_done ) { - val_list* args = new val_list; - args->append(new Val(timer_mgr->Time(), TYPE_TIME)); mgr.Drain(); - // Don't propagate this event to remote clients. - mgr.Dispatch(new Event(net_done, args), true); + mgr.Dispatch(new Event(net_done, + {new Val(timer_mgr->Time(), TYPE_TIME)}), + true); } - // Save state before expiring the remaining events/timers. 
- persistence_serializer->WriteState(false); - if ( profiling_logger ) profiling_logger->Log(); @@ -306,9 +285,6 @@ void done_with_network() mgr.Drain(); mgr.Drain(); - if ( remote_serializer ) - remote_serializer->Finish(); - net_finish(1); #ifdef USE_PERFTOOLS_DEBUG @@ -339,9 +315,9 @@ void terminate_bro() brofiler.WriteStats(); - EventHandlerPtr bro_done = internal_handler("bro_done"); - if ( bro_done ) - mgr.QueueEvent(bro_done, new val_list); + EventHandlerPtr zeek_done = internal_handler("zeek_done"); + if ( zeek_done ) + mgr.QueueEventFast(zeek_done, val_list{}); timer_mgr->Expire(); mgr.Drain(); @@ -356,9 +332,6 @@ void terminate_bro() delete profiling_logger; } - if ( remote_serializer ) - remote_serializer->LogStats(); - mgr.Drain(); log_mgr->Terminate(); @@ -370,11 +343,8 @@ void terminate_bro() plugin_mgr->FinishPlugins(); - delete broxygen_mgr; + delete zeekygen_mgr; delete timer_mgr; - delete persistence_serializer; - delete event_serializer; - delete state_serializer; delete event_registry; delete analyzer_mgr; delete file_mgr; @@ -427,70 +397,6 @@ static void bro_new_handler() out_of_memory("new"); } -static auto old_comm_ids = std::set{ - "connect", - "disconnect", - "request_remote_events", - "request_remote_sync", - "request_remote_logs", - "set_accept_state", - "set_compression_level", - "listen", - "send_id", - "terminate_communication", - "complete_handshake", - "send_ping", - "send_current_packet", - "get_event_peer", - "send_capture_filter", - "suspend_state_updates", - "resume_state_updates", -}; - -static bool is_old_comm_usage(const ID* id) - { - auto name = id->Name(); - - if ( old_comm_ids.find(name) == old_comm_ids.end() ) - return false; - - return true; - } - -class OldCommUsageTraversalCallback : public TraversalCallback { -public: - virtual TraversalCode PreExpr(const Expr* expr) override - { - switch ( expr->Tag() ) { - case EXPR_CALL: - { - const CallExpr* call = static_cast(expr); - auto func = call->Func(); - - if ( func->Tag() == EXPR_NAME ) - { - const NameExpr* ne = static_cast(func); - auto id = ne->Id(); - - if ( is_old_comm_usage(id) ) - ++old_comm_usage_count; - } - } - break; - default: - break; - } - - return TC_CONTINUE; - } -}; - -static void find_old_comm_usages() - { - OldCommUsageTraversalCallback cb; - traverse_all(&cb); - } - int main(int argc, char** argv) { std::set_new_handler(bro_new_handler); @@ -508,16 +414,14 @@ int main(int argc, char** argv) name_list interfaces; name_list read_files; name_list rule_files; - char* bst_file = 0; char* id_name = 0; - char* events_file = 0; - char* seed_load_file = getenv("BRO_SEED_FILE"); + + char* seed_load_file = zeekenv("ZEEK_SEED_FILE"); char* seed_save_file = 0; char* user_pcap_filter = 0; char* debug_streams = 0; int parse_only = false; int bare_mode = false; - int dump_cfg = false; int do_watchdog = 0; int override_ignore_checksums = 0; int rule_debug = 0; @@ -529,19 +433,17 @@ int main(int argc, char** argv) {"parse-only", no_argument, 0, 'a'}, {"bare-mode", no_argument, 0, 'b'}, {"debug-policy", no_argument, 0, 'd'}, - {"dump-config", no_argument, 0, 'g'}, {"exec", required_argument, 0, 'e'}, {"filter", required_argument, 0, 'f'}, {"help", no_argument, 0, 'h'}, {"iface", required_argument, 0, 'i'}, - {"broxygen", required_argument, 0, 'X'}, + {"zeekygen", required_argument, 0, 'X'}, {"prefix", required_argument, 0, 'p'}, {"readfile", required_argument, 0, 'r'}, {"rulefile", required_argument, 0, 's'}, {"tracefile", required_argument, 0, 't'}, {"writefile", required_argument, 0, 'w'}, 
{"version", no_argument, 0, 'v'}, - {"print-state", required_argument, 0, 'x'}, {"no-checksums", no_argument, 0, 'C'}, {"force-dns", no_argument, 0, 'F'}, {"load-seeds", required_argument, 0, 'G'}, @@ -549,7 +451,6 @@ int main(int argc, char** argv) {"print-plugins", no_argument, 0, 'N'}, {"prime-dns", no_argument, 0, 'P'}, {"time", no_argument, 0, 'Q'}, - {"replay", required_argument, 0, 'R'}, {"debug-rules", no_argument, 0, 'S'}, {"re-level", required_argument, 0, 'T'}, {"watchdog", no_argument, 0, 'W'}, @@ -574,7 +475,7 @@ int main(int argc, char** argv) enum DNS_MgrMode dns_type = DNS_DEFAULT; - dns_type = getenv("BRO_DNS_FAKE") ? DNS_FAKE : DNS_DEFAULT; + dns_type = bro_dns_fake() ? DNS_FAKE : DNS_DEFAULT; RETSIGTYPE (*oldhandler)(int); @@ -582,11 +483,12 @@ int main(int argc, char** argv) prefixes.append(strdup("")); // "" = "no prefix" - char* p = getenv("BRO_PREFIXES"); + char* p = zeekenv("ZEEK_PREFIXES"); + if ( p ) add_to_name_list(p, ':', prefixes); - string broxygen_config; + string zeekygen_config; #ifdef USE_IDMEF string libidmef_dtd_path = "idmef-message.dtd"; @@ -599,7 +501,7 @@ int main(int argc, char** argv) opterr = 0; char opts[256]; - safe_strncpy(opts, "B:e:f:G:H:I:i:n:p:R:r:s:T:t:U:w:x:X:CFNPQSWabdghv", + safe_strncpy(opts, "B:e:f:G:H:I:i:n:p:r:s:T:t:U:w:X:CFNPQSWabdhv", sizeof(opts)); #ifdef USE_PERFTOOLS_DEBUG @@ -630,10 +532,6 @@ int main(int argc, char** argv) user_pcap_filter = optarg; break; - case 'g': - dump_cfg = true; - break; - case 'h': usage(0); break; @@ -660,7 +558,7 @@ int main(int argc, char** argv) break; case 'v': - fprintf(stdout, "%s version %s\n", prog, bro_version()); + fprintf(stdout, "%s version %s\n", prog, zeek_version()); exit(0); break; @@ -668,10 +566,6 @@ int main(int argc, char** argv) writefile = optarg; break; - case 'x': - bst_file = optarg; - break; - case 'B': debug_streams = optarg; break; @@ -718,10 +612,6 @@ int main(int argc, char** argv) time_bro = 1; break; - case 'R': - events_file = optarg; - break; - case 'S': rule_debug = 1; break; @@ -739,7 +629,7 @@ int main(int argc, char** argv) break; case 'X': - broxygen_config = optarg; + zeekygen_config = optarg; break; #ifdef USE_PERFTOOLS_DEBUG @@ -821,20 +711,20 @@ int main(int argc, char** argv) timer_mgr = new PQ_TimerMgr(""); // timer_mgr = new CQ_TimerMgr(); - broxygen_mgr = new broxygen::Manager(broxygen_config, bro_argv[0]); + zeekygen_mgr = new zeekygen::Manager(zeekygen_config, bro_argv[0]); - add_essential_input_file("base/init-bare.bro"); - add_essential_input_file("base/init-frameworks-and-bifs.bro"); + add_essential_input_file("base/init-bare.zeek"); + add_essential_input_file("base/init-frameworks-and-bifs.zeek"); if ( ! bare_mode ) - add_input_file("base/init-default.bro"); + add_input_file("base/init-default.zeek"); plugin_mgr->SearchDynamicPlugins(bro_plugin_path()); if ( optind == argc && read_files.length() == 0 && interfaces.length() == 0 && - ! (id_name || bst_file) && ! command_line_policy && ! print_plugins ) + ! id_name && ! command_line_policy && ! print_plugins ) add_input_file("-"); // Process remaining arguments. 
X=Y arguments indicate script @@ -860,8 +750,6 @@ int main(int argc, char** argv) dns_mgr->SetDir(".state"); iosource_mgr = new iosource::Manager(); - persistence_serializer = new PersistenceSerializer(); - remote_serializer = new RemoteSerializer(); event_registry = new EventRegistry(); analyzer_mgr = new analyzer::Manager(); log_mgr = new logging::Manager(); @@ -872,7 +760,7 @@ int main(int argc, char** argv) plugin_mgr->InitPreScript(); analyzer_mgr->InitPreScript(); file_mgr->InitPreScript(); - broxygen_mgr->InitPreScript(); + zeekygen_mgr->InitPreScript(); bool missing_plugin = false; @@ -888,13 +776,6 @@ int main(int argc, char** argv) plugin_mgr->ActivateDynamicPlugins(! bare_mode); - if ( events_file ) - event_player = new EventPlayer(events_file); - - // Must come after plugin activation (and also after hash - // initialization). - binpac::init(); - init_event_handlers(); md5_type = new OpaqueType("md5"); @@ -906,6 +787,7 @@ int main(int argc, char** argv) bloomfilter_type = new OpaqueType("bloomfilter"); x509_opaque_type = new OpaqueType("x509"); ocsp_resp_opaque_type = new OpaqueType("ocsp_resp"); + paraglob_type = new OpaqueType("paraglob"); // The leak-checker tends to produce some false // positives (memory which had already been @@ -922,36 +804,30 @@ int main(int argc, char** argv) yyparse(); is_parsing = false; - find_old_comm_usages(); - - if ( old_comm_usage_count ) - { - auto old_comm_ack_id = global_scope()->Lookup("old_comm_usage_is_ok"); - - if ( ! old_comm_ack_id->ID_Val()->AsBool() ) - reporter->FatalError("Detected old, deprecated communication " - "system usages that will not work unless " - "you explicitly take action to initizialize " - "and set up the old comm. system. " - "Set the 'old_comm_usage_is_ok' flag " - "to bypass this error if you've taken such " - "actions, but the suggested solution is to " - "port scripts to use the new Broker API."); - } - RecordVal::ResizeParseTimeRecords(); init_general_global_var(); init_net_var(); init_builtin_funcs_subdirs(); + // Must come after plugin activation (and also after hash + // initialization). + binpac::FlowBuffer::Policy flowbuffer_policy; + flowbuffer_policy.max_capacity = global_scope()->Lookup( + "BinPAC::flowbuffer_capacity_max")->ID_Val()->AsCount(); + flowbuffer_policy.min_capacity = global_scope()->Lookup( + "BinPAC::flowbuffer_capacity_min")->ID_Val()->AsCount(); + flowbuffer_policy.contract_threshold = global_scope()->Lookup( + "BinPAC::flowbuffer_contract_threshold")->ID_Val()->AsCount(); + binpac::init(&flowbuffer_policy); + plugin_mgr->InitBifs(); if ( reporter->Errors() > 0 ) exit(1); plugin_mgr->InitPostScript(); - broxygen_mgr->InitPostScript(); + zeekygen_mgr->InitPostScript(); broker_mgr->InitPostScript(); if ( print_plugins ) @@ -981,7 +857,7 @@ int main(int argc, char** argv) } reporter->InitOptions(); - broxygen_mgr->GenerateDocs(); + zeekygen_mgr->GenerateDocs(); if ( user_pcap_filter ) { @@ -1046,8 +922,6 @@ int main(int argc, char** argv) if ( dns_type != DNS_PRIME ) net_init(interfaces, read_files, writefile, do_watchdog); - BroFile::SetDefaultRotation(log_rotate_interval, log_max_size); - net_done = internal_handler("net_done"); if ( ! g_policy_debug ) @@ -1074,26 +948,9 @@ int main(int argc, char** argv) exit(0); } - // Just read state file from disk. - if ( bst_file ) - { - FileSerializer s; - UnserialInfo info(&s); - info.print = stdout; - info.install_uniques = true; - if ( ! 
s.Read(&info, bst_file) ) - reporter->Error("Failed to read events from %s\n", bst_file); - - exit(0); - } - - persistence_serializer->SetDir((const char *)state_dir->AsString()->CheckString()); - // Print the ID. if ( id_name ) { - persistence_serializer->ReadAll(true, false); - ID* id = global_scope()->Lookup(id_name); if ( ! id ) reporter->FatalError("No such ID: %s\n", id_name); @@ -1107,14 +964,6 @@ int main(int argc, char** argv) exit(0); } - persistence_serializer->ReadAll(true, true); - - if ( dump_cfg ) - { - persistence_serializer->WriteConfig(false); - exit(0); - } - if ( profiling_interval > 0 ) { profiling_logger = new ProfileLogger(profiling_file->AsFile(), @@ -1129,9 +978,9 @@ int main(int argc, char** argv) // we don't have any other source for it. net_update_time(current_time()); - EventHandlerPtr bro_init = internal_handler("bro_init"); - if ( bro_init ) //### this should be a function - mgr.QueueEvent(bro_init, new val_list); + EventHandlerPtr zeek_init = internal_handler("zeek_init"); + if ( zeek_init ) //### this should be a function + mgr.QueueEventFast(zeek_init, val_list{}); EventRegistry::string_list* dead_handlers = event_registry->UnusedHandlers(); @@ -1177,16 +1026,19 @@ int main(int argc, char** argv) if ( override_ignore_checksums ) ignore_checksums = 1; - // Queue events reporting loaded scripts. - for ( std::list::iterator i = files_scanned.begin(); i != files_scanned.end(); i++ ) + if ( zeek_script_loaded ) { - if ( i->skipped ) - continue; + // Queue events reporting loaded scripts. + for ( std::list::iterator i = files_scanned.begin(); i != files_scanned.end(); i++ ) + { + if ( i->skipped ) + continue; - val_list* vl = new val_list; - vl->append(new StringVal(i->name.c_str())); - vl->append(val_mgr->GetCount(i->include_level)); - mgr.QueueEvent(bro_script_loaded, vl); + mgr.QueueEventFast(zeek_script_loaded, { + new StringVal(i->name.c_str()), + val_mgr->GetCount(i->include_level), + }); + } } reporter->ReportViaEvents(true); @@ -1194,10 +1046,11 @@ int main(int argc, char** argv) // Drain the event queue here to support the protocols framework configuring DPM mgr.Drain(); - if ( reporter->Errors() > 0 && ! getenv("ZEEK_ALLOW_INIT_ERRORS") ) + if ( reporter->Errors() > 0 && ! zeekenv("ZEEK_ALLOW_INIT_ERRORS") ) reporter->FatalError("errors occurred while initializing"); - broker_mgr->BroInitDone(); + broker_mgr->ZeekInitDone(); + reporter->ZeekInitDone(); analyzer_mgr->DumpDebug(); have_pending_timers = ! reading_traces && timer_mgr->Size() > 0; @@ -1277,7 +1130,6 @@ int main(int argc, char** argv) } else { - persistence_serializer->WriteState(false); terminate_bro(); } diff --git a/src/nb_dns.c b/src/nb_dns.c index f8abc167b5..f8d939b4ab 100644 --- a/src/nb_dns.c +++ b/src/nb_dns.c @@ -11,7 +11,7 @@ * crack reply buffers is private. */ -#include "bro-config.h" /* must appear before first ifdef */ +#include "zeek-config.h" /* must appear before first ifdef */ #include #include diff --git a/src/net_util.cc b/src/net_util.cc index 9f93296d39..6f195a495f 100644 --- a/src/net_util.cc +++ b/src/net_util.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include #include diff --git a/src/net_util.h b/src/net_util.h index 52ee53f1dd..a5e11da74b 100644 --- a/src/net_util.h +++ b/src/net_util.h @@ -3,7 +3,7 @@ #ifndef netutil_h #define netutil_h -#include "bro-config.h" +#include "zeek-config.h" // Define first. 
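The main.cc hunks above also move event queueing from heap-allocated val_list pointers to value-semantics argument lists handed to QueueEventFast. A minimal sketch of the new style; the handler name is hypothetical, while the argument constructors mirror the zeek_script_loaded change above:

    // Sketch of the value-semantics event queueing used in main.cc above.
    EventHandlerPtr my_event = internal_handler("my_event");   // hypothetical handler

    if ( my_event )
        mgr.QueueEventFast(my_event, {
            new StringVal("example.zeek"),
            val_mgr->GetCount(0),
        });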
typedef enum { diff --git a/src/option.bif b/src/option.bif index 2156808763..db9ad882d9 100644 --- a/src/option.bif +++ b/src/option.bif @@ -15,10 +15,12 @@ static bool call_option_handlers_and_set_value(StringVal* name, ID* i, Val* val, { for ( auto handler_function : i->GetOptionHandlers() ) { - val_list vl(2); + bool add_loc = handler_function->FType()->AsFuncType()->ArgTypes()->Types()->length() == 3; + val_list vl(2 + add_loc); vl.append(name->Ref()); vl.append(val); - if ( handler_function->FType()->AsFuncType()->ArgTypes()->Types()->length() == 3 ) + + if ( add_loc ) vl.append(location->Ref()); val = handler_function->Call(&vl); // consumed by next call. @@ -48,10 +50,10 @@ static bool call_option_handlers_and_set_value(StringVal* name, ID* i, Val* val, ## ## Returns: true on success, false when an error occurred. ## -## .. bro:see:: Option::set_change_handler Config::set_value +## .. zeek:see:: Option::set_change_handler Config::set_value ## -## .. note:: :bro:id:`Option::set` only works on one node and does not distribute -## new values across a cluster. The higher-level :bro:id:`Config::set_value` +## .. note:: :zeek:id:`Option::set` only works on one node and does not distribute +## new values across a cluster. The higher-level :zeek:id:`Config::set_value` ## supports clusterization and should typically be used instead of this ## lower-level function. function Option::set%(ID: string, val: any, location: string &default=""%): bool @@ -105,7 +107,7 @@ function Option::set%(ID: string, val: any, location: string &default=""%): bool %} ## Set a change handler for an option. The change handler will be -## called anytime :bro:id:`Option::set` is called for the option. +## called anytime :zeek:id:`Option::set` is called for the option. ## ## ID: The ID of the option for which change notifications are desired. ## @@ -127,7 +129,7 @@ function Option::set%(ID: string, val: any, location: string &default=""%): bool ## ## Returns: true when the change handler was set, false when an error occurred. ## -## .. bro:see:: Option::set +## .. zeek:see:: Option::set function Option::set_change_handler%(ID: string, on_change: any, priority: int &default=0%): bool %{ auto i = global_scope()->Lookup(ID->CheckString()); diff --git a/src/parse.y b/src/parse.y index c0980ce8de..820e958a98 100644 --- a/src/parse.y +++ b/src/parse.y @@ -5,7 +5,7 @@ // Switching parser table type fixes ambiguity problems. 
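// Sketch of the handler-arity logic introduced in option.bif above: a
// change handler declared with a third parameter also receives the
// location string, while two-parameter handlers get only (ID, value).
// This restates the hunk as one self-contained fragment; "handler",
// "name", "val" and "location" stand for the variables used there.
bool add_loc = handler->FType()->AsFuncType()->ArgTypes()->Types()->length() == 3;

val_list vl(2 + add_loc);
vl.append(name->Ref());
vl.append(val);

if ( add_loc )
	vl.append(location->Ref());

val = handler->Call(&vl); // consumed by the next handler in the chain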
%define lr.type ielr -%expect 141 +%expect 105 %token TOK_ADD TOK_ADD_TO TOK_ADDR TOK_ANY %token TOK_ATENDIF TOK_ATELSE TOK_ATIF TOK_ATIFDEF TOK_ATIFNDEF @@ -21,12 +21,10 @@ %token TOK_TIME TOK_TIMEOUT TOK_TIMER TOK_TYPE TOK_UNION TOK_VECTOR TOK_WHEN %token TOK_WHILE TOK_AS TOK_IS -%token TOK_ATTR_ADD_FUNC TOK_ATTR_ENCRYPT TOK_ATTR_DEFAULT -%token TOK_ATTR_OPTIONAL TOK_ATTR_REDEF TOK_ATTR_ROTATE_INTERVAL -%token TOK_ATTR_ROTATE_SIZE TOK_ATTR_DEL_FUNC TOK_ATTR_EXPIRE_FUNC +%token TOK_ATTR_ADD_FUNC TOK_ATTR_DEFAULT TOK_ATTR_OPTIONAL TOK_ATTR_REDEF +%token TOK_ATTR_DEL_FUNC TOK_ATTR_EXPIRE_FUNC %token TOK_ATTR_EXPIRE_CREATE TOK_ATTR_EXPIRE_READ TOK_ATTR_EXPIRE_WRITE -%token TOK_ATTR_PERSISTENT TOK_ATTR_SYNCHRONIZED -%token TOK_ATTR_RAW_OUTPUT TOK_ATTR_MERGEABLE +%token TOK_ATTR_RAW_OUTPUT %token TOK_ATTR_PRIORITY TOK_ATTR_LOG TOK_ATTR_ERROR_HANDLER %token TOK_ATTR_TYPE_COLUMN TOK_ATTR_DEPRECATED @@ -52,14 +50,14 @@ %left '$' '[' ']' '(' ')' TOK_HAS_FIELD TOK_HAS_ATTR %nonassoc TOK_AS TOK_IS -%type opt_no_test opt_no_test_block opt_deprecated TOK_PATTERN_END +%type opt_no_test opt_no_test_block TOK_PATTERN_END %type TOK_ID TOK_PATTERN_TEXT %type local_id global_id def_global_id event_id global_or_event_id resolve_id begin_func case_type %type local_id_list case_type_list %type init_class %type opt_init %type TOK_CONSTANT -%type expr opt_expr init anonymous_function +%type expr opt_expr init anonymous_function index_slice opt_deprecated %type event %type stmt stmt_list func_body for_head %type type opt_type enum_body @@ -88,7 +86,7 @@ #include "Scope.h" #include "Reporter.h" #include "Brofiler.h" -#include "broxygen/Manager.h" +#include "zeekygen/Manager.h" #include #include @@ -466,6 +464,12 @@ expr: | expr '=' expr { set_location(@1, @3); + + if ( $1->Tag() == EXPR_INDEX && $1->AsIndexExpr()->IsSlice() ) + reporter->Error("index slice assignment may not be used" + " in arbitrary expression contexts, only" + " as a statement"); + $$ = get_assign_expr($1, $3, in_init); } @@ -481,15 +485,7 @@ expr: $$ = new IndexExpr($1, $3); } - | expr '[' opt_expr ':' opt_expr ']' - { - set_location(@1, @6); - Expr* low = $3 ? $3 : new ConstExpr(val_mgr->GetCount(0)); - Expr* high = $5 ? 
$5 : new SizeExpr($1); - ListExpr* le = new ListExpr(low); - le->Append(high); - $$ = new IndexExpr($1, le, true); - } + | index_slice | expr '$' TOK_ID { @@ -704,7 +700,7 @@ expr: $$ = new NameExpr(id); if ( id->IsDeprecated() ) - reporter->Warning("deprecated (%s)", id->Name()); + reporter->Warning("%s", id->GetDeprecationWarning().c_str()); } } @@ -1006,7 +1002,7 @@ type: Ref($$); if ( $1->IsDeprecated() ) - reporter->Warning("deprecated (%s)", $1->Name()); + reporter->Warning("%s", $1->GetDeprecationWarning().c_str()); } } ; @@ -1039,7 +1035,7 @@ type_decl: $$ = new TypeDecl($3, $1, $4, (in_record > 0)); if ( in_record > 0 && cur_decl_type_id ) - broxygen_mgr->RecordField(cur_decl_type_id, $$, ::filename); + zeekygen_mgr->RecordField(cur_decl_type_id, $$, ::filename); } ; @@ -1073,7 +1069,7 @@ decl: TOK_MODULE TOK_ID ';' { current_module = $2; - broxygen_mgr->ModuleUsage(::filename, current_module); + zeekygen_mgr->ModuleUsage(::filename, current_module); } | TOK_EXPORT '{' { is_export = true; } decl_list '}' @@ -1082,36 +1078,36 @@ decl: | TOK_GLOBAL def_global_id opt_type init_class opt_init opt_attr ';' { add_global($2, $3, $4, $5, $6, VAR_REGULAR); - broxygen_mgr->Identifier($2); + zeekygen_mgr->Identifier($2); } | TOK_OPTION def_global_id opt_type init_class opt_init opt_attr ';' { add_global($2, $3, $4, $5, $6, VAR_OPTION); - broxygen_mgr->Identifier($2); + zeekygen_mgr->Identifier($2); } | TOK_CONST def_global_id opt_type init_class opt_init opt_attr ';' { add_global($2, $3, $4, $5, $6, VAR_CONST); - broxygen_mgr->Identifier($2); + zeekygen_mgr->Identifier($2); } | TOK_REDEF global_id opt_type init_class opt_init opt_attr ';' { add_global($2, $3, $4, $5, $6, VAR_REDEF); - broxygen_mgr->Redef($2, ::filename); + zeekygen_mgr->Redef($2, ::filename, $4, $5); } | TOK_REDEF TOK_ENUM global_id TOK_ADD_TO '{' - { parser_redef_enum($3); broxygen_mgr->Redef($3, ::filename); } + { parser_redef_enum($3); zeekygen_mgr->Redef($3, ::filename); } enum_body '}' ';' { - // Broxygen already grabbed new enum IDs as the type created them. + // Zeekygen already grabbed new enum IDs as the type created them. } | TOK_REDEF TOK_RECORD global_id - { cur_decl_type_id = $3; broxygen_mgr->Redef($3, ::filename); } + { cur_decl_type_id = $3; zeekygen_mgr->Redef($3, ::filename); } TOK_ADD_TO '{' { ++in_record; } type_decl_list @@ -1127,12 +1123,12 @@ decl: } | TOK_TYPE global_id ':' - { cur_decl_type_id = $2; broxygen_mgr->StartType($2); } + { cur_decl_type_id = $2; zeekygen_mgr->StartType($2); } type opt_attr ';' { cur_decl_type_id = 0; add_type($2, $5, $6); - broxygen_mgr->Identifier($2); + zeekygen_mgr->Identifier($2); } | func_hdr func_body @@ -1167,10 +1163,19 @@ func_hdr: begin_func($2, current_module.c_str(), FUNC_FLAVOR_FUNCTION, 0, $3, $4); $$ = $3; - broxygen_mgr->Identifier($2); + zeekygen_mgr->Identifier($2); } | TOK_EVENT event_id func_params opt_attr { + // Gracefully handle the deprecation of bro_init, bro_done, + // and bro_script_loaded + if ( streq("bro_init", $2->Name()) ) + $2 = global_scope()->Lookup("zeek_init"); + else if ( streq("bro_done", $2->Name()) ) + $2 = global_scope()->Lookup("zeek_done"); + else if ( streq("bro_script_loaded", $2->Name()) ) + $2 = global_scope()->Lookup("zeek_script_loaded"); + begin_func($2, current_module.c_str(), FUNC_FLAVOR_EVENT, 0, $3, $4); $$ = $3; @@ -1260,6 +1265,21 @@ init: | expr ; +index_slice: + expr '[' opt_expr ':' opt_expr ']' + { + set_location(@1, @6); + Expr* low = $3 ? $3 : new ConstExpr(val_mgr->GetCount(0)); + Expr* high = $5 ? 
$5 : new SizeExpr($1); + + if ( ! IsIntegral(low->Type()->Tag()) || ! IsIntegral(high->Type()->Tag()) ) + reporter->Error("slice notation must have integral values as indexes"); + + ListExpr* le = new ListExpr(low); + le->Append(high); + $$ = new IndexExpr($1, le, true); + } + opt_attr: attr_list | @@ -1283,10 +1303,6 @@ attr: { $$ = new Attr(ATTR_OPTIONAL); } | TOK_ATTR_REDEF { $$ = new Attr(ATTR_REDEF); } - | TOK_ATTR_ROTATE_INTERVAL '=' expr - { $$ = new Attr(ATTR_ROTATE_INTERVAL, $3); } - | TOK_ATTR_ROTATE_SIZE '=' expr - { $$ = new Attr(ATTR_ROTATE_SIZE, $3); } | TOK_ATTR_ADD_FUNC '=' expr { $$ = new Attr(ATTR_ADD_FUNC, $3); } | TOK_ATTR_DEL_FUNC '=' expr @@ -1299,18 +1315,8 @@ attr: { $$ = new Attr(ATTR_EXPIRE_READ, $3); } | TOK_ATTR_EXPIRE_WRITE '=' expr { $$ = new Attr(ATTR_EXPIRE_WRITE, $3); } - | TOK_ATTR_PERSISTENT - { $$ = new Attr(ATTR_PERSISTENT); } - | TOK_ATTR_SYNCHRONIZED - { $$ = new Attr(ATTR_SYNCHRONIZED); } - | TOK_ATTR_ENCRYPT - { $$ = new Attr(ATTR_ENCRYPT); } - | TOK_ATTR_ENCRYPT '=' expr - { $$ = new Attr(ATTR_ENCRYPT, $3); } | TOK_ATTR_RAW_OUTPUT { $$ = new Attr(ATTR_RAW_OUTPUT); } - | TOK_ATTR_MERGEABLE - { $$ = new Attr(ATTR_MERGEABLE); } | TOK_ATTR_PRIORITY '=' expr { $$ = new Attr(ATTR_PRIORITY, $3); } | TOK_ATTR_TYPE_COLUMN '=' expr @@ -1321,6 +1327,19 @@ attr: { $$ = new Attr(ATTR_ERROR_HANDLER); } | TOK_ATTR_DEPRECATED { $$ = new Attr(ATTR_DEPRECATED); } + | TOK_ATTR_DEPRECATED '=' TOK_CONSTANT + { + if ( IsString($3->Type()->Tag()) ) + $$ = new Attr(ATTR_DEPRECATED, new ConstExpr($3)); + else + { + ODesc d; + $3->Describe(&d); + reporter->Error("'&deprecated=%s' must use a string literal", + d.Description()); + $$ = new Attr(ATTR_DEPRECATED); + } + } ; stmt: @@ -1477,6 +1496,15 @@ stmt: brofiler.DecIgnoreDepth(); } + | index_slice '=' expr ';' opt_no_test + { + set_location(@1, @4); + $$ = new ExprStmt(get_assign_expr($1, $3, in_init)); + + if ( ! 
$5 ) + brofiler.AddStmt($$); + } + | expr ';' opt_no_test { set_location(@1, @2); @@ -1520,7 +1548,7 @@ event: YYERROR; } if ( id->IsDeprecated() ) - reporter->Warning("deprecated (%s)", id->Name()); + reporter->Warning("%s", id->GetDeprecationWarning().c_str()); } $$ = new EventExpr($1, $3); @@ -1726,7 +1754,7 @@ global_or_event_id: if ( t->Tag() != TYPE_FUNC || t->AsFuncType()->Flavor() != FUNC_FLAVOR_FUNCTION ) - reporter->Warning("deprecated (%s)", $$->Name()); + reporter->Warning("%s", $$->GetDeprecationWarning().c_str()); } delete [] $1; @@ -1772,9 +1800,23 @@ opt_no_test_block: opt_deprecated: TOK_ATTR_DEPRECATED - { $$ = true; } + { $$ = new ConstExpr(new StringVal("")); } | - { $$ = false; } + TOK_ATTR_DEPRECATED '=' TOK_CONSTANT + { + if ( IsString($3->Type()->Tag()) ) + $$ = new ConstExpr($3); + else + { + ODesc d; + $3->Describe(&d); + reporter->Error("'&deprecated=%s' must use a string literal", + d.Description()); + $$ = new ConstExpr(new StringVal("")); + } + } + | + { $$ = nullptr; } %% diff --git a/src/plugin/ComponentManager.h b/src/plugin/ComponentManager.h index 0069c77359..30b3628588 100644 --- a/src/plugin/ComponentManager.h +++ b/src/plugin/ComponentManager.h @@ -10,7 +10,7 @@ #include "Var.h" #include "Val.h" #include "Reporter.h" -#include "broxygen/Manager.h" +#include "zeekygen/Manager.h" namespace plugin { @@ -134,7 +134,7 @@ ComponentManager::ComponentManager(const string& arg_module, const string& tag_enum_type = new EnumType(module + "::" + local_id); ::ID* id = install_ID(local_id.c_str(), module.c_str(), true, true); add_type(id, tag_enum_type, 0); - broxygen_mgr->Identifier(id); + zeekygen_mgr->Identifier(id); } template @@ -244,7 +244,7 @@ void ComponentManager::RegisterComponent(C* component, string id = fmt("%s%s", prefix.c_str(), cname.c_str()); tag_enum_type->AddName(module, id.c_str(), component->Tag().AsEnumVal()->InternalInt(), true, - false); + nullptr); } } // namespace plugin diff --git a/src/plugin/Manager.cc b/src/plugin/Manager.cc index 836520d03a..ce13397046 100644 --- a/src/plugin/Manager.cc +++ b/src/plugin/Manager.cc @@ -13,6 +13,7 @@ #include "../Reporter.h" #include "../Func.h" #include "../Event.h" +#include "../util.h" using namespace plugin; @@ -173,39 +174,53 @@ bool Manager::ActivateDynamicPluginInternal(const std::string& name, bool ok_if_ DBG_LOG(DBG_PLUGINS, "Activating plugin %s", name.c_str()); - // Add the "scripts" and "bif" directories to BROPATH. + // Add the "scripts" and "bif" directories to ZEEKPATH. std::string scripts = dir + "scripts"; if ( is_dir(scripts) ) { - DBG_LOG(DBG_PLUGINS, " Adding %s to BROPATH", scripts.c_str()); + DBG_LOG(DBG_PLUGINS, " Adding %s to ZEEKPATH", scripts.c_str()); add_to_bro_path(scripts); } - // First load {scripts}/__preload__.bro automatically. - string init = dir + "scripts/__preload__.bro"; + string init; - if ( is_file(init) ) + // First load {scripts}/__preload__.zeek automatically. + for (const string& ext : script_extensions) { - DBG_LOG(DBG_PLUGINS, " Loading %s", init.c_str()); - scripts_to_load.push_back(init); + init = dir + "scripts/__preload__" + ext; + + if ( is_file(init) ) + { + DBG_LOG(DBG_PLUGINS, " Loading %s", init.c_str()); + scripts_to_load.push_back(init); + break; + } } - // Load {bif,scripts}/__load__.bro automatically. - init = dir + "lib/bif/__load__.bro"; - - if ( is_file(init) ) + // Load {bif,scripts}/__load__.zeek automatically. 
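// A hedged sketch of the probing pattern used here: try each known
// script extension in order and keep the first existing file. Assumes
// script_extensions (from util.h, included above) iterates over
// extension strings such as ".zeek" and ".bro"; find_plugin_script is
// a hypothetical helper, not part of this change.
static std::string find_plugin_script(const std::string& stem)
	{
	for ( const std::string& ext : script_extensions )
		{
		std::string candidate = stem + ext;

		if ( is_file(candidate) )
			return candidate; // first matching extension wins
		}

	return std::string();
	}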
+ for (const string& ext : script_extensions) { - DBG_LOG(DBG_PLUGINS, " Loading %s", init.c_str()); - scripts_to_load.push_back(init); + init = dir + "lib/bif/__load__" + ext; + + if ( is_file(init) ) + { + DBG_LOG(DBG_PLUGINS, " Loading %s", init.c_str()); + scripts_to_load.push_back(init); + break; + } } - init = dir + "scripts/__load__.bro"; - - if ( is_file(init) ) + for (const string& ext : script_extensions) { - DBG_LOG(DBG_PLUGINS, " Loading %s", init.c_str()); - scripts_to_load.push_back(init); + init = dir + "scripts/__load__" + ext; + + if ( is_file(init) ) + { + DBG_LOG(DBG_PLUGINS, " Loading %s", init.c_str()); + scripts_to_load.push_back(init); + break; + } } // Load shared libraries. diff --git a/src/plugin/Manager.h b/src/plugin/Manager.h index 61b8dc1047..5fd1070452 100644 --- a/src/plugin/Manager.h +++ b/src/plugin/Manager.h @@ -76,7 +76,7 @@ public: /** * Activates a plugin that SearchDynamicPlugins() has previously discovered. * Activating a plugin involves loading its dynamic module, making its - * bifs available, and adding its script paths to BROPATH. + * bifs available, and adding its script paths to ZEEKPATH. * * @param name The name of the plugin, as found previously by * SearchPlugin(). @@ -92,7 +92,7 @@ public: * * @param all If true, activates all plugins that are found. If false, * activates only those that should always be activated unconditionally, - * as specified via the BRO_PLUGIN_ACTIVATE enviroment variable. In other + * as specified via the ZEEK_PLUGIN_ACTIVATE enviroment variable. In other * words, it's \c true in standard mode and \c false in bare mode. * * @return True if all plugins have been loaded successfully. If one diff --git a/src/plugin/Plugin.h b/src/plugin/Plugin.h index 369da09037..66a9b90376 100644 --- a/src/plugin/Plugin.h +++ b/src/plugin/Plugin.h @@ -7,7 +7,7 @@ #include #include -#include "bro-config.h" +#include "zeek-config.h" #include "analyzer/Component.h" #include "file_analysis/Component.h" #include "iosource/Component.h" @@ -69,7 +69,7 @@ extern const char* hook_name(HookType h); struct VersionNumber { int major; //< Major version number. int minor; //< Minor version number. - int patch; //< Patch version number (available since Bro 2.7). + int patch; //< Patch version number (available since Zeek 3.0). /** * Constructor. diff --git a/src/probabilistic/BitVector.cc b/src/probabilistic/BitVector.cc index 7fa80c206b..0025e417cb 100644 --- a/src/probabilistic/BitVector.cc +++ b/src/probabilistic/BitVector.cc @@ -5,7 +5,6 @@ #include #include "BitVector.h" -#include "Serializer.h" #include "digest.h" using namespace probabilistic; @@ -506,6 +505,47 @@ uint64 BitVector::Hash() const return digest; } +broker::expected BitVector::Serialize() const + { + broker::vector v = {static_cast(num_bits), static_cast(bits.size())}; + v.reserve(2 + bits.size()); + + for ( size_t i = 0; i < bits.size(); ++i ) + v.emplace_back(static_cast(bits[i])); + + return {std::move(v)}; + } + +std::unique_ptr BitVector::Unserialize(const broker::data& data) + { + auto v = caf::get_if(&data); + if ( ! (v && v->size() >= 2) ) + return nullptr; + + auto num_bits = caf::get_if(&(*v)[0]); + auto size = caf::get_if(&(*v)[1]); + + if ( ! (num_bits && size) ) + return nullptr; + + if ( v->size() != 2 + *size ) + return nullptr; + + auto bv = std::unique_ptr(new BitVector()); + bv->num_bits = *num_bits; + + for ( size_t i = 0; i < *size; ++i ) + { + auto x = caf::get_if(&(*v)[2 + i]); + if ( ! 
x ) + return nullptr; + + bv->bits.push_back(*x); + } + + return bv; + } + BitVector::size_type BitVector::lowest_bit(block_type block) { block_type x = block - (block & (block - 1)); @@ -539,56 +579,3 @@ BitVector::size_type BitVector::find_from(size_type i) const return i * bits_per_block + lowest_bit(bits[i]); } -bool BitVector::Serialize(SerialInfo* info) const - { - return SerialObj::Serialize(info); - } - -BitVector* BitVector::Unserialize(UnserialInfo* info) - { - return reinterpret_cast(SerialObj::Unserialize(info, SER_BITVECTOR)); - } - -IMPLEMENT_SERIAL(BitVector, SER_BITVECTOR); - -bool BitVector::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_BITVECTOR, SerialObj); - - if ( ! SERIALIZE(static_cast(bits.size())) ) - return false; - - for ( size_t i = 0; i < bits.size(); ++i ) - if ( ! SERIALIZE(static_cast(bits[i])) ) - return false; - - return SERIALIZE(static_cast(num_bits)); - } - -bool BitVector::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(SerialObj); - - uint64 size; - if ( ! UNSERIALIZE(&size) ) - return false; - - bits.resize(static_cast(size)); - - for ( size_t i = 0; i < bits.size(); ++i ) - { - uint64 block; - if ( ! UNSERIALIZE(&block) ) - return false; - - bits[i] = static_cast(block); - } - - uint64 n; - if ( ! UNSERIALIZE(&n) ) - return false; - - num_bits = static_cast(n); - - return true; - } diff --git a/src/probabilistic/BitVector.h b/src/probabilistic/BitVector.h index a1ff0c9ad9..ecec6f5714 100644 --- a/src/probabilistic/BitVector.h +++ b/src/probabilistic/BitVector.h @@ -6,16 +6,17 @@ #include #include -#include "SerialObj.h" +#include +#include namespace probabilistic { /** * A vector of bits. */ -class BitVector : public SerialObj { +class BitVector { public: - typedef uint64 block_type; + typedef uint64_t block_type; typedef size_t size_type; typedef bool const_reference; @@ -281,28 +282,10 @@ public: * * @return The hash. */ - uint64 Hash() const; + uint64_t Hash() const; - /** - * Serializes the bit vector. - * - * @param info The serializaton informationt to use. - * - * @return True if successful. - */ - bool Serialize(SerialInfo* info) const; - - /** - * Unserialize the bit vector. - * - * @param info The serializaton informationt to use. - * - * @return The unserialized bit vector, or null if an error occured. - */ - static BitVector* Unserialize(UnserialInfo* info); - -protected: - DECLARE_SERIAL(BitVector); + broker::expected Serialize() const; + static std::unique_ptr Unserialize(const broker::data& data); private: /** diff --git a/src/probabilistic/BloomFilter.cc b/src/probabilistic/BloomFilter.cc index ef671268b9..dd89bf9c19 100644 --- a/src/probabilistic/BloomFilter.cc +++ b/src/probabilistic/BloomFilter.cc @@ -4,12 +4,14 @@ #include #include +#include + #include "BloomFilter.h" #include "CounterVector.h" -#include "Serializer.h" #include "../util.h" +#include "../Reporter.h" using namespace probabilistic; @@ -28,29 +30,53 @@ BloomFilter::~BloomFilter() delete hasher; } -bool BloomFilter::Serialize(SerialInfo* info) const +broker::expected BloomFilter::Serialize() const { - return SerialObj::Serialize(info); + auto h = hasher->Serialize(); + + if ( ! h ) + return broker::ec::invalid_data; // Cannot serialize + + auto d = DoSerialize(); + + if ( ! 
d ) + return broker::ec::invalid_data; // Cannot serialize + + return {broker::vector{static_cast(Type()), std::move(*h), std::move(*d)}}; } -BloomFilter* BloomFilter::Unserialize(UnserialInfo* info) +std::unique_ptr BloomFilter::Unserialize(const broker::data& data) { - return reinterpret_cast(SerialObj::Unserialize(info, SER_BLOOMFILTER)); + auto v = caf::get_if(&data); + + if ( ! (v && v->size() == 3) ) + return nullptr; + + auto type = caf::get_if(&(*v)[0]); + if ( ! type ) + return nullptr; + + auto hasher_ = Hasher::Unserialize((*v)[1]); + if ( ! hasher_ ) + return nullptr; + + std::unique_ptr bf; + + switch ( *type ) { + case Basic: + bf = std::unique_ptr(new BasicBloomFilter()); + break; + + case Counting: + bf = std::unique_ptr(new CountingBloomFilter()); + break; } -bool BloomFilter::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_BLOOMFILTER, SerialObj); + if ( ! bf->DoUnserialize((*v)[2]) ) + return nullptr; - return hasher->Serialize(info); - } - -bool BloomFilter::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(SerialObj); - - hasher = Hasher::Unserialize(info); - return hasher != 0; + bf->hasher = hasher_.release(); + return bf; } size_t BasicBloomFilter::M(double fp, size_t capacity) @@ -130,21 +156,6 @@ BasicBloomFilter::~BasicBloomFilter() delete bits; } -IMPLEMENT_SERIAL(BasicBloomFilter, SER_BASICBLOOMFILTER) - -bool BasicBloomFilter::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_BASICBLOOMFILTER, BloomFilter); - return bits->Serialize(info); - } - -bool BasicBloomFilter::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BloomFilter); - bits = BitVector::Unserialize(info); - return (bits != 0); - } - void BasicBloomFilter::Add(const HashKey* key) { Hasher::digest_vector h = hasher->Hash(key); @@ -166,6 +177,22 @@ size_t BasicBloomFilter::Count(const HashKey* key) const return 1; } +broker::expected BasicBloomFilter::DoSerialize() const + { + auto b = bits->Serialize(); + return b; + } + +bool BasicBloomFilter::DoUnserialize(const broker::data& data) + { + auto b = BitVector::Unserialize(data); + if ( ! b ) + return false; + + bits = b.release(); + return true; + } + CountingBloomFilter::CountingBloomFilter() { cells = 0; @@ -232,21 +259,6 @@ string CountingBloomFilter::InternalState() const return fmt("%" PRIu64, cells->Hash()); } -IMPLEMENT_SERIAL(CountingBloomFilter, SER_COUNTINGBLOOMFILTER) - -bool CountingBloomFilter::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_COUNTINGBLOOMFILTER, BloomFilter); - return cells->Serialize(info); - } - -bool CountingBloomFilter::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(BloomFilter); - cells = CounterVector::Unserialize(info); - return (cells != 0); - } - // TODO: Use partitioning in add/count to allow for reusing CMS bounds. void CountingBloomFilter::Add(const HashKey* key) { @@ -272,3 +284,19 @@ size_t CountingBloomFilter::Count(const HashKey* key) const return min; } + +broker::expected CountingBloomFilter::DoSerialize() const + { + auto c = cells->Serialize(); + return c; + } + +bool CountingBloomFilter::DoUnserialize(const broker::data& data) + { + auto c = CounterVector::Unserialize(data); + if ( ! 
c ) + return false; + + cells = c.release(); + return true; + } diff --git a/src/probabilistic/BloomFilter.h b/src/probabilistic/BloomFilter.h index 288a24d416..bc22c91014 100644 --- a/src/probabilistic/BloomFilter.h +++ b/src/probabilistic/BloomFilter.h @@ -4,6 +4,10 @@ #define PROBABILISTIC_BLOOMFILTER_H #include + +#include +#include + #include "BitVector.h" #include "Hasher.h" @@ -11,15 +15,18 @@ namespace probabilistic { class CounterVector; +/** Types of derived BloomFilter classes. */ +enum BloomFilterType { Basic, Counting }; + /** * The abstract base class for Bloom filters. */ -class BloomFilter : public SerialObj { +class BloomFilter { public: /** * Destructor. */ - ~BloomFilter() override; + virtual ~BloomFilter(); /** * Adds an element to the Bloom filter. @@ -71,28 +78,10 @@ public: */ virtual string InternalState() const = 0; - /** - * Serializes the Bloom filter. - * - * @param info The serializaton information to use. - * - * @return True if successful. - */ - bool Serialize(SerialInfo* info) const; - - /** - * Unserializes a Bloom filter. - * - * @param info The serializaton information to use. - * - * @return The unserialized Bloom filter, or null if an error - * occured. - */ - static BloomFilter* Unserialize(UnserialInfo* info); + broker::expected Serialize() const; + static std::unique_ptr Unserialize(const broker::data& data); protected: - DECLARE_ABSTRACT_SERIAL(BloomFilter); - /** * Default constructor. */ @@ -105,6 +94,10 @@ protected: */ explicit BloomFilter(const Hasher* hasher); + virtual broker::expected DoSerialize() const = 0; + virtual bool DoUnserialize(const broker::data& data) = 0; + virtual BloomFilterType Type() const = 0; + const Hasher* hasher; }; @@ -165,7 +158,7 @@ public: string InternalState() const override; protected: - DECLARE_SERIAL(BasicBloomFilter); + friend class BloomFilter; /** * Default constructor. @@ -175,6 +168,10 @@ protected: // Overridden from BloomFilter. void Add(const HashKey* key) override; size_t Count(const HashKey* key) const override; + broker::expected DoSerialize() const override; + bool DoUnserialize(const broker::data& data) override; + BloomFilterType Type() const override + { return BloomFilterType::Basic; } private: BitVector* bits; @@ -210,7 +207,7 @@ public: string InternalState() const override; protected: - DECLARE_SERIAL(CountingBloomFilter); + friend class BloomFilter; /** * Default constructor. @@ -220,6 +217,10 @@ protected: // Overridden from BloomFilter. 
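// Sketch of a round trip through the broker-based API declared above,
// assuming "bf" already points at a constructed BasicBloomFilter or
// CountingBloomFilter. Unserialize() picks the concrete class from the
// BloomFilterType tag stored in the broker::vector.
broker::expected<broker::data> data = bf->Serialize();

if ( data )
	{
	std::unique_ptr<BloomFilter> copy = BloomFilter::Unserialize(*data);
	// copy is a null unique_ptr if the broker data was malformed.
	}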
void Add(const HashKey* key) override; size_t Count(const HashKey* key) const override; + broker::expected DoSerialize() const override; + bool DoUnserialize(const broker::data& data) override; + BloomFilterType Type() const override + { return BloomFilterType::Counting; } private: CounterVector* cells; diff --git a/src/probabilistic/CMakeLists.txt b/src/probabilistic/CMakeLists.txt index b845ecc7a2..976932c3fb 100644 --- a/src/probabilistic/CMakeLists.txt +++ b/src/probabilistic/CMakeLists.txt @@ -1,5 +1,5 @@ -include(BroSubdir) +include(ZeekSubdir) include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} diff --git a/src/probabilistic/CardinalityCounter.cc b/src/probabilistic/CardinalityCounter.cc index 64715c39fd..748cdc9e48 100644 --- a/src/probabilistic/CardinalityCounter.cc +++ b/src/probabilistic/CardinalityCounter.cc @@ -6,7 +6,6 @@ #include "CardinalityCounter.h" #include "Reporter.h" -#include "Serializer.h" using namespace probabilistic; @@ -197,49 +196,48 @@ uint64_t CardinalityCounter::GetM() const return m; } -bool CardinalityCounter::Serialize(SerialInfo* info) const +broker::expected CardinalityCounter::Serialize() const { - bool valid = true; + broker::vector v = {m, V, alpha_m}; + v.reserve(3 + m); - valid &= SERIALIZE(m); - valid &= SERIALIZE(V); - valid &= SERIALIZE(alpha_m); + for ( size_t i = 0; i < m; ++i ) + v.emplace_back(static_cast(buckets[i])); - for ( unsigned int i = 0; i < m; i++ ) - valid &= SERIALIZE((char)buckets[i]); - - return valid; + return {std::move(v)}; } -CardinalityCounter* CardinalityCounter::Unserialize(UnserialInfo* info) +std::unique_ptr CardinalityCounter::Unserialize(const broker::data& data) { - uint64_t m; - uint64_t V; - double alpha_m; + auto v = caf::get_if(&data); + if ( ! (v && v->size() >= 3) ) + return nullptr; - bool valid = true; - valid &= UNSERIALIZE(&m); - valid &= UNSERIALIZE(&V); - valid &= UNSERIALIZE(&alpha_m); + auto m = caf::get_if(&(*v)[0]); + auto V = caf::get_if(&(*v)[1]); + auto alpha_m = caf::get_if(&(*v)[2]); - CardinalityCounter* c = new CardinalityCounter(m, V, alpha_m); + if ( ! (m && V && alpha_m) ) + return nullptr; + if ( v->size() != 3 + *m ) + return nullptr; - vector& buckets = c->buckets; + auto cc = std::unique_ptr(new CardinalityCounter(*m, *V, *alpha_m)); + if ( *m != cc->m ) + return nullptr; + if ( cc->buckets.size() != * m ) + return nullptr; - for ( unsigned int i = 0; i < m; i++ ) + for ( size_t i = 0; i < *m; ++i ) { - char c; - valid &= UNSERIALIZE(&c); - buckets[i] = (uint8_t)c; + auto x = caf::get_if(&(*v)[3 + i]); + if ( ! x ) + return nullptr; + + cc->buckets[i] = *x; } - if ( ! valid ) - { - delete c; - c = 0; - } - - return c; + return cc; } /** diff --git a/src/probabilistic/CardinalityCounter.h b/src/probabilistic/CardinalityCounter.h index cde2ec402b..63047172ed 100644 --- a/src/probabilistic/CardinalityCounter.h +++ b/src/probabilistic/CardinalityCounter.h @@ -4,7 +4,11 @@ #define PROBABILISTIC_CARDINALITYCOUNTER_H #include -#include +#include +#include + +#include +#include namespace probabilistic { @@ -84,24 +88,8 @@ public: */ bool Merge(CardinalityCounter* c); - /** - * Serializes the cardinality counter. - * - * @param info The serializaton information to use. - * - * @return True if successful. - */ - bool Serialize(SerialInfo* info) const; - - /** - * Unserializes a cardinality counter. - * - * @param info The serializaton information to use. - * - * @return The unserialized cardinality counter, or null if an error - * occured. 
- */ - static CardinalityCounter* Unserialize(UnserialInfo* info); + broker::expected Serialize() const; + static std::unique_ptr Unserialize(const broker::data& data); protected: /** diff --git a/src/probabilistic/CounterVector.cc b/src/probabilistic/CounterVector.cc index 8608015422..b9a173356e 100644 --- a/src/probabilistic/CounterVector.cc +++ b/src/probabilistic/CounterVector.cc @@ -2,9 +2,12 @@ #include "CounterVector.h" +#include #include #include "BitVector.h" -#include "Serializer.h" +#include "util.h" + +#include using namespace probabilistic; @@ -153,46 +156,34 @@ CounterVector operator|(const CounterVector& x, const CounterVector& y) } -uint64 CounterVector::Hash() const +uint64_t CounterVector::Hash() const { return bits->Hash(); } -bool CounterVector::Serialize(SerialInfo* info) const +broker::expected CounterVector::Serialize() const { - return SerialObj::Serialize(info); + auto b = bits->Serialize(); + if ( ! b ) + return broker::ec::invalid_data; // Cannot serialize + + return {broker::vector{static_cast(width), std::move(*b)}}; } -CounterVector* CounterVector::Unserialize(UnserialInfo* info) +std::unique_ptr CounterVector::Unserialize(const broker::data& data) { - return reinterpret_cast(SerialObj::Unserialize(info, SER_COUNTERVECTOR)); + auto v = caf::get_if(&data); + if ( ! (v && v->size() >= 2) ) + return nullptr; + + auto width = caf::get_if(&(*v)[0]); + auto bits = BitVector::Unserialize((*v)[1]); + + auto cv = std::unique_ptr(new CounterVector()); + cv->width = *width; + cv->bits = bits.release(); + return cv; } -IMPLEMENT_SERIAL(CounterVector, SER_COUNTERVECTOR) -bool CounterVector::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_COUNTERVECTOR, SerialObj); - if ( ! bits->Serialize(info) ) - return false; - - return SERIALIZE(static_cast(width)); - } - -bool CounterVector::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(SerialObj); - - bits = BitVector::Unserialize(info); - if ( ! bits ) - return false; - - uint64 w; - if ( ! UNSERIALIZE(&w) ) - return false; - - width = static_cast(w); - - return true; - } diff --git a/src/probabilistic/CounterVector.h b/src/probabilistic/CounterVector.h index 422d172292..f8209fabca 100644 --- a/src/probabilistic/CounterVector.h +++ b/src/probabilistic/CounterVector.h @@ -3,7 +3,11 @@ #ifndef PROBABILISTIC_COUNTERVECTOR_H #define PROBABILISTIC_COUNTERVECTOR_H -#include "SerialObj.h" +#include +#include + +#include +#include namespace probabilistic { @@ -12,10 +16,10 @@ class BitVector; /** * A vector of counters, each of which has a fixed number of bits. */ -class CounterVector : public SerialObj { +class CounterVector { public: typedef size_t size_type; - typedef uint64 count_type; + typedef uint64_t count_type; /** * Constructs a counter vector having cells of a given width. @@ -38,7 +42,7 @@ public: /** * Destructor. */ - ~CounterVector() override; + virtual ~CounterVector(); /** * Increments a given cell. @@ -131,26 +135,10 @@ public: * * @return The hash. */ - uint64 Hash() const; + uint64_t Hash() const; - /** - * Serializes the bit vector. - * - * @param info The serializaton information to use. - * - * @return True if successful. - */ - bool Serialize(SerialInfo* info) const; - - /** - * Unserialize the counter vector. - * - * @param info The serializaton information to use. - * - * @return The unserialized counter vector, or null if an error - * occured. 
- */ - static CounterVector* Unserialize(UnserialInfo* info); + broker::expected Serialize() const; + static std::unique_ptr Unserialize(const broker::data& data); protected: friend CounterVector operator|(const CounterVector& x, @@ -158,8 +146,6 @@ protected: CounterVector() { } - DECLARE_SERIAL(CounterVector); - private: CounterVector& operator=(const CounterVector&); // Disable. diff --git a/src/probabilistic/Hasher.cc b/src/probabilistic/Hasher.cc index d21efbed41..2314166979 100644 --- a/src/probabilistic/Hasher.cc +++ b/src/probabilistic/Hasher.cc @@ -5,7 +5,6 @@ #include "Hasher.h" #include "NetVar.h" -#include "Serializer.h" #include "digest.h" #include "siphash24.h" @@ -41,58 +40,53 @@ Hasher::digest_vector Hasher::Hash(const HashKey* key) const return Hash(key->Key(), key->Size()); } -bool Hasher::Serialize(SerialInfo* info) const - { - return SerialObj::Serialize(info); - } - -Hasher* Hasher::Unserialize(UnserialInfo* info) - { - return reinterpret_cast(SerialObj::Unserialize(info, SER_HASHER)); - } - -bool Hasher::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_HASHER, SerialObj); - - if ( ! SERIALIZE(static_cast(k)) ) - return false; - - if ( ! SERIALIZE(static_cast(seed.h1)) ) - return false; - - return SERIALIZE(static_cast(seed.h2)); - } - -bool Hasher::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(SerialObj); - - uint16 serial_k; - if ( ! UNSERIALIZE(&serial_k) ) - return false; - - k = serial_k; - assert(k > 0); - - seed_t serial_seed; - if ( ! UNSERIALIZE(&serial_seed.h1) ) - return false; - - if ( ! UNSERIALIZE(&serial_seed.h2) ) - return false; - - seed = serial_seed; - - return true; - } - Hasher::Hasher(size_t arg_k, seed_t arg_seed) { k = arg_k; seed = arg_seed; } +broker::expected Hasher::Serialize() const + { + return {broker::vector{ + static_cast(Type()), static_cast(k), + seed.h1, seed.h2 }}; + } + +std::unique_ptr Hasher::Unserialize(const broker::data& data) + { + auto v = caf::get_if(&data); + + if ( ! (v && v->size() == 4) ) + return nullptr; + + auto type = caf::get_if(&(*v)[0]); + auto k = caf::get_if(&(*v)[1]); + auto h1 = caf::get_if(&(*v)[2]); + auto h2 = caf::get_if(&(*v)[3]); + + if ( ! (type && k && h1 && h2) ) + return nullptr; + + std::unique_ptr hasher; + + switch ( *type ) { + case Default: + hasher = std::unique_ptr(new DefaultHasher(*k, {*h1, *h2})); + break; + + case Double: + hasher = std::unique_ptr(new DoubleHasher(*k, {*h1, *h2})); + break; + } + + // Note that the derived classed don't hold any further state of + // their own. They reconstruct all their information from their + // constructors' arguments. + + return hasher; + } + UHF::UHF() { memset(&seed, 0, sizeof(seed)); @@ -167,31 +161,6 @@ bool DefaultHasher::Equals(const Hasher* other) const return hash_functions == o->hash_functions; } -IMPLEMENT_SERIAL(DefaultHasher, SER_DEFAULTHASHER) - -bool DefaultHasher::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_DEFAULTHASHER, Hasher); - - // Nothing to do here, the base class has all we need serialized already. 
- return true; - } - -bool DefaultHasher::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Hasher); - - hash_functions.clear(); - for ( size_t i = 0; i < K(); ++i ) - { - Hasher::seed_t s = Seed(); - s.h1 += bro_prng(i); - hash_functions.push_back(UHF(s)); - } - - return true; - } - DoubleHasher::DoubleHasher(size_t k, seed_t seed) : Hasher(k, seed), h1(seed + bro_prng(1)), h2(seed + bro_prng(2)) { @@ -223,22 +192,3 @@ bool DoubleHasher::Equals(const Hasher* other) const return h1 == o->h1 && h2 == o->h2; } -IMPLEMENT_SERIAL(DoubleHasher, SER_DOUBLEHASHER) - -bool DoubleHasher::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_DOUBLEHASHER, Hasher); - - // Nothing to do here, the base class has all we need serialized already. - return true; - } - -bool DoubleHasher::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(Hasher); - - h1 = UHF(Seed() + bro_prng(1)); - h2 = UHF(Seed() + bro_prng(2)); - - return true; - } diff --git a/src/probabilistic/Hasher.h b/src/probabilistic/Hasher.h index 7fd2e4fb2f..3d60a264c0 100644 --- a/src/probabilistic/Hasher.h +++ b/src/probabilistic/Hasher.h @@ -3,16 +3,23 @@ #ifndef PROBABILISTIC_HASHER_H #define PROBABILISTIC_HASHER_H +#include +#include + +#include + #include "Hash.h" -#include "SerialObj.h" namespace probabilistic { +/** Types of derived Hasher classes. */ +enum HasherType { Default, Double }; + /** * Abstract base class for hashers. A hasher creates a family of hash * functions to hash an element *k* times. */ -class Hasher : public SerialObj { +class Hasher { public: typedef hash_t digest; typedef std::vector digest_vector; @@ -43,7 +50,7 @@ public: /** * Destructor. */ - ~Hasher() override { } + virtual ~Hasher() { } /** * Computes hash values for an element. @@ -99,12 +106,10 @@ public: */ seed_t Seed() const { return seed; } - bool Serialize(SerialInfo* info) const; - static Hasher* Unserialize(UnserialInfo* info); + broker::expected Serialize() const; + static std::unique_ptr Unserialize(const broker::data& data); protected: - DECLARE_ABSTRACT_SERIAL(Hasher); - Hasher() { } /** @@ -116,6 +121,8 @@ protected: */ Hasher(size_t arg_k, seed_t arg_seed); + virtual HasherType Type() const = 0; + private: size_t k; seed_t seed; @@ -181,6 +188,9 @@ public: return ! (x == y); } + broker::expected Serialize() const; + static UHF Unserialize(const broker::data& data); + private: static size_t compute_seed(Hasher::seed_t seed); @@ -208,11 +218,12 @@ public: DefaultHasher* Clone() const final; bool Equals(const Hasher* other) const final; - DECLARE_SERIAL(DefaultHasher); - private: DefaultHasher() { } + HasherType Type() const override + { return HasherType::Default; } + std::vector hash_functions; }; @@ -236,11 +247,12 @@ public: DoubleHasher* Clone() const final; bool Equals(const Hasher* other) const final; - DECLARE_SERIAL(DoubleHasher); - private: DoubleHasher() { } + HasherType Type() const override + { return HasherType::Double; } + UHF h1; UHF h2; }; diff --git a/src/probabilistic/Topk.cc b/src/probabilistic/Topk.cc index e01b4e41b6..8ff158e10d 100644 --- a/src/probabilistic/Topk.cc +++ b/src/probabilistic/Topk.cc @@ -1,15 +1,15 @@ // See the file "COPYING" in the main distribution directory for copyright. 
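// Sketch for the reworked Hasher API above: Serialize() emits a
// HasherType tag plus k and the two seed halves, and Unserialize()
// rebuilds the matching concrete hasher from those values alone.
// Assumes "h" points at an existing DefaultHasher or DoubleHasher.
broker::expected<broker::data> hd = h->Serialize();

if ( hd )
	{
	std::unique_ptr<Hasher> copy = Hasher::Unserialize(*hd);
	// The derived hashers carry no extra state of their own, so on
	// success the copy should compare equal to the original via Equals().
	}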
+#include + +#include "broker/Data.h" #include "probabilistic/Topk.h" #include "CompHash.h" #include "Reporter.h" -#include "Serializer.h" #include "NetVar.h" namespace probabilistic { -IMPLEMENT_SERIAL(TopkVal, SER_TOPK_VAL); - static void topk_element_hash_delete_func(void* val) { Element* e = (Element*) val; @@ -78,6 +78,13 @@ TopkVal::~TopkVal() void TopkVal::Merge(const TopkVal* value, bool doPrune) { + if ( ! value->type ) + { + // Merge-from is empty. Nothing to do. + assert(value->numElements == 0); + return; + } + if ( type == 0 ) { assert(numElements == 0); @@ -176,109 +183,13 @@ void TopkVal::Merge(const TopkVal* value, bool doPrune) } } -bool TopkVal::DoSerialize(SerialInfo* info) const +Val* TopkVal::DoClone(CloneState* state) { - DO_SERIALIZE(SER_TOPK_VAL, OpaqueVal); - - bool v = true; - - v &= SERIALIZE(size); - v &= SERIALIZE(numElements); - v &= SERIALIZE(pruned); - - bool type_present = (type != 0); - v &= SERIALIZE(type_present); - - if ( type_present ) - v &= type->Serialize(info); - else - assert(numElements == 0); - - uint64_t i = 0; - std::list::const_iterator it = buckets.begin(); - while ( it != buckets.end() ) - { - Bucket* b = *it; - uint32_t elements_count = b->elements.size(); - v &= SERIALIZE(elements_count); - v &= SERIALIZE(b->count); - - std::list::const_iterator eit = b->elements.begin(); - while ( eit != b->elements.end() ) - { - Element* element = *eit; - v &= SERIALIZE(element->epsilon); - v &= element->value->Serialize(info); - - eit++; - i++; - } - - it++; - } - - assert(i == numElements); - - return v; + auto clone = new TopkVal(size); + clone->Merge(this); + return state->NewClone(this, clone); } -bool TopkVal::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(OpaqueVal); - - bool v = true; - - v &= UNSERIALIZE(&size); - v &= UNSERIALIZE(&numElements); - v &= UNSERIALIZE(&pruned); - - bool type_present = false; - v &= UNSERIALIZE(&type_present); - if ( type_present ) - { - BroType* deserialized_type = BroType::Unserialize(info); - - Typify(deserialized_type); - Unref(deserialized_type); - assert(type); - } - else - assert(numElements == 0); - - uint64_t i = 0; - while ( i < numElements ) - { - Bucket* b = new Bucket(); - uint32_t elements_count; - v &= UNSERIALIZE(&elements_count); - v &= UNSERIALIZE(&b->count); - b->bucketPos = buckets.insert(buckets.end(), b); - - for ( uint64_t j = 0; j < elements_count; j++ ) - { - Element* e = new Element(); - v &= UNSERIALIZE(&e->epsilon); - e->value = Val::Unserialize(info, type); - e->parent = b; - - b->elements.insert(b->elements.end(), e); - - HashKey* key = GetHash(e->value); - assert (elementDict->Lookup(key) == 0); - - elementDict->Insert(key, e); - delete key; - - i++; - } - } - - assert(i == numElements); - - return v; - } - - VectorVal* TopkVal::GetTopK(int k) const // returns vector { if ( numElements == 0 ) @@ -497,4 +408,126 @@ void TopkVal::IncrementCounter(Element* e, unsigned int count) } } -}; +IMPLEMENT_OPAQUE_VALUE(TopkVal) + +broker::expected TopkVal::DoSerialize() const + { + broker::vector d = {size, numElements, pruned}; + + if ( type ) + { + auto t = SerializeType(type); + if ( ! 
t ) + return broker::ec::invalid_data; + + d.emplace_back(std::move(*t)); + } + else + d.emplace_back(broker::none()); + + uint64_t i = 0; + std::list::const_iterator it = buckets.begin(); + while ( it != buckets.end() ) + { + Bucket* b = *it; + uint32_t elements_count = b->elements.size(); + + d.emplace_back(static_cast(b->elements.size())); + d.emplace_back(b->count); + + std::list::const_iterator eit = b->elements.begin(); + while ( eit != b->elements.end() ) + { + Element* element = *eit; + d.emplace_back(element->epsilon); + auto v = bro_broker::val_to_data(element->value); + if ( ! v ) + return broker::ec::invalid_data; + + d.emplace_back(*v); + + eit++; + i++; + } + + it++; + } + + assert(i == numElements); + return {std::move(d)}; + } + + +bool TopkVal::DoUnserialize(const broker::data& data) + { + auto v = caf::get_if(&data); + + if ( ! (v && v->size() >= 4) ) + return false; + + auto size_ = caf::get_if(&(*v)[0]); + auto numElements_ = caf::get_if(&(*v)[1]); + auto pruned_ = caf::get_if(&(*v)[2]); + + if ( ! (size_ && numElements_ && pruned_) ) + return false; + + size = *size_; + numElements = *numElements_; + pruned = *pruned_; + + auto no_type = caf::get_if(&(*v)[3]); + if ( ! no_type ) + { + BroType* t = UnserializeType((*v)[3]); + if ( ! t ) + return false; + + Typify(t); + Unref(t); + } + + uint64_t i = 0; + uint64_t idx = 4; + + while ( i < numElements ) + { + Bucket* b = new Bucket(); + auto elements_count = caf::get_if(&(*v)[idx++]); + auto count = caf::get_if(&(*v)[idx++]); + + if ( ! (elements_count && count) ) + return false; + + b->count = *count; + b->bucketPos = buckets.insert(buckets.end(), b); + + for ( uint64_t j = 0; j < *elements_count; j++ ) + { + Element* e = new Element(); + auto epsilon = caf::get_if(&(*v)[idx++]); + Val* val = bro_broker::data_to_val((*v)[idx++], type); + + if ( ! (epsilon && val) ) + return false; + + e->epsilon = *epsilon; + e->value = val; + e->parent = b; + + b->elements.insert(b->elements.end(), e); + + HashKey* key = GetHash(e->value); + assert (elementDict->Lookup(key) == 0); + + elementDict->Insert(key, e); + delete key; + + i++; + } + } + + assert(i == numElements); + return true; + } +} diff --git a/src/probabilistic/Topk.h b/src/probabilistic/Topk.h index fac677a454..24d05e12af 100644 --- a/src/probabilistic/Topk.h +++ b/src/probabilistic/Topk.h @@ -122,6 +122,17 @@ public: */ void Merge(const TopkVal* value, bool doPrune=false); + /** + * Clone the Opaque Type + * + * @param state Clone state (tracking duplicate pointers) + * + * @returns cloned TopkVal + */ + Val* DoClone(CloneState* state) override; + + DECLARE_OPAQUE_VALUE(TopkVal) + protected: /** * Construct an empty TopkVal. Only used for deserialization @@ -161,8 +172,6 @@ private: uint64 size; // how many elements are we tracking? uint64 numElements; // how many elements do we have at the moment bool pruned; // was this data structure pruned? - - DECLARE_SERIAL(TopkVal); }; }; diff --git a/src/probabilistic/bloom-filter.bif b/src/probabilistic/bloom-filter.bif index 468a6eeae2..166af6d937 100644 --- a/src/probabilistic/bloom-filter.bif +++ b/src/probabilistic/bloom-filter.bif @@ -22,14 +22,14 @@ module GLOBAL; ## rate of *fp*. ## ## name: A name that uniquely identifies and seeds the Bloom filter. If empty, -## the filter will use :bro:id:`global_hash_seed` if that's set, and -## otherwise use a local seed tied to the current Bro process. 
Only +## the filter will use :zeek:id:`global_hash_seed` if that's set, and +## otherwise use a local seed tied to the current Zeek process. Only ## filters with the same seed can be merged with -## :bro:id:`bloomfilter_merge`. +## :zeek:id:`bloomfilter_merge`. ## ## Returns: A Bloom filter handle. ## -## .. bro:see:: bloomfilter_basic_init2 bloomfilter_counting_init bloomfilter_add +## .. zeek:see:: bloomfilter_basic_init2 bloomfilter_counting_init bloomfilter_add ## bloomfilter_lookup bloomfilter_clear bloomfilter_merge global_hash_seed function bloomfilter_basic_init%(fp: double, capacity: count, name: string &default=""%): opaque of bloomfilter @@ -50,7 +50,7 @@ function bloomfilter_basic_init%(fp: double, capacity: count, %} ## Creates a basic Bloom filter. This function serves as a low-level -## alternative to :bro:id:`bloomfilter_basic_init` where the user has full +## alternative to :zeek:id:`bloomfilter_basic_init` where the user has full ## control over the number of hash functions and cells in the underlying bit ## vector. ## @@ -59,14 +59,14 @@ function bloomfilter_basic_init%(fp: double, capacity: count, ## cells: The number of cells of the underlying bit vector. ## ## name: A name that uniquely identifies and seeds the Bloom filter. If empty, -## the filter will use :bro:id:`global_hash_seed` if that's set, and -## otherwise use a local seed tied to the current Bro process. Only +## the filter will use :zeek:id:`global_hash_seed` if that's set, and +## otherwise use a local seed tied to the current Zeek process. Only ## filters with the same seed can be merged with -## :bro:id:`bloomfilter_merge`. +## :zeek:id:`bloomfilter_merge`. ## ## Returns: A Bloom filter handle. ## -## .. bro:see:: bloomfilter_basic_init bloomfilter_counting_init bloomfilter_add +## .. zeek:see:: bloomfilter_basic_init bloomfilter_counting_init bloomfilter_add ## bloomfilter_lookup bloomfilter_clear bloomfilter_merge global_hash_seed function bloomfilter_basic_init2%(k: count, cells: count, name: string &default=""%): opaque of bloomfilter @@ -103,14 +103,14 @@ function bloomfilter_basic_init2%(k: count, cells: count, ## counter vector becomes a cell of size *w* bits. ## ## name: A name that uniquely identifies and seeds the Bloom filter. If empty, -## the filter will use :bro:id:`global_hash_seed` if that's set, and -## otherwise use a local seed tied to the current Bro process. Only +## the filter will use :zeek:id:`global_hash_seed` if that's set, and +## otherwise use a local seed tied to the current Zeek process. Only ## filters with the same seed can be merged with -## :bro:id:`bloomfilter_merge`. +## :zeek:id:`bloomfilter_merge`. ## ## Returns: A Bloom filter handle. ## -## .. bro:see:: bloomfilter_basic_init bloomfilter_basic_init2 bloomfilter_add +## .. zeek:see:: bloomfilter_basic_init bloomfilter_basic_init2 bloomfilter_add ## bloomfilter_lookup bloomfilter_clear bloomfilter_merge global_hash_seed function bloomfilter_counting_init%(k: count, cells: count, max: count, name: string &default=""%): opaque of bloomfilter @@ -139,7 +139,7 @@ function bloomfilter_counting_init%(k: count, cells: count, max: count, ## ## x: The element to add. ## -## .. bro:see:: bloomfilter_basic_init bloomfilter_basic_init2 +## .. 
zeek:see:: bloomfilter_basic_init bloomfilter_basic_init2 ## bloomfilter_counting_init bloomfilter_lookup bloomfilter_clear ## bloomfilter_merge function bloomfilter_add%(bf: opaque of bloomfilter, x: any%): any @@ -166,7 +166,7 @@ function bloomfilter_add%(bf: opaque of bloomfilter, x: any%): any ## ## Returns: the counter associated with *x* in *bf*. ## -## .. bro:see:: bloomfilter_basic_init bloomfilter_basic_init2 +## .. zeek:see:: bloomfilter_basic_init bloomfilter_basic_init2 ## bloomfilter_counting_init bloomfilter_add bloomfilter_clear ## bloomfilter_merge function bloomfilter_lookup%(bf: opaque of bloomfilter, x: any%): count @@ -191,7 +191,7 @@ function bloomfilter_lookup%(bf: opaque of bloomfilter, x: any%): count ## ## bf: The Bloom filter handle. ## -## .. bro:see:: bloomfilter_basic_init bloomfilter_basic_init2 +## .. zeek:see:: bloomfilter_basic_init bloomfilter_basic_init2 ## bloomfilter_counting_init bloomfilter_add bloomfilter_lookup ## bloomfilter_merge function bloomfilter_clear%(bf: opaque of bloomfilter%): any @@ -206,7 +206,7 @@ function bloomfilter_clear%(bf: opaque of bloomfilter%): any ## Merges two Bloom filters. ## -## .. note:: Currently Bloom filters created by different Bro instances cannot +## .. note:: Currently Bloom filters created by different Zeek instances cannot ## be merged. In the future, this will be supported as long as both filters ## are created with the same name. ## @@ -216,7 +216,7 @@ function bloomfilter_clear%(bf: opaque of bloomfilter%): any ## ## Returns: The union of *bf1* and *bf2*. ## -## .. bro:see:: bloomfilter_basic_init bloomfilter_basic_init2 +## .. zeek:see:: bloomfilter_basic_init bloomfilter_basic_init2 ## bloomfilter_counting_init bloomfilter_add bloomfilter_lookup ## bloomfilter_clear function bloomfilter_merge%(bf1: opaque of bloomfilter, diff --git a/src/probabilistic/cardinality-counter.bif b/src/probabilistic/cardinality-counter.bif index 4ba528bd3c..1e12765b57 100644 --- a/src/probabilistic/cardinality-counter.bif +++ b/src/probabilistic/cardinality-counter.bif @@ -17,7 +17,7 @@ module GLOBAL; ## ## Returns: a HLL cardinality handle. ## -## .. bro:see:: hll_cardinality_estimate hll_cardinality_merge_into hll_cardinality_add +## .. zeek:see:: hll_cardinality_estimate hll_cardinality_merge_into hll_cardinality_add ## hll_cardinality_copy function hll_cardinality_init%(err: double, confidence: double%): opaque of cardinality %{ @@ -35,7 +35,7 @@ function hll_cardinality_init%(err: double, confidence: double%): opaque of card ## ## Returns: true on success. ## -## .. bro:see:: hll_cardinality_estimate hll_cardinality_merge_into +## .. zeek:see:: hll_cardinality_estimate hll_cardinality_merge_into ## hll_cardinality_init hll_cardinality_copy function hll_cardinality_add%(handle: opaque of cardinality, elem: any%): bool %{ @@ -60,7 +60,7 @@ function hll_cardinality_add%(handle: opaque of cardinality, elem: any%): bool ## Merges a HLL cardinality counter into another. ## ## .. note:: The same restrictions as for Bloom filter merging apply, -## see :bro:id:`bloomfilter_merge`. +## see :zeek:id:`bloomfilter_merge`. ## ## handle1: the first HLL handle, which will contain the merged result. ## @@ -68,7 +68,7 @@ function hll_cardinality_add%(handle: opaque of cardinality, elem: any%): bool ## ## Returns: true on success. ## -## .. bro:see:: hll_cardinality_estimate hll_cardinality_add +## .. 
zeek:see:: hll_cardinality_estimate hll_cardinality_add ## hll_cardinality_init hll_cardinality_copy function hll_cardinality_merge_into%(handle1: opaque of cardinality, handle2: opaque of cardinality%): bool %{ @@ -90,7 +90,7 @@ function hll_cardinality_merge_into%(handle1: opaque of cardinality, handle2: op bool res = h1->Merge(h2); if ( ! res ) { - reporter->Error("Carinality counters with different parameters cannot be merged"); + reporter->Error("Cardinality counters with different parameters cannot be merged"); return val_mgr->GetBool(0); } @@ -103,7 +103,7 @@ function hll_cardinality_merge_into%(handle1: opaque of cardinality, handle2: op ## ## Returns: the cardinality estimate. Returns -1.0 if the counter is empty. ## -## .. bro:see:: hll_cardinality_merge_into hll_cardinality_add +## .. zeek:see:: hll_cardinality_merge_into hll_cardinality_add ## hll_cardinality_init hll_cardinality_copy function hll_cardinality_estimate%(handle: opaque of cardinality%): double %{ @@ -121,7 +121,7 @@ function hll_cardinality_estimate%(handle: opaque of cardinality%): double ## ## Returns: copy of handle. ## -## .. bro:see:: hll_cardinality_estimate hll_cardinality_merge_into hll_cardinality_add +## .. zeek:see:: hll_cardinality_estimate hll_cardinality_merge_into hll_cardinality_add ## hll_cardinality_init function hll_cardinality_copy%(handle: opaque of cardinality%): opaque of cardinality %{ diff --git a/src/probabilistic/top-k.bif b/src/probabilistic/top-k.bif index 8d2a8c0fd8..8691521f31 100644 --- a/src/probabilistic/top-k.bif +++ b/src/probabilistic/top-k.bif @@ -10,7 +10,7 @@ ## ## Returns: Opaque pointer to the data structure. ## -## .. bro:see:: topk_add topk_get_top topk_count topk_epsilon +## .. zeek:see:: topk_add topk_get_top topk_count topk_epsilon ## topk_size topk_sum topk_merge topk_merge_prune function topk_init%(size: count%): opaque of topk %{ @@ -28,7 +28,7 @@ function topk_init%(size: count%): opaque of topk ## ## value: observed value. ## -## .. bro:see:: topk_init topk_get_top topk_count topk_epsilon +## .. zeek:see:: topk_init topk_get_top topk_count topk_epsilon ## topk_size topk_sum topk_merge topk_merge_prune function topk_add%(handle: opaque of topk, value: any%): any %{ @@ -47,7 +47,7 @@ function topk_add%(handle: opaque of topk, value: any%): any ## ## Returns: vector of the first k elements. ## -## .. bro:see:: topk_init topk_add topk_count topk_epsilon +## .. zeek:see:: topk_init topk_add topk_count topk_epsilon ## topk_size topk_sum topk_merge topk_merge_prune function topk_get_top%(handle: opaque of topk, k: count%): any_vec %{ @@ -68,7 +68,7 @@ function topk_get_top%(handle: opaque of topk, k: count%): any_vec ## ## Returns: Overestimated number for how often the element has been encountered. ## -## .. bro:see:: topk_init topk_add topk_get_top topk_epsilon +## .. zeek:see:: topk_init topk_add topk_get_top topk_epsilon ## topk_size topk_sum topk_merge topk_merge_prune function topk_count%(handle: opaque of topk, value: any%): count %{ @@ -79,7 +79,7 @@ function topk_count%(handle: opaque of topk, value: any%): count ## Get the maximal overestimation for count. ## -## .. note:: Same restrictions as for :bro:id:`topk_count` apply. +## .. note:: Same restrictions as for :zeek:id:`topk_count` apply. ## ## handle: the TopK handle. ## @@ -88,7 +88,7 @@ function topk_count%(handle: opaque of topk, value: any%): count ## Returns: Number which represents the maximal overestimation for the count of ## this element. ## -## .. 
bro:see:: topk_init topk_add topk_get_top topk_count +## .. zeek:see:: topk_init topk_add topk_get_top topk_count ## topk_size topk_sum topk_merge topk_merge_prune function topk_epsilon%(handle: opaque of topk, value: any%): count %{ @@ -107,7 +107,7 @@ function topk_epsilon%(handle: opaque of topk, value: any%): count ## ## Returns: size given during initialization. ## -## .. bro:see:: topk_init topk_add topk_get_top topk_count topk_epsilon +## .. zeek:see:: topk_init topk_add topk_get_top topk_count topk_epsilon ## topk_sum topk_merge topk_merge_prune function topk_size%(handle: opaque of topk%): count %{ @@ -120,14 +120,14 @@ function topk_size%(handle: opaque of topk%): count ## ## .. note:: This is equal to the number of all inserted objects if the data ## structure never has been pruned. Do not use after -## calling :bro:id:`topk_merge_prune` (will throw a warning message if used +## calling :zeek:id:`topk_merge_prune` (will throw a warning message if used ## afterwards). ## ## handle: the TopK handle. ## ## Returns: sum of all counts. ## -## .. bro:see:: topk_init topk_add topk_get_top topk_count topk_epsilon +## .. zeek:see:: topk_init topk_add topk_get_top topk_count topk_epsilon ## topk_size topk_merge topk_merge_prune function topk_sum%(handle: opaque of topk%): count %{ @@ -145,7 +145,7 @@ function topk_sum%(handle: opaque of topk%): count ## .. note:: This does not remove any elements, the resulting data structure ## can be bigger than the maximum size given on initialization. ## -## .. bro:see:: topk_init topk_add topk_get_top topk_count topk_epsilon +## .. zeek:see:: topk_init topk_add topk_get_top topk_count topk_epsilon ## topk_size topk_sum topk_merge_prune function topk_merge%(handle1: opaque of topk, handle2: opaque of topk%): any %{ @@ -164,14 +164,14 @@ function topk_merge%(handle1: opaque of topk, handle2: opaque of topk%): any ## data structure back to the size given on initialization. ## ## .. note:: Use with care and only when being aware of the restrictions this -## entails. Do not call :bro:id:`topk_size` or :bro:id:`topk_add` afterwards, +## entails. Do not call :zeek:id:`topk_size` or :zeek:id:`topk_add` afterwards, ## results will probably not be what you expect. ## ## handle1: the TopK handle in which the second TopK structure is merged. ## ## handle2: the TopK handle in which is merged into the first TopK structure. ## -## .. bro:see:: topk_init topk_add topk_get_top topk_count topk_epsilon +## .. zeek:see:: topk_init topk_add topk_get_top topk_count topk_epsilon ## topk_size topk_sum topk_merge function topk_merge_prune%(handle1: opaque of topk, handle2: opaque of topk%): any %{ diff --git a/src/reporter.bif b/src/reporter.bif index 71fc50b49d..dd74b944d6 100644 --- a/src/reporter.bif +++ b/src/reporter.bif @@ -4,7 +4,7 @@ ##! If event handlers do exist, it's assumed they take care of determining ##! how/where to output the messages. ##! -##! See :doc:`/scripts/base/frameworks/reporter/main.bro` for a convenient +##! See :doc:`/scripts/base/frameworks/reporter/main.zeek` for a convenient ##! reporter message logging framework. module Reporter; @@ -19,7 +19,7 @@ module Reporter; ## ## Returns: Always true. ## -## .. bro:see:: reporter_info +## .. zeek:see:: reporter_info function Reporter::info%(msg: string%): bool %{ reporter->PushLocation(frame->GetCall()->GetLocationInfo()); @@ -34,7 +34,7 @@ function Reporter::info%(msg: string%): bool ## ## Returns: Always true. ## -## .. bro:see:: reporter_warning +## .. 
zeek:see:: reporter_warning function Reporter::warning%(msg: string%): bool %{ reporter->PushLocation(frame->GetCall()->GetLocationInfo()); @@ -50,7 +50,7 @@ function Reporter::warning%(msg: string%): bool ## ## Returns: Always true. ## -## .. bro:see:: reporter_error +## .. zeek:see:: reporter_error function Reporter::error%(msg: string%): bool %{ reporter->PushLocation(frame->GetCall()->GetLocationInfo()); diff --git a/src/rule-parse.y b/src/rule-parse.y index 3e9c8d7ddf..642a3fbf6d 100644 --- a/src/rule-parse.y +++ b/src/rule-parse.y @@ -2,7 +2,7 @@ #include #include #include -#include "bro-config.h" +#include "zeek-config.h" #include "RuleMatcher.h" #include "Reporter.h" #include "IPAddr.h" @@ -70,6 +70,7 @@ static uint8_t ip4_mask_to_len(uint32_t mask) %type value_list %type prefix_value_list %type TOK_IP value +%type ranged_value %type TOK_IP6 prefix_value %type TOK_PROT %type TOK_PATTERN_TYPE @@ -274,6 +275,16 @@ hdr_expr: value_list: value_list ',' value { $1->append(new MaskedValue($3)); $$ = $1; } + | value_list ',' ranged_value + { + int numVals = $3->length(); + for ( int idx = 0; idx < numVals; idx++ ) + { + MaskedValue* val = (*$3)[idx]; + $1->append(val); + } + $$ = $1; + } | value_list ',' TOK_IDENT { id_to_maskedvallist($3, $1); $$ = $1; } | value @@ -281,6 +292,10 @@ value_list: $$ = new maskedvalue_list(); $$->append(new MaskedValue($1)); } + | ranged_value + { + $$ = $1; + } | TOK_IDENT { $$ = new maskedvalue_list(); @@ -320,6 +335,20 @@ prefix_value: | TOK_IP6 ; +ranged_value: + TOK_INT '-' TOK_INT + { + $$ = new maskedvalue_list(); + for ( int val = $1; val <= $3; val++ ) + { + MaskedValue* masked = new MaskedValue(); + masked->val = val; + masked->mask = 0xffffffff; + $$->append(masked); + } + } + ; + value: TOK_INT { $$.val = $1; $$.mask = 0xffffffff; } diff --git a/src/rule-scan.l b/src/rule-scan.l index f280d6132b..834c672843 100644 --- a/src/rule-scan.l +++ b/src/rule-scan.l @@ -21,10 +21,10 @@ D [0-9]+ H [0-9a-fA-F]+ HEX {H} STRING \"([^\n\"]|\\\")*\" -IDCOMPONENT [0-9a-zA-Z_][0-9a-zA-Z_-]* +IDCOMPONENT [a-zA-Z_][0-9a-zA-Z_-]* ID {IDCOMPONENT}(::{IDCOMPONENT})* IP6 ("["({HEX}:){7}{HEX}"]")|("["0x{HEX}({HEX}|:)*"::"({HEX}|:)*"]")|("["({HEX}|:)*"::"({HEX}|:)*"]")|("["({HEX}|:)*"::"({HEX}|:)*({D}"."){3}{D}"]") -RE \/(\\\/)?([^/]|[^\\]\\\/)*\/ +RE \/(\\\/)?([^/]|[^\\]\\\/)*\/i? META \.[^ \t]+{WS}[^\n]+ PIDCOMPONENT [A-Za-z_][A-Za-z_0-9]* PID {PIDCOMPONENT}(::{PIDCOMPONENT})* @@ -189,8 +189,23 @@ finger { rules_lval.val = Rule::FINGER; return TOK_PATTERN_TYPE; } } {RE} { - *(yytext + strlen(yytext) - 1) = '\0'; - rules_lval.str = yytext + 1; + auto len = strlen(yytext); + + if ( yytext[len - 1] == 'i' ) + { + *(yytext + len - 2) = '\0'; + const char fmt[] = "(?i:%s)"; + int n = len + strlen(fmt); + char* s = new char[n + 5 /* slop */]; + safe_snprintf(s, n + 5, fmt, yytext + 1); + rules_lval.str = s; + } + else + { + *(yytext + len - 1) = '\0'; + rules_lval.str = yytext + 1; + } + return TOK_PATTERN; } diff --git a/src/scan.l b/src/scan.l index c2be426044..9216f8d67a 100644 --- a/src/scan.l +++ b/src/scan.l @@ -29,7 +29,7 @@ #include "Traverse.h" #include "analyzer/Analyzer.h" -#include "broxygen/Manager.h" +#include "zeekygen/Manager.h" #include "plugin/Manager.h" @@ -77,6 +77,17 @@ static string find_relative_file(const string& filename, const string& ext) return find_file(filename, bro_path(), ext); } +static string find_relative_script_file(const string& filename) + { + if ( filename.empty() ) + return string(); + + if ( filename[0] == '.' 
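The Reporter BIFs touched above are callable directly from script land; a minimal sketch (the threshold and message text are made up):

    function note_rate(n: count)
        {
        if ( n > 1000 )
            Reporter::warning(fmt("unusually high event rate: %d", n));
        else
            Reporter::info(fmt("event rate is %d", n));
        }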
) + return find_script_file(filename, SafeDirname(::filename).result); + else + return find_script_file(filename, bro_path()); + } + static ino_t get_inode_num(FILE* f, const string& path) { struct stat b; @@ -141,7 +152,7 @@ D [0-9]+ HEX [0-9a-fA-F]+ IDCOMPONENT [A-Za-z_][A-Za-z_0-9]* ID {IDCOMPONENT}(::{IDCOMPONENT})* -IP6 ("["({HEX}:){7}{HEX}"]")|("["0x{HEX}({HEX}|:)*"::"({HEX}|:)*"]")|("["({HEX}|:)*"::"({HEX}|:)*"]")|("["({HEX}|:)*"::"({HEX}|:)*({D}"."){3}{D}"]") +IP6 ("["({HEX}:){7}{HEX}"]")|("["0x{HEX}({HEX}|:)*"::"({HEX}|:)*"]")|("["({HEX}|:)*"::"({HEX}|:)*"]")|("["({HEX}:){6}({D}"."){3}{D}"]")|("["({HEX}|:)*"::"({HEX}|:)*({D}"."){3}{D}"]") FILE [^ \t\n]+ PREFIX [^ \t\n]+ FLOAT (({D}*"."?{D})|({D}"."?{D}*))([eE][-+]?{D})? @@ -151,19 +162,19 @@ ESCSEQ (\\([^\n]|[0-7]+|x[[:xdigit:]]+)) %% ##!.* { - broxygen_mgr->SummaryComment(::filename, yytext + 3); + zeekygen_mgr->SummaryComment(::filename, yytext + 3); } ##<.* { string hint(cur_enum_type && last_id_tok ? make_full_var_name(current_module.c_str(), last_id_tok) : ""); - broxygen_mgr->PostComment(yytext + 3, hint); + zeekygen_mgr->PostComment(yytext + 3, hint); } ##.* { if ( yytext[2] != '#' ) - broxygen_mgr->PreComment(yytext + 2); + zeekygen_mgr->PreComment(yytext + 2); } #{OWS}@no-test.* return TOK_NO_TEST; @@ -289,37 +300,6 @@ when return TOK_WHEN; &redef return TOK_ATTR_REDEF; &write_expire return TOK_ATTR_EXPIRE_WRITE; -&encrypt { - deprecated_attr(yytext); - return TOK_ATTR_ENCRYPT; - } - -&mergeable { - deprecated_attr(yytext); - return TOK_ATTR_MERGEABLE; - } - -&persistent { - deprecated_attr(yytext); - return TOK_ATTR_PERSISTENT; - } - -&rotate_interval { - deprecated_attr(yytext); - return TOK_ATTR_ROTATE_INTERVAL; - } - -&rotate_size { - deprecated_attr(yytext); - return TOK_ATTR_ROTATE_SIZE; - } - -&synchronized { - ++old_comm_usage_count; - deprecated_attr(yytext); - return TOK_ATTR_SYNCHRONIZED; - } - @deprecated.* { auto num_files = file_stack.length(); auto comment = skip_whitespace(yytext + 11); @@ -363,14 +343,14 @@ when return TOK_WHEN; @load{WS}{FILE} { const char* new_file = skip_whitespace(yytext + 5); // Skip "@load". string loader = ::filename; // load_files may change ::filename, save copy - string loading = find_relative_file(new_file, "bro"); + string loading = find_relative_script_file(new_file); (void) load_files(new_file); - broxygen_mgr->ScriptDependency(loader, loading); + zeekygen_mgr->ScriptDependency(loader, loading); } @load-sigs{WS}{FILE} { const char* file = skip_whitespace(yytext + 10); - string path = find_relative_file(file, "sig"); + string path = find_relative_file(file, ".sig"); int rc = PLUGIN_HOOK_WITH_RESULT(HOOK_LOAD_FILE, HookLoadFile(plugin::Plugin::SIGNATURES, file, path), -1); switch ( rc ) { @@ -430,7 +410,7 @@ when return TOK_WHEN; @unload{WS}{FILE} { // Skip "@unload". 
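With find_relative_script_file() above, @load names are resolved against ZEEKPATH using the new script extensions, and names starting with '.' are resolved relative to the loading script; @load-sigs now resolves with a ".sig" extension. A sketch (the relative names below are hypothetical):

    @load base/utils/site        # finds site.zeek (or a legacy site.bro)
    @load ./local-helpers        # resolved relative to this script's directory
    @load-sigs ./local-rules     # resolved with the ".sig" extension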
const char* file = skip_whitespace(yytext + 7); - string path = find_relative_file(file, "bro"); + string path = find_relative_script_file(file); if ( path.empty() ) reporter->Error("failed find file associated with @unload %s", file); @@ -624,7 +604,7 @@ static bool already_scanned(const string& path) static int load_files(const char* orig_file) { - string file_path = find_relative_file(orig_file, "bro"); + string file_path = find_relative_script_file(orig_file); int rc = PLUGIN_HOOK_WITH_RESULT(HOOK_LOAD_FILE, HookLoadFile(plugin::Plugin::SCRIPT, orig_file, file_path), -1); if ( rc == 1 ) @@ -709,7 +689,7 @@ static int load_files(const char* orig_file) else file_stack.append(new FileInfo); - broxygen_mgr->Script(file_path); + zeekygen_mgr->Script(file_path); DBG_LOG(DBG_SCRIPTS, "Loading %s", file_path.c_str()); @@ -912,7 +892,7 @@ int yywrap() if ( ! did_builtin_init && file_stack.length() == 1 ) { // ### This is a gross hack - we know that the first file - // we parse is init-bare.bro, and after it it's safe to initialize + // we parse is init-bare.zeek, and after it it's safe to initialize // the built-ins. Furthermore, we want to initialize the // built-in's *right* after parsing bro.init, so that other // source files can use built-in's when initializing globals. @@ -947,10 +927,10 @@ int yywrap() } // For each file scanned so far, and for each @prefix, look for a - // prefixed and flattened version of the loaded file in BROPATH. The - // flattening involves taking the path in BROPATH in which the + // prefixed and flattened version of the loaded file in ZEEKPATH. The + // flattening involves taking the path in ZEEKPATH in which the // scanned file lives and replacing '/' path separators with a '.' If - // the scanned file is "__load__.bro", that part of the flattened + // the scanned file is "__load__.zeek", that part of the flattened // file name is discarded. If the prefix is non-empty, it gets placed // in front of the flattened path, separated with another '.' std::list::iterator it; @@ -970,7 +950,7 @@ int yywrap() string canon = without_bropath_component(it->name); string flat = flatten_script_name(canon, prefixes[i]); - string path = find_relative_file(flat, "bro"); + string path = find_relative_script_file(flat); if ( ! path.empty() ) { diff --git a/src/setsignal.c b/src/setsignal.c index 6344820398..d740cc8215 100644 --- a/src/setsignal.c +++ b/src/setsignal.c @@ -2,7 +2,7 @@ * See the file "COPYING" in the main distribution directory for copyright. */ -#include "bro-config.h" /* must appear before first ifdef */ +#include "zeek-config.h" /* must appear before first ifdef */ #include diff --git a/src/stats.bif b/src/stats.bif index bb4d92586f..76bc88083e 100644 --- a/src/stats.bif +++ b/src/stats.bif @@ -20,12 +20,12 @@ RecordType* ReporterStats; %%} ## Returns packet capture statistics. Statistics include the number of -## packets *(i)* received by Bro, *(ii)* dropped, and *(iii)* seen on the +## packets *(i)* received by Zeek, *(ii)* dropped, and *(iii)* seen on the ## link (not always available). ## ## Returns: A record of packet statistics. ## -## .. bro:see:: get_conn_stats +## .. zeek:see:: get_conn_stats ## get_dns_stats ## get_event_stats ## get_file_analysis_stats @@ -70,11 +70,11 @@ function get_net_stats%(%): NetStats return r; %} -## Returns Bro traffic statistics. +## Returns Zeek traffic statistics. ## ## Returns: A record with connection and packet statistics. ## -## .. bro:see:: get_dns_stats +## .. 
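The capture statistics documented above are available from script land; a sketch, with the NetStats field names assumed from their definition in init-bare:

    function report_capture_stats()
        {
        local ns = get_net_stats();
        print fmt("received=%d dropped=%d seen on link=%d",
                  ns$pkts_recvd, ns$pkts_dropped, ns$pkts_link);
        }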
zeek:see:: get_dns_stats ## get_event_stats ## get_file_analysis_stats ## get_gap_stats @@ -121,11 +121,11 @@ function get_conn_stats%(%): ConnStats return r; %} -## Returns Bro process statistics. +## Returns Zeek process statistics. ## ## Returns: A record with process statistics. ## -## .. bro:see:: get_conn_stats +## .. zeek:see:: get_conn_stats ## get_dns_stats ## get_event_stats ## get_file_analysis_stats @@ -182,7 +182,7 @@ function get_proc_stats%(%): ProcStats ## ## Returns: A record with event engine statistics. ## -## .. bro:see:: get_conn_stats +## .. zeek:see:: get_conn_stats ## get_dns_stats ## get_file_analysis_stats ## get_gap_stats @@ -209,7 +209,7 @@ function get_event_stats%(%): EventStats ## ## Returns: A record with reassembler statistics. ## -## .. bro:see:: get_conn_stats +## .. zeek:see:: get_conn_stats ## get_dns_stats ## get_event_stats ## get_file_analysis_stats @@ -238,7 +238,7 @@ function get_reassembler_stats%(%): ReassemblerStats ## ## Returns: A record with DNS lookup statistics. ## -## .. bro:see:: get_conn_stats +## .. zeek:see:: get_conn_stats ## get_event_stats ## get_file_analysis_stats ## get_gap_stats @@ -272,7 +272,7 @@ function get_dns_stats%(%): DNSStats ## ## Returns: A record with timer usage statistics. ## -## .. bro:see:: get_conn_stats +## .. zeek:see:: get_conn_stats ## get_dns_stats ## get_event_stats ## get_file_analysis_stats @@ -300,7 +300,7 @@ function get_timer_stats%(%): TimerStats ## ## Returns: A record with file analysis statistics. ## -## .. bro:see:: get_conn_stats +## .. zeek:see:: get_conn_stats ## get_dns_stats ## get_event_stats ## get_gap_stats @@ -328,7 +328,7 @@ function get_file_analysis_stats%(%): FileAnalysisStats ## ## Returns: A record with thread usage statistics. ## -## .. bro:see:: get_conn_stats +## .. zeek:see:: get_conn_stats ## get_dns_stats ## get_event_stats ## get_file_analysis_stats @@ -354,7 +354,7 @@ function get_thread_stats%(%): ThreadStats ## ## Returns: A record with TCP gap statistics. ## -## .. bro:see:: get_conn_stats +## .. zeek:see:: get_conn_stats ## get_dns_stats ## get_event_stats ## get_file_analysis_stats @@ -386,7 +386,7 @@ function get_gap_stats%(%): GapStats ## ## Returns: A record with matcher statistics. ## -## .. bro:see:: get_conn_stats +## .. zeek:see:: get_conn_stats ## get_dns_stats ## get_event_stats ## get_file_analysis_stats @@ -423,7 +423,7 @@ function get_matcher_stats%(%): MatcherStats ## ## Returns: A record with Broker statistics. ## -## .. bro:see:: get_conn_stats +## .. zeek:see:: get_conn_stats ## get_dns_stats ## get_event_stats ## get_file_analysis_stats @@ -459,7 +459,7 @@ function get_broker_stats%(%): BrokerStats ## ## Returns: A record with reporter statistics. ## -## .. bro:see:: get_conn_stats +## .. zeek:see:: get_conn_stats ## get_dns_stats ## get_event_stats ## get_file_analysis_stats diff --git a/src/strings.bif b/src/strings.bif index e7571d5c70..6c74db77e9 100644 --- a/src/strings.bif +++ b/src/strings.bif @@ -55,9 +55,9 @@ function levenshtein_distance%(s1: string, s2: string%): count ## ## Returns: The concatenation of all (string) arguments. ## -## .. bro:see:: cat cat_sep cat_string_array cat_string_array_n +## .. 
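Both BIFs at the top of the strings.bif hunk are one-liners to use; the statements below can be run directly (for example via zeek -e), with the expected output shown as comments:

    print string_cat("GET ", "/index.html");            # GET /index.html
    print levenshtein_distance("kitten", "sitting");    # 3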
zeek:see:: cat cat_sep ## fmt -## join_string_vec join_string_array +## join_string_vec function string_cat%(...%): string %{ int n = 0; @@ -112,98 +112,20 @@ int vs_to_string_array(vector& vs, TableVal* tbl, } return 1; } - -BroString* cat_string_array_n(TableVal* tbl, int start, int end) - { - vector vs; - string_array_to_vs(tbl, start, end, vs); - return concatenate(vs); - } %%} -## Concatenates all elements in an array of strings. -## -## a: The :bro:type:`string_array` (``table[count] of string``). -## -## Returns: The concatenation of all elements in *a*. -## -## .. bro:see:: cat cat_sep string_cat cat_string_array_n -## fmt -## join_string_vec join_string_array -function cat_string_array%(a: string_array%): string &deprecated - %{ - TableVal* tbl = a->AsTableVal(); - return new StringVal(cat_string_array_n(tbl, 1, a->AsTable()->Length())); - %} - -## Concatenates a specific range of elements in an array of strings. -## -## a: The :bro:type:`string_array` (``table[count] of string``). -## -## start: The array index of the first element of the range. -## -## end: The array index of the last element of the range. -## -## Returns: The concatenation of the range *[start, end]* in *a*. -## -## .. bro:see:: cat string_cat cat_string_array -## fmt -## join_string_vec join_string_array -function cat_string_array_n%(a: string_array, start: count, end: count%): string &deprecated - %{ - TableVal* tbl = a->AsTableVal(); - return new StringVal(cat_string_array_n(tbl, start, end)); - %} - -## Joins all values in the given array of strings with a separator placed -## between each element. -## -## sep: The separator to place between each element. -## -## a: The :bro:type:`string_array` (``table[count] of string``). -## -## Returns: The concatenation of all elements in *a*, with *sep* placed -## between each element. -## -## .. bro:see:: cat cat_sep string_cat cat_string_array cat_string_array_n -## fmt -## join_string_vec -function join_string_array%(sep: string, a: string_array%): string &deprecated - %{ - vector vs; - TableVal* tbl = a->AsTableVal(); - int n = a->AsTable()->Length(); - - for ( int i = 1; i <= n; ++i ) - { - Val* ind = val_mgr->GetCount(i); - Val* v = tbl->Lookup(ind); - if ( ! v ) - return 0; - - vs.push_back(v->AsString()); - Unref(ind); - - if ( i < n ) - vs.push_back(sep->AsString()); - } - - return new StringVal(concatenate(vs)); - %} - ## Joins all values in the given vector of strings with a separator placed ## between each element. ## ## sep: The separator to place between each element. ## -## vec: The :bro:type:`string_vec` (``vector of string``). +## vec: The :zeek:type:`string_vec` (``vector of string``). ## ## Returns: The concatenation of all elements in *vec*, with *sep* placed ## between each element. ## -## .. bro:see:: cat cat_sep string_cat cat_string_array cat_string_array_n +## .. zeek:see:: cat cat_sep string_cat ## fmt -## join_string_array function join_string_vec%(vec: string_vec, sep: string%): string %{ ODesc d; @@ -231,39 +153,6 @@ function join_string_vec%(vec: string_vec, sep: string%): string return new StringVal(s); %} -## Sorts an array of strings. -## -## a: The :bro:type:`string_array` (``table[count] of string``). -## -## Returns: A sorted copy of *a*. -## -## .. 
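A sketch of the surviving split/join BIFs whose documentation is rewritten above (the function name is illustrative; note that join_string_vec takes the vector first, as in the signature above):

    function split_demo(): string
        {
        local parts = split_string("a-b--cd", /-+/);    # 0-based vector: a, b, cd
        print split_string1("key=value=x", /=/);        # [key, value=x]
        return join_string_vec(parts, "+");             # "a+b+cd"
        }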
bro:see:: sort -function sort_string_array%(a: string_array%): string_array &deprecated - %{ - TableVal* tbl = a->AsTableVal(); - int n = a->AsTable()->Length(); - - vector vs; - string_array_to_vs(tbl, 1, n, vs); - - unsigned int i, j; - for ( i = 0; i < vs.size(); ++i ) - { - const BroString* x = vs[i]; - for ( j = i; j > 0; --j ) - if ( Bstr_cmp(vs[j-1], x) <= 0 ) - break; - else - vs[j] = vs[j-1]; - vs[j] = x; - } - // sort(vs.begin(), vs.end(), Bstr_cmp); - - TableVal* b = new TableVal(string_array); - vs_to_string_array(vs, b, 1, n); - return b; - %} - ## Returns an edited version of a string that applies a special ## "backspace character" (usually ``\x08`` for backspace or ``\x7f`` for DEL). ## For example, ``edit("hello there", "e")`` returns ``"llo t"``. @@ -271,14 +160,14 @@ function sort_string_array%(a: string_array%): string_array &deprecated ## arg_s: The string to edit. ## ## arg_edit_char: A string of exactly one character that represents the -## "backspace character". If it is longer than one character Bro +## "backspace character". If it is longer than one character Zeek ## generates a run-time error and uses the first character in ## the string. ## ## Returns: An edited version of *arg_s* where *arg_edit_char* triggers the ## deletion of the last character. ## -## .. bro:see:: clean +## .. zeek:see:: clean ## to_string_literal ## escape_string ## strip @@ -558,27 +447,7 @@ Val* do_sub(StringVal* str_val, RE_Matcher* re, StringVal* repl, int do_all) ## Returns: An array of strings where each element corresponds to a substring ## in *str* separated by *re*. ## -## .. bro:see:: split1 split_all split_n str_split split_string1 split_string_all split_string_n str_split -## -## .. note:: The returned table starts at index 1. Note that conceptually the -## return value is meant to be a vector and this might change in the -## future. -## -function split%(str: string, re: pattern%): string_array &deprecated - %{ - return do_split(str, re, 0, 0); - %} - -## Splits a string into an array of strings according to a pattern. -## -## str: The string to split. -## -## re: The pattern describing the element separator in *str*. -## -## Returns: An array of strings where each element corresponds to a substring -## in *str* separated by *re*. -## -## .. bro:see:: split_string1 split_string_all split_string_n str_split +## .. zeek:see:: split_string1 split_string_all split_string_n str_split ## function split_string%(str: string, re: pattern%): string_vec %{ @@ -586,27 +455,7 @@ function split_string%(str: string, re: pattern%): string_vec %} ## Splits a string *once* into a two-element array of strings according to a -## pattern. This function is the same as :bro:id:`split`, but *str* is only -## split once (if possible) at the earliest position and an array of two strings -## is returned. -## -## str: The string to split. -## -## re: The pattern describing the separator to split *str* in two pieces. -## -## Returns: An array of strings with two elements in which the first represents -## the substring in *str* up to the first occurence of *re*, and the -## second everything after *re*. An array of one string is returned -## when *s* cannot be split. -## -## .. bro:see:: split split_all split_n str_split split_string split_string_all split_string_n str_split -function split1%(str: string, re: pattern%): string_array &deprecated - %{ - return do_split(str, re, 0, 1); - %} - -## Splits a string *once* into a two-element array of strings according to a -## pattern. 
This function is the same as :bro:id:`split_string`, but *str* is +## pattern. This function is the same as :zeek:id:`split_string`, but *str* is ## only split once (if possible) at the earliest position and an array of two ## strings is returned. ## @@ -619,34 +468,14 @@ function split1%(str: string, re: pattern%): string_array &deprecated ## second everything after *re*. An array of one string is returned ## when *s* cannot be split. ## -## .. bro:see:: split_string split_string_all split_string_n str_split +## .. zeek:see:: split_string split_string_all split_string_n str_split function split_string1%(str: string, re: pattern%): string_vec %{ return do_split_string(str, re, 0, 1); %} ## Splits a string into an array of strings according to a pattern. This -## function is the same as :bro:id:`split`, except that the separators are -## returned as well. For example, ``split_all("a-b--cd", /(\-)+/)`` returns -## ``{"a", "-", "b", "--", "cd"}``: odd-indexed elements do not match the -## pattern and even-indexed ones do. -## -## str: The string to split. -## -## re: The pattern describing the element separator in *str*. -## -## Returns: An array of strings where each two successive elements correspond -## to a substring in *str* of the part not matching *re* (odd-indexed) -## and the part that matches *re* (even-indexed). -## -## .. bro:see:: split split1 split_n str_split split_string split_string1 split_string_n str_split -function split_all%(str: string, re: pattern%): string_array &deprecated - %{ - return do_split(str, re, 1, 0); - %} - -## Splits a string into an array of strings according to a pattern. This -## function is the same as :bro:id:`split_string`, except that the separators +## function is the same as :zeek:id:`split_string`, except that the separators ## are returned as well. For example, ``split_string_all("a-b--cd", /(\-)+/)`` ## returns ``{"a", "-", "b", "--", "cd"}``: odd-indexed elements do match the ## pattern and even-indexed ones do not. @@ -659,15 +488,15 @@ function split_all%(str: string, re: pattern%): string_array &deprecated ## to a substring in *str* of the part not matching *re* (even-indexed) ## and the part that matches *re* (odd-indexed). ## -## .. bro:see:: split_string split_string1 split_string_n str_split +## .. zeek:see:: split_string split_string1 split_string_n str_split function split_string_all%(str: string, re: pattern%): string_vec %{ return do_split_string(str, re, 1, 0); %} ## Splits a string a given number of times into an array of strings according -## to a pattern. This function is similar to :bro:id:`split1` and -## :bro:id:`split_all`, but with customizable behavior with respect to +## to a pattern. This function is similar to :zeek:id:`split_string1` and +## :zeek:id:`split_string_all`, but with customizable behavior with respect to ## including separators in the result and the number of times to split. ## ## str: The string to split. @@ -675,33 +504,7 @@ function split_string_all%(str: string, re: pattern%): string_vec ## re: The pattern describing the element separator in *str*. ## ## incl_sep: A flag indicating whether to include the separator matches in the -## result (as in :bro:id:`split_all`). -## -## max_num_sep: The number of times to split *str*. -## -## Returns: An array of strings where, if *incl_sep* is true, each two -## successive elements correspond to a substring in *str* of the part -## not matching *re* (odd-indexed) and the part that matches *re* -## (even-indexed). -## -## .. 
bro:see:: split split1 split_all str_split split_string split_string1 split_string_all str_split -function split_n%(str: string, re: pattern, - incl_sep: bool, max_num_sep: count%): string_array &deprecated - %{ - return do_split(str, re, incl_sep, max_num_sep); - %} - -## Splits a string a given number of times into an array of strings according -## to a pattern. This function is similar to :bro:id:`split_string1` and -## :bro:id:`split_string_all`, but with customizable behavior with respect to -## including separators in the result and the number of times to split. -## -## str: The string to split. -## -## re: The pattern describing the element separator in *str*. -## -## incl_sep: A flag indicating whether to include the separator matches in the -## result (as in :bro:id:`split_string_all`). +## result (as in :zeek:id:`split_string_all`). ## ## max_num_sep: The number of times to split *str*. ## @@ -710,7 +513,7 @@ function split_n%(str: string, re: pattern, ## not matching *re* (even-indexed) and the part that matches *re* ## (odd-indexed). ## -## .. bro:see:: split_string split_string1 split_string_all str_split +## .. zeek:see:: split_string split_string1 split_string_all str_split function split_string_n%(str: string, re: pattern, incl_sep: bool, max_num_sep: count%): string_vec %{ @@ -729,7 +532,7 @@ function split_string_n%(str: string, re: pattern, ## Returns: A copy of *str* with the first occurence of *re* replaced with ## *repl*. ## -## .. bro:see:: gsub subst_string +## .. zeek:see:: gsub subst_string function sub%(str: string, re: pattern, repl: string%): string %{ return do_sub(str, re, repl, 0); @@ -746,7 +549,7 @@ function sub%(str: string, re: pattern, repl: string%): string ## ## Returns: A copy of *str* with all occurrences of *re* replaced with *repl*. ## -## .. bro:see:: sub subst_string +## .. zeek:see:: sub subst_string function gsub%(str: string, re: pattern, repl: string%): string %{ return do_sub(str, re, repl, 1); @@ -775,7 +578,7 @@ function strcmp%(s1: string, s2: string%): int ## Returns: The location of *little* in *big*, or 0 if *little* is not found in ## *big*. ## -## .. bro:see:: find_all find_last +## .. zeek:see:: find_all find_last function strstr%(big: string, little: string%): count %{ return val_mgr->GetCount( @@ -792,7 +595,7 @@ function strstr%(big: string, little: string%): count ## ## Returns: A copy of *s* where each occurrence of *from* is replaced with *to*. ## -## .. bro:see:: sub gsub +## .. zeek:see:: sub gsub function subst_string%(s: string, from: string, to: string%): string %{ const int little_len = from->Len(); @@ -843,7 +646,7 @@ function subst_string%(s: string, from: string, to: string%): string ## by ``isascii`` and ``isupper``) folded to lowercase ## (via ``tolower``). ## -## .. bro:see:: to_upper is_ascii +## .. zeek:see:: to_upper is_ascii function to_lower%(str: string%): string %{ const u_char* s = str->Bytes(); @@ -872,7 +675,7 @@ function to_lower%(str: string%): string ## by ``isascii`` and ``islower``) folded to uppercase ## (via ``toupper``). ## -## .. bro:see:: to_lower is_ascii +## .. zeek:see:: to_lower is_ascii function to_upper%(str: string%): string %{ const u_char* s = str->Bytes(); @@ -900,13 +703,13 @@ function to_upper%(str: string%): string ## ## If the string does not yet have a trailing NUL, one is added internally. 
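For the substitution BIFs above, a short sketch (the helper name and sample patterns are illustrative):

    function scrub(uri: string): string
        {
        local s = sub(uri, /https?:\/\//, "");      # drop the first occurrence (the scheme)
        s = gsub(s, /[0-9]+/, "N");                 # replace every run of digits
        return subst_string(s, "%20", " ");         # literal, non-regex replacement
        }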
## -## In contrast to :bro:id:`escape_string`, this encoding is *not* fully reversible.` +## In contrast to :zeek:id:`escape_string`, this encoding is *not* fully reversible.` ## ## str: The string to escape. ## ## Returns: The escaped string. ## -## .. bro:see:: to_string_literal escape_string +## .. zeek:see:: to_string_literal escape_string function clean%(str: string%): string %{ char* s = str->AsString()->Render(); @@ -924,7 +727,7 @@ function clean%(str: string%): string ## ## Returns: The escaped string. ## -## .. bro:see:: clean escape_string +## .. zeek:see:: clean escape_string function to_string_literal%(str: string%): string %{ char* s = str->AsString()->Render(BroString::BRO_STRING_LITERAL); @@ -938,7 +741,7 @@ function to_string_literal%(str: string%): string ## Returns: False if any byte value of *str* is greater than 127, and true ## otherwise. ## -## .. bro:see:: to_upper to_lower +## .. zeek:see:: to_upper to_lower function is_ascii%(str: string%): bool %{ int n = str->Len(); @@ -957,13 +760,13 @@ function is_ascii%(str: string%): bool ## - values not in *[32, 126]* to ``\xXX`` ## - ``\`` to ``\\`` ## -## In contrast to :bro:id:`clean`, this encoding is fully reversible.` +## In contrast to :zeek:id:`clean`, this encoding is fully reversible.` ## ## str: The string to escape. ## ## Returns: The escaped string. ## -## .. bro:see:: clean to_string_literal +## .. zeek:see:: clean to_string_literal function escape_string%(s: string%): string %{ char* escstr = s->AsString()->Render(BroString::ESC_HEX | BroString::ESC_ESC); @@ -1022,7 +825,7 @@ function str_smith_waterman%(s1: string, s2: string, params: sw_params%) : sw_su ## ## Returns: A vector of strings. ## -## .. bro:see:: split split1 split_all split_n +## .. zeek:see:: split_string split_string1 split_string_all split_string_n function str_split%(s: string, idx: index_vec%): string_vec %{ vector* idx_v = idx->AsVector(); @@ -1057,7 +860,7 @@ function str_split%(s: string, idx: index_vec%): string_vec ## ## Returns: A copy of *str* with leading and trailing whitespace removed. ## -## .. bro:see:: sub gsub lstrip rstrip +## .. zeek:see:: sub gsub lstrip rstrip function strip%(str: string%): string %{ const u_char* s = str->Bytes(); @@ -1105,7 +908,7 @@ static bool should_strip(u_char c, const BroString* strip_chars) ## Returns: A copy of *str* with the characters in *chars* removed from ## the beginning. ## -## .. bro:see:: sub gsub strip rstrip +## .. zeek:see:: sub gsub strip rstrip function lstrip%(str: string, chars: string &default=" \t\n\r\v\f"%): string %{ const u_char* s = str->Bytes(); @@ -1136,7 +939,7 @@ function lstrip%(str: string, chars: string &default=" \t\n\r\v\f"%): string ## Returns: A copy of *str* with the characters in *chars* removed from ## the end. ## -## .. bro:see:: sub gsub strip lstrip +## .. zeek:see:: sub gsub strip lstrip function rstrip%(str: string, chars: string &default=" \t\n\r\v\f"%): string %{ const u_char* s = str->Bytes(); @@ -1180,7 +983,7 @@ function string_fill%(len: int, source: string%): string ## Takes a string and escapes characters that would allow execution of ## commands at the shell level. Must be used before including strings in -## :bro:id:`system` or similar calls. +## :zeek:id:`system` or similar calls. ## ## source: The string to escape. ## @@ -1191,7 +994,7 @@ function string_fill%(len: int, source: string%): string ## backslash-escaped string in double-quotes to ultimately preserve ## the literal value of all input characters. ## -## .. 
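The escaping and stripping BIFs documented above differ mainly in reversibility; a sketch:

    function tidy(raw: string)
        {
        print escape_string(raw);            # reversible (\xXX and \\ escaping)
        print clean(raw);                    # printable, but not fully reversible
        print strip(raw);                    # without leading/trailing whitespace
        print lstrip(raw, "/");              # strip a custom character set from the front
        }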
bro:see:: system safe_shell_quote +## .. zeek:see:: system safe_shell_quote function safe_shell_quote%(source: string%): string %{ unsigned j = 0; @@ -1220,9 +1023,9 @@ function safe_shell_quote%(source: string%): string ## Takes a string and escapes characters that would allow execution of ## commands at the shell level. Must be used before including strings in -## :bro:id:`system` or similar calls. This function is deprecated, use -## :bro:see:`safe_shell_quote` as a replacement. The difference is that -## :bro:see:`safe_shell_quote` automatically returns a value that is +## :zeek:id:`system` or similar calls. This function is deprecated, use +## :zeek:see:`safe_shell_quote` as a replacement. The difference is that +## :zeek:see:`safe_shell_quote` automatically returns a value that is ## wrapped in double-quotes, which is required to correctly and fully ## escape any characters that might be interpreted by the shell. ## @@ -1230,7 +1033,7 @@ function safe_shell_quote%(source: string%): string ## ## Returns: A shell-escaped version of *source*. ## -## .. bro:see:: system safe_shell_quote +## .. zeek:see:: system safe_shell_quote function str_shell_escape%(source: string%): string &deprecated %{ unsigned j = 0; @@ -1267,7 +1070,7 @@ function str_shell_escape%(source: string%): string &deprecated ## ## Returns: The set of strings in *str* that match *re*, or the empty set. ## -## .. bro:see: find_last strstr +## .. zeek:see: find_last strstr function find_all%(str: string, re: pattern%) : string_set %{ TableVal* a = new TableVal(string_set); @@ -1301,7 +1104,7 @@ function find_all%(str: string, re: pattern%) : string_set ## ## Returns: The last string in *str* that matches *re*, or the empty string. ## -## .. bro:see: find_all strstr +## .. zeek:see: find_all strstr function find_last%(str: string, re: pattern%) : string %{ const u_char* s = str->Bytes(); @@ -1325,7 +1128,7 @@ function find_last%(str: string, re: pattern%) : string ## ## Returns: The hex dump of the given string. ## -## .. bro:see:: string_to_ascii_hex bytestring_to_hexstr +## .. zeek:see:: string_to_ascii_hex bytestring_to_hexstr ## ## .. note:: Based on Netdude's hex editor code. ## diff --git a/src/strsep.c b/src/strsep.c index 8540ac3688..0c65402441 100644 --- a/src/strsep.c +++ b/src/strsep.c @@ -31,7 +31,7 @@ * SUCH DAMAGE. */ -#include "bro-config.h" +#include "zeek-config.h" #ifndef HAVE_STRSEP diff --git a/src/threading/BasicThread.cc b/src/threading/BasicThread.cc index 95bfd8acd0..67434957e5 100644 --- a/src/threading/BasicThread.cc +++ b/src/threading/BasicThread.cc @@ -1,7 +1,7 @@ #include -#include "bro-config.h" +#include "zeek-config.h" #include "BasicThread.h" #include "Manager.h" #include "pthread.h" diff --git a/src/threading/Formatter.cc b/src/threading/Formatter.cc index b881962732..395a7fefa6 100644 --- a/src/threading/Formatter.cc +++ b/src/threading/Formatter.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. 
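A sketch for the search BIFs above (pattern and names are illustrative): find_all yields an unordered set of matches, find_last a single string, and strstr a 1-based byte position:

    function find_numbers(s: string)
        {
        for ( m in find_all(s, /[0-9]+/) )
            print m;                         # each distinct match
        print find_last(s, /[0-9]+/);        # right-most match, or ""
        print strstr(s, "Host:");            # 1-based position, 0 if absent
        }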
-#include "bro-config.h" +#include "zeek-config.h" #include #include diff --git a/src/threading/SerialTypes.cc b/src/threading/SerialTypes.cc index 8468d19ea8..bd85b846f9 100644 --- a/src/threading/SerialTypes.cc +++ b/src/threading/SerialTypes.cc @@ -2,8 +2,8 @@ #include "SerialTypes.h" -#include "../RemoteSerializer.h" - +#include "SerializationFormat.h" +#include "Reporter.h" using namespace threading; @@ -87,11 +87,16 @@ string Field::TypeName() const Value::~Value() { - if ( (type == TYPE_ENUM || type == TYPE_STRING || type == TYPE_FILE || type == TYPE_FUNC) - && present ) + if ( ! present ) + return; + + if ( type == TYPE_ENUM || type == TYPE_STRING || type == TYPE_FILE || type == TYPE_FUNC ) delete [] val.string_val.data; - if ( type == TYPE_TABLE && present ) + else if ( type == TYPE_PATTERN ) + delete [] val.pattern_text_val; + + else if ( type == TYPE_TABLE ) { for ( int i = 0; i < val.set_val.size; i++ ) delete val.set_val.vals[i]; @@ -99,7 +104,7 @@ Value::~Value() delete [] val.set_val.vals; } - if ( type == TYPE_VECTOR && present ) + else if ( type == TYPE_VECTOR ) { for ( int i = 0; i < val.vector_val.size; i++ ) delete val.vector_val.vals[i]; @@ -414,4 +419,3 @@ bool Value::Write(SerializationFormat* fmt) const return false; } - diff --git a/src/threading/SerialTypes.h b/src/threading/SerialTypes.h index 5a8361feba..b9a9c6c718 100644 --- a/src/threading/SerialTypes.h +++ b/src/threading/SerialTypes.h @@ -13,7 +13,6 @@ using namespace std; class SerializationFormat; -class RemoteSerializer; namespace threading { @@ -78,8 +77,6 @@ struct Field { string TypeName() const; private: - friend class ::RemoteSerializer; - // Force usage of constructor above. Field() {} }; @@ -129,6 +126,7 @@ struct Value { vec_t vector_val; addr_t addr_val; subnet_t subnet_val; + const char* pattern_text_val; struct { char* data; diff --git a/src/threading/formatters/Ascii.cc b/src/threading/formatters/Ascii.cc index 94d450a86f..b828616972 100644 --- a/src/threading/formatters/Ascii.cc +++ b/src/threading/formatters/Ascii.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include #include @@ -325,6 +325,29 @@ threading::Value* Ascii::ParseValue(const string& s, const string& name, TypeTag break; } + case TYPE_PATTERN: + { + string candidate = get_unescaped_string(s); + // A string is a candidate pattern iff it begins and ends with + // a '/'. Rather or not the rest of the string is legal will + // be determined later when it is given to the RE engine. + if ( candidate.size() >= 2 ) + { + if ( candidate.front() == candidate.back() && + candidate.back() == '/' ) + { + // Remove the '/'s + candidate.erase(0, 1); + candidate.erase(candidate.size() - 1); + val->val.pattern_text_val = copy_string(candidate.c_str()); + break; + } + } + + GetThread()->Error(GetThread()->Fmt("String '%s' contained no parseable pattern.", candidate.c_str())); + goto parse_error; + } + case TYPE_TABLE: case TYPE_VECTOR: // First - common initialization diff --git a/src/threading/formatters/JSON.cc b/src/threading/formatters/JSON.cc index 73e9489dc5..a324a08530 100644 --- a/src/threading/formatters/JSON.cc +++ b/src/threading/formatters/JSON.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. 
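The Ascii formatter hunk above lets threading readers parse pattern fields: a value is accepted only when written between '/' delimiters, and the text in between is later handed to the RE engine. A sketch of how that might surface in the input framework; the file name, record types and the Input::add_table options are assumptions, and the input file needs the usual #fields header:

    type Idx: record { name: string; };
    type Val: record { re: pattern; };
    global res: table[string] of Val = table();

    event zeek_init()
        {
        # patterns.tsv rows would look like:  bad-agent<TAB>/curl|wget/
        Input::add_table([$source="patterns.tsv", $name="patterns",
                          $idx=Idx, $val=Val, $destination=res]);
        }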
-#include "bro-config.h" +#include "zeek-config.h" #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS diff --git a/src/types.bif b/src/types.bif index babccb0f0d..98d1df0c52 100644 --- a/src/types.bif +++ b/src/types.bif @@ -1,4 +1,4 @@ -##! Declaration of various types that the Bro core uses internally. +##! Declaration of various types that the Zeek core uses internally. enum rpc_status %{ RPC_SUCCESS, @@ -141,7 +141,7 @@ enum createmode_t %{ %} # Declare record types that we want to access from the event engine. These are -# defined in init-bare.bro. +# defined in init-bare.zeek. type info_t: record; type fattr_t: record; type sattr_t: record; diff --git a/src/util-config.h.in b/src/util-config.h.in index c0817b7f5f..efe0179390 100644 --- a/src/util-config.h.in +++ b/src/util-config.h.in @@ -1,3 +1,3 @@ -#define BRO_SCRIPT_INSTALL_PATH "@BRO_SCRIPT_INSTALL_PATH@" +#define ZEEK_SCRIPT_INSTALL_PATH "@ZEEK_SCRIPT_INSTALL_PATH@" #define BRO_PLUGIN_INSTALL_PATH "@BRO_PLUGIN_INSTALL_PATH@" -#define DEFAULT_BROPATH "@DEFAULT_BROPATH@" +#define DEFAULT_ZEEKPATH "@DEFAULT_ZEEKPATH@" diff --git a/src/util.cc b/src/util.cc index cce49a7f6d..2a6a5c37c4 100644 --- a/src/util.cc +++ b/src/util.cc @@ -1,6 +1,6 @@ // See the file "COPYING" in the main distribution directory for copyright. -#include "bro-config.h" +#include "zeek-config.h" #include "util-config.h" #ifdef TIME_WITH_SYS_TIME @@ -20,6 +20,7 @@ #endif #include +#include #include #include #include @@ -52,11 +53,13 @@ #include "iosource/Manager.h" /** - * Return IP address without enclosing brackets and any leading 0x. + * Return IP address without enclosing brackets and any leading 0x. Also + * trims leading/trailing whitespace. */ std::string extract_ip(const std::string& i) { - std::string s(skip_whitespace(i.c_str())); + std::string s(strstrip(i)); + if ( s.size() > 0 && s[0] == '[' ) s.erase(0, 1); @@ -955,10 +958,10 @@ const std::string& bro_path() { if ( bro_path_value.empty() ) { - const char* path = getenv("BROPATH"); + const char* path = zeekenv("ZEEKPATH"); if ( ! path ) - path = DEFAULT_BROPATH; + path = DEFAULT_ZEEKPATH; bro_path_value = path; } @@ -976,7 +979,7 @@ extern void add_to_bro_path(const string& dir) const char* bro_plugin_path() { - const char* path = getenv("BRO_PLUGIN_PATH"); + const char* path = zeekenv("ZEEK_PLUGIN_PATH"); if ( ! path ) path = BRO_PLUGIN_INSTALL_PATH; @@ -986,7 +989,7 @@ const char* bro_plugin_path() const char* bro_plugin_activate() { - const char* names = getenv("BRO_PLUGIN_ACTIVATE"); + const char* names = zeekenv("ZEEK_PLUGIN_ACTIVATE"); if ( ! 
names ) names = ""; @@ -1007,7 +1010,20 @@ string bro_prefixes() return rval; } -const char* PACKAGE_LOADER = "__load__.bro"; +const array script_extensions = {".zeek", ".bro"}; + +bool is_package_loader(const string& path) + { + string filename(std::move(SafeBasename(path).result)); + + for ( const string& ext : script_extensions ) + { + if ( filename == "__load__" + ext ) + return true; + } + + return false; + } FILE* open_file(const string& path, const string& mode) { @@ -1034,13 +1050,22 @@ static bool can_read(const string& path) FILE* open_package(string& path, const string& mode) { string arg_path = path; - path.append("/").append(PACKAGE_LOADER); + path.append("/__load__"); - if ( can_read(path) ) - return open_file(path, mode); + for ( const string& ext : script_extensions ) + { + string p = path + ext; + if ( can_read(p) ) + { + path.append(ext); + return open_file(path, mode); + } + } + path.append(script_extensions[0]); + string package_loader = "__load__" + script_extensions[0]; reporter->Error("Failed to open package '%s': missing '%s' file", - arg_path.c_str(), PACKAGE_LOADER); + arg_path.c_str(), package_loader.c_str()); return 0; } @@ -1123,7 +1148,7 @@ string flatten_script_name(const string& name, const string& prefix) if ( ! rval.empty() ) rval.append("."); - if ( SafeBasename(name).result == PACKAGE_LOADER ) + if ( is_package_loader(name) ) rval.append(SafeDirname(name).result); else rval.append(name); @@ -1221,7 +1246,7 @@ string without_bropath_component(const string& path) } static string find_file_in_path(const string& filename, const string& path, - const string& opt_ext = "") + const vector& opt_ext) { if ( filename.empty() ) return string(); @@ -1239,10 +1264,13 @@ static string find_file_in_path(const string& filename, const string& path, if ( ! opt_ext.empty() ) { - string with_ext = abs_path + '.' + opt_ext; + for ( const string& ext : opt_ext ) + { + string with_ext = abs_path + ext; - if ( can_read(with_ext) ) - return with_ext; + if ( can_read(with_ext) ) + return with_ext; + } } if ( can_read(abs_path) ) @@ -1257,9 +1285,13 @@ string find_file(const string& filename, const string& path_set, vector paths; tokenize_string(path_set, ":", &paths); + vector ext; + if ( ! opt_ext.empty() ) + ext.push_back(opt_ext); + for ( size_t n = 0; n < paths.size(); ++n ) { - string f = find_file_in_path(filename, paths[n], opt_ext); + string f = find_file_in_path(filename, paths[n], ext); if ( ! f.empty() ) return f; @@ -1268,6 +1300,40 @@ string find_file(const string& filename, const string& path_set, return string(); } +static bool ends_with(const std::string& s, const std::string& ending) + { + if ( ending.size() > s.size() ) + return false; + + return std::equal(ending.rbegin(), ending.rend(), s.rbegin()); + } + +string find_script_file(const string& filename, const string& path_set) + { + vector paths; + tokenize_string(path_set, ":", &paths); + + vector ext(script_extensions.begin(), script_extensions.end()); + + for ( size_t n = 0; n < paths.size(); ++n ) + { + string f = find_file_in_path(filename, paths[n], ext); + + if ( ! f.empty() ) + return f; + } + + if ( ends_with(filename, ".bro") ) + { + // We were looking for a file explicitly ending in .bro and didn't + // find it, so fall back to one ending in .zeek, if it exists. + auto fallback = string(filename.data(), filename.size() - 4) + ".zeek"; + return find_script_file(fallback, path_set); + } + + return string(); + } + FILE* rotate_file(const char* name, RecordVal* rotate_info) { // Build file names. 
@@ -1322,7 +1388,7 @@ FILE* rotate_file(const char* name, RecordVal* rotate_info) const char* log_file_name(const char* tag) { - const char* env = getenv("BRO_LOG_SUFFIX"); + const char* env = zeekenv("ZEEK_LOG_SUFFIX"); return fmt("%s.%s", tag, (env ? env : "log")); } @@ -1441,13 +1507,11 @@ double current_time(bool real) double t = double(tv.tv_sec) + double(tv.tv_usec) / 1e6; - const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); - - if ( ! pseudo_realtime || real || pkt_srcs.empty() ) + if ( ! pseudo_realtime || real || ! iosource_mgr || iosource_mgr->GetPktSrcs().empty() ) return t; // This obviously only works for a single source ... - iosource::PktSrc* src = pkt_srcs.front(); + iosource::PktSrc* src = iosource_mgr->GetPktSrcs().front(); if ( net_is_processing_suspended() ) return src->CurrentPacketTimestamp(); @@ -1777,3 +1841,34 @@ void bro_strerror_r(int bro_errno, char* buf, size_t buflen) // GNU vs. XSI flavors make it harder to use strerror_r. strerror_r_helper(res, buf, buflen); } + +char* zeekenv(const char* name) + { + static std::map legacy_vars = { + { "ZEEKPATH", "BROPATH" }, + { "ZEEK_PLUGIN_PATH", "BRO_PLUGIN_PATH" }, + { "ZEEK_PLUGIN_ACTIVATE", "BRO_PLUGIN_ACTIVATE" }, + { "ZEEK_PREFIXES", "BRO_PREFIXES" }, + { "ZEEK_DNS_FAKE", "BRO_DNS_FAKE" }, + { "ZEEK_SEED_FILE", "BRO_SEED_FILE" }, + { "ZEEK_LOG_SUFFIX", "BRO_LOG_SUFFIX" }, + { "ZEEK_PROFILER_FILE", "BRO_PROFILER_FILE" }, + { "ZEEK_DISABLE_ZEEKYGEN", "BRO_DISABLE_BROXYGEN" }, + { "ZEEK_DEFAULT_CONNECT_RETRY", "BRO_DEFAULT_CONNECT_RETRY" }, + { "ZEEK_BROKER_MAX_THREADS", "BRO_BROKER_MAX_THREADS" }, + { "ZEEK_DEFAULT_LISTEN_ADDRESS", "BRO_DEFAULT_LISTEN_ADDRESS" }, + { "ZEEK_DEFAULT_LISTEN_RETRY", "BRO_DEFAULT_LISTEN_RETRY" }, + }; + + auto rval = getenv(name); + + if ( rval ) + return rval; + + auto it = legacy_vars.find(name); + + if ( it == legacy_vars.end() ) + return rval; + + return getenv(it->second); + } diff --git a/src/util.h b/src/util.h index 232275d9c9..f019f4cbc1 100644 --- a/src/util.h +++ b/src/util.h @@ -4,12 +4,12 @@ #define util_h #ifdef __GNUC__ - #define BRO_DEPRECATED(msg) __attribute__ ((deprecated(msg))) + #define ZEEK_DEPRECATED(msg) __attribute__ ((deprecated(msg))) #elif defined(_MSC_VER) - #define BRO_DEPRECATED(msg) __declspec(deprecated(msg)) func + #define ZEEK_DEPRECATED(msg) __declspec(deprecated(msg)) func #else - #pragma message("Warning: BRO_DEPRECATED macro not implemented") - #define BRO_DEPRECATED(msg) + #pragma message("Warning: ZEEK_DEPRECATED macro not implemented") + #define ZEEK_DEPRECATED(msg) #endif // Expose C99 functionality from inttypes.h, which would otherwise not be @@ -26,14 +26,16 @@ #include #include +#include #include #include #include #include #include #include +#include // std::unique_ptr -#include "bro-config.h" +#include "zeek-config.h" #include "siphash24.h" #ifdef DEBUG @@ -248,16 +250,16 @@ static const SourceID SOURCE_BROKER = 0xffffffff; extern void pinpoint(); extern int int_list_cmp(const void* v1, const void* v2); -// Contains the name of the script file that gets read -// when a package is loaded (i.e., "__load__.bro). 
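zeekenv() above prefers the new ZEEK_* names and quietly falls back to the legacy BRO_* spellings from its map. The same behavior is visible from scripts through the getenv BIF, which a later hunk in this patch switches over to zeekenv(); for example:

    function log_suffix(): string
        {
        # Returns ZEEK_LOG_SUFFIX, or BRO_LOG_SUFFIX if only the legacy
        # variable is set, or "" if neither is defined.
        return getenv("ZEEK_LOG_SUFFIX");
        }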
-extern const char* PACKAGE_LOADER; - extern const std::string& bro_path(); extern const char* bro_magic_path(); extern const char* bro_plugin_path(); extern const char* bro_plugin_activate(); extern std::string bro_prefixes(); +extern const std::array script_extensions; + +bool is_package_loader(const std::string& path); + extern void add_to_bro_path(const std::string& dir); @@ -308,7 +310,7 @@ std::string implode_string_vector(const std::vector& v, /** * Flatten a script name by replacing '/' path separators with '.'. - * @param file A path to a Bro script. If it is a __load__.bro, that part + * @param file A path to a Bro script. If it is a __load__.zeek, that part * is discarded when constructing the flattened the name. * @param prefix A string to prepend to the flattened script name. * @return The flattened script name. @@ -325,9 +327,9 @@ std::string flatten_script_name(const std::string& name, std::string normalize_path(const std::string& path); /** - * Strip the BROPATH component from a path. - * @param path A file/directory path that may be within a BROPATH component. - * @return *path* minus the common BROPATH component (if any) removed. + * Strip the ZEEKPATH component from a path. + * @param path A file/directory path that may be within a ZEEKPATH component. + * @return *path* minus the common ZEEKPATH component (if any) removed. */ std::string without_bropath_component(const std::string& path); @@ -341,6 +343,14 @@ std::string without_bropath_component(const std::string& path); std::string find_file(const std::string& filename, const std::string& path_set, const std::string& opt_ext = ""); +/** + * Locate a script file within a given search path. + * @param filename Name of a file to find. + * @param path_set Colon-delimited set of paths to search for the file. + * @return Path to the found file, or an empty string if not found. + */ +std::string find_script_file(const std::string& filename, const std::string& path_set); + // Wrapper around fopen(3). Emits an error when failing to open. FILE* open_file(const std::string& path, const std::string& mode = "r"); @@ -540,4 +550,19 @@ std::string canonify_name(const std::string& name); */ void bro_strerror_r(int bro_errno, char* buf, size_t buflen); +/** + * A wrapper function for getenv(). Helps check for existence of + * legacy environment variable names that map to the latest \a name. + */ +char* zeekenv(const char* name); + +/** + * Small convenience function. Does what std::make_unique does in C++14. Will not + * work on arrays. + */ +template +std::unique_ptr build_unique (Args&&... args) { + return std::unique_ptr(new T(std::forward(args)...)); +} + #endif diff --git a/src/version.c.in b/src/version.c.in index 65df65da00..1b7676bf3a 100644 --- a/src/version.c.in +++ b/src/version.c.in @@ -1,5 +1,5 @@ -#include "bro-config.h" +#include "zeek-config.h" char version[] = "@VERSION@"; diff --git a/src/zeek.bif b/src/zeek.bif new file mode 100644 index 0000000000..3e2eac3740 --- /dev/null +++ b/src/zeek.bif @@ -0,0 +1,5119 @@ +##! A collection of built-in functions that implement a variety of things +##! such as general programming algorithms, string processing, math functions, +##! introspection, type conversion, file/directory manipulation, packet +##! filtering, interprocess communication and controlling protocol analyzer +##! behavior. +##! +##! You'll find most of Zeek's built-in functions that aren't protocol-specific +##! in this file. 
+ +%%{ // C segment +#include +#include +#include +#include +#include +#include +#include + +#include "digest.h" +#include "Reporter.h" +#include "IPAddr.h" +#include "util.h" +#include "file_analysis/Manager.h" +#include "iosource/Manager.h" +#include "iosource/Packet.h" + +using namespace std; + +TableType* var_sizes; + +static iosource::PktDumper* addl_pkt_dumper = 0; + +bro_int_t parse_int(const char*& fmt) + { + bro_int_t k = 0; + while ( isdigit(*fmt) ) + { + k = k * 10 + (*fmt - '0'); + ++fmt; + } + + return k; + } + +static TypeTag ok_d_fmt[] = { + TYPE_BOOL, TYPE_ENUM, TYPE_INT, TYPE_COUNT, TYPE_COUNTER, TYPE_PORT, + TYPE_SUBNET, + TYPE_ERROR +}; +static TypeTag ok_f_fmt[] = { + TYPE_DOUBLE, TYPE_TIME, TYPE_INTERVAL, + TYPE_ERROR +}; + +static int check_fmt_type(TypeTag t, TypeTag ok[]) + { + for ( int i = 0; ok[i] != TYPE_ERROR; ++i ) + if ( ok[i] == t ) + return 1; + + return 0; + } + +static void do_fmt(const char*& fmt, Val* v, ODesc* d) + { + TypeTag t = v->Type()->Tag(); + InternalTypeTag it = v->Type()->InternalType(); + + bool zero_pad = false; + bool left_just = false; + int field_width = -1; + + // Left-align, if requested. + if ( *fmt == '-' ) + { + left_just = true; + ++fmt; + } + + // Parse field width, if given. + if ( isdigit(*fmt) ) + { + // If field width starts with zero, do zero-padding. + if ( *fmt == '0' ) + { + zero_pad = true; + ++fmt; + } + + field_width = parse_int(fmt); + } + + int precision = -1; + if ( *fmt == '.' ) + { + ++fmt; + precision = parse_int(fmt); + } + + if ( field_width > 128 || precision > 128 ) + { + builtin_error("excessive field width or precision"); + return; + } + + // Create the numerical format string. + char num_fmt[64]; + num_fmt[0] = '\0'; + + if ( field_width >= 0 ) + { + // Like sprintf(), ignore '0' if '-' is given. + const char* align = left_just ? "-" : (zero_pad ? "0" : ""); + snprintf(num_fmt, sizeof(num_fmt), "%s%d", align, field_width); + } + + if ( precision >= 0 ) + snprintf(num_fmt + strlen(num_fmt), + sizeof(num_fmt) - strlen(num_fmt), ".%d", precision); + + char fmt_buf[512]; + char out_buf[512]; + + ODesc s; + s.SetStyle(RAW_STYLE); + + if ( precision >= 0 && *fmt != 'e' && *fmt != 'f' && *fmt != 'g' ) + builtin_error("precision specified for non-floating point"); + + switch ( *fmt ) { + case 'D': + case 'T': // ISO Timestamp with microsecond precision. + { + if ( t != TYPE_TIME ) + { + builtin_error("bad type for Date/Time format", v); + break; + } + + time_t time = time_t(v->InternalDouble()); + struct tm t; + + int is_time_fmt = *fmt == 'T'; + + if ( ! localtime_r(&time, &t) ) + s.AddSP(""); + + if ( ! strftime(out_buf, sizeof(out_buf), + is_time_fmt ? + "%Y-%m-%d-%H:%M" : "%Y-%m-%d-%H:%M:%S", + &t) ) + s.AddSP(""); + + else + { + s.Add(out_buf); + + if ( is_time_fmt ) + { + double secs = v->CoerceToUnsigned() % 60; + + secs += v->InternalDouble(); + secs -= v->CoerceToUnsigned(); + + snprintf(out_buf, sizeof(out_buf), + ":%012.9f", secs); + s.Add(out_buf); + } + } + } + break; + + case 'd': + case 'x': + { + if ( *fmt == 'x' && it == TYPE_INTERNAL_ADDR ) + { + // Deficiency: we don't support num_fmt in this case. + // This makes only a very slight difference, so not + // clear it would e worth the hassle. + + snprintf(out_buf, sizeof(out_buf), "%s", + v->AsAddr().AsHexString().c_str()); + } + + else if ( ! 
check_fmt_type(t, ok_d_fmt) ) + { + builtin_error("bad type for %d/%x format", v); + break; + } + + else if ( it == TYPE_INTERNAL_UNSIGNED ) + { + bro_uint_t u = v->CoerceToUnsigned(); + + if ( v->Type()->IsNetworkOrder() ) + { + if ( v->Type()->Tag() == TYPE_PORT ) + u = v->AsPortVal()->Port(); + else + u = ntohl(uint32(u)); + } + + snprintf(fmt_buf, sizeof(fmt_buf), "%%%s%s", num_fmt, + *fmt == 'd' ? "llu" : "llx"); + snprintf(out_buf, sizeof(out_buf), fmt_buf, u); + } + + else + { + snprintf(fmt_buf, sizeof(fmt_buf), "%%%s%s", num_fmt, + *fmt == 'd' ? "lld" : "llx"); + snprintf(out_buf, sizeof(out_buf), fmt_buf, + v->CoerceToInt()); + } + + s.Add(out_buf); + } + break; + + case 's': + v->Describe(&s); + break; + + case 'e': + case 'f': + case 'g': + { + if ( ! check_fmt_type(t, ok_f_fmt) ) + { + builtin_error("bad type for floating-point format", v); + break; + } + + snprintf(fmt_buf, sizeof(fmt_buf), "%%%s%c", num_fmt, *fmt); + snprintf(out_buf, sizeof(out_buf), fmt_buf, v->CoerceToDouble()); + s.Add(out_buf); + } + break; + + default: + builtin_error("bad format"); + } + + // Left-padding with whitespace, if any. + if ( field_width > 0 && ! left_just ) + { + int sl = strlen(s.Description()); + while ( ++sl <= field_width ) + d->Add(" "); + } + + d->AddN((const char*)(s.Bytes()), s.Len()); + + // Right-padding with whitespace, if any. + if ( field_width > 0 && left_just ) + { + int sl = s.Len(); + while ( ++sl <= field_width ) + d->Add(" "); + } + + ++fmt; + } + +static int next_fmt(const char*& fmt, val_list* args, ODesc* d, int& n) + { + const char* fp = fmt; + + // Skip up to next format indicator. + while ( *fp && *fp != '%' ) + ++fp; + + d->AddN(fmt, fp - fmt); + + if ( *fp == '\0' ) + // No more to do. + return 0; + + fmt = fp + 1; + if ( *fmt == '%' ) + { + // "%%" -> '%' + d->Add("%"); + ++fmt; + return next_fmt(fmt, args, d, n); + } + + if ( ++n >= args->length() ) + return 0; + + do_fmt(fmt, (*args)[n], d); + + return *fmt != '\0'; + } +%%} + +# =========================================================================== +# +# Core +# +# =========================================================================== + +## Returns the current wall-clock time. +## +## In general, you should use :zeek:id:`network_time` instead +## unless you are using Zeek for non-networking uses (such as general +## scripting; not particularly recommended), because otherwise your script +## may behave very differently on live traffic versus played-back traffic +## from a save file. +## +## Returns: The wall-clock time. +## +## .. zeek:see:: network_time +function current_time%(%): time + %{ + return new Val(current_time(), TYPE_TIME); + %} + +## Returns the timestamp of the last packet processed. This function returns +## the timestamp of the most recently read packet, whether read from a +## live network interface or from a save file. +## +## Returns: The timestamp of the packet processed. +## +## .. zeek:see:: current_time +function network_time%(%): time + %{ + return new Val(network_time, TYPE_TIME); + %} + +## Returns a system environment variable. +## +## var: The name of the variable whose value to request. +## +## Returns: The system environment variable identified by *var*, or an empty +## string if it is not defined. +## +## .. zeek:see:: setenv +function getenv%(var: string%): string + %{ + const char* env_val = zeekenv(var->CheckString()); + if ( ! env_val ) + env_val = ""; // ### + return new StringVal(env_val); + %} + +## Sets a system environment variable. 
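do_fmt() above implements the conversions behind the fmt BIF: an optional '-' for left justification, a field width (zero-padded when it starts with 0), a '.' precision for the floating-point conversions, and the %D/%T, %d/%x, %s and %e/%f/%g specifiers. A few examples with their expected output:

    print fmt("%08d", 42);              # 00000042  (zero-padded field width)
    print fmt("[%-6s]", "ab");          # [ab    ]  (left-justified)
    print fmt("%.3f", 3.14159);         # 3.142     (precision, floats only)
    print fmt("%x", 255);               # ff
    print fmt("%T", network_time());    # date/time with fractional seconds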
+## +## var: The name of the variable. +## +## val: The (new) value of the variable *var*. +## +## Returns: True on success. +## +## .. zeek:see:: getenv +function setenv%(var: string, val: string%): bool + %{ + int result = setenv(var->AsString()->CheckString(), + val->AsString()->CheckString(), 1); + + if ( result < 0 ) + return val_mgr->GetBool(0); + return val_mgr->GetBool(1); + %} + +## Shuts down the Zeek process immediately. +## +## code: The exit code to return with. +## +## .. zeek:see:: terminate +function exit%(code: int%): any + %{ + exit(code); + return 0; + %} + +## Gracefully shut down Zeek by terminating outstanding processing. +## +## Returns: True after successful termination and false when Zeek is still in +## the process of shutting down. +## +## .. zeek:see:: exit zeek_is_terminating +function terminate%(%): bool + %{ + if ( terminating ) + return val_mgr->GetBool(0); + + terminate_processing(); + return val_mgr->GetBool(1); + %} + +%%{ +// Turns the table into environment variables (if 'set' is true) or removes +// all environment variables previously generated from this table (if 'set' +// is false). +static bool prepare_environment(TableVal* tbl, bool set) + { + ListVal* idxs = tbl->ConvertToPureList(); + + for ( int i = 0; i < idxs->Length(); ++i ) + { + Val* key = idxs->Index(i); + Val* val = tbl->Lookup(key, false); + + if ( key->Type()->Tag() != TYPE_STRING || + val->Type()->Tag() != TYPE_STRING ) + { + builtin_error("system_env() needs a table[string] of string"); + return false; + } + + char* tmp = copy_string(key->AsString()->CheckString()); + to_upper(tmp); + std::string var1 = fmt("ZEEK_ARG_%s", tmp); + std::string var2 = fmt("BRO_ARG_%s", tmp); // legacy support + delete [] tmp; + + if ( set ) + { + setenv(var1.data(), val->AsString()->CheckString(), 1); + setenv(var2.data(), val->AsString()->CheckString(), 1); + } + else + { + unsetenv(var1.data()); + unsetenv(var2.data()); + } + } + + return true; + } + +static int do_system(const char* s) + { + const char* system_fmt = "(%s) 1>&2 &"; // output to stderr + char* cmd = new char[strlen(system_fmt) + strlen(s) + 1]; + + sprintf(cmd, system_fmt, s); + int status = system(cmd); + delete [] cmd; + + return status; + } +%%} + +## Invokes a command via the ``system`` function of the OS. +## The command runs in the background with ``stdout`` redirecting to +## ``stderr``. Here is a usage example: +## ``system(fmt("rm %s", safe_shell_quote(sniffed_data)));`` +## +## str: The command to execute. +## +## Returns: The return value from the OS ``system`` function. +## +## .. zeek:see:: system_env safe_shell_quote piped_exec +## +## .. note:: +## +## Note that this corresponds to the status of backgrounding the +## given command, not to the exit status of the command itself. A +## value of 127 corresponds to a failure to execute ``sh``, and -1 +## to an internal system failure. +function system%(str: string%): int + %{ + int result = do_system(str->CheckString()); + return val_mgr->GetInt(result); + %} + +## Invokes a command via the ``system`` function of the OS with a prepared +## environment. The function is essentially the same as :zeek:id:`system`, +## but changes the environment before invoking the command. +## +## str: The command to execute. +## +## env: A :zeek:type:`table` with the environment variables in the form +## of key-value pairs. Each specified environment variable name +## will be automatically prepended with ``ZEEK_ARG_``. +## +## Returns: The return value from the OS ``system`` function. 
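prepare_environment() above exports each table key twice, as ZEEK_ARG_<KEY> and, for compatibility, BRO_ARG_<KEY>, before the command runs. A sketch (the command and file name are made up):

    function record_host(host: addr)
        {
        local env = table(["host"] = fmt("%s", host));
        # The child process sees ZEEK_ARG_HOST (and legacy BRO_ARG_HOST).
        system_env("echo \"$ZEEK_ARG_HOST\" >> hosts.seen", env);
        }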
+## +## .. zeek:see:: system safe_shell_quote piped_exec +function system_env%(str: string, env: table_string_of_string%): int + %{ + if ( env->Type()->Tag() != TYPE_TABLE ) + { + builtin_error("system_env() requires a table argument"); + return val_mgr->GetInt(-1); + } + + if ( ! prepare_environment(env->AsTableVal(), true) ) + return val_mgr->GetInt(-1); + + int result = do_system(str->CheckString()); + + prepare_environment(env->AsTableVal(), false); + + return val_mgr->GetInt(result); + %} + +## Opens a program with ``popen`` and writes a given string to the returned +## stream to send it to the opened process's stdin. +## +## program: The program to execute. +## +## to_write: Data to pipe to the opened program's process via ``stdin``. +## +## Returns: True on success. +## +## .. zeek:see:: system system_env +function piped_exec%(program: string, to_write: string%): bool + %{ + const char* prog = program->CheckString(); + + FILE* f = popen(prog, "w"); + if ( ! f ) + { + reporter->Error("Failed to popen %s", prog); + return val_mgr->GetBool(0); + } + + const u_char* input_data = to_write->Bytes(); + int input_data_len = to_write->Len(); + + int bytes_written = fwrite(input_data, 1, input_data_len, f); + + pclose(f); + + if ( bytes_written != input_data_len ) + { + reporter->Error("Failed to write all given data to %s", prog); + return val_mgr->GetBool(0); + } + + return val_mgr->GetBool(1); + %} + +%%{ +#include "OpaqueVal.h" +%%} + +## Computes the MD5 hash value of the provided list of arguments. +## +## Returns: The MD5 hash value of the concatenated arguments. +## +## .. zeek:see:: md5_hmac md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish +## +## .. note:: +## +## This function performs a one-shot computation of its arguments. +## For incremental hash computation, see :zeek:id:`md5_hash_init` and +## friends. +function md5_hash%(...%): string + %{ + unsigned char digest[MD5_DIGEST_LENGTH]; + MD5Val::digest(@ARG@, digest); + return new StringVal(md5_digest_print(digest)); + %} + +## Computes the SHA1 hash value of the provided list of arguments. +## +## Returns: The SHA1 hash value of the concatenated arguments. +## +## .. zeek:see:: md5_hash md5_hmac md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish +## +## .. note:: +## +## This function performs a one-shot computation of its arguments. +## For incremental hash computation, see :zeek:id:`sha1_hash_init` and +## friends. +function sha1_hash%(...%): string + %{ + unsigned char digest[SHA_DIGEST_LENGTH]; + SHA1Val::digest(@ARG@, digest); + return new StringVal(sha1_digest_print(digest)); + %} + +## Computes the SHA256 hash value of the provided list of arguments. +## +## Returns: The SHA256 hash value of the concatenated arguments. +## +## .. zeek:see:: md5_hash md5_hmac md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash_init sha256_hash_update sha256_hash_finish +## +## .. note:: +## +## This function performs a one-shot computation of its arguments. +## For incremental hash computation, see :zeek:id:`sha256_hash_init` and +## friends. 
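
A minimal sketch of the one-shot hash functions in this group; the literal inputs are arbitrary examples.

    event zeek_init()
        {
        # Variadic: all arguments are concatenated before hashing, so the
        # first two calls produce the same digest.
        print md5_hash("zeek", "rocks");
        print md5_hash("zeekrocks");

        print sha1_hash("zeek");
        print sha256_hash("zeek");
        print md5_hmac("zeek");   # keyed with the per-run HMAC key (or -K)
        }
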
+function sha256_hash%(...%): string + %{ + unsigned char digest[SHA256_DIGEST_LENGTH]; + SHA256Val::digest(@ARG@, digest); + return new StringVal(sha256_digest_print(digest)); + %} + +## Computes an HMAC-MD5 hash value of the provided list of arguments. The HMAC +## secret key is generated from available entropy when Zeek starts up, or it can +## be specified for repeatability using the ``-K`` command line flag. +## +## Returns: The HMAC-MD5 hash value of the concatenated arguments. +## +## .. zeek:see:: md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish +function md5_hmac%(...%): string + %{ + unsigned char hmac[MD5_DIGEST_LENGTH]; + MD5Val::hmac(@ARG@, shared_hmac_md5_key, hmac); + return new StringVal(md5_digest_print(hmac)); + %} + +## Constructs an MD5 handle to enable incremental hash computation. You can +## feed data to the returned opaque value with :zeek:id:`md5_hash_update` and +## eventually need to call :zeek:id:`md5_hash_finish` to finish the computation +## and get the hash digest. +## +## For example, when computing incremental MD5 values of transferred files in +## multiple concurrent HTTP connections, one keeps an optional handle in the +## HTTP session record. Then, one would call +## ``c$http$md5_handle = md5_hash_init()`` once before invoking +## ``md5_hash_update(c$http$md5_handle, some_more_data)`` in the +## :zeek:id:`http_entity_data` event handler. When all data has arrived, a call +## to :zeek:id:`md5_hash_finish` returns the final hash value. +## +## Returns: The opaque handle associated with this hash computation. +## +## .. zeek:see:: md5_hmac md5_hash md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish +function md5_hash_init%(%): opaque of md5 + %{ + HashVal* digest = new MD5Val(); + digest->Init(); + return digest; + %} + +## Constructs an SHA1 handle to enable incremental hash computation. You can +## feed data to the returned opaque value with :zeek:id:`sha1_hash_update` and +## finally need to call :zeek:id:`sha1_hash_finish` to finish the computation +## and get the hash digest. +## +## For example, when computing incremental SHA1 values of transferred files in +## multiple concurrent HTTP connections, one keeps an optional handle in the +## HTTP session record. Then, one would call +## ``c$http$sha1_handle = sha1_hash_init()`` once before invoking +## ``sha1_hash_update(c$http$sha1_handle, some_more_data)`` in the +## :zeek:id:`http_entity_data` event handler. When all data has arrived, a call +## to :zeek:id:`sha1_hash_finish` returns the final hash value. +## +## Returns: The opaque handle associated with this hash computation. +## +## .. zeek:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish +function sha1_hash_init%(%): opaque of sha1 + %{ + HashVal* digest = new SHA1Val(); + digest->Init(); + return digest; + %} + +## Constructs an SHA256 handle to enable incremental hash computation. You can +## feed data to the returned opaque value with :zeek:id:`sha256_hash_update` and +## finally need to call :zeek:id:`sha256_hash_finish` to finish the computation +## and get the hash digest. 
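
The incremental interface sketched below mirrors the HTTP example in the documentation above, but in a self-contained form: initialize a handle, feed it chunks, then finish. The chunk contents are placeholders.

    event zeek_init()
        {
        local h = md5_hash_init();
        md5_hash_update(h, "first chunk");
        md5_hash_update(h, "second chunk");

        # Same digest as the one-shot md5_hash("first chunk", "second chunk").
        print md5_hash_finish(h);
        }
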
+## +## For example, when computing incremental SHA256 values of transferred files in +## multiple concurrent HTTP connections, one keeps an optional handle in the +## HTTP session record. Then, one would call +## ``c$http$sha256_handle = sha256_hash_init()`` once before invoking +## ``sha256_hash_update(c$http$sha256_handle, some_more_data)`` in the +## :zeek:id:`http_entity_data` event handler. When all data has arrived, a call +## to :zeek:id:`sha256_hash_finish` returns the final hash value. +## +## Returns: The opaque handle associated with this hash computation. +## +## .. zeek:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_update sha256_hash_finish +function sha256_hash_init%(%): opaque of sha256 + %{ + HashVal* digest = new SHA256Val(); + digest->Init(); + return digest; + %} + +## Updates the MD5 value associated with a given index. It is required to +## call :zeek:id:`md5_hash_init` once before calling this +## function. +## +## handle: The opaque handle associated with this hash computation. +## +## data: The data to add to the hash computation. +## +## Returns: True on success. +## +## .. zeek:see:: md5_hmac md5_hash md5_hash_init md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish +function md5_hash_update%(handle: opaque of md5, data: string%): bool + %{ + bool rc = static_cast(handle)->Feed(data->Bytes(), data->Len()); + return val_mgr->GetBool(rc); + %} + +## Updates the SHA1 value associated with a given index. It is required to +## call :zeek:id:`sha1_hash_init` once before calling this +## function. +## +## handle: The opaque handle associated with this hash computation. +## +## data: The data to add to the hash computation. +## +## Returns: True on success. +## +## .. zeek:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish +function sha1_hash_update%(handle: opaque of sha1, data: string%): bool + %{ + bool rc = static_cast(handle)->Feed(data->Bytes(), data->Len()); + return val_mgr->GetBool(rc); + %} + +## Updates the SHA256 value associated with a given index. It is required to +## call :zeek:id:`sha256_hash_init` once before calling this +## function. +## +## handle: The opaque handle associated with this hash computation. +## +## data: The data to add to the hash computation. +## +## Returns: True on success. +## +## .. zeek:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_finish +function sha256_hash_update%(handle: opaque of sha256, data: string%): bool + %{ + bool rc = static_cast(handle)->Feed(data->Bytes(), data->Len()); + return val_mgr->GetBool(rc); + %} + +## Returns the final MD5 digest of an incremental hash computation. +## +## handle: The opaque handle associated with this hash computation. +## +## Returns: The hash value associated with the computation of *handle*. +## +## .. 
zeek:see:: md5_hmac md5_hash md5_hash_init md5_hash_update +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish +function md5_hash_finish%(handle: opaque of md5%): string + %{ + return static_cast(handle)->Get(); + %} + +## Returns the final SHA1 digest of an incremental hash computation. +## +## handle: The opaque handle associated with this hash computation. +## +## Returns: The hash value associated with the computation of *handle*. +## +## .. zeek:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish +function sha1_hash_finish%(handle: opaque of sha1%): string + %{ + return static_cast(handle)->Get(); + %} + +## Returns the final SHA256 digest of an incremental hash computation. +## +## handle: The opaque handle associated with this hash computation. +## +## Returns: The hash value associated with the computation of *handle*. +## +## .. zeek:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update +function sha256_hash_finish%(handle: opaque of sha256%): string + %{ + return static_cast(handle)->Get(); + %} + +## Initializes and returns a new paraglob. +## +## v: Vector of patterns to initialize the paraglob with. +## +## Returns: A new, compiled, paraglob with the patterns in *v* +## +## .. zeek:see::paraglob_match paraglob_equals paraglob_add +function paraglob_init%(v: any%) : opaque of paraglob + %{ + if ( v->Type()->Tag() != TYPE_VECTOR || + v->Type()->YieldType()->Tag() != TYPE_STRING ) + { + // reporter->Error will throw an exception. + reporter->Error("paraglob requires a vector of strings for initialization."); + return nullptr; + } + + std::vector patterns; + VectorVal* vv = v->AsVectorVal(); + for ( unsigned int i = 0; i < vv->Size(); ++i ) + { + const BroString* s = vv->Lookup(i)->AsString(); + patterns.push_back(std::string(reinterpret_cast(s->Bytes()), s->Len())); + } + + try + { + std::unique_ptr p (new paraglob::Paraglob(patterns)); + return new ParaglobVal(std::move(p)); + } + // Thrown if paraglob fails to add a pattern. + catch (const paraglob::add_error& e) + { + reporter->Error("Paraglob failed to add pattern: %s", e.what()); + return nullptr; + } + %} + +## Gets all the patterns inside the handle associated with an input string. +## +## handle: A compiled paraglob. +## +## match: string to match against the paraglob. +## +## Returns: A vector of strings matching the input string. +## +## ## .. zeek:see::paraglob_add paraglob_equals paraglob_init +function paraglob_match%(handle: opaque of paraglob, match: string%): string_vec + %{ + return static_cast(handle)->Get(match); + %} + +## Compares two paraglobs for equality. +## +## p_one: A compiled paraglob. +## +## p_two: A compiled paraglob. +## +## Returns: True if both paraglobs contain the same patterns, false otherwise. +## +## ## .. zeek:see::paraglob_add paraglob_match paraglob_init +function paraglob_equals%(p_one: opaque of paraglob, p_two: opaque of paraglob%) : bool + %{ + return val_mgr->GetBool( + *(static_cast(p_one)) == *(static_cast(p_two)) + ); + %} + +## Returns 32-bit digest of arbitrary input values using FNV-1a hash algorithm. +## See ``_. +## +## input: The desired input value to hash. +## +## Returns: The hashed value. +## +## .. 
zeek:see:: hrw_weight +function fnv1a32%(input: any%): count + %{ + ODesc desc(DESC_BINARY); + input->Describe(&desc); + auto bytes = desc.Bytes(); + + uint32 offset32 = 2166136261; + uint32 prime32 = 16777619; + uint32 rval = offset32; + + for ( auto i = 0; i < desc.Len(); ++i ) + { + rval ^= (uint32) bytes[i]; + rval *= prime32; + } + + return val_mgr->GetCount(rval); + %} + +## Calculates a weight value for use in a Rendezvous Hashing algorithm. +## See ``_. +## The weight function used is the one recommended in the original +## paper: ``_. +## +## key_digest: A 32-bit digest of a key. E.g. use :zeek:see:`fnv1a32` to +## produce this. +## +## site_id: A 32-bit site/node identifier. +## +## Returns: The weight value for the key/site pair. +## +## .. zeek:see:: fnv1a32 +function hrw_weight%(key_digest: count, site_id: count%): count + %{ + uint32 d = key_digest; + d &= 0x7fffffff; // 31-bit digest + int32 si = site_id; + auto a = 1103515245; + auto b = 12345; + auto m = 2147483648; // 2**31 + + int32 rval = (a * ((a * si + b) ^ d) + b) % m; + + if ( rval < 0 ) + rval += m; // [0, 2**31 - 1] + + return val_mgr->GetCount((uint64) rval); + %} + +## Generates a random number. +## +## max: The maximum value of the random number. +## +## Returns: a random positive integer in the interval *[0, max)*. +## +## .. zeek:see:: srand +## +## .. note:: +## +## This function is a wrapper about the function ``random`` +## provided by the OS. +function rand%(max: count%): count + %{ + auto result = bro_uint_t(double(max) * double(bro_random()) / (RAND_MAX + 1.0)); + return val_mgr->GetCount(result); + %} + +## Sets the seed for subsequent :zeek:id:`rand` calls. +## +## seed: The seed for the PRNG. +## +## .. zeek:see:: rand +## +## .. note:: +## +## This function is a wrapper about the function ``srandom`` +## provided by the OS. +function srand%(seed: count%): any + %{ + bro_srandom(seed); + return 0; + %} + +%%{ +#include +%%} + +## Send a string to syslog. +## +## s: The string to log via syslog +function syslog%(s: string%): any + %{ + reporter->Syslog("%s", s->CheckString()); + return 0; + %} + +## Determines the MIME type of a piece of data using Zeek's file magic +## signatures. +## +## data: The data to find the MIME type for. +## +## return_mime: Deprecated argument; does nothing, except emit a warning +## when false. +## +## Returns: The MIME type of *data*, or "" if there was an error +## or no match. This is the strongest signature match. +## +## .. zeek:see:: file_magic +function identify_data%(data: string, return_mime: bool &default=T%): string + %{ + if ( ! return_mime ) + reporter->Warning("identify_data() builtin-function only returns MIME types, but verbose file info requested"); + + string strongest_match = file_mgr->DetectMIME(data->Bytes(), data->Len()); + + if ( strongest_match.empty() ) + return new StringVal(""); + + return new StringVal(strongest_match); + %} + +## Determines the MIME type of a piece of data using Zeek's file magic +## signatures. +## +## data: The data for which to find matching MIME types. +## +## Returns: All matching signatures, in order of strength. +## +## .. zeek:see:: identify_data +function file_magic%(data: string%): mime_matches + %{ + RuleMatcher::MIME_Matches matches; + file_mgr->DetectMIME(data->Bytes(), data->Len(), &matches); + return file_analysis::GenMIMEMatchesVal(matches); + %} + +## Performs an entropy test on the given data. +## See http://www.fourmilab.ch/random. +## +## data: The data to compute the entropy for. 
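
Two short usage sketches for this area: one for the paraglob matcher documented just above, and one combining `fnv1a32` with `hrw_weight` into the usual rendezvous-hashing selection loop. The pattern strings and site IDs are invented for illustration.

    event zeek_init()
        {
        # Compile a pattern set once, then query strings against it.
        local p = paraglob_init(vector("*.zeek.org", "www.*", "*download*"));
        print paraglob_match(p, "www.zeek.org");   # patterns that matched

        # Rendezvous (HRW) hashing: digest the key once, then pick the site
        # with the highest weight for that key.
        local key = fnv1a32("10.0.0.1");
        local sites = vector(1, 2, 3);
        local best = sites[0];

        for ( i in sites )
            if ( hrw_weight(key, sites[i]) > hrw_weight(key, best) )
                best = sites[i];

        print fmt("key maps to site %d", best);
        }
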
+## +## Returns: The result of the entropy test, which contains the following +## fields. +## +## - ``entropy``: The information density expressed as a number of +## bits per character. +## +## - ``chi_square``: The chi-square test value expressed as an +## absolute number and a percentage which indicates how +## frequently a truly random sequence would exceed the value +## calculated, i.e., the degree to which the sequence tested is +## suspected of being non-random. +## +## If the percentage is greater than 99% or less than 1%, the +## sequence is almost certainly not random. If the percentage is +## between 99% and 95% or between 1% and 5%, the sequence is +## suspect. Percentages between 90\% and 95\% and 5\% and 10\% +## indicate the sequence is "almost suspect." +## +## - ``mean``: The arithmetic mean of all the bytes. If the data +## are close to random, it should be around 127.5. +## +## - ``monte_carlo_pi``: Each successive sequence of six bytes is +## used as 24-bit *x* and *y* coordinates within a square. If +## the distance of the randomly-generated point is less than the +## radius of a circle inscribed within the square, the six-byte +## sequence is considered a "hit." The percentage of hits can +## be used to calculate the value of pi. For very large streams +## the value will approach the correct value of pi if the +## sequence is close to random. +## +## - ``serial_correlation``: This quantity measures the extent to +## which each byte in the file depends upon the previous byte. +## For random sequences this value will be close to zero. +## +## .. zeek:see:: entropy_test_init entropy_test_add entropy_test_finish +function find_entropy%(data: string%): entropy_test_result + %{ + double montepi, scc, ent, mean, chisq; + montepi = scc = ent = mean = chisq = 0.0; + EntropyVal e; + e.Feed(data->Bytes(), data->Len()); + e.Get(&ent, &chisq, &mean, &montepi, &scc); + + RecordVal* ent_result = new RecordVal(entropy_test_result); + ent_result->Assign(0, new Val(ent, TYPE_DOUBLE)); + ent_result->Assign(1, new Val(chisq, TYPE_DOUBLE)); + ent_result->Assign(2, new Val(mean, TYPE_DOUBLE)); + ent_result->Assign(3, new Val(montepi, TYPE_DOUBLE)); + ent_result->Assign(4, new Val(scc, TYPE_DOUBLE)); + return ent_result; + %} + +## Initializes data structures for incremental entropy calculation. +## +## Returns: An opaque handle to be used in subsequent operations. +## +## .. zeek:see:: find_entropy entropy_test_add entropy_test_finish +function entropy_test_init%(%): opaque of entropy + %{ + return new EntropyVal(); + %} + +## Adds data to an incremental entropy calculation. +## +## handle: The opaque handle representing the entropy calculation state. +## +## data: The data to add to the entropy calculation. +## +## Returns: True on success. +## +## .. zeek:see:: find_entropy entropy_test_add entropy_test_finish +function entropy_test_add%(handle: opaque of entropy, data: string%): bool + %{ + bool status = static_cast(handle)->Feed(data->Bytes(), + data->Len()); + return val_mgr->GetBool(status); + %} + +## Finishes an incremental entropy calculation. Before using this function, +## one needs to obtain an opaque handle with :zeek:id:`entropy_test_init` and +## add data to it via :zeek:id:`entropy_test_add`. +## +## handle: The opaque handle representing the entropy calculation state. +## +## Returns: The result of the entropy test. See :zeek:id:`find_entropy` for a +## description of the individual components. +## +## .. 
zeek:see:: find_entropy entropy_test_init entropy_test_add +function entropy_test_finish%(handle: opaque of entropy%): entropy_test_result + %{ + double montepi, scc, ent, mean, chisq; + montepi = scc = ent = mean = chisq = 0.0; + static_cast(handle)->Get(&ent, &chisq, &mean, &montepi, &scc); + + RecordVal* ent_result = new RecordVal(entropy_test_result); + ent_result->Assign(0, new Val(ent, TYPE_DOUBLE)); + ent_result->Assign(1, new Val(chisq, TYPE_DOUBLE)); + ent_result->Assign(2, new Val(mean, TYPE_DOUBLE)); + ent_result->Assign(3, new Val(montepi, TYPE_DOUBLE)); + ent_result->Assign(4, new Val(scc, TYPE_DOUBLE)); + return ent_result; + %} + +## Creates an identifier that is unique with high probability. +## +## prefix: A custom string prepended to the result. +## +## Returns: A string identifier that is unique. +## +## .. zeek:see:: unique_id_from +function unique_id%(prefix: string%) : string + %{ + char tmp[20]; + uint64 uid = calculate_unique_id(UID_POOL_DEFAULT_SCRIPT); + return new StringVal(uitoa_n(uid, tmp, sizeof(tmp), 62, prefix->CheckString())); + %} + +## Creates an identifier that is unique with high probability. +## +## pool: A seed for determinism. +## +## prefix: A custom string prepended to the result. +## +## Returns: A string identifier that is unique. +## +## .. zeek:see:: unique_id +function unique_id_from%(pool: int, prefix: string%) : string + %{ + pool += UID_POOL_CUSTOM_SCRIPT; // Make sure we don't conflict with internal pool. + + char tmp[20]; + uint64 uid = calculate_unique_id(pool); + return new StringVal(uitoa_n(uid, tmp, sizeof(tmp), 62, prefix->CheckString())); + %} + +# =========================================================================== +# +# Generic Programming +# +# =========================================================================== + +## Removes all elements from a set or table. +## +## v: The set or table +function clear_table%(v: any%): any + %{ + if ( v->Type()->Tag() == TYPE_TABLE ) + v->AsTableVal()->RemoveAll(); + else + builtin_error("clear_table() requires a table/set argument"); + + return 0; + %} + +## Gets all subnets that contain a given subnet from a set/table[subnet]. +## +## search: the subnet to search for. +## +## t: the set[subnet] or table[subnet]. +## +## Returns: All the keys of the set or table that cover the subnet searched for. +function matching_subnets%(search: subnet, t: any%): subnet_vec + %{ + if ( t->Type()->Tag() != TYPE_TABLE || ! t->Type()->AsTableType()->IsSubNetIndex() ) + { + reporter->Error("matching_subnets needs to be called on a set[subnet]/table[subnet]."); + return nullptr; + } + + return t->AsTableVal()->LookupSubnets(search); + %} + +## For a set[subnet]/table[subnet], create a new table that contains all entries +## that contain a given subnet. +## +## search: the subnet to search for. +## +## t: the set[subnet] or table[subnet]. +## +## Returns: A new table that contains all the entries that cover the subnet searched for. +function filter_subnet_table%(search: subnet, t: any%): any + %{ + if ( t->Type()->Tag() != TYPE_TABLE || ! t->Type()->AsTableType()->IsSubNetIndex() ) + { + reporter->Error("filter_subnet_table needs to be called on a set[subnet]/table[subnet]."); + return nullptr; + } + + return t->AsTableVal()->LookupSubnetValues(search); + %} + +## Checks if a specific subnet is a member of a set/table[subnet]. +## In contrast to the ``in`` operator, this performs an exact match, not +## a longest prefix match. +## +## search: the subnet to search for. 
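
A small sketch of the subnet-table helpers in this group, contrasting exact membership with the containment-style lookups; the example networks are arbitrary.

    event zeek_init()
        {
        local nets = set(10.0.0.0/8, 10.1.0.0/16);

        # All keys that cover the searched subnet:
        print matching_subnets(10.1.2.0/24, nets);

        # check_subnet() is an exact-match test, unlike the `in` operator:
        print check_subnet(10.1.0.0/16, nets);   # T: exact member
        print check_subnet(10.1.2.0/24, nets);   # F: covered, but not a member
        }
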
+## +## t: the set[subnet] or table[subnet]. +## +## Returns: True if the exact subnet is a member, false otherwise. +function check_subnet%(search: subnet, t: any%): bool + %{ + if ( t->Type()->Tag() != TYPE_TABLE || ! t->Type()->AsTableType()->IsSubNetIndex() ) + { + reporter->Error("check_subnet needs to be called on a set[subnet]/table[subnet]."); + return nullptr; + } + + const PrefixTable* pt = t->AsTableVal()->Subnets(); + if ( ! pt ) + { + reporter->Error("check_subnet encountered nonexisting prefix table."); + return nullptr; + } + + void* res = pt->Lookup(search, true); + + return val_mgr->GetBool(res != nullptr); + %} + +## Checks whether two objects reference the same internal object. This function +## uses equality comparison of C++ raw pointer values to determine if the two +## objects are the same. +## +## o1: The first object. +## +## o2: The second object. +## +## Returns: True if *o1* and *o2* are equal. +function same_object%(o1: any, o2: any%): bool + %{ + return val_mgr->GetBool(o1 == o2); + %} + +## Returns the number of bytes that a value occupies in memory. +## +## v: The value +## +## Returns: The number of bytes that *v* occupies. +function val_size%(v: any%): count + %{ + return val_mgr->GetCount(v->MemoryAllocation()); + %} + +## Resizes a vector. +## +## aggr: The vector instance. +## +## newsize: The new size of *aggr*. +## +## Returns: The old size of *aggr*, or 0 if *aggr* is not a :zeek:type:`vector`. +function resize%(aggr: any, newsize: count%) : count + %{ + if ( aggr->Type()->Tag() != TYPE_VECTOR ) + { + builtin_error("resize() operates on vectors"); + return 0; + } + + return val_mgr->GetCount(aggr->AsVectorVal()->Resize(newsize)); + %} + +## Tests whether a boolean vector (``vector of bool``) has *any* true +## element. +## +## v: The boolean vector instance. +## +## Returns: True if any element in *v* is true. +## +## .. zeek:see:: all_set +function any_set%(v: any%) : bool + %{ + if ( v->Type()->Tag() != TYPE_VECTOR || + v->Type()->YieldType()->Tag() != TYPE_BOOL ) + { + builtin_error("any_set() requires vector of bool"); + return val_mgr->GetFalse(); + } + + VectorVal* vv = v->AsVectorVal(); + for ( unsigned int i = 0; i < vv->Size(); ++i ) + if ( vv->Lookup(i) && vv->Lookup(i)->AsBool() ) + return val_mgr->GetTrue(); + + return val_mgr->GetFalse(); + %} + +## Tests whether *all* elements of a boolean vector (``vector of bool``) are +## true. +## +## v: The boolean vector instance. +## +## Returns: True iff all elements in *v* are true or there are no elements. +## +## .. zeek:see:: any_set +## +## .. note:: +## +## Missing elements count as false. +function all_set%(v: any%) : bool + %{ + if ( v->Type()->Tag() != TYPE_VECTOR || + v->Type()->YieldType()->Tag() != TYPE_BOOL ) + { + builtin_error("all_set() requires vector of bool"); + return val_mgr->GetFalse(); + } + + VectorVal* vv = v->AsVectorVal(); + for ( unsigned int i = 0; i < vv->Size(); ++i ) + if ( ! vv->Lookup(i) || ! vv->Lookup(i)->AsBool() ) + return val_mgr->GetFalse(); + + return val_mgr->GetTrue(); + %} + +%%{ +static Func* sort_function_comp = 0; +static Val** index_map = 0; // used for indirect sorting to support order() + +bool sort_function(Val* a, Val* b) + { + // Sort missing values as "high". + if ( ! a ) + return 0; + if ( ! 
b ) + return 1; + + val_list sort_func_args; + sort_func_args.append(a->Ref()); + sort_func_args.append(b->Ref()); + + Val* result = sort_function_comp->Call(&sort_func_args); + int int_result = result->CoerceToInt(); + Unref(result); + + return int_result < 0; + } + +bool indirect_sort_function(size_t a, size_t b) + { + return sort_function(index_map[a], index_map[b]); + } + +bool signed_sort_function (Val* a, Val* b) + { + if ( ! a ) + return 0; + if ( ! b ) + return 1; + + auto ia = a->CoerceToInt(); + auto ib = b->CoerceToInt(); + + return ia < ib; + } + +bool unsigned_sort_function (Val* a, Val* b) + { + if ( ! a ) + return 0; + if ( ! b ) + return 1; + + auto ia = a->CoerceToUnsigned(); + auto ib = b->CoerceToUnsigned(); + + return ia < ib; + } + +bool indirect_signed_sort_function(size_t a, size_t b) + { + return signed_sort_function(index_map[a], index_map[b]); + } + +bool indirect_unsigned_sort_function(size_t a, size_t b) + { + return unsigned_sort_function(index_map[a], index_map[b]); + } +%%} + +## Sorts a vector in place. The second argument is a comparison function that +## takes two arguments: if the vector type is ``vector of T``, then the +## comparison function must be ``function(a: T, b: T): int``, which returns +## a value less than zero if ``a < b`` for some type-specific notion of the +## less-than operator. The comparison function is optional if the type +## is an integral type (int, count, etc.). +## +## v: The vector instance to sort. +## +## Returns: The vector, sorted from minimum to maximum value. If the vector +## could not be sorted, then the original vector is returned instead. +## +## .. zeek:see:: order +function sort%(v: any, ...%) : any + %{ + v->Ref(); // we always return v + + if ( v->Type()->Tag() != TYPE_VECTOR ) + { + builtin_error("sort() requires vector"); + return v; + } + + BroType* elt_type = v->Type()->YieldType(); + Func* comp = 0; + + if ( @ARG@.length() > 2 ) + builtin_error("sort() called with extraneous argument"); + + if ( @ARG@.length() == 2 ) + { + Val* comp_val = @ARG@[1]; + if ( ! IsFunc(comp_val->Type()->Tag()) ) + { + builtin_error("second argument to sort() needs to be comparison function"); + return v; + } + + comp = comp_val->AsFunc(); + } + + if ( ! comp && ! IsIntegral(elt_type->Tag()) ) + builtin_error("comparison function required for sort() with non-integral types"); + + vector& vv = *v->AsVector(); + + if ( comp ) + { + FuncType* comp_type = comp->FType()->AsFuncType(); + if ( comp_type->YieldType()->Tag() != TYPE_INT || + ! comp_type->ArgTypes()->AllMatch(elt_type, 0) ) + { + builtin_error("invalid comparison function in call to sort()"); + return v; + } + + sort_function_comp = comp; + + sort(vv.begin(), vv.end(), sort_function); + } + else + { + if ( elt_type->InternalType() == TYPE_INTERNAL_UNSIGNED ) + sort(vv.begin(), vv.end(), unsigned_sort_function); + else + sort(vv.begin(), vv.end(), signed_sort_function); + } + + return v; + %} + +## Returns the order of the elements in a vector according to some +## comparison function. See :zeek:id:`sort` for details about the comparison +## function. +## +## v: The vector whose order to compute. +## +## Returns: A ``vector of count`` with the indices of the ordered elements. +## For example, the elements of *v* in order are (assuming ``o`` +## is the vector returned by ``order``): v[o[0]], v[o[1]], etc. +## +## .. 
zeek:see:: sort +function order%(v: any, ...%) : index_vec + %{ + VectorVal* result_v = new VectorVal( + internal_type("index_vec")->AsVectorType()); + + if ( v->Type()->Tag() != TYPE_VECTOR ) + { + builtin_error("order() requires vector"); + return result_v; + } + + BroType* elt_type = v->Type()->YieldType(); + Func* comp = 0; + + if ( @ARG@.length() > 2 ) + builtin_error("order() called with extraneous argument"); + + if ( @ARG@.length() == 2 ) + { + Val* comp_val = @ARG@[1]; + if ( ! IsFunc(comp_val->Type()->Tag()) ) + { + builtin_error("second argument to order() needs to be comparison function"); + return v; + } + + comp = comp_val->AsFunc(); + } + + if ( ! comp && ! IsIntegral(elt_type->Tag()) ) + builtin_error("comparison function required for order() with non-integral types"); + + vector& vv = *v->AsVector(); + auto n = vv.size(); + + // Set up initial mapping of indices directly to corresponding + // elements. + vector ind_vv(n); + index_map = new Val*[n]; + size_t i; + for ( i = 0; i < n; ++i ) + { + ind_vv[i] = i; + index_map[i] = vv[i]; + } + + if ( comp ) + { + FuncType* comp_type = comp->FType()->AsFuncType(); + if ( comp_type->YieldType()->Tag() != TYPE_INT || + ! comp_type->ArgTypes()->AllMatch(elt_type, 0) ) + { + builtin_error("invalid comparison function in call to order()"); + return v; + } + + sort_function_comp = comp; + + sort(ind_vv.begin(), ind_vv.end(), indirect_sort_function); + } + else + { + if ( elt_type->InternalType() == TYPE_INTERNAL_UNSIGNED ) + sort(ind_vv.begin(), ind_vv.end(), indirect_unsigned_sort_function); + else + sort(ind_vv.begin(), ind_vv.end(), indirect_signed_sort_function); + } + + delete [] index_map; + index_map = 0; + + // Now spin through ind_vv to read out the rearrangement. + for ( i = 0; i < n; ++i ) + { + int ind = ind_vv[i]; + result_v->Assign(i, val_mgr->GetCount(ind)); + } + + return result_v; + %} + +# =========================================================================== +# +# String Processing +# +# =========================================================================== + +## Returns the concatenation of the string representation of its arguments. The +## arguments can be of any type. For example, ``cat("foo", 3, T)`` returns +## ``"foo3T"``. +## +## Returns: A string concatentation of all arguments. +function cat%(...%): string + %{ + ODesc d; + d.SetStyle(RAW_STYLE); + + loop_over_list(@ARG@, i) + @ARG@[i]->Describe(&d); + + BroString* s = new BroString(1, d.TakeBytes(), d.Len()); + s->SetUseFreeToDelete(true); + + return new StringVal(s); + %} + +## Concatenates all arguments, with a separator placed between each one. This +## function is similar to :zeek:id:`cat`, but places a separator between each +## given argument. If any of the variable arguments is an empty string it is +## replaced by a given default string instead. +## +## sep: The separator to place between each argument. +## +## def: The default string to use when an argument is the empty string. +## +## Returns: A concatenation of all arguments with *sep* between each one and +## empty strings replaced with *def*. +## +## .. zeek:see:: cat string_cat +function cat_sep%(sep: string, def: string, ...%): string + %{ + ODesc d; + d.SetStyle(RAW_STYLE); + + int pre_size = 0; + + loop_over_list(@ARG@, i) + { + // Skip named parameters. + if ( i < 2 ) + continue; + + if ( i > 2 ) + d.Add(sep->CheckString(), 0); + + Val* v = @ARG@[i]; + if ( v->Type()->Tag() == TYPE_STRING && ! 
v->AsString()->Len() ) + v = def; + + v->Describe(&d); + } + + BroString* s = new BroString(1, d.TakeBytes(), d.Len()); + s->SetUseFreeToDelete(true); + + return new StringVal(s); + %} + +## Produces a formatted string à la ``printf``. The first argument is the +## *format string* and specifies how subsequent arguments are converted for +## output. It is composed of zero or more directives: ordinary characters (not +## ``%``), which are copied unchanged to the output, and conversion +## specifications, each of which fetches zero or more subsequent arguments. +## Conversion specifications begin with ``%`` and the arguments must properly +## correspond to the specifier. After the ``%``, the following characters +## may appear in sequence: +## +## - ``%``: Literal ``%`` +## +## - ``-``: Left-align field +## +## - ``[0-9]+``: The field width (< 128) +## +## - ``.``: Precision of floating point specifiers ``[efg]`` (< 128) +## +## - ``[DTdxsefg]``: Format specifier +## +## - ``[DT]``: ISO timestamp with microsecond precision +## +## - ``d``: Signed/Unsigned integer (using C-style ``%lld``/``%llu`` +## for ``int``/``count``) +## +## - ``x``: Unsigned hexadecimal (using C-style ``%llx``); +## addresses/ports are converted to host-byte order +## +## - ``s``: String (byte values less than 32 or greater than 126 +## will be escaped) +## +## - ``[efg]``: Double +## +## Returns: Returns the formatted string. Given no arguments, :zeek:id:`fmt` +## returns an empty string. Given no format string or the wrong +## number of additional arguments for the given format specifier, +## :zeek:id:`fmt` generates a run-time error. +## +## .. zeek:see:: cat cat_sep string_cat +function fmt%(...%): string + %{ + if ( @ARGC@ == 0 ) + return val_mgr->GetEmptyString(); + + Val* fmt_v = @ARG@[0]; + + // Type of fmt_v will be string here, check_built_in_call() in Func.cc + // checks that. + + const char* fmt = fmt_v->AsString()->CheckString(); + ODesc d; + d.SetStyle(RAW_STYLE); + + int n = 0; + + while ( next_fmt(fmt, @ARGS@, &d, n) ) + ; + + if ( n < @ARGC@ - 1 ) + { + builtin_error("too many arguments for format", fmt_v); + return val_mgr->GetEmptyString(); + } + + else if ( n >= @ARGC@ ) + { + builtin_error("too few arguments for format", fmt_v); + return val_mgr->GetEmptyString(); + } + + BroString* s = new BroString(1, d.TakeBytes(), d.Len()); + s->SetUseFreeToDelete(true); + + return new StringVal(s); + %} + +# =========================================================================== +# +# Math +# +# =========================================================================== + +## Computes the greatest integer less than the given :zeek:type:`double` value. +## For example, ``floor(3.14)`` returns ``3.0``, and ``floor(-3.14)`` +## returns ``-4.0``. +## +## d: The :zeek:type:`double` to manipulate. +## +## Returns: The next lowest integer of *d* as :zeek:type:`double`. +## +## .. zeek:see:: sqrt exp ln log10 +function floor%(d: double%): double + %{ + return new Val(floor(d), TYPE_DOUBLE); + %} + +## Computes the square root of a :zeek:type:`double`. +## +## x: The number to compute the square root of. +## +## Returns: The square root of *x*. +## +## .. zeek:see:: floor exp ln log10 +function sqrt%(x: double%): double + %{ + if ( x < 0 ) + { + reporter->Error("negative sqrt argument"); + return new Val(-1.0, TYPE_DOUBLE); + } + + return new Val(sqrt(x), TYPE_DOUBLE); + %} + +## Computes the exponential function. +## +## d: The argument to the exponential function. +## +## Returns: *e* to the power of *d*. 
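
The formatting directives described for `fmt` above are easiest to see with a few concrete calls; the expected outputs in the comments follow from the specifier table but are illustrative rather than verified.

    event zeek_init()
        {
        print fmt("[%10s|%-10s]", "right", "left");  # width and left-alignment
        print fmt("%x", 255);                        # "ff"
        print fmt("%.3f", 3.14159);                  # "3.142"
        print fmt("%D", network_time());             # ISO timestamp

        # cat_sep() substitutes the default for empty-string arguments:
        print cat_sep(",", "-", "a", "", "b");       # "a,-,b"
        }
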
+## +## .. zeek:see:: floor sqrt ln log10 +function exp%(d: double%): double + %{ + return new Val(exp(d), TYPE_DOUBLE); + %} + +## Computes the natural logarithm of a number. +## +## d: The argument to the logarithm. +## +## Returns: The natural logarithm of *d*. +## +## .. zeek:see:: exp floor sqrt log10 +function ln%(d: double%): double + %{ + return new Val(log(d), TYPE_DOUBLE); + %} + +## Computes the common logarithm of a number. +## +## d: The argument to the logarithm. +## +## Returns: The common logarithm of *d*. +## +## .. zeek:see:: exp floor sqrt ln +function log10%(d: double%): double + %{ + return new Val(log10(d), TYPE_DOUBLE); + %} + +# =========================================================================== +# +# Introspection +# +# =========================================================================== + +## Determines whether a connection has been received externally. For example, +## Broccoli or the Time Machine can send packets to Zeek via a mechanism that is +## one step lower than sending events. This function checks whether the packets +## of a connection stem from one of these external *packet sources*. +## +## c: The connection to test. +## +## Returns: True if *c* has been received externally. +function is_external_connection%(c: connection%) : bool + %{ + return val_mgr->GetBool(c && c->IsExternal()); + %} + +## Returns the ID of the analyzer which raised the current event. +## +## Returns: The ID of the analyzer which raised the current event, or 0 if +## none. +function current_analyzer%(%) : count + %{ + return val_mgr->GetCount(mgr.CurrentAnalyzer()); + %} + +## Returns Zeek's process ID. +## +## Returns: Zeek's process ID. +function getpid%(%) : count + %{ + return val_mgr->GetCount(getpid()); + %} + +%%{ +extern const char* zeek_version(); +%%} + +## Returns the Zeek version string. This function is deprecated, use +## :zeek:see:`zeek_version` instead. +## +## Returns: Zeek's version, e.g., 2.0-beta-47-debug. +function bro_version%(%): string &deprecated + %{ + return new StringVal(zeek_version()); + %} + +## Returns the Zeek version string. +## +## Returns: Zeek's version, e.g., 2.0-beta-47-debug. +function zeek_version%(%): string + %{ + return new StringVal(zeek_version()); + %} + +## Converts a record type name to a vector of strings, where each element is +## the name of a record field. Nested records are flattened. +## +## rt: The name of the record type. +## +## Returns: A string vector with the field names of *rt*. +function record_type_to_vector%(rt: string%): string_vec + %{ + VectorVal* result = + new VectorVal(internal_type("string_vec")->AsVectorType()); + + RecordType *type = internal_type(rt->CheckString())->AsRecordType(); + + if ( type ) + { + for ( int i = 0; i < type->NumFields(); ++i ) + { + StringVal* val = new StringVal(type->FieldName(i)); + result->Assign(i+1, val); + } + } + + return result; + %} + +## Returns the type name of an arbitrary Zeek variable. +## +## t: An arbitrary object. +## +## Returns: The type name of *t*. +function type_name%(t: any%): string + %{ + ODesc d; + t->Type()->Describe(&d); + + BroString* s = new BroString(1, d.TakeBytes(), d.Len()); + s->SetUseFreeToDelete(true); + + return new StringVal(s); + %} + +## Checks whether Zeek reads traffic from one or more network interfaces (as +## opposed to from a network trace in a file). Note that this function returns +## true even after Zeek has stopped reading network traffic, for example due to +## receiving a termination signal. 
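
A brief sketch of the introspection helpers above; the printed type names and version string indicate the general shape of the output, not exact values.

    event zeek_init()
        {
        print zeek_version();                    # e.g. "2.6-526"
        print getpid();
        print type_name(1.2.3.4);                # "addr"
        print type_name(vector(1, 2, 3));        # a vector-of-count type name
        print record_type_to_vector("conn_id");  # flattened field names
        }
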
+## +## Returns: True if reading traffic from a network interface. +## +## .. zeek:see:: reading_traces +function reading_live_traffic%(%): bool + %{ + return val_mgr->GetBool(reading_live); + %} + +## Checks whether Zeek reads traffic from a trace file (as opposed to from a +## network interface). +## +## Returns: True if reading traffic from a network trace. +## +## .. zeek:see:: reading_live_traffic +function reading_traces%(%): bool + %{ + return val_mgr->GetBool(reading_traces); + %} + + +## Generates a table of the size of all global variables. The table index is +## the variable name and the value is the variable size in bytes. +## +## Returns: A table that maps variable names to their sizes. +## +## .. zeek:see:: global_ids +function global_sizes%(%): var_sizes + %{ + TableVal* sizes = new TableVal(var_sizes); + PDict(ID)* globals = global_scope()->Vars(); + IterCookie* c = globals->InitForIteration(); + + ID* id; + while ( (id = globals->NextEntry(c)) ) + if ( id->HasVal() ) + { + Val* id_name = new StringVal(id->Name()); + Val* id_size = val_mgr->GetCount(id->ID_Val()->MemoryAllocation()); + sizes->Assign(id_name, id_size); + Unref(id_name); + } + + return sizes; + %} + +## Generates a table with information about all global identifiers. The table +## value is a record containing the type name of the identifier, whether it is +## exported, a constant, an enum constant, redefinable, and its value (if it +## has one). +## +## Returns: A table that maps identifier names to information about them. +## +## .. zeek:see:: global_sizes +function global_ids%(%): id_table + %{ + TableVal* ids = new TableVal(id_table); + PDict(ID)* globals = global_scope()->Vars(); + IterCookie* c = globals->InitForIteration(); + + ID* id; + while ( (id = globals->NextEntry(c)) ) + { + RecordVal* rec = new RecordVal(script_id); + rec->Assign(0, new StringVal(type_name(id->Type()->Tag()))); + rec->Assign(1, val_mgr->GetBool(id->IsExport())); + rec->Assign(2, val_mgr->GetBool(id->IsConst())); + rec->Assign(3, val_mgr->GetBool(id->IsEnumConst())); + rec->Assign(4, val_mgr->GetBool(id->IsOption())); + rec->Assign(5, val_mgr->GetBool(id->IsRedefinable())); + + if ( id->HasVal() ) + { + Val* val = id->ID_Val(); + Ref(val); + rec->Assign(6, val); + } + + Val* id_name = new StringVal(id->Name()); + ids->Assign(id_name, rec); + Unref(id_name); + } + + return ids; + %} + +## Returns the value of a global identifier. +## +## id: The global identifier. +## +## Returns: The value of *id*. If *id* does not describe a valid identifier, +## the string ``""`` or ``""`` is returned. +function lookup_ID%(id: string%) : any + %{ + ID* i = global_scope()->Lookup(id->CheckString()); + if ( ! i ) + return new StringVal(""); + + if ( ! i->ID_Val() ) + return new StringVal(""); + + return i->ID_Val()->Ref(); + %} + +## Generates metadata about a record's fields. The returned information +## includes the field name, whether it is logged, its value (if it has one), +## and its default value (if specified). +## +## rec: The record value or type to inspect. +## +## Returns: A table that describes the fields of a record. 
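
As a usage sketch for the global-identifier helpers above: `lookup_ID` returns the identifier's value when the name resolves, and a short placeholder error string otherwise. The identifier names used here are only examples.

    event zeek_init()
        {
        # Are we sniffing an interface or replaying a trace?
        print reading_live_traffic(), reading_traces();

        print lookup_ID("table_expire_interval");  # a known global's value
        print lookup_ID("no_such_identifier");     # placeholder error string
        }
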
+function record_fields%(rec: any%): record_field_table + %{ + TableVal* fields = new TableVal(record_field_table); + + auto t = rec->Type(); + + if ( t->Tag() != TYPE_RECORD && t->Tag() != TYPE_TYPE ) + { + reporter->Error("non-record value/type passed to record_fields"); + return fields; + } + + RecordType* rt = nullptr; + RecordVal* rv = nullptr; + + if ( t->Tag() == TYPE_RECORD ) + { + rt = t->AsRecordType(); + rv = rec->AsRecordVal(); + } + else + { + t = t->AsTypeType()->Type(); + + if ( t->Tag() != TYPE_RECORD ) + { + reporter->Error("non-record value/type passed to record_fields"); + return fields; + } + + rt = t->AsRecordType(); + } + + for ( int i = 0; i < rt->NumFields(); ++i ) + { + BroType* ft = rt->FieldType(i); + TypeDecl* fd = rt->FieldDecl(i); + Val* fv = nullptr; + + if ( rv ) + fv = rv->Lookup(i); + + if ( fv ) + Ref(fv); + + bool logged = (fd->attrs && fd->FindAttr(ATTR_LOG) != 0); + + RecordVal* nr = new RecordVal(record_field); + + if ( ft->Tag() == TYPE_RECORD ) + nr->Assign(0, new StringVal("record " + ft->GetName())); + else + nr->Assign(0, new StringVal(type_name(ft->Tag()))); + + nr->Assign(1, val_mgr->GetBool(logged)); + nr->Assign(2, fv); + nr->Assign(3, rt->FieldDefault(i)); + + Val* field_name = new StringVal(rt->FieldName(i)); + fields->Assign(field_name, nr); + Unref(field_name); + } + + return fields; + %} + +## Enables detailed collection of profiling statistics. Statistics include +## CPU/memory usage, connections, TCP states/reassembler, DNS lookups, +## timers, and script-level state. The script variable :zeek:id:`profiling_file` +## holds the name of the file. +## +## .. zeek:see:: get_conn_stats +## get_dns_stats +## get_event_stats +## get_file_analysis_stats +## get_gap_stats +## get_matcher_stats +## get_net_stats +## get_proc_stats +## get_reassembler_stats +## get_thread_stats +## get_timer_stats +function do_profiling%(%) : any + %{ + if ( profiling_logger ) + profiling_logger->Log(); + + return 0; + %} + +## Checks whether a given IP address belongs to a local interface. +## +## ip: The IP address to check. +## +## Returns: True if *ip* belongs to a local interface. +function is_local_interface%(ip: addr%) : bool + %{ + if ( ip->AsAddr().IsLoopback() ) + return val_mgr->GetBool(1); + + list addrs; + + char host[MAXHOSTNAMELEN]; + + strcpy(host, "localhost"); + gethostname(host, MAXHOSTNAMELEN); + host[MAXHOSTNAMELEN-1] = '\0'; + + struct hostent* ent = gethostbyname2(host, AF_INET); + + if ( ent ) + { + for ( unsigned int len = 0; ent->h_addr_list[len]; ++len ) + addrs.push_back(IPAddr(IPv4, (uint32*)ent->h_addr_list[len], + IPAddr::Network)); + } + + ent = gethostbyname2(host, AF_INET6); + + if ( ent ) + { + for ( unsigned int len = 0; ent->h_addr_list[len]; ++len ) + addrs.push_back(IPAddr(IPv6, (uint32*)ent->h_addr_list[len], + IPAddr::Network)); + } + + list::const_iterator it; + for ( it = addrs.begin(); it != addrs.end(); ++it ) + { + if ( *it == ip->AsAddr() ) + return val_mgr->GetBool(1); + } + + return val_mgr->GetBool(0); + %} + +## Write rule matcher statistics (DFA states, transitions, memory usage, cache +## hits/misses) to a file. +## +## f: The file to write to. +## +## Returns: True (unconditionally). +## +## .. zeek:see:: get_matcher_stats +function dump_rule_stats%(f: file%): bool + %{ + if ( rule_matcher ) + rule_matcher->DumpStats(f); + + return val_mgr->GetBool(1); + %} + +## Checks if Zeek is terminating. This function is deprecated, use +## :zeek:see:`zeek_is_terminating` instead. 
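
A minimal example of `record_fields` as documented above, run on a hand-built `conn_id` value; the addresses and ports are placeholders.

    event zeek_init()
        {
        local id: conn_id = [$orig_h=10.0.0.1, $orig_p=1234/tcp,
                             $resp_h=10.0.0.2, $resp_p=80/tcp];

        # Maps each field name to its type name, &log flag, current value,
        # and default (if any).
        print record_fields(id);
        }
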
+## +## Returns: True if Zeek is in the process of shutting down. +## +## .. zeek:see:: terminate +function bro_is_terminating%(%): bool &deprecated + %{ + return val_mgr->GetBool(terminating); + %} + +## Checks if Zeek is terminating. +## +## Returns: True if Zeek is in the process of shutting down. +## +## .. zeek:see:: terminate +function zeek_is_terminating%(%): bool + %{ + return val_mgr->GetBool(terminating); + %} + +## Returns the hostname of the machine Zeek runs on. +## +## Returns: The hostname of the machine Zeek runs on. +function gethostname%(%) : string + %{ + char buffer[MAXHOSTNAMELEN]; + if ( gethostname(buffer, MAXHOSTNAMELEN) < 0 ) + strcpy(buffer, ""); + + buffer[MAXHOSTNAMELEN-1] = '\0'; + return new StringVal(buffer); + %} + +## Returns whether an address is IPv4 or not. +## +## a: the address to check. +## +## Returns: true if *a* is an IPv4 address, else false. +function is_v4_addr%(a: addr%): bool + %{ + if ( a->AsAddr().GetFamily() == IPv4 ) + return val_mgr->GetBool(1); + else + return val_mgr->GetBool(0); + %} + +## Returns whether an address is IPv6 or not. +## +## a: the address to check. +## +## Returns: true if *a* is an IPv6 address, else false. +function is_v6_addr%(a: addr%): bool + %{ + if ( a->AsAddr().GetFamily() == IPv6 ) + return val_mgr->GetBool(1); + else + return val_mgr->GetBool(0); + %} + +## Returns whether a subnet specification is IPv4 or not. +## +## s: the subnet to check. +## +## Returns: true if *s* is an IPv4 subnet, else false. +function is_v4_subnet%(s: subnet%): bool + %{ + if ( s->AsSubNet().Prefix().GetFamily() == IPv4 ) + return val_mgr->GetBool(1); + else + return val_mgr->GetBool(0); + %} + +## Returns whether a subnet specification is IPv6 or not. +## +## s: the subnet to check. +## +## Returns: true if *s* is an IPv6 subnet, else false. +function is_v6_subnet%(s: subnet%): bool + %{ + if ( s->AsSubNet().Prefix().GetFamily() == IPv6 ) + return val_mgr->GetBool(1); + else + return val_mgr->GetBool(0); + %} + + +# =========================================================================== +# +# Conversion +# +# =========================================================================== + +## Converts the *data* field of :zeek:type:`ip6_routing` records that have +## *rtype* of 0 into a vector of addresses. +## +## s: The *data* field of an :zeek:type:`ip6_routing` record that has +## an *rtype* of 0. +## +## Returns: The vector of addresses contained in the routing header data. +function routing0_data_to_addrs%(s: string%): addr_vec + %{ + VectorVal* rval = new VectorVal(internal_type("addr_vec")->AsVectorType()); + + int len = s->Len(); + const u_char* bytes = s->Bytes(); + bytes += 4; // go past 32-bit reserved field + len -= 4; + + if ( ( len % 16 ) != 0 ) + reporter->Warning("Bad ip6_routing data length: %d", s->Len()); + + while ( len > 0 ) + { + IPAddr a(IPv6, (const uint32*) bytes, IPAddr::Network); + rval->Assign(rval->Size(), new AddrVal(a)); + bytes += 16; + len -= 16; + } + + return rval; + %} + +## Converts an :zeek:type:`addr` to an :zeek:type:`index_vec`. +## +## a: The address to convert into a vector of counts. +## +## Returns: A vector containing the host-order address representation, +## four elements in size for IPv6 addresses, or one element for IPv4. +## +## .. 
zeek:see:: counts_to_addr +function addr_to_counts%(a: addr%): index_vec + %{ + VectorVal* rval = new VectorVal(internal_type("index_vec")->AsVectorType()); + const uint32* bytes; + int len = a->AsAddr().GetBytes(&bytes); + + for ( int i = 0; i < len; ++i ) + rval->Assign(i, val_mgr->GetCount(ntohl(bytes[i]))); + + return rval; + %} + +## Converts an :zeek:type:`index_vec` to an :zeek:type:`addr`. +## +## v: The vector containing host-order IP address representation, +## one element for IPv4 addresses, four elements for IPv6 addresses. +## +## Returns: An IP address. +## +## .. zeek:see:: addr_to_counts +function counts_to_addr%(v: index_vec%): addr + %{ + if ( v->AsVector()->size() == 1 ) + { + return new AddrVal(htonl((*v->AsVector())[0]->AsCount())); + } + else if ( v->AsVector()->size() == 4 ) + { + uint32 bytes[4]; + for ( int i = 0; i < 4; ++i ) + bytes[i] = htonl((*v->AsVector())[i]->AsCount()); + return new AddrVal(bytes); + } + else + { + builtin_error("invalid vector size", @ARG@[0]); + uint32 bytes[4]; + memset(bytes, 0, sizeof(bytes)); + return new AddrVal(bytes); + } + %} + +## Converts an :zeek:type:`enum` to an :zeek:type:`int`. +## +## e: The :zeek:type:`enum` to convert. +## +## Returns: The :zeek:type:`int` value that corresponds to the :zeek:type:`enum`. +function enum_to_int%(e: any%): int + %{ + if ( e->Type()->Tag() != TYPE_ENUM ) + { + builtin_error("enum_to_int() requires enum value"); + return val_mgr->GetInt(-1); + } + + return val_mgr->GetInt(e->AsEnum()); + %} + +## Converts a :zeek:type:`string` to an :zeek:type:`int`. +## +## str: The :zeek:type:`string` to convert. +## +## Returns: The :zeek:type:`string` *str* as :zeek:type:`int`. +## +## .. zeek:see:: to_addr to_port to_subnet +function to_int%(str: string%): int + %{ + const char* s = str->CheckString(); + char* end_s; + + bro_int_t i = strtoll(s, &end_s, 10); + +#if 0 + // Not clear we should complain. For example, is " 205 " + // a legal conversion? + if ( s[0] == '\0' || end_s[0] != '\0' ) + builtin_error("bad conversion to integer", @ARG@[0]); +#endif + + return val_mgr->GetInt(i); + %} + + +## Converts a (positive) :zeek:type:`int` to a :zeek:type:`count`. +## +## n: The :zeek:type:`int` to convert. +## +## Returns: The :zeek:type:`int` *n* as unsigned integer, or 0 if *n* < 0. +function int_to_count%(n: int%): count + %{ + if ( n < 0 ) + { + builtin_error("bad conversion to count", @ARG@[0]); + n = 0; + } + return val_mgr->GetCount(n); + %} + +## Converts a :zeek:type:`double` to a :zeek:type:`count`. +## +## d: The :zeek:type:`double` to convert. +## +## Returns: The :zeek:type:`double` *d* as unsigned integer, or 0 if *d* < 0.0. +## +## .. zeek:see:: double_to_time +function double_to_count%(d: double%): count + %{ + if ( d < 0.0 ) + builtin_error("bad conversion to count", @ARG@[0]); + + return val_mgr->GetCount(bro_uint_t(rint(d))); + %} + +## Converts a :zeek:type:`string` to a :zeek:type:`count`. +## +## str: The :zeek:type:`string` to convert. +## +## Returns: The :zeek:type:`string` *str* as unsigned integer, or 0 if *str* has +## an invalid format. +## +## .. zeek:see:: to_addr to_int to_port to_subnet +function to_count%(str: string%): count + %{ + const char* s = str->CheckString(); + char* end_s; + + uint64 u = (uint64) strtoull(s, &end_s, 10); + + if ( s[0] == '\0' || end_s[0] != '\0' ) + { + builtin_error("bad conversion to count", @ARG@[0]); + u = 0; + } + + return val_mgr->GetCount(u); + %} + +## Converts an :zeek:type:`interval` to a :zeek:type:`double`. 
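
A few of the numeric conversions above, shown as a round trip plus the string parsers; the values are arbitrary.

    event zeek_init()
        {
        # addr <-> vector-of-count round trip (one element for IPv4).
        local v = addr_to_counts(192.168.0.1);
        print v, counts_to_addr(v);

        print to_count("42"), to_int("-7"), int_to_count(5);
        print double_to_count(3.9);   # rounds via rint(), so 4
        }
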
+## +## i: The :zeek:type:`interval` to convert. +## +## Returns: The :zeek:type:`interval` *i* as :zeek:type:`double`. +## +## .. zeek:see:: double_to_interval +function interval_to_double%(i: interval%): double + %{ + return new Val(i, TYPE_DOUBLE); + %} + +## Converts a :zeek:type:`time` value to a :zeek:type:`double`. +## +## t: The :zeek:type:`time` to convert. +## +## Returns: The :zeek:type:`time` value *t* as :zeek:type:`double`. +## +## .. zeek:see:: double_to_time +function time_to_double%(t: time%): double + %{ + return new Val(t, TYPE_DOUBLE); + %} + +## Converts a :zeek:type:`double` value to a :zeek:type:`time`. +## +## d: The :zeek:type:`double` to convert. +## +## Returns: The :zeek:type:`double` value *d* as :zeek:type:`time`. +## +## .. zeek:see:: time_to_double double_to_count +function double_to_time%(d: double%): time + %{ + return new Val(d, TYPE_TIME); + %} + +## Converts a :zeek:type:`double` to an :zeek:type:`interval`. +## +## d: The :zeek:type:`double` to convert. +## +## Returns: The :zeek:type:`double` *d* as :zeek:type:`interval`. +## +## .. zeek:see:: interval_to_double +function double_to_interval%(d: double%): interval + %{ + return new Val(d, TYPE_INTERVAL); + %} + +## Converts a :zeek:type:`port` to a :zeek:type:`count`. +## +## p: The :zeek:type:`port` to convert. +## +## Returns: The :zeek:type:`port` *p* as :zeek:type:`count`. +## +## .. zeek:see:: count_to_port +function port_to_count%(p: port%): count + %{ + return val_mgr->GetCount(p->Port()); + %} + +## Converts a :zeek:type:`count` and ``transport_proto`` to a :zeek:type:`port`. +## +## num: The :zeek:type:`port` number. +## +## proto: The transport protocol. +## +## Returns: The :zeek:type:`count` *num* as :zeek:type:`port`. +## +## .. zeek:see:: port_to_count +function count_to_port%(num: count, proto: transport_proto%): port + %{ + return val_mgr->GetPort(num, (TransportProto)proto->AsEnum()); + %} + +## Converts a :zeek:type:`string` to an :zeek:type:`addr`. +## +## ip: The :zeek:type:`string` to convert. +## +## Returns: The :zeek:type:`string` *ip* as :zeek:type:`addr`, or the unspecified +## address ``::`` if the input string does not parse correctly. +## +## .. zeek:see:: to_count to_int to_port count_to_v4_addr raw_bytes_to_v4_addr +## to_subnet +function to_addr%(ip: string%): addr + %{ + char* s = ip->AsString()->Render(); + Val* ret = new AddrVal(s); + delete [] s; + return ret; + %} + +## Checks if a string is a valid IPv4 or IPv6 address. +## +## ip: the string to check for valid IP formatting. +## +## Returns: T if the string is a valid IPv4 or IPv6 address format. +function is_valid_ip%(ip: string%): bool + %{ + char* s = ip->AsString()->Render(); + auto rval = IPAddr::IsValid(s); + delete [] s; + return val_mgr->GetBool(rval); + %} + +## Converts a :zeek:type:`string` to a :zeek:type:`subnet`. +## +## sn: The subnet to convert. +## +## Returns: The *sn* string as a :zeek:type:`subnet`, or the unspecified subnet +## ``::/0`` if the input string does not parse correctly. +## +## .. zeek:see:: to_count to_int to_port count_to_v4_addr raw_bytes_to_v4_addr +## to_addr +function to_subnet%(sn: string%): subnet + %{ + char* s = sn->AsString()->Render(); + Val* ret = new SubNetVal(s); + delete [] s; + return ret; + %} + +## Converts a :zeek:type:`addr` to a :zeek:type:`subnet`. +## +## a: The address to convert. +## +## Returns: The address as a :zeek:type:`subnet`. +## +## .. 
zeek:see:: to_subnet +function addr_to_subnet%(a: addr%): subnet + %{ + int width = (a->AsAddr().GetFamily() == IPv4 ? 32 : 128); + return new SubNetVal(a->AsAddr(), width); + %} + +## Converts a :zeek:type:`subnet` to an :zeek:type:`addr` by +## extracting the prefix. +## +## sn: The subnet to convert. +## +## Returns: The subnet as an :zeek:type:`addr`. +## +## .. zeek:see:: to_subnet +function subnet_to_addr%(sn: subnet%): addr + %{ + return new AddrVal(sn->Prefix()); + %} + +## Returns the width of a :zeek:type:`subnet`. +## +## sn: The subnet. +## +## Returns: The width of the subnet. +## +## .. zeek:see:: to_subnet +function subnet_width%(sn: subnet%): count + %{ + return val_mgr->GetCount(sn->Width()); + %} + +## Converts a :zeek:type:`string` to a :zeek:type:`double`. +## +## str: The :zeek:type:`string` to convert. +## +## Returns: The :zeek:type:`string` *str* as double, or 0 if *str* has +## an invalid format. +## +function to_double%(str: string%): double + %{ + const char* s = str->CheckString(); + char* end_s; + + double d = strtod(s, &end_s); + + if ( s[0] == '\0' || end_s[0] != '\0' ) + { + builtin_error("bad conversion to double", @ARG@[0]); + d = 0; + } + + return new Val(d, TYPE_DOUBLE); + %} + +## Converts a :zeek:type:`count` to an :zeek:type:`addr`. +## +## ip: The :zeek:type:`count` to convert. +## +## Returns: The :zeek:type:`count` *ip* as :zeek:type:`addr`. +## +## .. zeek:see:: raw_bytes_to_v4_addr to_addr to_subnet +function count_to_v4_addr%(ip: count%): addr + %{ + if ( ip > 4294967295LU ) + { + builtin_error("conversion of non-IPv4 count to addr", @ARG@[0]); + return new AddrVal(uint32(0)); + } + + return new AddrVal(htonl(uint32(ip))); + %} + +## Converts a :zeek:type:`string` of bytes into an IPv4 address. In particular, +## this function interprets the first 4 bytes of the string as an IPv4 address +## in network order. +## +## b: The raw bytes (:zeek:type:`string`) to convert. +## +## Returns: The byte :zeek:type:`string` *b* as :zeek:type:`addr`. +## +## .. zeek:see:: raw_bytes_to_v4_addr to_addr to_subnet +function raw_bytes_to_v4_addr%(b: string%): addr + %{ + uint32 a = 0; + + if ( b->Len() < 4 ) + builtin_error("too short a string as input to raw_bytes_to_v4_addr()"); + + else + { + const u_char* bp = b->Bytes(); + a = (bp[0] << 24) | (bp[1] << 16) | (bp[2] << 8) | bp[3]; + } + + return new AddrVal(htonl(a)); + %} + +## Converts a :zeek:type:`string` to a :zeek:type:`port`. +## +## s: The :zeek:type:`string` to convert. +## +## Returns: A :zeek:type:`port` converted from *s*. +## +## .. zeek:see:: to_addr to_count to_int to_subnet +function to_port%(s: string%): port + %{ + int port = 0; + if ( s->Len() < 10 ) + { + char* slash; + errno = 0; + port = strtol(s->CheckString(), &slash, 10); + if ( ! errno ) + { + ++slash; + if ( streq(slash, "tcp") ) + return val_mgr->GetPort(port, TRANSPORT_TCP); + else if ( streq(slash, "udp") ) + return val_mgr->GetPort(port, TRANSPORT_UDP); + else if ( streq(slash, "icmp") ) + return val_mgr->GetPort(port, TRANSPORT_ICMP); + } + } + + builtin_error("wrong port format, must be /[0-9]{1,5}\\/(tcp|udp|icmp)/"); + return val_mgr->GetPort(port, TRANSPORT_UNKNOWN); + %} + +## Converts a string of bytes (in network byte order) to a :zeek:type:`double`. +## +## s: A string of bytes containing the binary representation of a double value. +## +## Returns: The double value contained in *s*, or 0 if the conversion +## failed. 
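+##
+## For example (an illustrative sketch; the literal is the network-order
+## IEEE 754 encoding of 1.0)::
+##
+##     local d = bytestring_to_double("\x3f\xf0\x00\x00\x00\x00\x00\x00");   # 1.0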
+## +function bytestring_to_double%(s: string%): double + %{ + if ( s->Len() != sizeof(double) ) + { + builtin_error("bad conversion to double"); + return new Val(0.0, TYPE_DOUBLE); + } + + // See #908 for a discussion of portability. + double d; + memcpy(&d, s->Bytes(), sizeof(double)); + return new Val(ntohd(d), TYPE_DOUBLE); + %} + +## Converts a string of bytes to a :zeek:type:`count`. +## +## s: A string of bytes containing the binary representation of the value. +## +## is_le: If true, *s* is assumed to be in little endian format, else it's big endian. +## +## Returns: The value contained in *s*, or 0 if the conversion failed. +## +function bytestring_to_count%(s: string, is_le: bool &default=F%): count + %{ +#ifdef HOST_BIGENDIAN + static const bool host_bigendian = true; +#else + static const bool host_bigendian = false; +#endif + const u_char *p = s->Bytes(); + unsigned int i; + + switch ( s->Len() ) { + case sizeof(uint8): + { + uint8 value = 0; + memcpy(&value, p, sizeof(uint8)); + return val_mgr->GetCount(value); + } + + case sizeof(uint16): + { + uint16 value = 0; + + if ( (host_bigendian && is_le) || (! host_bigendian && ! is_le) ) + { + char buf[sizeof(uint16)]; + char *d = &buf[sizeof(uint16)-1]; + + for ( i = 0; i < sizeof(uint16); i++ ) + *d-- = *p++; + + memcpy(&value, buf, sizeof(uint16)); + } + else + memcpy(&value, p, sizeof(uint16)); + + return val_mgr->GetCount(value); + } + + case sizeof(uint32): + { + uint32 value = 0; + + if ( (host_bigendian && is_le) || (! host_bigendian && ! is_le) ) + { + char buf[sizeof(uint32)]; + char *d = &buf[sizeof(uint32)-1]; + + for ( i = 0; i < sizeof(uint32); i++ ) + *d-- = *p++; + + memcpy(&value, buf, sizeof(uint32)); + } + else + memcpy(&value, p, sizeof(uint32)); + + return val_mgr->GetCount(value); + } + + case sizeof(uint64): + { + uint64 value = 0; + + if ( (host_bigendian && is_le) || (! host_bigendian && ! is_le) ) + { + char buf[sizeof(uint64)]; + char *d = &buf[sizeof(uint64)-1]; + + for ( i = 0; i < sizeof(uint64); i++ ) + *d-- = *p++; + + memcpy(&value, buf, sizeof(uint64)); + } + else + memcpy(&value, p, sizeof(uint64)); + + return val_mgr->GetCount(value); + } + } + + builtin_error("unsupported byte length for bytestring_to_count"); + return val_mgr->GetCount(0); + %} + +## Converts a reverse pointer name to an address. For example, +## ``1.0.168.192.in-addr.arpa`` to ``192.168.0.1``. +## +## s: The string with the reverse pointer name. +## +## Returns: The IP address corresponding to *s*. +## +## .. zeek:see:: addr_to_ptr_name to_addr +function ptr_name_to_addr%(s: string%): addr + %{ + if ( s->Len() != 72 ) + { + int a[4]; + uint32 addr; + char ss[13]; // this will contain "in-addr.arpa" + + if ( sscanf(s->CheckString(), + "%d.%d.%d.%d.%12s", + a, a+1, a+2, a+3, ss) != 5 + || strcmp(ss, "in-addr.arpa") != 0 ) + { + builtin_error("bad PTR name", @ARG@[0]); + addr = 0; + } + else + addr = (a[3] << 24) | (a[2] << 16) | (a[1] << 8) | a[0]; + + return new AddrVal(htonl(addr)); + } + else + { + uint32 addr6[4]; + uint32 b[32]; + char ss[9]; // this will contain "ip6.arpa" + if ( sscanf(s->CheckString(), + "%1x.%1x.%1x.%1x.%1x.%1x.%1x.%1x." + "%1x.%1x.%1x.%1x.%1x.%1x.%1x.%1x." + "%1x.%1x.%1x.%1x.%1x.%1x.%1x.%1x." 
+ "%1x.%1x.%1x.%1x.%1x.%1x.%1x.%1x.%8s", + b+31, b+30, b+29, b+28, b+27, b+26, b+25, b+24, + b+23, b+22, b+21, b+20, b+19, b+18, b+17, b+16, + b+15, b+14, b+13, b+12, b+11, b+10, b+9, b+8, + b+7, b+6, b+5, b+4, b+3, b+2, b+1, b, ss) != 33 + || strcmp(ss, "ip6.arpa") != 0 ) + { + builtin_error("bad PTR name", @ARG@[0]); + memset(addr6, 0, sizeof addr6); + } + else + { + for ( unsigned int i = 0; i < 4; ++i ) + { + uint32 a = 0; + for ( unsigned int j = 1; j <= 8; ++j ) + a |= b[8*i+j-1] << (32-j*4); + + addr6[i] = htonl(a); + } + } + + return new AddrVal(addr6); + } + %} + +## Converts an IP address to a reverse pointer name. For example, +## ``192.168.0.1`` to ``1.0.168.192.in-addr.arpa``. +## +## a: The IP address to convert to a reverse pointer name. +## +## Returns: The reverse pointer representation of *a*. +## +## .. zeek:see:: ptr_name_to_addr to_addr +function addr_to_ptr_name%(a: addr%): string + %{ + return new StringVal(a->AsAddr().PtrName().c_str()); + %} + +## Converts a string of bytes into its hexadecimal representation. +## For example, ``"04"`` would be converted to ``"3034"``. +## +## bytestring: The string of bytes. +## +## Returns: The hexadecimal representation of *bytestring*. +## +## .. zeek:see:: hexdump hexstr_to_bytestring +function bytestring_to_hexstr%(bytestring: string%): string + %{ + bro_uint_t len = bytestring->AsString()->Len(); + const u_char* bytes = bytestring->AsString()->Bytes(); + char hexstr[(2 * len) + 1]; + + hexstr[0] = 0; + for ( bro_uint_t i = 0; i < len; ++i ) + snprintf(hexstr + (2 * i), 3, "%.2hhx", bytes[i]); + + return new StringVal(hexstr); + %} + +## Converts a hex-string into its binary representation. +## For example, ``"3034"`` would be converted to ``"04"``. +## +## The input string is assumed to contain an even number of hexadecimal digits +## (0-9, a-f, or A-F), otherwise behavior is undefined. +## +## hexstr: The hexadecimal string representation. +## +## Returns: The binary representation of *hexstr*. +## +## .. zeek:see:: hexdump bytestring_to_hexstr +function hexstr_to_bytestring%(hexstr: string%): string + %{ + bro_uint_t len = hexstr->AsString()->Len(); + if ( len % 2 != 0 ) + { + reporter->Error("Hex string '%s' has invalid length (not divisible by 2)", hexstr->CheckString()); + return val_mgr->GetEmptyString(); + } + + const char* bytes = hexstr->AsString()->CheckString(); + int outlen = (len/2); + char bytestring[outlen]; + memset(bytestring, 0, outlen); + + for ( bro_uint_t i = 0; i < len/2; ++i ) + { + int res = sscanf(bytes + (2*i), "%2hhx", &bytestring[i]); + + if ( res == EOF ) + { + reporter->Error("Hex string %s contains invalid input: %s", hexstr->CheckString(), strerror(errno)); + return val_mgr->GetEmptyString(); + } + + else if ( res != 1 ) + { + reporter->Error("Could not read hex element from input %s", hexstr->CheckString()); + return val_mgr->GetEmptyString(); + } + + } + + return new StringVal(outlen, bytestring); + %} + +## Encodes a Base64-encoded string. +## +## s: The string to encode. +## +## a: An optional custom alphabet. The empty string indicates the default +## alphabet. If given, the string must consist of 64 unique characters. +## +## Returns: The encoded version of *s*. +## +## .. 
zeek:see:: decode_base64
+function encode_base64%(s: string, a: string &default=""%): string
+	%{
+	BroString* t = encode_base64(s->AsString(), a->AsString());
+	if ( t )
+		return new StringVal(t);
+	else
+		{
+		reporter->Error("error in encoding string %s", s->CheckString());
+		return val_mgr->GetEmptyString();
+		}
+	%}
+
+## Decodes a Base64-encoded string.
+##
+## s: The Base64-encoded string.
+##
+## a: An optional custom alphabet. The empty string indicates the default
+## alphabet. If given, the string must consist of 64 unique characters.
+##
+## Returns: The decoded version of *s*.
+##
+## .. zeek:see:: decode_base64_conn encode_base64
+function decode_base64%(s: string, a: string &default=""%): string
+	%{
+	BroString* t = decode_base64(s->AsString(), a->AsString());
+	if ( t )
+		return new StringVal(t);
+	else
+		{
+		reporter->Error("error in decoding string %s", s->CheckString());
+		return val_mgr->GetEmptyString();
+		}
+	%}
+
+## Decodes a Base64-encoded string that was derived from processing a connection.
+## If an error is encountered decoding the string, that will be logged to
+## ``weird.log`` with the associated connection.
+##
+## cid: The identifier of the connection that the encoding originates from.
+##
+## s: The Base64-encoded string.
+##
+## a: An optional custom alphabet. The empty string indicates the default
+## alphabet. If given, the string must consist of 64 unique characters.
+##
+## Returns: The decoded version of *s*.
+##
+## .. zeek:see:: decode_base64
+function decode_base64_conn%(cid: conn_id, s: string, a: string &default=""%): string
+	%{
+	Connection* conn = sessions->FindConnection(cid);
+	if ( ! conn )
+		{
+		builtin_error("connection ID not a known connection", cid);
+		return val_mgr->GetEmptyString();
+		}
+
+	BroString* t = decode_base64(s->AsString(), a->AsString(), conn);
+	if ( t )
+		return new StringVal(t);
+	else
+		{
+		reporter->Error("error in decoding string %s", s->CheckString());
+		return val_mgr->GetEmptyString();
+		}
+	%}
+
+%%{
+typedef struct {
+	uint32 time_low;
+	uint16 time_mid;
+	uint16 time_hi_and_version;
+	uint8 clock_seq_hi_and_reserved;
+	uint8 clock_seq_low;
+	uint8 node[6];
+} bro_uuid_t;
+%%}
+
+## Converts a bytes representation of a UUID into its string form. For example,
+## given a string of 16 bytes, it produces an output string in this format:
+## ``550e8400-e29b-41d4-a716-446655440000``.
+## See ``_.
+##
+## uuid: The 16 bytes of the UUID.
+##
+## Returns: The string representation of *uuid*.
+function uuid_to_string%(uuid: string%): string
+	%{
+	if ( uuid->Len() != 16 )
+		return new StringVal("");
+
+	bro_uuid_t* id = (bro_uuid_t*) uuid->Bytes();
+
+	static char s[1024];
+	char* sp = s;
+
+	sp += snprintf(sp, s + sizeof(s) - sp,
+	               "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
+	               id->time_low, id->time_mid, id->time_hi_and_version,
+	               id->clock_seq_hi_and_reserved, id->clock_seq_low,
+	               id->node[0],
+	               id->node[1],
+	               id->node[2],
+	               id->node[3],
+	               id->node[4],
+	               id->node[5]);
+
+	return new StringVal(s);
+	%}
+
+%%{
+char* to_pat_str(int sn, const char* ss)
+	{
+	const char special_re_char[] = "^$-:\"\\/|*+?.(){}[]";
+
+	char* pat = new char[sn * 4 + 1];
+	int pat_len = 0;
+
+	for ( int i = 0; i < sn; ++i )
+		{
+		if ( ! strchr(special_re_char, ss[i]) )
+			pat[pat_len++] = ss[i];
+		else
+			{
+			pat[pat_len++] = '\\';
+			pat[pat_len++] = ss[i];
+			}
+		}
+	pat[pat_len] = '\0';
+	return pat;
+	}
+%%}
+
+## Escapes a string so that it becomes a valid :zeek:type:`pattern` and can be
+## used with the :zeek:id:`string_to_pattern`. 
Any character from the set +## ``^$-:"\/|*+?.(){}[]`` is prefixed with a ``\``. +## +## s: The string to escape. +## +## Returns: An escaped version of *s* that has the structure of a valid +## :zeek:type:`pattern`. +## +## .. zeek:see:: string_to_pattern +## +function convert_for_pattern%(s: string%): string + %{ + char* t = to_pat_str(s->Len(), (const char*)(s->Bytes())); + StringVal* ret = new StringVal(t); + delete [] t; + return ret; + %} + +## Converts a :zeek:type:`string` into a :zeek:type:`pattern`. +## +## s: The string to convert. +## +## convert: If true, *s* is first passed through the function +## :zeek:id:`convert_for_pattern` to escape special characters of +## patterns. +## +## Returns: *s* as :zeek:type:`pattern`. +## +## .. zeek:see:: convert_for_pattern +## +## .. note:: +## +## This function must be called at Zeek startup time, e.g., in the event +## :zeek:id:`zeek_init`. +function string_to_pattern%(s: string, convert: bool%): pattern + %{ + const char* ss = (const char*) (s->Bytes()); + int sn = s->Len(); + char* pat; + + if ( convert ) + pat = to_pat_str(sn, ss); + else + { + pat = new char[sn+1]; + memcpy(pat, ss, sn); + pat[sn] = '\0'; + } + + RE_Matcher* re = new RE_Matcher(pat); + delete [] pat; + re->Compile(); + return new PatternVal(re); + %} + +## Formats a given time value according to a format string. +## +## fmt: The format string. See ``man strftime`` for the syntax. +## +## d: The time value. +## +## Returns: The time *d* formatted according to *fmt*. +function strftime%(fmt: string, d: time%) : string + %{ + static char buffer[128]; + + time_t timeval = time_t(d); + struct tm t; + + if ( ! localtime_r(&timeval, &t) || + ! strftime(buffer, 128, fmt->CheckString(), &t) ) + return new StringVal(""); + + return new StringVal(buffer); + %} + + +## Parse a textual representation of a date/time value into a ``time`` type value. +## +## fmt: The format string used to parse the following *d* argument. See ``man strftime`` +## for the syntax. +## +## d: The string representing the time. +## +## Returns: The time value calculated from parsing *d* with *fmt*. +function strptime%(fmt: string, d: string%) : time + %{ + const time_t timeval = time_t(); + struct tm t; + + if ( ! localtime_r(&timeval, &t) || + ! strptime(d->CheckString(), fmt->CheckString(), &t) ) + { + reporter->Warning("strptime conversion failed: fmt:%s d:%s", fmt->CheckString(), d->CheckString()); + return new Val(0.0, TYPE_TIME); + } + + double ret = mktime(&t); + return new Val(ret, TYPE_TIME); + %} + + +# =========================================================================== +# +# Network Type Processing +# +# =========================================================================== + +## Masks an address down to the number of given upper bits. For example, +## ``mask_addr(1.2.3.4, 18)`` returns ``1.2.0.0``. +## +## a: The address to mask. +## +## top_bits_to_keep: The number of top bits to keep in *a*; must be greater +## than 0 and less than 33 for IPv4, or 129 for IPv6. +## +## Returns: The address *a* masked down to *top_bits_to_keep* bits. +## +## .. zeek:see:: remask_addr +function mask_addr%(a: addr, top_bits_to_keep: count%): subnet + %{ + return new SubNetVal(a->AsAddr(), top_bits_to_keep); + %} + +## Takes some top bits (such as a subnet address) from one address and the other +## bits (intra-subnet part) from a second address and merges them to get a new +## address. This is useful for anonymizing at subnet level while preserving +## serial scans. 
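+##
+## For example (an illustrative sketch; the addresses are arbitrary, and
+## 120 = 96 + 24 keeps the top 24 IPv4 bits of the first address)::
+##
+##     local a = remask_addr(10.0.0.0, 192.168.1.7, 120);   # expected to yield 10.0.0.7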
+## +## a1: The address to mask with *top_bits_from_a1*. +## +## a2: The address to take the remaining bits from. +## +## top_bits_from_a1: The number of top bits to keep in *a1*; must be greater +## than 0 and less than 129. This value is always interpreted +## relative to the IPv6 bit width (v4-mapped addresses start +## at bit number 96). +## +## Returns: The address *a* masked down to *top_bits_to_keep* bits. +## +## .. zeek:see:: mask_addr +function remask_addr%(a1: addr, a2: addr, top_bits_from_a1: count%): addr + %{ + IPAddr addr1(a1->AsAddr()); + addr1.Mask(top_bits_from_a1); + IPAddr addr2(a2->AsAddr()); + addr2.ReverseMask(top_bits_from_a1); + return new AddrVal(addr1|addr2); + %} + +## Checks whether a given :zeek:type:`port` has TCP as transport protocol. +## +## p: The :zeek:type:`port` to check. +## +## Returns: True iff *p* is a TCP port. +## +## .. zeek:see:: is_udp_port is_icmp_port +function is_tcp_port%(p: port%): bool + %{ + return val_mgr->GetBool(p->IsTCP()); + %} + +## Checks whether a given :zeek:type:`port` has UDP as transport protocol. +## +## p: The :zeek:type:`port` to check. +## +## Returns: True iff *p* is a UDP port. +## +## .. zeek:see:: is_icmp_port is_tcp_port +function is_udp_port%(p: port%): bool + %{ + return val_mgr->GetBool(p->IsUDP()); + %} + +## Checks whether a given :zeek:type:`port` has ICMP as transport protocol. +## +## p: The :zeek:type:`port` to check. +## +## Returns: True iff *p* is an ICMP port. +## +## .. zeek:see:: is_tcp_port is_udp_port +function is_icmp_port%(p: port%): bool + %{ + return val_mgr->GetBool(p->IsICMP()); + %} + +%%{ +EnumVal* map_conn_type(TransportProto tp) + { + switch ( tp ) { + case TRANSPORT_UNKNOWN: + return transport_proto->GetVal(0); + break; + + case TRANSPORT_TCP: + return transport_proto->GetVal(1); + break; + + case TRANSPORT_UDP: + return transport_proto->GetVal(2); + break; + + case TRANSPORT_ICMP: + return transport_proto->GetVal(3); + break; + + default: + reporter->InternalError("bad connection type in map_conn_type()"); + } + + // Cannot be reached; + assert(false); + return 0; // Make compiler happy. + } +%%} + +## Extracts the transport protocol from a connection. +## +## cid: The connection identifier. +## +## Returns: The transport protocol of the connection identified by *cid*. +## +## .. zeek:see:: get_port_transport_proto +## get_orig_seq get_resp_seq +function get_conn_transport_proto%(cid: conn_id%): transport_proto + %{ + Connection* c = sessions->FindConnection(cid); + if ( ! c ) + { + builtin_error("unknown connection id in get_conn_transport_proto()", cid); + return transport_proto->GetVal(0); + } + + return map_conn_type(c->ConnTransport()); + %} + +## Extracts the transport protocol from a :zeek:type:`port`. +## +## p: The port. +## +## Returns: The transport protocol of the port *p*. +## +## .. zeek:see:: get_conn_transport_proto +## get_orig_seq get_resp_seq +function get_port_transport_proto%(p: port%): transport_proto + %{ + return map_conn_type(p->PortType()); + %} + +## Checks whether a connection is (still) active. +## +## c: The connection id to check. +## +## Returns: True if the connection identified by *c* exists. +## +## .. zeek:see:: lookup_connection +function connection_exists%(c: conn_id%): bool + %{ + if ( sessions->FindConnection(c) ) + return val_mgr->GetBool(1); + else + return val_mgr->GetBool(0); + %} + +## Returns the :zeek:type:`connection` record for a given connection identifier. +## +## cid: The connection ID. 
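+##
+## For example (a sketch; assumes ``cid`` holds the :zeek:type:`conn_id` of a
+## live connection, e.g. inside a :zeek:id:`connection_established` handler)::
+##
+##     if ( connection_exists(cid) )
+##         print lookup_connection(cid)$duration;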
+## +## Returns: The :zeek:type:`connection` record for *cid*. If *cid* does not point +## to an existing connection, the function generates a run-time error +## and returns a dummy value. +## +## .. zeek:see:: connection_exists +function lookup_connection%(cid: conn_id%): connection + %{ + Connection* conn = sessions->FindConnection(cid); + if ( conn ) + return conn->BuildConnVal(); + + builtin_error("connection ID not a known connection", cid); + + // Return a dummy connection record. + RecordVal* c = new RecordVal(connection_type); + + RecordVal* id_val = new RecordVal(conn_id); + id_val->Assign(0, new AddrVal((unsigned int) 0)); + id_val->Assign(1, val_mgr->GetPort(ntohs(0), TRANSPORT_UDP)); + id_val->Assign(2, new AddrVal((unsigned int) 0)); + id_val->Assign(3, val_mgr->GetPort(ntohs(0), TRANSPORT_UDP)); + c->Assign(0, id_val); + + RecordVal* orig_endp = new RecordVal(endpoint); + orig_endp->Assign(0, val_mgr->GetCount(0)); + orig_endp->Assign(1, val_mgr->GetCount(int(0))); + + RecordVal* resp_endp = new RecordVal(endpoint); + resp_endp->Assign(0, val_mgr->GetCount(0)); + resp_endp->Assign(1, val_mgr->GetCount(int(0))); + + c->Assign(1, orig_endp); + c->Assign(2, resp_endp); + + c->Assign(3, new Val(network_time, TYPE_TIME)); + c->Assign(4, new Val(0.0, TYPE_INTERVAL)); + c->Assign(5, new TableVal(string_set)); // service + c->Assign(6, val_mgr->GetEmptyString()); // history + + return c; + %} + +%%{ +const char* conn_id_string(Val* c) + { + Val* id = (*(c->AsRecord()))[0]; + const val_list* vl = id->AsRecord(); + + const IPAddr& orig_h = (*vl)[0]->AsAddr(); + uint32 orig_p = (*vl)[1]->AsPortVal()->Port(); + const IPAddr& resp_h = (*vl)[2]->AsAddr(); + uint32 resp_p = (*vl)[3]->AsPortVal()->Port(); + + return fmt("%s/%u -> %s/%u\n", orig_h.AsString().c_str(), orig_p, + resp_h.AsString().c_str(), resp_p); + } +%%} + +## Writes the current packet to a file. +## +## file_name: The name of the file to write the packet to. +## +## Returns: True on success. +## +## .. zeek:see:: dump_packet get_current_packet +function dump_current_packet%(file_name: string%) : bool + %{ + const Packet* pkt; + + if ( ! current_pktsrc || + ! current_pktsrc->GetCurrentPacket(&pkt) ) + return val_mgr->GetBool(0); + + if ( addl_pkt_dumper && addl_pkt_dumper->Path() != file_name->CheckString()) + { + addl_pkt_dumper->Close(); + addl_pkt_dumper = nullptr; + } + + if ( ! addl_pkt_dumper ) + addl_pkt_dumper = iosource_mgr->OpenPktDumper(file_name->CheckString(), true); + + if ( addl_pkt_dumper ) + { + addl_pkt_dumper->Dump(pkt); + } + + return val_mgr->GetBool( addl_pkt_dumper && ! addl_pkt_dumper->IsError()); + %} + +## Returns the currently processed PCAP packet. +## +## Returns: The currently processed packet, which is a record +## containing the timestamp, ``snaplen``, and packet data. +## +## .. zeek:see:: dump_current_packet dump_packet +function get_current_packet%(%) : pcap_packet + %{ + const Packet* p; + RecordVal* pkt = new RecordVal(pcap_packet); + + if ( ! current_pktsrc || + ! 
current_pktsrc->GetCurrentPacket(&p) ) + { + pkt->Assign(0, val_mgr->GetCount(0)); + pkt->Assign(1, val_mgr->GetCount(0)); + pkt->Assign(2, val_mgr->GetCount(0)); + pkt->Assign(3, val_mgr->GetCount(0)); + pkt->Assign(4, val_mgr->GetEmptyString()); + pkt->Assign(5, BifType::Enum::link_encap->GetVal(BifEnum::LINK_UNKNOWN)); + return pkt; + } + + pkt->Assign(0, val_mgr->GetCount(uint32(p->ts.tv_sec))); + pkt->Assign(1, val_mgr->GetCount(uint32(p->ts.tv_usec))); + pkt->Assign(2, val_mgr->GetCount(p->cap_len)); + pkt->Assign(3, val_mgr->GetCount(p->len)); + pkt->Assign(4, new StringVal(p->cap_len, (const char*)p->data)); + pkt->Assign(5, BifType::Enum::link_encap->GetVal(p->link_type)); + + return pkt; + %} + +## Function to get the raw headers of the currently processed packet. +## +## Returns: The :zeek:type:`raw_pkt_hdr` record containing the Layer 2, 3 and +## 4 headers of the currently processed packet. +## +## .. zeek:see:: raw_pkt_hdr get_current_packet +function get_current_packet_header%(%) : raw_pkt_hdr + %{ + const Packet* p; + + if ( current_pktsrc && + current_pktsrc->GetCurrentPacket(&p) ) + { + return p->BuildPktHdrVal(); + } + + RecordVal* hdr = new RecordVal(raw_pkt_hdr_type); + return hdr; + %} + +## Writes a given packet to a file. +## +## pkt: The PCAP packet. +## +## file_name: The name of the file to write *pkt* to. +## +## Returns: True on success +## +## .. zeek:see:: get_current_packet dump_current_packet +function dump_packet%(pkt: pcap_packet, file_name: string%) : bool + %{ + if ( addl_pkt_dumper && addl_pkt_dumper->Path() != file_name->CheckString()) + { + addl_pkt_dumper->Close(); + addl_pkt_dumper = nullptr; + } + + if ( ! addl_pkt_dumper ) + addl_pkt_dumper = iosource_mgr->OpenPktDumper(file_name->CheckString(), true); + + if ( ! addl_pkt_dumper->IsError() ) + { + pkt_timeval ts; + uint32 caplen, len, link_type; + u_char *data; + + const val_list* pkt_vl = pkt->AsRecord(); + + ts.tv_sec = (*pkt_vl)[0]->AsCount(); + ts.tv_usec = (*pkt_vl)[1]->AsCount(); + caplen = (*pkt_vl)[2]->AsCount(); + len = (*pkt_vl)[3]->AsCount(); + data = (*pkt_vl)[4]->AsString()->Bytes(); + link_type = (*pkt_vl)[5]->AsEnum(); + Packet p(link_type, &ts, caplen, len, data, true); + + addl_pkt_dumper->Dump(&p); + } + + return val_mgr->GetBool(addl_pkt_dumper && ! addl_pkt_dumper->IsError()); + %} + +%%{ +#include "DNS_Mgr.h" +#include "Trigger.h" + +class LookupHostCallback : public DNS_Mgr::LookupCallback { +public: + LookupHostCallback(Trigger* arg_trigger, const CallExpr* arg_call, + bool arg_lookup_name) + { + Ref(arg_trigger); + trigger = arg_trigger; + call = arg_call; + lookup_name = arg_lookup_name; + } + + ~LookupHostCallback() + { + Unref(trigger); + } + + // Overridden from DNS_Mgr:Lookup:Callback. + virtual void Resolved(const char* name) + { + Val* result = new StringVal(name); + trigger->Cache(call, result); + Unref(result); + trigger->Release(); + } + + virtual void Resolved(TableVal* addrs) + { + // No Ref() for addrs. 
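+		// Cache() records the lookup result for the pending when-condition's
+		// call expression; the following Release() balances the Hold() taken
+		// in the corresponding BIF before the asynchronous lookup was issued.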
+ trigger->Cache(call, addrs); + trigger->Release(); + } + + virtual void Timeout() + { + if ( lookup_name ) + { + Val* result = new StringVal("<\?\?\?>"); + trigger->Cache(call, result); + Unref(result); + } + + else + { + ListVal* lv = new ListVal(TYPE_ADDR); + lv->Append(new AddrVal("0.0.0.0")); + Val* result = lv->ConvertToSet(); + trigger->Cache(call, result); + Unref(result); + Unref(lv); + } + + trigger->Release(); + } + +private: + Trigger* trigger; + const CallExpr* call; + bool lookup_name; +}; +%%} + +## Issues an asynchronous reverse DNS lookup and delays the function result. +## This function can therefore only be called inside a ``when`` condition, +## e.g., ``when ( local host = lookup_addr(10.0.0.1) ) { f(host); }``. +## +## host: The IP address to lookup. +## +## Returns: The DNS name of *host*. +## +## .. zeek:see:: lookup_hostname +function lookup_addr%(host: addr%) : string + %{ + // FIXME: It should be easy to adapt the function to synchronous + // lookups if we're reading a trace. + Trigger* trigger = frame->GetTrigger(); + + if ( ! trigger) + { + builtin_error("lookup_addr() can only be called inside a when-condition"); + return new StringVal(""); + } + + frame->SetDelayed(); + trigger->Hold(); + + dns_mgr->AsyncLookupAddr(host->AsAddr(), + new LookupHostCallback(trigger, frame->GetCall(), true)); + return 0; + %} + +## Issues an asynchronous TEXT DNS lookup and delays the function result. +## This function can therefore only be called inside a ``when`` condition, +## e.g., ``when ( local h = lookup_hostname_txt("www.zeek.org") ) { f(h); }``. +## +## host: The hostname to lookup. +## +## Returns: The DNS TXT record associated with *host*. +## +## .. zeek:see:: lookup_hostname +function lookup_hostname_txt%(host: string%) : string + %{ + // FIXME: Is should be easy to adapt the function to synchronous + // lookups if we're reading a trace. + Trigger* trigger = frame->GetTrigger(); + + if ( ! trigger) + { + builtin_error("lookup_hostname_txt() can only be called inside a when-condition"); + return new StringVal(""); + } + + frame->SetDelayed(); + trigger->Hold(); + + dns_mgr->AsyncLookupNameText(host->CheckString(), + new LookupHostCallback(trigger, frame->GetCall(), true)); + return 0; + %} + +## Issues an asynchronous DNS lookup and delays the function result. +## This function can therefore only be called inside a ``when`` condition, +## e.g., ``when ( local h = lookup_hostname("www.zeek.org") ) { f(h); }``. +## +## host: The hostname to lookup. +## +## Returns: A set of DNS A and AAAA records associated with *host*. +## +## .. zeek:see:: lookup_addr +function lookup_hostname%(host: string%) : addr_set + %{ + // FIXME: Is should be easy to adapt the function to synchronous + // lookups if we're reading a trace. + Trigger* trigger = frame->GetTrigger(); + + if ( ! 
trigger) + { + builtin_error("lookup_hostname() can only be called inside a when-condition"); + return new StringVal(""); + } + + frame->SetDelayed(); + trigger->Hold(); + + dns_mgr->AsyncLookupName(host->CheckString(), + new LookupHostCallback(trigger, frame->GetCall(), false)); + return 0; + %} + +%%{ +#ifdef USE_GEOIP +#include + +extern "C" { +#include +#include +#include +#include +#include +} + +class MMDB { +public: + MMDB(const char* filename, struct stat info); + + ~MMDB(); + + MMDB_lookup_result_s Lookup(const struct sockaddr* const sa); + bool StaleDB(); + const char* Filename(); + +private: + MMDB_s mmdb; + struct stat file_info; + bool lookup_error; + std::chrono::time_point last_check; +}; + +MMDB::MMDB(const char* filename, struct stat info) + : file_info(info), lookup_error{false}, + last_check{std::chrono::steady_clock::now()} + { + int status = MMDB_open(filename, MMDB_MODE_MMAP, &mmdb); + + if ( MMDB_SUCCESS != status ) + { + throw std::runtime_error(MMDB_strerror(status)); + } + } + +MMDB::~MMDB() + { + MMDB_close(&mmdb); + } + +MMDB_lookup_result_s MMDB::Lookup(const struct sockaddr* const sa) + { + int mmdb_error; + MMDB_lookup_result_s result = MMDB_lookup_sockaddr(&mmdb, sa, &mmdb_error); + + if ( MMDB_SUCCESS != mmdb_error ) + { + lookup_error = true; + throw std::runtime_error(MMDB_strerror(mmdb_error)); + } + + return result; + } + +// Check to see if the Maxmind DB should be closed and reopened. This will +// happen if there was a lookup error or if the mmap'd file has been replaced +// by an external process. +bool MMDB::StaleDB() + { + struct stat buf; + using Clock = std::chrono::steady_clock; + std::chrono::time_point now = Clock::now(); + + if ( lookup_error ) + return true; + + // Only perform stat once per 5 minutes. 
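+	// The steady_clock timestamp below rate-limits the stat()/inode comparison
+	// so that most lookups skip touching the filesystem.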
+ using Min = std::chrono::minutes; + if ( std::chrono::duration_cast(now - last_check).count() < 5 ) + return false; + + last_check = now; + + if ( 0 != stat(mmdb.filename, &buf) ) + return true; + + if ( buf.st_ino != file_info.st_ino || buf.st_mtime != file_info.st_mtime ) + { + reporter->Info("Inode change detected for MaxMind DB [%s]", + mmdb.filename); + return true; + } + + return false; + } + +const char* MMDB::Filename() + { + return mmdb.filename; + } + +std::unique_ptr mmdb_loc; +std::unique_ptr mmdb_asn; +static bool did_mmdb_loc_db_error = false; +static bool did_mmdb_asn_db_error = false; + +static bool mmdb_open(const char* filename, bool asn) + { + struct stat buf; + + if ( 0 != stat(filename, &buf) ) + { + return false; + } + + try + { + if ( asn ) + { + mmdb_asn.reset(new MMDB(filename, buf)); + } + else + { + mmdb_loc.reset(new MMDB(filename, buf)); + } + } + + catch ( const std::exception& e ) + { + if ( asn ) + did_mmdb_asn_db_error = false; + else + did_mmdb_loc_db_error = false; + + reporter->Info("Failed to open MaxMind DB: %s [%s]", filename, + e.what()); + return false; + } + + return true; + } + +static bool mmdb_open_loc(const char* filename) + { + return mmdb_open(filename, false); + } + +static bool mmdb_open_asn(const char* filename) + { + return mmdb_open(filename, true); + } + +static void mmdb_check_loc() + { + if ( mmdb_loc && mmdb_loc->StaleDB() ) + { + reporter->Info("Closing stale MaxMind DB [%s]", mmdb_loc->Filename()); + did_mmdb_loc_db_error = false; + mmdb_loc.release(); + } + } + +static void mmdb_check_asn() + { + if ( mmdb_asn && mmdb_asn->StaleDB() ) + { + reporter->Info("Closing stale MaxMind DB [%s]", mmdb_asn->Filename()); + did_mmdb_asn_db_error = false; + mmdb_asn.release(); + } + } + +static bool mmdb_lookup(const IPAddr& addr, MMDB_lookup_result_s& result, + bool asn) + { + struct sockaddr_storage ss = {0}; + + if ( IPv4 == addr.GetFamily() ) + { + struct sockaddr_in* sa = (struct sockaddr_in*)&ss; + sa->sin_family = AF_INET; + addr.CopyIPv4(&sa->sin_addr); + } + + else + { + struct sockaddr_in6* sa = (struct sockaddr_in6*)&ss; + sa->sin6_family = AF_INET6; + addr.CopyIPv6(&sa->sin6_addr); + } + + try + { + result = asn ? 
mmdb_asn->Lookup((struct sockaddr*)&ss) + : mmdb_loc->Lookup((struct sockaddr*)&ss); + } + + catch ( const std::exception& e ) + { + reporter->Info("MaxMind DB lookup location error [%s]", + e.what()); + return false; + } + + return result.found_entry; + } + +static bool mmdb_lookup_loc(const IPAddr& addr, MMDB_lookup_result_s& result) + { + return mmdb_lookup(addr, result, false); + } + +static bool mmdb_lookup_asn(const IPAddr& addr, MMDB_lookup_result_s& result) + { + return mmdb_lookup(addr, result, true); + } + +static Val* mmdb_getvalue(MMDB_entry_data_s* entry_data, int status, + int data_type ) + { + switch (status) + { + case MMDB_SUCCESS: + if ( entry_data->has_data ) + { + switch (data_type) + { + case MMDB_DATA_TYPE_UTF8_STRING: + return new StringVal(entry_data->data_size, + entry_data->utf8_string); + break; + + case MMDB_DATA_TYPE_DOUBLE: + return new Val(entry_data->double_value, TYPE_DOUBLE); + break; + + case MMDB_DATA_TYPE_UINT32: + return val_mgr->GetCount(entry_data->uint32); + + default: + break; + } + } + break; + + case MMDB_LOOKUP_PATH_DOES_NOT_MATCH_DATA_ERROR: + // key doesn't exist, nothing to do + break; + + default: + reporter->Info("MaxMind DB error [%s]", MMDB_strerror(status)); + break; + } + + return nullptr; + } + +static bool mmdb_try_open_loc () + { + // City database is always preferred over Country database. + auto mmdb_dir_val = global_scope()->Lookup("mmdb_dir")->ID_Val(); + std::string mmdb_dir = mmdb_dir_val->AsString()->CheckString(); + + if ( ! mmdb_dir.empty() ) + { + auto d = mmdb_dir + "/GeoLite2-City.mmdb"; + + if ( mmdb_open_loc(d.data()) ) + return true; + + d = mmdb_dir + "/GeoLite2-Country.mmdb"; + + if ( mmdb_open_loc(d.data()) ) + return true;; + } + + return mmdb_open_loc("/usr/share/GeoIP/GeoLite2-City.mmdb") + || mmdb_open_loc("/var/lib/GeoIP/GeoLite2-City.mmdb") + || mmdb_open_loc("/usr/local/share/GeoIP/GeoLite2-City.mmdb") + || mmdb_open_loc("/usr/local/var/GeoIP/GeoLite2-City.mmdb") + || mmdb_open_loc("/usr/share/GeoIP/GeoLite2-Country.mmdb") + || mmdb_open_loc("/var/lib/GeoIP/GeoLite2-Country.mmdb") + || mmdb_open_loc("/usr/local/share/GeoIP/GeoLite2-Country.mmdb") + || mmdb_open_loc("/usr/local/var/GeoIP/GeoLite2-Country.mmdb"); + } + +static bool mmdb_try_open_asn () + { + auto mmdb_dir_val = global_scope()->Lookup("mmdb_dir")->ID_Val(); + std::string mmdb_dir = mmdb_dir_val->AsString()->CheckString(); + + if ( ! mmdb_dir.empty() ) + { + auto d = mmdb_dir + "/GeoLite2-ASN.mmdb"; + + if ( mmdb_open_asn(d.data()) ) + return true; + } + + return mmdb_open_asn("/usr/share/GeoIP/GeoLite2-ASN.mmdb") + || mmdb_open_asn("/var/lib/GeoIP/GeoLite2-ASN.mmdb") + || mmdb_open_asn("/usr/local/share/GeoIP/GeoLite2-ASN.mmdb") + || mmdb_open_asn("/usr/local/var/GeoIP/GeoLite2-ASN.mmdb"); + } + +#endif +%%} + +## Initializes MMDB for later use of lookup_location. +## Requires Zeek to be built with ``libmaxminddb``. +## +## f: The filename of the MaxMind City or Country DB. +## +## Returns: A boolean indicating whether the db was successfully opened. +## +## .. zeek:see:: lookup_asn +function mmdb_open_location_db%(f: string%) : bool + %{ +#ifdef USE_GEOIP + return val_mgr->GetBool(mmdb_open_loc(f->CheckString())); +#else + return val_mgr->GetBool(0); +#endif + %} + +## Initializes MMDB for later use of lookup_asn. +## Requires Zeek to be built with ``libmaxminddb``. +## +## f: The filename of the MaxMind ASN DB. +## +## Returns: A boolean indicating whether the db was successfully opened. +## +## .. 
zeek:see:: lookup_asn +function mmdb_open_asn_db%(f: string%) : bool + %{ +#ifdef USE_GEOIP + return val_mgr->GetBool(mmdb_open_asn(f->CheckString())); +#else + return val_mgr->GetBool(0); +#endif + %} + +## Performs a geo-lookup of an IP address. +## Requires Zeek to be built with ``libmaxminddb``. +## +## a: The IP address to lookup. +## +## Returns: A record with country, region, city, latitude, and longitude. +## +## .. zeek:see:: lookup_asn +function lookup_location%(a: addr%) : geo_location + %{ + RecordVal* location = new RecordVal(geo_location); + +#ifdef USE_GEOIP + mmdb_check_loc(); + if ( ! mmdb_loc ) + { + if ( ! mmdb_try_open_loc() ) + { + if ( ! did_mmdb_loc_db_error ) + { + did_mmdb_loc_db_error = true; + builtin_error("Failed to open GeoIP location database"); + } + + return location; + } + } + + MMDB_lookup_result_s result; + + if ( mmdb_lookup_loc(a->AsAddr(), result) ) + { + MMDB_entry_data_s entry_data; + int status; + + // Get Country ISO Code + status = MMDB_get_value(&result.entry, &entry_data, + "country", "iso_code", nullptr); + location->Assign(0, mmdb_getvalue(&entry_data, status, + MMDB_DATA_TYPE_UTF8_STRING)); + + // Get Major Subdivision ISO Code + status = MMDB_get_value(&result.entry, &entry_data, + "subdivisions", "0", "iso_code", nullptr); + location->Assign(1, mmdb_getvalue(&entry_data, status, + MMDB_DATA_TYPE_UTF8_STRING)); + + // Get City English Name + status = MMDB_get_value(&result.entry, &entry_data, + "city", "names", "en", nullptr); + location->Assign(2, mmdb_getvalue(&entry_data, status, + MMDB_DATA_TYPE_UTF8_STRING)); + + // Get Location Latitude + status = MMDB_get_value(&result.entry, &entry_data, + "location", "latitude", nullptr); + location->Assign(3, mmdb_getvalue(&entry_data, status, + MMDB_DATA_TYPE_DOUBLE)); + + // Get Location Longitude + status = MMDB_get_value(&result.entry, &entry_data, + "location", "longitude", nullptr); + location->Assign(4, mmdb_getvalue(&entry_data, status, + MMDB_DATA_TYPE_DOUBLE)); + + return location; + } + +#else // not USE_GEOIP + static int missing_geoip_reported = 0; + + if ( ! missing_geoip_reported ) + { + builtin_error("Zeek was not configured for GeoIP support"); + missing_geoip_reported = 1; + } +#endif + + // We can get here even if we have MMDB support if we weren't + // able to initialize it or it didn't return any information for + // the address. + + return location; + %} + +## Performs an ASN lookup of an IP address. +## Requires Zeek to be built with ``libmaxminddb``. +## +## a: The IP address to lookup. +## +## Returns: The number of the ASN that contains *a*. +## +## .. zeek:see:: lookup_location +function lookup_asn%(a: addr%) : count + %{ +#ifdef USE_GEOIP + mmdb_check_asn(); + if ( ! mmdb_asn ) + { + if ( ! mmdb_try_open_asn() ) + { + if ( ! did_mmdb_asn_db_error ) + { + did_mmdb_asn_db_error = true; + builtin_error("Failed to open GeoIP ASN database"); + } + + return val_mgr->GetCount(0); + } + } + + MMDB_lookup_result_s result; + + if ( mmdb_lookup_asn(a->AsAddr(), result) ) + { + MMDB_entry_data_s entry_data; + int status; + + // Get Autonomous System Number + status = MMDB_get_value(&result.entry, &entry_data, + "autonomous_system_number", nullptr); + Val* asn = mmdb_getvalue(&entry_data, status, MMDB_DATA_TYPE_UINT32); + return asn == nullptr ? val_mgr->GetCount(0) : asn; + } + +#else // not USE_GEOIP + static int missing_geoip_reported = 0; + + if ( ! 
missing_geoip_reported ) + { + builtin_error("Zeek was not configured for GeoIP ASN support"); + missing_geoip_reported = 1; + } +#endif + + // We can get here even if we have GeoIP support, if we weren't + // able to initialize it or it didn't return any information for + // the address. + return val_mgr->GetCount(0); + %} + +## Calculates distance between two geographic locations using the haversine +## formula. Latitudes and longitudes must be given in degrees, where southern +## hemispere latitudes are negative and western hemisphere longitudes are +## negative. +## +## lat1: Latitude (in degrees) of location 1. +## +## long1: Longitude (in degrees) of location 1. +## +## lat2: Latitude (in degrees) of location 2. +## +## long2: Longitude (in degrees) of location 2. +## +## Returns: Distance in miles. +## +## .. zeek:see:: haversine_distance_ip +function haversine_distance%(lat1: double, long1: double, lat2: double, long2: double%): double + %{ + const double PI = 3.14159; + const double RADIUS = 3958.8; // Earth's radius in miles. + + double s1 = sin((lat2 - lat1) * PI/360); + double s2 = sin((long2 - long1) * PI/360); + double a = s1 * s1 + cos(lat1 * PI/180) * cos(lat2 * PI/180) * s2 * s2; + double distance = 2 * RADIUS * asin(sqrt(a)); + + return new Val(distance, TYPE_DOUBLE); + %} + +## Converts UNIX file permissions given by a mode to an ASCII string. +## +## mode: The permissions (an octal number like 0644 converted to decimal). +## +## Returns: A string representation of *mode* in the format +## ``rw[xsS]rw[xsS]rw[xtT]``. +function file_mode%(mode: count%): string + %{ + char str[12]; + char *p = str; + + /* usr */ + if (mode & S_IRUSR) + *p++ = 'r'; + else + *p++ = '-'; + + if (mode & S_IWUSR) + *p++ = 'w'; + else + *p++ = '-'; + + switch (mode & (S_IXUSR | S_ISUID)) { + case 0: + *p++ = '-'; + break; + case S_IXUSR: + *p++ = 'x'; + break; + case S_ISUID: + *p++ = 'S'; + break; + case S_IXUSR | S_ISUID: + *p++ = 's'; + break; + } + + /* group */ + if (mode & S_IRGRP) + *p++ = 'r'; + else + *p++ = '-'; + if (mode & S_IWGRP) + *p++ = 'w'; + else + *p++ = '-'; + + switch (mode & (S_IXGRP | S_ISGID)) { + case 0: + *p++ = '-'; + break; + case S_IXGRP: + *p++ = 'x'; + break; + case S_ISGID: + *p++ = 'S'; + break; + case S_IXGRP | S_ISGID: + *p++ = 's'; + break; + } + + /* other */ + if (mode & S_IROTH) + *p++ = 'r'; + else + *p++ = '-'; + if (mode & S_IWOTH) + *p++ = 'w'; + else + *p++ = '-'; + + switch (mode & (S_IXOTH | S_ISVTX)) { + case 0: + *p++ = '-'; + break; + case S_IXOTH: + *p++ = 'x'; + break; + case S_ISVTX: + *p++ = 'T'; + break; + case S_IXOTH | S_ISVTX: + *p++ = 't'; + break; + } + + *p = '\0'; + + return new StringVal(str); + %} + +# =========================================================================== +# +# Controlling Analyzer Behavior +# +# =========================================================================== + +%%{ +#include "analyzer/Manager.h" +%%} + +## Disables the analyzer which raised the current event (if the analyzer +## belongs to the given connection). +## +## cid: The connection identifier. +## +## aid: The analyzer ID. +## +## Returns: True if the connection identified by *cid* exists and has analyzer +## *aid*. +## +## .. zeek:see:: Analyzer::schedule_analyzer Analyzer::name +function disable_analyzer%(cid: conn_id, aid: count, err_if_no_conn: bool &default=T%) : bool + %{ + Connection* c = sessions->FindConnection(cid); + if ( ! 
c ) + { + reporter->Error("cannot find connection"); + return val_mgr->GetBool(0); + } + + analyzer::Analyzer* a = c->FindAnalyzer(aid); + if ( ! a ) + { + if ( err_if_no_conn ) + reporter->Error("connection does not have analyzer specified to disable"); + return val_mgr->GetBool(0); + } + + a->Remove(); + return val_mgr->GetBool(1); + %} + +## Informs Zeek that it should skip any further processing of the contents of +## a given connection. In particular, Zeek will refrain from reassembling the +## TCP byte stream and from generating events relating to any analyzers that +## have been processing the connection. +## +## cid: The connection ID. +## +## Returns: False if *cid* does not point to an active connection, and true +## otherwise. +## +## .. note:: +## +## Zeek will still generate connection-oriented events such as +## :zeek:id:`connection_finished`. +function skip_further_processing%(cid: conn_id%): bool + %{ + Connection* c = sessions->FindConnection(cid); + if ( ! c ) + return val_mgr->GetBool(0); + + c->SetSkip(1); + return val_mgr->GetBool(1); + %} + +## Controls whether packet contents belonging to a connection should be +## recorded (when ``-w`` option is provided on the command line). +## +## cid: The connection identifier. +## +## do_record: True to enable packet contents, and false to disable for the +## connection identified by *cid*. +## +## Returns: False if *cid* does not point to an active connection, and true +## otherwise. +## +## .. zeek:see:: skip_further_processing +## +## .. note:: +## +## This is independent of whether Zeek processes the packets of this +## connection, which is controlled separately by +## :zeek:id:`skip_further_processing`. +## +## .. zeek:see:: get_contents_file set_contents_file +function set_record_packets%(cid: conn_id, do_record: bool%): bool + %{ + Connection* c = sessions->FindConnection(cid); + if ( ! c ) + return val_mgr->GetBool(0); + + c->SetRecordPackets(do_record); + return val_mgr->GetBool(1); + %} + +## Sets an individual inactivity timeout for a connection and thus +## overrides the global inactivity timeout. +## +## cid: The connection ID. +## +## t: The new inactivity timeout for the connection identified by *cid*. +## +## Returns: The previous timeout interval. +function set_inactivity_timeout%(cid: conn_id, t: interval%): interval + %{ + Connection* c = sessions->FindConnection(cid); + if ( ! c ) + return new Val(0.0, TYPE_INTERVAL); + + double old_timeout = c->InactivityTimeout(); + c->SetInactivityTimeout(t); + + return new Val(old_timeout, TYPE_INTERVAL); + %} + +# =========================================================================== +# +# Files and Directories +# +# =========================================================================== + +## Opens a file for writing. If a file with the same name already exists, this +## function overwrites it (as opposed to :zeek:id:`open_for_append`). +## +## f: The path to the file. +## +## Returns: A :zeek:type:`file` handle for subsequent operations. +## +## .. zeek:see:: active_file open_for_append close write_file +## get_file_name set_buf flush_all mkdir enable_raw_output +## rmdir unlink rename +function open%(f: string%): file + %{ + const char* file = f->CheckString(); + + if ( streq(file, "-") ) + return new Val(new BroFile(stdout, "-", "w")); + else + return new Val(new BroFile(file, "w")); + %} + +## Opens a file for writing or appending. If a file with the same name already +## exists, this function appends to it (as opposed to :zeek:id:`open`). 
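+##
+## For example (an illustrative sketch; the file name and message are arbitrary)::
+##
+##     local f = open_for_append("notes.log");
+##     write_file(f, "one more line\n");
+##     close(f);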
+## +## f: The path to the file. +## +## Returns: A :zeek:type:`file` handle for subsequent operations. +## +## .. zeek:see:: active_file open close write_file +## get_file_name set_buf flush_all mkdir enable_raw_output +## rmdir unlink rename +function open_for_append%(f: string%): file + %{ + return new Val(new BroFile(f->CheckString(), "a")); + %} + +## Closes an open file and flushes any buffered content. +## +## f: A :zeek:type:`file` handle to an open file. +## +## Returns: True on success. +## +## .. zeek:see:: active_file open open_for_append write_file +## get_file_name set_buf flush_all mkdir enable_raw_output +## rmdir unlink rename +function close%(f: file%): bool + %{ + return val_mgr->GetBool(f->Close()); + %} + +## Writes data to an open file. +## +## f: A :zeek:type:`file` handle to an open file. +## +## data: The data to write to *f*. +## +## Returns: True on success. +## +## .. zeek:see:: active_file open open_for_append close +## get_file_name set_buf flush_all mkdir enable_raw_output +## rmdir unlink rename +function write_file%(f: file, data: string%): bool + %{ + if ( ! f ) + return val_mgr->GetBool(0); + + return val_mgr->GetBool(f->Write((const char*) data->Bytes(), data->Len())); + %} + +## Alters the buffering behavior of a file. +## +## f: A :zeek:type:`file` handle to an open file. +## +## buffered: When true, *f* is fully buffered, i.e., bytes are saved in a +## buffer until the block size has been reached. When +## false, *f* is line buffered, i.e., bytes are saved up until a +## newline occurs. +## +## .. zeek:see:: active_file open open_for_append close +## get_file_name write_file flush_all mkdir enable_raw_output +## rmdir unlink rename +function set_buf%(f: file, buffered: bool%): any + %{ + f->SetBuf(buffered); + return val_mgr->GetTrue(); + %} + +## Flushes all open files to disk. +## +## Returns: True on success. +## +## .. zeek:see:: active_file open open_for_append close +## get_file_name write_file set_buf mkdir enable_raw_output +## rmdir unlink rename +function flush_all%(%): bool + %{ + return val_mgr->GetBool(fflush(0) == 0); + %} + +## Creates a new directory. +## +## f: The directory name. +## +## Returns: True if the operation succeeds or if *f* already exists, +## and false if the file creation fails. +## +## .. zeek:see:: active_file open_for_append close write_file +## get_file_name set_buf flush_all enable_raw_output +## rmdir unlink rename +function mkdir%(f: string%): bool + %{ + const char* filename = f->CheckString(); + + if ( mkdir(filename, 0777) < 0 ) + { + int error = errno; + struct stat filestat; + // check if already exists and is directory. + if ( errno == EEXIST && stat(filename, &filestat) == 0 + && S_ISDIR(filestat.st_mode) ) + return val_mgr->GetBool(1); + + builtin_error(fmt("cannot create directory '%s': %s", filename, + strerror(error))); + return val_mgr->GetBool(0); + } + else + return val_mgr->GetBool(1); + %} + + +## Removes a directory. +## +## d: The directory name. +## +## Returns: True if the operation succeeds, and false if the +## directory delete operation fails. +## +## .. 
zeek:see:: active_file open_for_append close write_file +## get_file_name set_buf flush_all enable_raw_output +## mkdir unlink rename +function rmdir%(d: string%): bool + %{ + const char* dirname = d->CheckString(); + + if ( rmdir(dirname) < 0 ) + { + builtin_error(fmt("cannot remove directory '%s': %s", dirname, + strerror(errno))); + return val_mgr->GetBool(0); + } + else + return val_mgr->GetBool(1); + %} + +## Removes a file from a directory. +## +## f: the file to delete. +## +## Returns: True if the operation succeeds and the file was deleted, +## and false if the deletion fails. +## +## .. zeek:see:: active_file open_for_append close write_file +## get_file_name set_buf flush_all enable_raw_output +## mkdir rmdir rename +function unlink%(f: string%): bool + %{ + const char* filename = f->CheckString(); + + if ( unlink(filename) < 0 ) + { + builtin_error(fmt("cannot unlink file '%s': %s", filename, + strerror(errno))); + return val_mgr->GetBool(0); + } + else + return val_mgr->GetBool(1); + %} + +## Renames a file from src_f to dst_f. +## +## src_f: the name of the file to rename. +## +## dest_f: the name of the file after the rename operation. +## +## Returns: True if the rename succeeds and false otherwise. +## +## .. zeek:see:: active_file open_for_append close write_file +## get_file_name set_buf flush_all enable_raw_output +## mkdir rmdir unlink +function rename%(src_f: string, dst_f: string%): bool + %{ + const char* src_filename = src_f->CheckString(); + const char* dst_filename = dst_f->CheckString(); + + if ( rename(src_filename, dst_filename) < 0 ) + { + builtin_error(fmt("cannot rename file '%s' to '%s': %s", src_filename, + dst_filename, strerror(errno))); + return val_mgr->GetBool(0); + } + else + return val_mgr->GetBool(1); + %} + +## Checks whether a given file is open. +## +## f: The file to check. +## +## Returns: True if *f* is an open :zeek:type:`file`. +## +## .. todo:: Rename to ``is_open``. +function active_file%(f: file%): bool + %{ + return val_mgr->GetBool(f->IsOpen()); + %} + +## Gets the filename associated with a file handle. +## +## f: The file handle to inquire the name for. +## +## Returns: The filename associated with *f*. +## +## .. zeek:see:: open +function get_file_name%(f: file%): string + %{ + if ( ! f ) + return val_mgr->GetEmptyString(); + + return new StringVal(f->Name()); + %} + +## Rotates a file. +## +## f: An open file handle. +## +## Returns: Rotation statistics which include the original file name, the name +## after the rotation, and the time when *f* was opened/closed. +## +## .. zeek:see:: rotate_file_by_name calc_next_rotate +function rotate_file%(f: file%): rotate_info &deprecated + %{ + RecordVal* info = f->Rotate(); + if ( info ) + return info; + + // Record indicating error. + info = new RecordVal(rotate_info); + info->Assign(0, val_mgr->GetEmptyString()); + info->Assign(1, val_mgr->GetEmptyString()); + info->Assign(2, new Val(0.0, TYPE_TIME)); + info->Assign(3, new Val(0.0, TYPE_TIME)); + + return info; + %} + +## Rotates a file identified by its name. +## +## f: The name of the file to rotate +## +## Returns: Rotation statistics which include the original file name, the name +## after the rotation, and the time when *f* was opened/closed. +## +## .. zeek:see:: rotate_file calc_next_rotate +function rotate_file_by_name%(f: string%): rotate_info &deprecated + %{ + RecordVal* info = new RecordVal(rotate_info); + + bool is_pkt_dumper = false; + bool is_addl_pkt_dumper = false; + + // Special case: one of current dump files. 
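+	// If the name matches one of the active packet dumpers, close that dumper
+	// first so rotate_file() below can move the file; its open time is recorded
+	// in the result (and the primary dumper reopened) after rotation.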
+ if ( pkt_dumper && streq(pkt_dumper->Path().c_str(), f->CheckString()) ) + { + is_pkt_dumper = true; + pkt_dumper->Close(); + } + + if ( addl_pkt_dumper && + streq(addl_pkt_dumper->Path().c_str(), f->CheckString()) ) + { + is_addl_pkt_dumper = true; + addl_pkt_dumper->Close(); + } + + FILE* file = rotate_file(f->CheckString(), info); + if ( ! file ) + { + // Record indicating error. + info->Assign(0, val_mgr->GetEmptyString()); + info->Assign(1, val_mgr->GetEmptyString()); + info->Assign(2, new Val(0.0, TYPE_TIME)); + info->Assign(3, new Val(0.0, TYPE_TIME)); + return info; + } + + fclose(file); + + if ( is_pkt_dumper ) + { + info->Assign(2, new Val(pkt_dumper->OpenTime(), TYPE_TIME)); + pkt_dumper->Open(); + } + + if ( is_addl_pkt_dumper ) + info->Assign(2, new Val(addl_pkt_dumper->OpenTime(), TYPE_TIME)); + + return info; + %} + +## Calculates the duration until the next time a file is to be rotated, based +## on a given rotate interval. +## +## i: The rotate interval to base the calculation on. +## +## Returns: The duration until the next file rotation time. +## +## .. zeek:see:: rotate_file rotate_file_by_name +function calc_next_rotate%(i: interval%) : interval &deprecated + %{ + const char* base_time = log_rotate_base_time ? + log_rotate_base_time->AsString()->CheckString() : 0; + + double base = parse_rotate_base_time(base_time); + return new Val(calc_next_rotate(network_time, i, base), TYPE_INTERVAL); + %} + +## Returns the size of a given file. +## +## f: The name of the file whose size to lookup. +## +## Returns: The size of *f* in bytes. +function file_size%(f: string%) : double + %{ + struct stat s; + + if ( stat(f->CheckString(), &s) < 0 ) + return new Val(-1.0, TYPE_DOUBLE); + + return new Val(double(s.st_size), TYPE_DOUBLE); + %} + +## Disables sending :zeek:id:`print_hook` events to remote peers for a given +## file. In a +## distributed setup, communicating Zeek instances generate the event +## :zeek:id:`print_hook` for each print statement and send it to the remote +## side. When disabled for a particular file, these events will not be +## propagated to other peers. +## +## f: The file to disable :zeek:id:`print_hook` events for. +## +## .. zeek:see:: enable_raw_output +function disable_print_hook%(f: file%): any + %{ + f->DisablePrintHook(); + return 0; + %} + +## Prevents escaping of non-ASCII characters when writing to a file. +## This function is equivalent to :zeek:attr:`&raw_output`. +## +## f: The file to disable raw output for. +## +## .. zeek:see:: disable_print_hook +function enable_raw_output%(f: file%): any + %{ + f->EnableRawOutput(); + return 0; + %} + +# =========================================================================== +# +# Packet Filtering +# +# =========================================================================== + +## Installs a filter to drop packets from a given IP source address with +## a certain probability if none of a given set of TCP flags are set. +## Note that for IPv6 packets with a Destination options header that has +## the Home Address option, this filters out against that home address. +## +## ip: The IP address to drop. +## +## tcp_flags: If none of these TCP flags are set, drop packets from *ip* with +## probability *prob*. +## +## prob: The probability [0.0, 1.0] used to drop packets from *ip*. +## +## Returns: True (unconditionally). +## +## .. 
zeek:see:: Pcap::precompile_pcap_filter +## Pcap::install_pcap_filter +## install_src_net_filter +## uninstall_src_addr_filter +## uninstall_src_net_filter +## install_dst_addr_filter +## install_dst_net_filter +## uninstall_dst_addr_filter +## uninstall_dst_net_filter +## Pcap::error +## +## .. todo:: The return value should be changed to any. +function install_src_addr_filter%(ip: addr, tcp_flags: count, prob: double%) : bool + %{ + sessions->GetPacketFilter()->AddSrc(ip->AsAddr(), tcp_flags, prob); + return val_mgr->GetBool(1); + %} + +## Installs a filter to drop packets originating from a given subnet with +## a certain probability if none of a given set of TCP flags are set. +## +## snet: The subnet to drop packets from. +## +## tcp_flags: If none of these TCP flags are set, drop packets from *snet* with +## probability *prob*. +## +## prob: The probability [0.0, 1.0] used to drop packets from *snet*. +## +## Returns: True (unconditionally). +## +## .. zeek:see:: Pcap::precompile_pcap_filter +## Pcap::install_pcap_filter +## install_src_addr_filter +## uninstall_src_addr_filter +## uninstall_src_net_filter +## install_dst_addr_filter +## install_dst_net_filter +## uninstall_dst_addr_filter +## uninstall_dst_net_filter +## Pcap::error +## +## .. todo:: The return value should be changed to any. +function install_src_net_filter%(snet: subnet, tcp_flags: count, prob: double%) : bool + %{ + sessions->GetPacketFilter()->AddSrc(snet, tcp_flags, prob); + return val_mgr->GetBool(1); + %} + +## Removes a source address filter. +## +## ip: The IP address for which a source filter was previously installed. +## +## Returns: True on success. +## +## .. zeek:see:: Pcap::precompile_pcap_filter +## Pcap::install_pcap_filter +## install_src_addr_filter +## install_src_net_filter +## uninstall_src_net_filter +## install_dst_addr_filter +## install_dst_net_filter +## uninstall_dst_addr_filter +## uninstall_dst_net_filter +## Pcap::error +function uninstall_src_addr_filter%(ip: addr%) : bool + %{ + return val_mgr->GetBool(sessions->GetPacketFilter()->RemoveSrc(ip->AsAddr())); + %} + +## Removes a source subnet filter. +## +## snet: The subnet for which a source filter was previously installed. +## +## Returns: True on success. +## +## .. zeek:see:: Pcap::precompile_pcap_filter +## Pcap::install_pcap_filter +## install_src_addr_filter +## install_src_net_filter +## uninstall_src_addr_filter +## install_dst_addr_filter +## install_dst_net_filter +## uninstall_dst_addr_filter +## uninstall_dst_net_filter +## Pcap::error +function uninstall_src_net_filter%(snet: subnet%) : bool + %{ + return val_mgr->GetBool(sessions->GetPacketFilter()->RemoveSrc(snet)); + %} + +## Installs a filter to drop packets destined to a given IP address with +## a certain probability if none of a given set of TCP flags are set. +## Note that for IPv6 packets with a routing type header and non-zero +## segments left, this filters out against the final destination of the +## packet according to the routing extension header. +## +## ip: Drop packets to this IP address. +## +## tcp_flags: If none of these TCP flags are set, drop packets to *ip* with +## probability *prob*. +## +## prob: The probability [0.0, 1.0] used to drop packets to *ip*. +## +## Returns: True (unconditionally). +## +## .. 
zeek:see:: Pcap::precompile_pcap_filter +## Pcap::install_pcap_filter +## install_src_addr_filter +## install_src_net_filter +## uninstall_src_addr_filter +## uninstall_src_net_filter +## install_dst_net_filter +## uninstall_dst_addr_filter +## uninstall_dst_net_filter +## Pcap::error +## +## .. todo:: The return value should be changed to any. +function install_dst_addr_filter%(ip: addr, tcp_flags: count, prob: double%) : bool + %{ + sessions->GetPacketFilter()->AddDst(ip->AsAddr(), tcp_flags, prob); + return val_mgr->GetBool(1); + %} + +## Installs a filter to drop packets destined to a given subnet with +## a certain probability if none of a given set of TCP flags are set. +## +## snet: Drop packets to this subnet. +## +## tcp_flags: If none of these TCP flags are set, drop packets to *snet* with +## probability *prob*. +## +## prob: The probability [0.0, 1.0] used to drop packets to *snet*. +## +## Returns: True (unconditionally). +## +## .. zeek:see:: Pcap::precompile_pcap_filter +## Pcap::install_pcap_filter +## install_src_addr_filter +## install_src_net_filter +## uninstall_src_addr_filter +## uninstall_src_net_filter +## install_dst_addr_filter +## uninstall_dst_addr_filter +## uninstall_dst_net_filter +## Pcap::error +## +## .. todo:: The return value should be changed to any. +function install_dst_net_filter%(snet: subnet, tcp_flags: count, prob: double%) : bool + %{ + sessions->GetPacketFilter()->AddDst(snet, tcp_flags, prob); + return val_mgr->GetBool(1); + %} + +## Removes a destination address filter. +## +## ip: The IP address for which a destination filter was previously installed. +## +## Returns: True on success. +## +## .. zeek:see:: Pcap::precompile_pcap_filter +## Pcap::install_pcap_filter +## install_src_addr_filter +## install_src_net_filter +## uninstall_src_addr_filter +## uninstall_src_net_filter +## install_dst_addr_filter +## install_dst_net_filter +## uninstall_dst_net_filter +## Pcap::error +function uninstall_dst_addr_filter%(ip: addr%) : bool + %{ + return val_mgr->GetBool(sessions->GetPacketFilter()->RemoveDst(ip->AsAddr())); + %} + +## Removes a destination subnet filter. +## +## snet: The subnet for which a destination filter was previously installed. +## +## Returns: True on success. +## +## .. zeek:see:: Pcap::precompile_pcap_filter +## Pcap::install_pcap_filter +## install_src_addr_filter +## install_src_net_filter +## uninstall_src_addr_filter +## uninstall_src_net_filter +## install_dst_addr_filter +## install_dst_net_filter +## uninstall_dst_addr_filter +## Pcap::error +function uninstall_dst_net_filter%(snet: subnet%) : bool + %{ + return val_mgr->GetBool(sessions->GetPacketFilter()->RemoveDst(snet)); + %} + +## Checks whether the last raised event came from a remote peer. +## +## Returns: True if the last raised event came from a remote peer. +function is_remote_event%(%) : bool + %{ + return val_mgr->GetBool(mgr.CurrentSource() != SOURCE_LOCAL); + %} + +## Stops Zeek's packet processing. This function is used to synchronize +## distributed trace processing with communication enabled +## (*pseudo-realtime* mode). +## +## .. zeek:see:: continue_processing +function suspend_processing%(%) : any + %{ + net_suspend_processing(); + return 0; + %} + +## Resumes Zeek's packet processing. +## +## .. 
zeek:see:: suspend_processing +function continue_processing%(%) : any + %{ + net_continue_processing(); + return 0; + %} + +# =========================================================================== +# +# Internal Functions +# +# =========================================================================== + +## Manually triggers the signature engine for a given connection. +## This is an internal function. +function match_signatures%(c: connection, pattern_type: int, s: string, + bol: bool, eol: bool, + from_orig: bool, clear: bool%) : bool + %{ + if ( ! rule_matcher ) + return val_mgr->GetBool(0); + + c->Match((Rule::PatternType) pattern_type, s->Bytes(), s->Len(), + from_orig, bol, eol, clear); + + return val_mgr->GetBool(1); + %} + +# =========================================================================== +# +# Deprecated Functions +# +# =========================================================================== + + + +%%{ +#include "Anon.h" +%%} + +## Preserves the prefix of an IP address in anonymization. +## +## a: The address to preserve. +## +## width: The number of bits from the top that should remain intact. +## +## .. zeek:see:: preserve_subnet anonymize_addr +## +## .. todo:: Currently dysfunctional. +function preserve_prefix%(a: addr, width: count%): any + %{ + AnonymizeIPAddr* ip_anon = ip_anonymizer[PREFIX_PRESERVING_A50]; + if ( ip_anon ) + { + if ( a->AsAddr().GetFamily() == IPv6 ) + builtin_error("preserve_prefix() not supported for IPv6 addresses"); + else + { + const uint32* bytes; + a->AsAddr().GetBytes(&bytes); + ip_anon->PreservePrefix(*bytes, width); + } + } + + + return 0; + %} + +## Preserves the prefix of a subnet in anonymization. +## +## a: The subnet to preserve. +## +## .. zeek:see:: preserve_prefix anonymize_addr +## +## .. todo:: Currently dysfunctional. +function preserve_subnet%(a: subnet%): any + %{ + DEBUG_MSG("%s/%d\n", a->Prefix().AsString().c_str(), a->Width()); + AnonymizeIPAddr* ip_anon = ip_anonymizer[PREFIX_PRESERVING_A50]; + if ( ip_anon ) + { + if ( a->AsSubNet().Prefix().GetFamily() == IPv6 ) + builtin_error("preserve_subnet() not supported for IPv6 addresses"); + else + { + const uint32* bytes; + a->AsSubNet().Prefix().GetBytes(&bytes); + ip_anon->PreservePrefix(*bytes, a->AsSubNet().Length()); + } + } + + return 0; + %} + +## Anonymizes an IP address. +## +## a: The address to anonymize. +## +## cl: The anonymization class, which can take on three different values: +## +## - ``ORIG_ADDR``: Tag *a* as an originator address. +## +## - ``RESP_ADDR``: Tag *a* as an responder address. +## +## - ``OTHER_ADDR``: Tag *a* as an arbitrary address. +## +## Returns: An anonymized version of *a*. +## +## .. zeek:see:: preserve_prefix preserve_subnet +## +## .. todo:: Currently dysfunctional. 
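Stepping back to the packet-filter BIFs above: they all share one calling pattern, so a single hedged sketch covers the family. The addresses are documentation-range examples, and passing ``0`` for *tcp_flags* is read here as "no flag exemptions", based on the parameter descriptions:

.. code-block:: zeek

    event zeek_init()
        {
        # Drop 90% of packets from one host and 50% of packets to a /24,
        # regardless of TCP flags (tcp_flags = 0).
        install_src_addr_filter(192.0.2.1, 0, 0.9);
        install_dst_net_filter(203.0.113.0/24, 0, 0.5);
        }

    event zeek_done()
        {
        uninstall_src_addr_filter(192.0.2.1);
        uninstall_dst_net_filter(203.0.113.0/24);
        }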
+function anonymize_addr%(a: addr, cl: IPAddrAnonymizationClass%): addr + %{ + int anon_class = cl->InternalInt(); + if ( anon_class < 0 || anon_class >= NUM_ADDR_ANONYMIZATION_CLASSES ) + builtin_error("anonymize_addr(): invalid ip addr anonymization class"); + + if ( a->AsAddr().GetFamily() == IPv6 ) + { + builtin_error("anonymize_addr() not supported for IPv6 addresses"); + return 0; + } + else + { + const uint32* bytes; + a->AsAddr().GetBytes(&bytes); + return new AddrVal(anonymize_ip(*bytes, + (enum ip_addr_anonymization_class_t) anon_class)); + } + %} diff --git a/src/zeekygen/CMakeLists.txt b/src/zeekygen/CMakeLists.txt new file mode 100644 index 0000000000..79abc56f94 --- /dev/null +++ b/src/zeekygen/CMakeLists.txt @@ -0,0 +1,25 @@ +# See the file "COPYING" in the main distribution directory for copyright. + +include(ZeekSubdir) + +include_directories(BEFORE + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_BINARY_DIR} +) + +set(zeekygen_SRCS + Manager.cc + Info.h + PackageInfo.cc + ScriptInfo.cc + IdentifierInfo.cc + Target.cc + Configuration.cc + ReStructuredTextTable.cc + utils.cc +) + +bif_target(zeekygen.bif) +bro_add_subdir_library(zeekygen ${zeekygen_SRCS}) + +add_dependencies(bro_zeekygen generate_outputs) diff --git a/src/broxygen/Configuration.cc b/src/zeekygen/Configuration.cc similarity index 87% rename from src/broxygen/Configuration.cc rename to src/zeekygen/Configuration.cc index 4780e6ad99..dbbbebf578 100644 --- a/src/broxygen/Configuration.cc +++ b/src/zeekygen/Configuration.cc @@ -11,7 +11,7 @@ #include #include -using namespace broxygen; +using namespace zeekygen; using namespace std; static TargetFactory create_target_factory() @@ -37,7 +37,7 @@ Config::Config(const string& arg_file, const string& delim) ifstream f(file.c_str()); if ( ! f.is_open() ) - reporter->FatalError("failed to open Broxygen config file '%s': %s", + reporter->FatalError("failed to open Zeekygen config file '%s': %s", file.c_str(), strerror(errno)); string line; @@ -59,20 +59,20 @@ Config::Config(const string& arg_file, const string& delim) continue; if ( tokens.size() != 3 ) - reporter->FatalError("malformed Broxygen target in %s:%u: %s", + reporter->FatalError("malformed Zeekygen target in %s:%u: %s", file.c_str(), line_number, line.c_str()); Target* target = target_factory.Create(tokens[0], tokens[2], tokens[1]); if ( ! target ) - reporter->FatalError("unknown Broxygen target type: %s", + reporter->FatalError("unknown Zeekygen target type: %s", tokens[0].c_str()); targets.push_back(target); } if ( f.bad() ) - reporter->InternalError("error reading Broxygen config file '%s': %s", + reporter->InternalError("error reading Zeekygen config file '%s': %s", file.c_str(), strerror(errno)); } @@ -99,5 +99,5 @@ time_t Config::GetModificationTime() const if ( file.empty() ) return 0; - return broxygen::get_mtime(file); + return zeekygen::get_mtime(file); } diff --git a/src/broxygen/Configuration.h b/src/zeekygen/Configuration.h similarity index 80% rename from src/broxygen/Configuration.h rename to src/zeekygen/Configuration.h index d41deb2c71..97ca125275 100644 --- a/src/broxygen/Configuration.h +++ b/src/zeekygen/Configuration.h @@ -1,7 +1,7 @@ // See the file "COPYING" in the main distribution directory for copyright. 
-#ifndef BROXYGEN_CONFIGURATION_H -#define BROXYGEN_CONFIGURATION_H +#ifndef ZEEKYGEN_CONFIGURATION_H +#define ZEEKYGEN_CONFIGURATION_H #include "Info.h" #include "Target.h" @@ -9,7 +9,7 @@ #include #include -namespace broxygen { +namespace zeekygen { /** * Manages the generation of reStructuredText documents corresponding to @@ -22,8 +22,8 @@ class Config { public: /** - * Read a Broxygen configuration file, parsing all targets in it. - * @param file The file containing a list of Broxygen targets. If it's + * Read a Zeekygen configuration file, parsing all targets in it. + * @param file The file containing a list of Zeekygen targets. If it's * an empty string most methods are a no-op. * @param delim The delimiter between target fields. */ @@ -41,7 +41,7 @@ public: void FindDependencies(const std::vector& infos); /** - * Build each Broxygen target (i.e. write out the reST documents to disk). + * Build each Zeekygen target (i.e. write out the reST documents to disk). */ void GenerateDocs() const; @@ -58,6 +58,6 @@ private: TargetFactory target_factory; }; -} // namespace broxygen +} // namespace zeekygen #endif diff --git a/src/zeekygen/IdentifierInfo.cc b/src/zeekygen/IdentifierInfo.cc new file mode 100644 index 0000000000..5db21ed956 --- /dev/null +++ b/src/zeekygen/IdentifierInfo.cc @@ -0,0 +1,181 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#include "IdentifierInfo.h" +#include "utils.h" + +#include "Desc.h" +#include "Val.h" +#include "Expr.h" + +using namespace std; +using namespace zeekygen; + +IdentifierInfo::IdentifierInfo(ID* arg_id, ScriptInfo* script) + : Info(), + comments(), id(arg_id), initial_val(), redefs(), fields(), + last_field_seen(), declaring_script(script) + { + Ref(id); + + if ( id->ID_Val() && (id->IsOption() || id->IsRedefinable()) ) + initial_val = id->ID_Val()->Clone(); + } + +IdentifierInfo::~IdentifierInfo() + { + Unref(id); + Unref(initial_val); + + for ( redef_list::const_iterator it = redefs.begin(); it != redefs.end(); + ++it ) + delete *it; + + for ( record_field_map::const_iterator it = fields.begin(); + it != fields.end(); ++it ) + delete it->second; + } + +void IdentifierInfo::AddRedef(const string& script, init_class ic, + Expr* init_expr, const vector& comments) + { + Redefinition* redef = new Redefinition(script, ic, init_expr, comments); + redefs.push_back(redef); + } + +void IdentifierInfo::AddRecordField(const TypeDecl* field, + const string& script, + vector& comments) + { + RecordField* rf = new RecordField(); + rf->field = new TypeDecl(*field); + rf->from_script = script; + rf->comments = comments; + fields[rf->field->id] = rf; + last_field_seen = rf; + } + +vector IdentifierInfo::GetComments() const + { + return comments; + } + +vector IdentifierInfo::GetFieldComments(const string& field) const + { + record_field_map::const_iterator it = fields.find(field); + + if ( it == fields.end() ) + return vector(); + + return it->second->comments; + } + +list +IdentifierInfo::GetRedefs(const string& from_script) const + { + list rval; + + for ( redef_list::const_iterator it = redefs.begin(); it != redefs.end(); + ++it ) + { + if ( from_script == (*it)->from_script ) + rval.push_back(*(*it)); + } + + return rval; + } + +string IdentifierInfo::GetDeclaringScriptForField(const string& field) const + { + record_field_map::const_iterator it = fields.find(field); + + if ( it == fields.end() ) + return ""; + + return it->second->from_script; + } + +string IdentifierInfo::DoReStructuredText(bool roles_only) const + { + 
ODesc d; + d.SetIndentSpaces(3); + d.SetQuotes(true); + id->DescribeReST(&d, roles_only); + + if ( comments.empty() ) + return d.Description(); + + d.ClearIndentLevel(); + d.PushIndent(); + + for ( size_t i = 0; i < comments.size(); ++i ) + { + if ( i > 0 ) + d.NL(); + + if ( IsFunc(id->Type()->Tag()) ) + { + string s = comments[i]; + + if ( zeekygen::prettify_params(s) ) + d.NL(); + + d.Add(s.c_str()); + } + else + d.Add(comments[i].c_str()); + } + + return d.Description(); + } + +time_t IdentifierInfo::DoGetModificationTime() const + { + // Could probably get away with just checking the set of scripts that + // contributed to the ID declaration/redefinitions, but this is easier... + return declaring_script->GetModificationTime(); + } + +IdentifierInfo::Redefinition::Redefinition( + std::string arg_script, + init_class arg_ic, + Expr* arg_expr, + std::vector arg_comments) + : from_script(std::move(arg_script)), + ic(arg_ic), + init_expr(arg_expr ? arg_expr->Ref() : nullptr), + comments(std::move(arg_comments)) + { + } + +IdentifierInfo::Redefinition::Redefinition(const IdentifierInfo::Redefinition& other) + { + from_script = other.from_script; + ic = other.ic; + init_expr = other.init_expr; + comments = other.comments; + + if ( init_expr ) + init_expr->Ref(); + } + +IdentifierInfo::Redefinition& +IdentifierInfo::Redefinition::operator=(const IdentifierInfo::Redefinition& other) + { + if ( &other == this ) + return *this; + + from_script = other.from_script; + ic = other.ic; + init_expr = other.init_expr; + comments = other.comments; + + if ( init_expr ) + init_expr->Ref(); + + return *this; + } + +IdentifierInfo::Redefinition::~Redefinition() + { + Unref(init_expr); + } diff --git a/src/broxygen/IdentifierInfo.h b/src/zeekygen/IdentifierInfo.h similarity index 77% rename from src/broxygen/IdentifierInfo.h rename to src/zeekygen/IdentifierInfo.h index be7e721838..5457772c52 100644 --- a/src/broxygen/IdentifierInfo.h +++ b/src/zeekygen/IdentifierInfo.h @@ -1,7 +1,7 @@ // See the file "COPYING" in the main distribution directory for copyright. -#ifndef BROXYGEN_IDENTIFIERINFO_H -#define BROXYGEN_IDENTIFIERINFO_H +#ifndef ZEEKYGEN_IDENTIFIERINFO_H +#define ZEEKYGEN_IDENTIFIERINFO_H #include "Info.h" #include "ScriptInfo.h" @@ -14,7 +14,7 @@ #include #include -namespace broxygen { +namespace zeekygen { class ScriptInfo; @@ -38,11 +38,17 @@ public: */ ~IdentifierInfo() override; + /** + * Returns the initial value of the identifier. + */ + Val* InitialVal() const + { return initial_val; } + /** * Add a comment associated with the identifier. If the identifier is a * record type and it's in the middle of parsing fields, the comment is * associated with the last field that was parsed. - * @param comment A string extracted from Broxygen-style comment. + * @param comment A string extracted from Zeekygen-style comment. */ void AddComment(const std::string& comment) { last_field_seen ? last_field_seen->comments.push_back(comment) @@ -59,9 +65,12 @@ public: /** * Register a redefinition of the identifier. * @param from_script The script in which the redef occurred. + * @param ic The initialization class used (e.g. =, +=, -=) + * @param init_expr The initialization expression used. * @param comments Comments associated with the redef statement. 
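The comment-buffering rule described above ("associated with the last field that was parsed") is what makes per-field documentation work. A hypothetical record showing both placements, ``##`` before a field and ``##<`` after it:

.. code-block:: zeek

    ## A small summary record; Zeekygen keeps this comment for the type itself.
    type ConnSummary: record {
        ts: time;            ##< Attached to the field just parsed (ts).
        ## Buffered and attached to the next field (num_bytes).
        num_bytes: count &default=0;
    };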
*/ - void AddRedef(const std::string& from_script, + void AddRedef(const std::string& from_script, init_class ic, + Expr* init_expr, const std::vector& comments); /** @@ -102,13 +111,13 @@ public: std::string GetDeclaringScriptForField(const std::string& field) const; /** - * @return All Broxygen comments associated with the identifier. + * @return All Zeekygen comments associated with the identifier. */ std::vector GetComments() const; /** * @param field A record field name. - * @return All Broxygen comments associated with the record field. + * @return All Zeekygen comments associated with the record field. */ std::vector GetFieldComments(const std::string& field) const; @@ -117,8 +126,19 @@ public: */ struct Redefinition { std::string from_script; /**< Name of script doing the redef. */ - std::string new_val_desc; /**< Description of new value bound to ID. */ - std::vector comments; /**< Broxygen comments on redef. */ + init_class ic; + Expr* init_expr; + std::vector comments; /**< Zeekygen comments on redef. */ + + Redefinition(std::string arg_script, init_class arg_ic, + Expr* arg_expr, + std::vector arg_comments); + + Redefinition(const Redefinition& other); + + Redefinition& operator=(const Redefinition& other); + + ~Redefinition(); }; /** @@ -129,6 +149,13 @@ public: */ std::list GetRedefs(const std::string& from_script) const; + /** + * Get a list of information about redefinitions of the identifier. + * @return A list of redefs that occurred for the identifier. + */ + const std::list& GetRedefs() const + { return redefs; } + private: time_t DoGetModificationTime() const override; @@ -152,13 +179,13 @@ private: std::vector comments; ID* id; - std::string initial_val_desc; + Val* initial_val; redef_list redefs; record_field_map fields; RecordField* last_field_seen; ScriptInfo* declaring_script; }; -} // namespace broxygen +} // namespace zeekygen #endif diff --git a/src/broxygen/Info.h b/src/zeekygen/Info.h similarity index 89% rename from src/broxygen/Info.h rename to src/zeekygen/Info.h index 9df73f899f..f6e09cb498 100644 --- a/src/broxygen/Info.h +++ b/src/zeekygen/Info.h @@ -1,15 +1,15 @@ // See the file "COPYING" in the main distribution directory for copyright. -#ifndef BROXYGEN_INFO_H -#define BROXYGEN_INFO_H +#ifndef ZEEKYGEN_INFO_H +#define ZEEKYGEN_INFO_H #include #include -namespace broxygen { +namespace zeekygen { /** - * Abstract base class for any thing that Broxygen can document. + * Abstract base class for any thing that Zeekygen can document. 
*/ class Info { @@ -68,6 +68,6 @@ private: { } }; -} // namespace broxygen +} // namespace zeekygen #endif diff --git a/src/broxygen/Manager.cc b/src/zeekygen/Manager.cc similarity index 84% rename from src/broxygen/Manager.cc rename to src/zeekygen/Manager.cc index c54b05754e..57cc19d531 100644 --- a/src/broxygen/Manager.cc +++ b/src/zeekygen/Manager.cc @@ -7,7 +7,7 @@ #include #include -using namespace broxygen; +using namespace zeekygen; using namespace std; static void DbgAndWarn(const char* msg) @@ -19,7 +19,7 @@ static void DbgAndWarn(const char* msg) return; reporter->Warning("%s", msg); - DBG_LOG(DBG_BROXYGEN, "%s", msg); + DBG_LOG(DBG_ZEEKYGEN, "%s", msg); } static void WarnMissingScript(const char* type, const ID* id, @@ -28,7 +28,7 @@ static void WarnMissingScript(const char* type, const ID* id, if ( script == "" ) return; - DbgAndWarn(fmt("Can't generate Broxygen doumentation for %s %s, " + DbgAndWarn(fmt("Can't generate Zeekygen doumentation for %s %s, " "lookup of %s failed", type, id->Name(), script.c_str())); } @@ -64,7 +64,7 @@ Manager::Manager(const string& arg_config, const string& bro_command) identifiers(), all_info(), last_identifier_seen(), incomplete_type(), enum_mappings(), config(arg_config), bro_mtime() { - if ( getenv("BRO_DISABLE_BROXYGEN") ) + if ( zeekenv("ZEEK_DISABLE_ZEEKYGEN") ) disabled = true; // If running bro without the "-X" option, then we don't need bro_mtime. @@ -83,7 +83,7 @@ Manager::Manager(const string& arg_config, const string& bro_command) // a PATH component that starts with a tilde (such as "~/bin"). A simple // workaround is to just run bro with a relative or absolute path. if ( path_to_bro.empty() || stat(path_to_bro.c_str(), &s) < 0 ) - reporter->InternalError("Broxygen can't get mtime of bro binary %s (try again by specifying the absolute or relative path to Bro): %s", + reporter->InternalError("Zeekygen can't get mtime of zeek binary %s (try again by specifying the absolute or relative path to Zeek): %s", path_to_bro.c_str(), strerror(errno)); bro_mtime = s.st_mtime; @@ -129,7 +129,7 @@ void Manager::Script(const string& path) if ( scripts.GetInfo(name) ) { - DbgAndWarn(fmt("Duplicate Broxygen script documentation: %s", + DbgAndWarn(fmt("Duplicate Zeekygen script documentation: %s", name.c_str())); return; } @@ -137,7 +137,7 @@ void Manager::Script(const string& path) ScriptInfo* info = new ScriptInfo(name, path); scripts.map[name] = info; all_info.push_back(info); - DBG_LOG(DBG_BROXYGEN, "Made ScriptInfo %s", name.c_str()); + DBG_LOG(DBG_ZEEKYGEN, "Made ScriptInfo %s", name.c_str()); if ( ! 
info->IsPkgLoader() ) return; @@ -146,7 +146,7 @@ void Manager::Script(const string& path) if ( packages.GetInfo(name) ) { - DbgAndWarn(fmt("Duplicate Broxygen package documentation: %s", + DbgAndWarn(fmt("Duplicate Zeekygen package documentation: %s", name.c_str())); return; } @@ -154,7 +154,7 @@ void Manager::Script(const string& path) PackageInfo* pkginfo = new PackageInfo(name); packages.map[name] = pkginfo; all_info.push_back(pkginfo); - DBG_LOG(DBG_BROXYGEN, "Made PackageInfo %s", name.c_str()); + DBG_LOG(DBG_ZEEKYGEN, "Made PackageInfo %s", name.c_str()); } void Manager::ScriptDependency(const string& path, const string& dep) @@ -164,7 +164,7 @@ void Manager::ScriptDependency(const string& path, const string& dep) if ( dep.empty() ) { - DbgAndWarn(fmt("Empty Broxygen script doc dependency: %s", + DbgAndWarn(fmt("Empty Zeekygen script doc dependency: %s", path.c_str())); return; } @@ -175,17 +175,17 @@ void Manager::ScriptDependency(const string& path, const string& dep) if ( ! script_info ) { - DbgAndWarn(fmt("Failed to add Broxygen script doc dependency %s " + DbgAndWarn(fmt("Failed to add Zeekygen script doc dependency %s " "for %s", depname.c_str(), name.c_str())); return; } script_info->AddDependency(depname); - DBG_LOG(DBG_BROXYGEN, "Added script dependency %s for %s", + DBG_LOG(DBG_ZEEKYGEN, "Added script dependency %s for %s", depname.c_str(), name.c_str()); for ( size_t i = 0; i < comment_buffer.size(); ++i ) - DbgAndWarn(fmt("Discarded extraneous Broxygen comment: %s", + DbgAndWarn(fmt("Discarded extraneous Zeekygen comment: %s", comment_buffer[i].c_str())); } @@ -199,13 +199,13 @@ void Manager::ModuleUsage(const string& path, const string& module) if ( ! script_info ) { - DbgAndWarn(fmt("Failed to add Broxygen module usage %s in %s", + DbgAndWarn(fmt("Failed to add Zeekygen module usage %s in %s", module.c_str(), name.c_str())); return; } script_info->AddModule(module); - DBG_LOG(DBG_BROXYGEN, "Added module usage %s in %s", + DBG_LOG(DBG_ZEEKYGEN, "Added module usage %s in %s", module.c_str(), name.c_str()); } @@ -246,7 +246,7 @@ void Manager::StartType(ID* id) if ( id->GetLocationInfo() == &no_location ) { - DbgAndWarn(fmt("Can't generate broxygen doumentation for %s, " + DbgAndWarn(fmt("Can't generate zeekygen doumentation for %s, " "no location available", id->Name())); return; } @@ -261,7 +261,7 @@ void Manager::StartType(ID* id) } incomplete_type = CreateIdentifierInfo(id, script_info); - DBG_LOG(DBG_BROXYGEN, "Made IdentifierInfo (incomplete) %s, in %s", + DBG_LOG(DBG_ZEEKYGEN, "Made IdentifierInfo (incomplete) %s, in %s", id->Name(), script.c_str()); } @@ -279,7 +279,7 @@ void Manager::Identifier(ID* id) { if ( incomplete_type->Name() == id->Name() ) { - DBG_LOG(DBG_BROXYGEN, "Finished document for type %s", id->Name()); + DBG_LOG(DBG_ZEEKYGEN, "Finished document for type %s", id->Name()); incomplete_type->CompletedTypeDecl(); incomplete_type = 0; return; @@ -309,7 +309,7 @@ void Manager::Identifier(ID* id) { // Internally-created identifier (e.g. file/proto analyzer enum tags). // Handled specially since they don't have a script location. 
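To make the per-script bookkeeping above concrete: the ``module`` statement is what ModuleUsage() records, and Identifier()/StartType() only lead to documentation for identifiers that pass the public-API check (global scope, or module scope plus export; see ``is_public_api`` later in this patch). A made-up module:

.. code-block:: zeek

    module Example;    # recorded via ModuleUsage()

    export {
        ## Exported, so it is part of the public API Zeekygen documents.
        const greeting = "hello" &redef;
    }

    # Module-scoped but not exported: skipped by the public-API filter.
    global request_count = 0;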
- DBG_LOG(DBG_BROXYGEN, "Made internal IdentifierInfo %s", + DBG_LOG(DBG_ZEEKYGEN, "Made internal IdentifierInfo %s", id->Name()); CreateIdentifierInfo(id, 0); return; @@ -325,7 +325,7 @@ void Manager::Identifier(ID* id) } CreateIdentifierInfo(id, script_info); - DBG_LOG(DBG_BROXYGEN, "Made IdentifierInfo %s, in script %s", + DBG_LOG(DBG_ZEEKYGEN, "Made IdentifierInfo %s, in script %s", id->Name(), script.c_str()); } @@ -339,7 +339,7 @@ void Manager::RecordField(const ID* id, const TypeDecl* field, if ( ! idd ) { - DbgAndWarn(fmt("Can't generate broxygen doumentation for " + DbgAndWarn(fmt("Can't generate zeekygen doumentation for " "record field %s, unknown record: %s", field->id, id->Name())); return; @@ -348,11 +348,12 @@ void Manager::RecordField(const ID* id, const TypeDecl* field, string script = NormalizeScriptPath(path); idd->AddRecordField(field, script, comment_buffer); comment_buffer.clear(); - DBG_LOG(DBG_BROXYGEN, "Document record field %s, identifier %s, script %s", + DBG_LOG(DBG_ZEEKYGEN, "Document record field %s, identifier %s, script %s", field->id, id->Name(), script.c_str()); } -void Manager::Redef(const ID* id, const string& path) +void Manager::Redef(const ID* id, const string& path, + init_class ic, Expr* init_expr) { if ( disabled ) return; @@ -365,7 +366,7 @@ void Manager::Redef(const ID* id, const string& path) if ( ! id_info ) { - DbgAndWarn(fmt("Can't generate broxygen doumentation for " + DbgAndWarn(fmt("Can't generate zeekygen doumentation for " "redef of %s, identifier lookup failed", id->Name())); return; @@ -380,11 +381,11 @@ void Manager::Redef(const ID* id, const string& path) return; } - id_info->AddRedef(from_script, comment_buffer); + id_info->AddRedef(from_script, ic, init_expr, comment_buffer); script_info->AddRedef(id_info); comment_buffer.clear(); last_identifier_seen = id_info; - DBG_LOG(DBG_BROXYGEN, "Added redef of %s from %s", + DBG_LOG(DBG_ZEEKYGEN, "Added redef of %s from %s", id->Name(), from_script.c_str()); } @@ -421,7 +422,7 @@ void Manager::PostComment(const string& comment, const string& id_hint) if ( last_identifier_seen ) last_identifier_seen->AddComment(RemoveLeadingSpace(comment)); else - DbgAndWarn(fmt("Discarded unassociated Broxygen comment %s", + DbgAndWarn(fmt("Discarded unassociated Zeekygen comment %s", comment.c_str())); return; diff --git a/src/broxygen/Manager.h b/src/zeekygen/Manager.h similarity index 83% rename from src/broxygen/Manager.h rename to src/zeekygen/Manager.h index 7978adc180..99c6b353cc 100644 --- a/src/broxygen/Manager.h +++ b/src/zeekygen/Manager.h @@ -1,7 +1,7 @@ // See the file "COPYING" in the main distribution directory for copyright. -#ifndef BROXYGEN_MANAGER_H -#define BROXYGEN_MANAGER_H +#ifndef ZEEKYGEN_MANAGER_H +#define ZEEKYGEN_MANAGER_H #include "Configuration.h" #include "Info.h" @@ -21,7 +21,7 @@ #include #include -namespace broxygen { +namespace zeekygen { /** * Map of info objects. Just a wrapper around std::map to improve code @@ -54,7 +54,7 @@ public: /** * Ctor. - * @param config Path to a Broxygen config file if documentation is to be + * @param config Path to a Zeekygen config file if documentation is to be * written to disk. * @param bro_command The command used to invoke the bro process. * It's used when checking for out-of-date targets. If the bro binary is @@ -80,7 +80,7 @@ public: void InitPostScript(); /** - * Builds all Broxygen targets specified by config file and write out + * Builds all Zeekygen targets specified by config file and write out * documentation to disk. 
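For context on the new *ic*/*init_expr* parameters: they capture the initialization class and expression of script-level redefs like the stock one sketched below, instead of the plain textual description the Redefinition struct used to carry (see the ``new_val_desc`` removal further down in IdentifierInfo.h):

.. code-block:: zeek

    ## Our monitored address space; Zeekygen now records this comment, the
    ## "+=" initialization class, and the expression on the right-hand side.
    redef Site::local_nets += { 10.0.0.0/8, 192.168.0.0/16 };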
*/ void GenerateDocs() const; @@ -136,28 +136,31 @@ public: * Register a redefinition of a particular identifier. * @param id The identifier being redef'd. * @param path Absolute path to a Bro script doing the redef. + * @param ic The initialization class that was used (e.g. =, +=, -=). + * @param init_expr The intiialization expression that was used. */ - void Redef(const ID* id, const std::string& path); + void Redef(const ID* id, const std::string& path, + init_class ic = INIT_NONE, Expr* init_expr = nullptr); /** - * Register Broxygen script summary content. + * Register Zeekygen script summary content. * @param path Absolute path to a Bro script. - * @param comment Broxygen-style summary comment ("##!") to associate with + * @param comment Zeekygen-style summary comment ("##!") to associate with * script given by \a path. */ void SummaryComment(const std::string& path, const std::string& comment); /** - * Register a Broxygen comment ("##") for an upcoming identifier (i.e. + * Register a Zeekygen comment ("##") for an upcoming identifier (i.e. * this content is buffered and consumed by next identifier/field * declaration. - * @param comment Content of the Broxygen comment. + * @param comment Content of the Zeekygen comment. */ void PreComment(const std::string& comment); /** - * Register a Broxygen comment ("##<") for the last identifier seen. - * @param comment Content of the Broxygen comment. + * Register a Zeekygen comment ("##<") for the last identifier seen. + * @param comment Content of the Zeekygen comment. * @param identifier_hint Expected name of identifier with which to * associate \a comment. */ @@ -179,8 +182,8 @@ public: { return identifiers.GetInfo(name); } /** - * @param name Name of a Bro script ("normalized" to be a path relative - * to a component within BROPATH). + * @param name Name of a Zeek script ("normalized" to be a path relative + * to a component within ZEEKPATH). * @return a script info object associated with \a name or a null pointer * if it's not a known script name. */ @@ -188,8 +191,8 @@ public: { return scripts.GetInfo(name); } /** - * @param name Nmae of a Bro script package ("normalized" to be a path - * relative to a component within BROPATH). + * @param name Name of a Zeek script package ("normalized" to be a path + * relative to a component within ZEEKPATH). * @return a package info object assocated with \a name or a null pointer * if it's not a known package name. */ @@ -197,11 +200,11 @@ public: { return packages.GetInfo(name); } /** - * Check if a Broxygen target is up-to-date. - * @param target_file output file of a Broxygen target. + * Check if a Zeekygen target is up-to-date. + * @param target_file output file of a Zeekygen target. * @param dependencies all dependencies of the target. * @return true if modification time of \a target_file is newer than - * modification time of Bro binary, Broxygen config file, and all + * modification time of Bro binary, Zeekygen config file, and all * dependencies, else false. */ template @@ -241,7 +244,7 @@ bool Manager::IsUpToDate(const string& target_file, // Doesn't exist. 
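The three comment hooks documented above map one-to-one onto comment syntax in scripts. A hypothetical fragment showing which style feeds which callback:

.. code-block:: zeek

    ##! Summary line for the whole script (consumed via SummaryComment()).

    ## Buffered by PreComment() and attached to the next declaration.
    const max_items = 100 &redef;  ##< Handled by PostComment() for the identifier just seen.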
return false; - reporter->InternalError("Broxygen failed to stat target file '%s': %s", + reporter->InternalError("Zeekygen failed to stat target file '%s': %s", target_file.c_str(), strerror(errno)); } @@ -258,8 +261,8 @@ bool Manager::IsUpToDate(const string& target_file, return true; } -} // namespace broxygen +} // namespace zeekygen -extern broxygen::Manager* broxygen_mgr; +extern zeekygen::Manager* zeekygen_mgr; #endif diff --git a/src/broxygen/PackageInfo.cc b/src/zeekygen/PackageInfo.cc similarity index 85% rename from src/broxygen/PackageInfo.cc rename to src/zeekygen/PackageInfo.cc index 1cbff5a07f..4fe1ba8ad9 100644 --- a/src/broxygen/PackageInfo.cc +++ b/src/zeekygen/PackageInfo.cc @@ -9,7 +9,7 @@ #include using namespace std; -using namespace broxygen; +using namespace zeekygen; PackageInfo::PackageInfo(const string& arg_name) : Info(), @@ -23,7 +23,7 @@ PackageInfo::PackageInfo(const string& arg_name) ifstream f(readme_file.c_str()); if ( ! f.is_open() ) - reporter->InternalWarning("Broxygen failed to open '%s': %s", + reporter->InternalWarning("Zeekygen failed to open '%s': %s", readme_file.c_str(), strerror(errno)); string line; @@ -32,7 +32,7 @@ PackageInfo::PackageInfo(const string& arg_name) readme.push_back(line); if ( f.bad() ) - reporter->InternalWarning("Broxygen error reading '%s': %s", + reporter->InternalWarning("Zeekygen error reading '%s': %s", readme_file.c_str(), strerror(errno)); } @@ -54,5 +54,5 @@ time_t PackageInfo::DoGetModificationTime() const if ( readme_file.empty() ) return 0; - return broxygen::get_mtime(readme_file); + return zeekygen::get_mtime(readme_file); } diff --git a/src/zeekygen/PackageInfo.h b/src/zeekygen/PackageInfo.h new file mode 100644 index 0000000000..044abd88e7 --- /dev/null +++ b/src/zeekygen/PackageInfo.h @@ -0,0 +1,50 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef ZEEKYGEN_PACKAGEINFO_H +#define ZEEKYGEN_PACKAGEINFO_H + +#include "Info.h" + +#include +#include + +namespace zeekygen { + +/** + * Information about a Zeek script package. + */ +class PackageInfo : public Info { + +public: + + /** + * Ctor. + * @param name The name of the Zeek script package (relative path from a + * component within ZEEKPATH). + */ + explicit PackageInfo(const std::string& name); + + /** + * @return The content of the package's README file, each line being + * an element in the returned vector. If the package has no README, the + * vector is empty. 
+ */ + std::vector GetReadme() const + { return readme; } + +private: + + time_t DoGetModificationTime() const override; + + std::string DoName() const override + { return pkg_name; } + + std::string DoReStructuredText(bool roles_only) const override; + + std::string pkg_name; + std::vector readme; +}; + +} // namespace zeekygen + +#endif diff --git a/src/broxygen/ReStructuredTextTable.cc b/src/zeekygen/ReStructuredTextTable.cc similarity index 98% rename from src/broxygen/ReStructuredTextTable.cc rename to src/zeekygen/ReStructuredTextTable.cc index 2cdb774224..55c576a2a4 100644 --- a/src/broxygen/ReStructuredTextTable.cc +++ b/src/zeekygen/ReStructuredTextTable.cc @@ -5,7 +5,7 @@ #include using namespace std; -using namespace broxygen; +using namespace zeekygen; ReStructuredTextTable::ReStructuredTextTable(size_t arg_num_cols) : num_cols(arg_num_cols), rows(), longest_row_in_column() diff --git a/src/broxygen/ReStructuredTextTable.h b/src/zeekygen/ReStructuredTextTable.h similarity index 92% rename from src/broxygen/ReStructuredTextTable.h rename to src/zeekygen/ReStructuredTextTable.h index 34cc30c332..aefa8aaa26 100644 --- a/src/broxygen/ReStructuredTextTable.h +++ b/src/zeekygen/ReStructuredTextTable.h @@ -1,12 +1,12 @@ // See the file "COPYING" in the main distribution directory for copyright. -#ifndef BROXYGEN_RESTTABLE_H -#define BROXYGEN_RESTTABLE_H +#ifndef ZEEKYGEN_RESTTABLE_H +#define ZEEKYGEN_RESTTABLE_H #include #include -namespace broxygen { +namespace zeekygen { /** * A reST table with arbitrary number of columns. @@ -48,6 +48,6 @@ private: std::vector longest_row_in_column; }; -} // namespace broxygen +} // namespace zeekygen #endif diff --git a/src/broxygen/ScriptInfo.cc b/src/zeekygen/ScriptInfo.cc similarity index 82% rename from src/broxygen/ScriptInfo.cc rename to src/zeekygen/ScriptInfo.cc index a32d96cdd5..d55b42b7bc 100644 --- a/src/broxygen/ScriptInfo.cc +++ b/src/zeekygen/ScriptInfo.cc @@ -10,7 +10,7 @@ #include "Desc.h" using namespace std; -using namespace broxygen; +using namespace zeekygen; bool IdInfoComp::operator ()(const IdentifierInfo* lhs, const IdentifierInfo* rhs) const @@ -24,11 +24,11 @@ static vector summary_comment(const vector& cmnts) for ( size_t i = 0; i < cmnts.size(); ++i ) { - size_t end = broxygen::end_of_first_sentence(cmnts[i]); + size_t end = zeekygen::end_of_first_sentence(cmnts[i]); if ( end == string::npos ) { - if ( broxygen::is_all_whitespace(cmnts[i]) ) + if ( zeekygen::is_all_whitespace(cmnts[i]) ) break; rval.push_back(cmnts[i]); @@ -86,7 +86,7 @@ static string make_summary(const string& heading, char underline, char border, add_summary_rows(d, summary_comment((*it)->GetComments()), &table); } - return broxygen::make_heading(heading, underline) + table.AsString(border) + return zeekygen::make_heading(heading, underline) + table.AsString(border) + "\n"; } @@ -115,7 +115,7 @@ static string make_redef_summary(const string& heading, char underline, add_summary_rows(d, summary_comment(iit->comments), &table); } - return broxygen::make_heading(heading, underline) + table.AsString(border) + return zeekygen::make_heading(heading, underline) + table.AsString(border) + "\n"; } @@ -125,7 +125,7 @@ static string make_details(const string& heading, char underline, if ( id_list.empty() ) return ""; - string rval = broxygen::make_heading(heading, underline); + string rval = zeekygen::make_heading(heading, underline); for ( id_info_list::const_iterator it = id_list.begin(); it != id_list.end(); ++it ) @@ -143,7 +143,7 @@ static string 
make_redef_details(const string& heading, char underline, if ( id_set.empty() ) return ""; - string rval = broxygen::make_heading(heading, underline); + string rval = zeekygen::make_heading(heading, underline); for ( id_info_set::const_iterator it = id_set.begin(); it != id_set.end(); ++it ) @@ -158,7 +158,7 @@ static string make_redef_details(const string& heading, char underline, ScriptInfo::ScriptInfo(const string& arg_name, const string& arg_path) : Info(), name(arg_name), path(arg_path), - is_pkg_loader(SafeBasename(name).result == PACKAGE_LOADER), + is_pkg_loader(is_package_loader(name)), dependencies(), module_usages(), comments(), id_info(), redef_options(), constants(), state_vars(), types(), events(), hooks(), functions(), redefs() @@ -178,13 +178,13 @@ void ScriptInfo::DoInitPostScript() IdentifierInfo* info = it->second; ID* id = info->GetID(); - if ( ! broxygen::is_public_api(id) ) + if ( ! zeekygen::is_public_api(id) ) continue; if ( id->AsType() ) { types.push_back(info); - DBG_LOG(DBG_BROXYGEN, "Filter id '%s' in '%s' as a type", + DBG_LOG(DBG_ZEEKYGEN, "Filter id '%s' in '%s' as a type", id->Name(), name.c_str()); continue; } @@ -193,17 +193,17 @@ void ScriptInfo::DoInitPostScript() { switch ( id->Type()->AsFuncType()->Flavor() ) { case FUNC_FLAVOR_HOOK: - DBG_LOG(DBG_BROXYGEN, "Filter id '%s' in '%s' as a hook", + DBG_LOG(DBG_ZEEKYGEN, "Filter id '%s' in '%s' as a hook", id->Name(), name.c_str()); hooks.push_back(info); break; case FUNC_FLAVOR_EVENT: - DBG_LOG(DBG_BROXYGEN, "Filter id '%s' in '%s' as a event", + DBG_LOG(DBG_ZEEKYGEN, "Filter id '%s' in '%s' as a event", id->Name(), name.c_str()); events.push_back(info); break; case FUNC_FLAVOR_FUNCTION: - DBG_LOG(DBG_BROXYGEN, "Filter id '%s' in '%s' as a function", + DBG_LOG(DBG_ZEEKYGEN, "Filter id '%s' in '%s' as a function", id->Name(), name.c_str()); functions.push_back(info); break; @@ -219,13 +219,13 @@ void ScriptInfo::DoInitPostScript() { if ( id->FindAttr(ATTR_REDEF) ) { - DBG_LOG(DBG_BROXYGEN, "Filter id '%s' in '%s' as a redef_option", + DBG_LOG(DBG_ZEEKYGEN, "Filter id '%s' in '%s' as a redef_option", id->Name(), name.c_str()); redef_options.push_back(info); } else { - DBG_LOG(DBG_BROXYGEN, "Filter id '%s' in '%s' as a constant", + DBG_LOG(DBG_ZEEKYGEN, "Filter id '%s' in '%s' as a constant", id->Name(), name.c_str()); constants.push_back(info); } @@ -234,7 +234,7 @@ void ScriptInfo::DoInitPostScript() } else if ( id->IsOption() ) { - DBG_LOG(DBG_BROXYGEN, "Filter id '%s' in '%s' as an runtime option", + DBG_LOG(DBG_ZEEKYGEN, "Filter id '%s' in '%s' as an runtime option", id->Name(), name.c_str()); options.push_back(info); @@ -246,19 +246,19 @@ void ScriptInfo::DoInitPostScript() // documentation. continue; - DBG_LOG(DBG_BROXYGEN, "Filter id '%s' in '%s' as a state variable", + DBG_LOG(DBG_ZEEKYGEN, "Filter id '%s' in '%s' as a state variable", id->Name(), name.c_str()); state_vars.push_back(info); } // The following enum types are automatically created internally in Bro, // so just manually associating them with scripts for now. 
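The filter above sorts a script's public identifiers into the summary buckets rendered below in DoReStructuredText(). A made-up script touching each bucket:

.. code-block:: zeek

    module Demo;

    export {
        ## Filed under "Types".
        type Color: enum { RED, GREEN };

        ## "Redefinable Options" (const with &redef).
        const max_depth = 3 &redef;

        ## "Constants".
        const version_string = "1.0";

        ## "Runtime Options" (the option keyword).
        option verbose = F;

        ## "State Variables" (plain globals).
        global seen_hosts: set[addr];

        ## Events, hooks and functions each get their own section.
        global demo_event: event(n: count);
        global demo_hook: hook(id: string);
    }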
- if ( name == "base/frameworks/input/main.bro" ) + if ( name == "base/frameworks/input/main.zeek" ) { auto id = global_scope()->Lookup("Input::Reader"); types.push_back(new IdentifierInfo(id, this)); } - else if ( name == "base/frameworks/logging/main.bro" ) + else if ( name == "base/frameworks/logging/main.zeek" ) { auto id = global_scope()->Lookup("Log::Writer"); types.push_back(new IdentifierInfo(id, this)); @@ -275,11 +275,11 @@ string ScriptInfo::DoReStructuredText(bool roles_only) const string rval; rval += ":tocdepth: 3\n\n"; - rval += broxygen::make_heading(name, '='); + rval += zeekygen::make_heading(name, '='); for ( string_set::const_iterator it = module_usages.begin(); it != module_usages.end(); ++it ) - rval += ".. bro:namespace:: " + *it + "\n"; + rval += ".. zeek:namespace:: " + *it + "\n"; rval += "\n"; @@ -314,7 +314,7 @@ string ScriptInfo::DoReStructuredText(bool roles_only) const if ( it != dependencies.begin() ) rval += ", "; - string path = find_file(*it, bro_path(), "bro"); + string path = find_script_file(*it, bro_path()); string doc = *it; if ( ! path.empty() && is_dir(path.c_str()) ) @@ -329,7 +329,7 @@ string ScriptInfo::DoReStructuredText(bool roles_only) const //rval += fmt(":Source File: :download:`/scripts/%s`\n", name.c_str()); rval += "\n"; - rval += broxygen::make_heading("Summary", '~'); + rval += zeekygen::make_heading("Summary", '~'); rval += make_summary("Runtime Options", '#', '=', options); rval += make_summary("Redefinable Options", '#', '=', redef_options); rval += make_summary("Constants", '#', '=', constants); @@ -340,7 +340,7 @@ string ScriptInfo::DoReStructuredText(bool roles_only) const rval += make_summary("Hooks", '#', '=', hooks); rval += make_summary("Functions", '#', '=', functions); rval += "\n"; - rval += broxygen::make_heading("Detailed Interface", '~'); + rval += zeekygen::make_heading("Detailed Interface", '~'); rval += make_details("Runtime Options", '#', options); rval += make_details("Redefinable Options", '#', redef_options); rval += make_details("Constants", '#', constants); @@ -356,20 +356,25 @@ string ScriptInfo::DoReStructuredText(bool roles_only) const time_t ScriptInfo::DoGetModificationTime() const { - time_t most_recent = broxygen::get_mtime(path); + time_t most_recent = zeekygen::get_mtime(path); for ( string_set::const_iterator it = dependencies.begin(); it != dependencies.end(); ++it ) { - Info* info = broxygen_mgr->GetScriptInfo(*it); + Info* info = zeekygen_mgr->GetScriptInfo(*it); if ( ! info ) { - string pkg_name = *it + "/" + PACKAGE_LOADER; - info = broxygen_mgr->GetScriptInfo(pkg_name); + for (const string& ext : script_extensions) + { + string pkg_name = *it + "/__load__" + ext; + info = zeekygen_mgr->GetScriptInfo(pkg_name); + if ( info ) + break; + } if ( ! info ) - reporter->InternalWarning("Broxygen failed to get mtime of %s", + reporter->InternalWarning("Zeekygen failed to get mtime of %s", it->c_str()); continue; } diff --git a/src/broxygen/ScriptInfo.h b/src/zeekygen/ScriptInfo.h similarity index 89% rename from src/broxygen/ScriptInfo.h rename to src/zeekygen/ScriptInfo.h index d7328ef7c8..8567111b5e 100644 --- a/src/broxygen/ScriptInfo.h +++ b/src/zeekygen/ScriptInfo.h @@ -1,7 +1,7 @@ // See the file "COPYING" in the main distribution directory for copyright. 
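Because both the dependency links above and the mtime lookup key off normalized script names, a package is addressed through its loader script; the patch now tries ``__load__`` with every registered script extension rather than one hard-coded name. A hypothetical ``policy/misc/example/__load__.zeek``:

.. code-block:: zeek

    ##! Loader for the (made-up) policy/misc/example package.

    @load base/frameworks/notice
    @load ./details    # a sibling script inside the same package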
-#ifndef BROXYGEN_SCRIPTINFO_H -#define BROXYGEN_SCRIPTINFO_H +#ifndef ZEEKYGEN_SCRIPTINFO_H +#define ZEEKYGEN_SCRIPTINFO_H #include "Info.h" #include "IdentifierInfo.h" @@ -12,7 +12,7 @@ #include #include -namespace broxygen { +namespace zeekygen { class IdentifierInfo; @@ -33,13 +33,13 @@ public: /** * Ctor. - * @param name Name of script: a path relative to a component in BROPATH. + * @param name Name of script: a path relative to a component in ZEEKPATH. * @param path Absolute path to the script. */ ScriptInfo(const std::string& name, const std::string& path); /** - * Associate a Broxygen summary comment ("##!") with the script. + * Associate a Zeekygen summary comment ("##!") with the script. * @param comment String extracted from the comment. */ void AddComment(const std::string& comment) @@ -48,7 +48,7 @@ public: /** * Register a dependency on another script. * @param name Name of a script with this one @loads. This is the - * "normalized" name (a path relative to a component in BROPATH). + * "normalized" name (a path relative to a component in ZEEKPATH). */ void AddDependency(const std::string& name) { dependencies.insert(name); } @@ -77,13 +77,13 @@ public: { redefs.insert(info); } /** - * @return Whether the script is a package loader (i.e. "__load__.bro"). + * @return Whether the script is a package loader (i.e. "__load__.zeek"). */ bool IsPkgLoader() const { return is_pkg_loader; } /** - * @return All the scripts Broxygen summary comments. + * @return All the scripts Zeekygen summary comments. */ std::vector GetComments() const; @@ -119,6 +119,6 @@ private: id_info_set redefs; }; -} // namespace broxygen +} // namespace zeekygen #endif diff --git a/src/broxygen/Target.cc b/src/zeekygen/Target.cc similarity index 88% rename from src/broxygen/Target.cc rename to src/zeekygen/Target.cc index dba0d67d6c..afb96cbd8b 100644 --- a/src/broxygen/Target.cc +++ b/src/zeekygen/Target.cc @@ -16,7 +16,7 @@ #include using namespace std; -using namespace broxygen; +using namespace zeekygen; static void write_plugin_section_heading(FILE* f, const plugin::Plugin* p) { @@ -38,7 +38,7 @@ static void write_analyzer_component(FILE* f, const analyzer::Component* c) if ( atag->Lookup("Analyzer", tag.c_str()) < 0 ) reporter->InternalError("missing analyzer tag for %s", tag.c_str()); - fprintf(f, ":bro:enum:`Analyzer::%s`\n\n", tag.c_str()); + fprintf(f, ":zeek:enum:`Analyzer::%s`\n\n", tag.c_str()); } static void write_analyzer_component(FILE* f, const file_analysis::Component* c) @@ -49,7 +49,7 @@ static void write_analyzer_component(FILE* f, const file_analysis::Component* c) if ( atag->Lookup("Files", tag.c_str()) < 0 ) reporter->InternalError("missing analyzer tag for %s", tag.c_str()); - fprintf(f, ":bro:enum:`Files::%s`\n\n", tag.c_str()); + fprintf(f, ":zeek:enum:`Files::%s`\n\n", tag.c_str()); } static void write_plugin_components(FILE* f, const plugin::Plugin* p) @@ -123,13 +123,13 @@ static void write_plugin_bif_items(FILE* f, const plugin::Plugin* p, for ( it = bifitems.begin(); it != bifitems.end(); ++it ) { - broxygen::IdentifierInfo* doc = broxygen_mgr->GetIdentifierInfo( + zeekygen::IdentifierInfo* doc = zeekygen_mgr->GetIdentifierInfo( it->GetID()); if ( doc ) fprintf(f, "%s\n\n", doc->ReStructuredText().c_str()); else - reporter->InternalWarning("Broxygen ID lookup failed: %s\n", + reporter->InternalWarning("Zeekygen ID lookup failed: %s\n", it->GetID().c_str()); } } @@ -138,10 +138,10 @@ static void WriteAnalyzerTagDefn(FILE* f, const string& module) { string tag_id = module + 
"::Tag"; - broxygen::IdentifierInfo* doc = broxygen_mgr->GetIdentifierInfo(tag_id); + zeekygen::IdentifierInfo* doc = zeekygen_mgr->GetIdentifierInfo(tag_id); if ( ! doc ) - reporter->InternalError("Broxygen failed analyzer tag lookup: %s", + reporter->InternalError("Zeekygen failed analyzer tag lookup: %s", tag_id.c_str()); fprintf(f, "%s\n", doc->ReStructuredText().c_str()); @@ -177,7 +177,7 @@ static vector filter_matches(const vector& from, Target* t) if ( t->MatchesPattern(d) ) { - DBG_LOG(DBG_BROXYGEN, "'%s' matched pattern for target '%s'", + DBG_LOG(DBG_ZEEKYGEN, "'%s' matched pattern for target '%s'", d->Name().c_str(), t->Name().c_str()); rval.push_back(d); } @@ -194,14 +194,14 @@ TargetFile::TargetFile(const string& arg_name) string dir = SafeDirname(name).result; if ( ! ensure_intermediate_dirs(dir.c_str()) ) - reporter->FatalError("Broxygen failed to make dir %s", + reporter->FatalError("Zeekygen failed to make dir %s", dir.c_str()); } f = fopen(name.c_str(), "w"); if ( ! f ) - reporter->FatalError("Broxygen failed to open '%s' for writing: %s", + reporter->FatalError("Zeekygen failed to open '%s' for writing: %s", name.c_str(), strerror(errno)); } @@ -210,7 +210,7 @@ TargetFile::~TargetFile() if ( f ) fclose(f); - DBG_LOG(DBG_BROXYGEN, "Wrote out-of-date target '%s'", name.c_str()); + DBG_LOG(DBG_ZEEKYGEN, "Wrote out-of-date target '%s'", name.c_str()); } @@ -245,11 +245,11 @@ void AnalyzerTarget::DoFindDependencies(const std::vector& infos) void AnalyzerTarget::DoGenerate() const { - if ( broxygen_mgr->IsUpToDate(Name(), vector()) ) + if ( zeekygen_mgr->IsUpToDate(Name(), vector()) ) return; if ( Pattern() != "*" ) - reporter->InternalWarning("Broxygen only implements analyzer target" + reporter->InternalWarning("Zeekygen only implements analyzer target" " pattern '*'"); TargetFile file(Name()); @@ -313,7 +313,7 @@ void PackageTarget::DoFindDependencies(const vector& infos) pkg_deps = filter_matches(infos, this); if ( pkg_deps.empty() ) - reporter->FatalError("No match for Broxygen target '%s' pattern '%s'", + reporter->FatalError("No match for Zeekygen target '%s' pattern '%s'", Name().c_str(), Pattern().c_str()); for ( size_t i = 0; i < infos.size(); ++i ) @@ -329,7 +329,7 @@ void PackageTarget::DoFindDependencies(const vector& infos) pkg_deps[j]->Name().size())) continue; - DBG_LOG(DBG_BROXYGEN, "Script %s associated with package %s", + DBG_LOG(DBG_ZEEKYGEN, "Script %s associated with package %s", script->Name().c_str(), pkg_deps[j]->Name().c_str()); pkg_manifest[pkg_deps[j]].push_back(script); script_deps.push_back(script); @@ -339,8 +339,8 @@ void PackageTarget::DoFindDependencies(const vector& infos) void PackageTarget::DoGenerate() const { - if ( broxygen_mgr->IsUpToDate(Name(), script_deps) && - broxygen_mgr->IsUpToDate(Name(), pkg_deps) ) + if ( zeekygen_mgr->IsUpToDate(Name(), script_deps) && + zeekygen_mgr->IsUpToDate(Name(), pkg_deps) ) return; TargetFile file(Name()); @@ -382,13 +382,13 @@ void PackageIndexTarget::DoFindDependencies(const vector& infos) pkg_deps = filter_matches(infos, this); if ( pkg_deps.empty() ) - reporter->FatalError("No match for Broxygen target '%s' pattern '%s'", + reporter->FatalError("No match for Zeekygen target '%s' pattern '%s'", Name().c_str(), Pattern().c_str()); } void PackageIndexTarget::DoGenerate() const { - if ( broxygen_mgr->IsUpToDate(Name(), pkg_deps) ) + if ( zeekygen_mgr->IsUpToDate(Name(), pkg_deps) ) return; TargetFile file(Name()); @@ -402,7 +402,7 @@ void ScriptTarget::DoFindDependencies(const vector& infos) 
script_deps = filter_matches(infos, this); if ( script_deps.empty() ) - reporter->FatalError("No match for Broxygen target '%s' pattern '%s'", + reporter->FatalError("No match for Zeekygen target '%s' pattern '%s'", Name().c_str(), Pattern().c_str()); if ( ! IsDir() ) @@ -410,7 +410,7 @@ void ScriptTarget::DoFindDependencies(const vector& infos) for ( size_t i = 0; i < script_deps.size(); ++i ) { - if ( SafeBasename(script_deps[i]->Name()).result == PACKAGE_LOADER ) + if ( is_package_loader(script_deps[i]->Name()) ) { string pkg_dir = SafeDirname(script_deps[i]->Name()).result; string target_file = Name() + pkg_dir + "/index.rst"; @@ -471,7 +471,7 @@ void ScriptTarget::DoGenerate() const if ( IsDir() ) { // Target name is a dir, matching scripts are written within that dir - // with a dir tree that parallels the script's BROPATH location. + // with a dir tree that parallels the script's ZEEKPATH location. set targets; vector dir_contents = dir_contents_recursive(Name()); @@ -483,7 +483,7 @@ void ScriptTarget::DoGenerate() const vector dep; dep.push_back(script_deps[i]); - if ( broxygen_mgr->IsUpToDate(target_filename, dep) ) + if ( zeekygen_mgr->IsUpToDate(target_filename, dep) ) continue; TargetFile file(target_filename); @@ -508,7 +508,7 @@ void ScriptTarget::DoGenerate() const reporter->Warning("Failed to unlink %s: %s", f.c_str(), strerror(errno)); - DBG_LOG(DBG_BROXYGEN, "Delete stale script file %s", f.c_str()); + DBG_LOG(DBG_ZEEKYGEN, "Delete stale script file %s", f.c_str()); } return; @@ -516,7 +516,7 @@ void ScriptTarget::DoGenerate() const // Target is a single file, all matching scripts get written there. - if ( broxygen_mgr->IsUpToDate(Name(), script_deps) ) + if ( zeekygen_mgr->IsUpToDate(Name(), script_deps) ) return; TargetFile file(Name()); @@ -527,7 +527,7 @@ void ScriptTarget::DoGenerate() const void ScriptSummaryTarget::DoGenerate() const { - if ( broxygen_mgr->IsUpToDate(Name(), script_deps) ) + if ( zeekygen_mgr->IsUpToDate(Name(), script_deps) ) return; TargetFile file(Name()); @@ -552,7 +552,7 @@ void ScriptSummaryTarget::DoGenerate() const void ScriptIndexTarget::DoGenerate() const { - if ( broxygen_mgr->IsUpToDate(Name(), script_deps) ) + if ( zeekygen_mgr->IsUpToDate(Name(), script_deps) ) return; TargetFile file(Name()); @@ -577,13 +577,13 @@ void IdentifierTarget::DoFindDependencies(const vector& infos) id_deps = filter_matches(infos, this); if ( id_deps.empty() ) - reporter->FatalError("No match for Broxygen target '%s' pattern '%s'", + reporter->FatalError("No match for Zeekygen target '%s' pattern '%s'", Name().c_str(), Pattern().c_str()); } void IdentifierTarget::DoGenerate() const { - if ( broxygen_mgr->IsUpToDate(Name(), id_deps) ) + if ( zeekygen_mgr->IsUpToDate(Name(), id_deps) ) return; TargetFile file(Name()); diff --git a/src/broxygen/Target.h b/src/zeekygen/Target.h similarity index 95% rename from src/broxygen/Target.h rename to src/zeekygen/Target.h index 9a5a23107c..4062f8a788 100644 --- a/src/broxygen/Target.h +++ b/src/zeekygen/Target.h @@ -1,7 +1,7 @@ // See the file "COPYING" in the main distribution directory for copyright. -#ifndef BROXYGEN_TARGET_H -#define BROXYGEN_TARGET_H +#ifndef ZEEKYGEN_TARGET_H +#define ZEEKYGEN_TARGET_H #include "Info.h" #include "PackageInfo.h" @@ -13,7 +13,7 @@ #include #include -namespace broxygen { +namespace zeekygen { /** * Helper class to create files in arbitrary file paths and automatically @@ -39,9 +39,9 @@ struct TargetFile { }; /** - * A Broxygen target abstract base class. 
A target is generally any portion of + * A Zeekygen target abstract base class. A target is generally any portion of * documentation that Bro can build. It's identified by a type (e.g. script, - * identifier, package), a pattern (e.g. "example.bro", "HTTP::Info"), and + * identifier, package), a pattern (e.g. "example.zeek", "HTTP::Info"), and * a path to an output file. */ class Target { @@ -125,7 +125,7 @@ public: /** * Register a new target type. - * @param type_name The target type name as it will appear in Broxygen + * @param type_name The target type name as it will appear in Zeekygen * config files. */ template @@ -136,7 +136,7 @@ public: /** * Instantiate a target. - * @param type_name The target type name as it appears in Broxygen config + * @param type_name The target type name as it appears in Zeekygen config * files. * @param name The output file name of the target. * @param pattern The dependency pattern of the target. @@ -294,7 +294,7 @@ public: * @param name Output file name or directory. If it's a directory, * then one document for each script that matches the pattern is written to * the directory in a directory structure which mirrors the script's path - * relative to a component in BROPATH. + * relative to a component in ZEEKPATH. * @param pattern Dependency pattern. */ ScriptTarget(const std::string& name, const std::string& pattern) @@ -384,6 +384,6 @@ private: std::vector id_deps; }; -} // namespace broxygen +} // namespace zeekygen #endif diff --git a/src/broxygen/utils.cc b/src/zeekygen/utils.cc similarity index 83% rename from src/broxygen/utils.cc rename to src/zeekygen/utils.cc index 93f822b846..b04790ee92 100644 --- a/src/broxygen/utils.cc +++ b/src/zeekygen/utils.cc @@ -7,10 +7,10 @@ #include #include -using namespace broxygen; +using namespace zeekygen; using namespace std; -bool broxygen::prettify_params(string& s) +bool zeekygen::prettify_params(string& s) { size_t identifier_start_pos = 0; bool in_identifier = false; @@ -76,29 +76,29 @@ bool broxygen::prettify_params(string& s) return false; } -bool broxygen::is_public_api(const ID* id) +bool zeekygen::is_public_api(const ID* id) { return (id->Scope() == SCOPE_GLOBAL) || (id->Scope() == SCOPE_MODULE && id->IsExport()); } -time_t broxygen::get_mtime(const string& filename) +time_t zeekygen::get_mtime(const string& filename) { struct stat s; if ( stat(filename.c_str(), &s) < 0 ) - reporter->InternalError("Broxygen failed to stat file '%s': %s", + reporter->InternalError("Zeekygen failed to stat file '%s': %s", filename.c_str(), strerror(errno)); return s.st_mtime; } -string broxygen::make_heading(const string& heading, char underline) +string zeekygen::make_heading(const string& heading, char underline) { return heading + "\n" + string(heading.size(), underline) + "\n"; } -size_t broxygen::end_of_first_sentence(const string& s) +size_t zeekygen::end_of_first_sentence(const string& s) { size_t rval = 0; @@ -119,7 +119,7 @@ size_t broxygen::end_of_first_sentence(const string& s) return rval; } -bool broxygen::is_all_whitespace(const string& s) +bool zeekygen::is_all_whitespace(const string& s) { for ( size_t i = 0; i < s.size(); ++i ) if ( ! 
isspace(s[i]) ) @@ -128,7 +128,7 @@ bool broxygen::is_all_whitespace(const string& s) return true; } -string broxygen::redef_indication(const string& from_script) +string zeekygen::redef_indication(const string& from_script) { return fmt("(present if :doc:`/scripts/%s` is loaded)", from_script.c_str()); diff --git a/src/broxygen/utils.h b/src/zeekygen/utils.h similarity index 88% rename from src/broxygen/utils.h rename to src/zeekygen/utils.h index 7e11019a3d..07430f66ba 100644 --- a/src/broxygen/utils.h +++ b/src/zeekygen/utils.h @@ -1,18 +1,18 @@ // See the file "COPYING" in the main distribution directory for copyright. -#ifndef BROXYGEN_UTILS_H -#define BROXYGEN_UTILS_H +#ifndef ZEEKYGEN_UTILS_H +#define ZEEKYGEN_UTILS_H #include "ID.h" #include -namespace broxygen { +namespace zeekygen { /** - * Transform content of a Broxygen comment which may contain function + * Transform content of a Zeekygen comment which may contain function * parameter or return value documentation to a prettier reST format. - * @param s Content from a Broxygen comment to transform. "id: ..." and + * @param s Content from a Zeekygen comment to transform. "id: ..." and * "Returns: ..." change to ":id: ..." and ":returns: ...". * @return Whether any content in \a s was transformed. */ @@ -62,6 +62,6 @@ bool is_all_whitespace(const std::string& s); */ std::string redef_indication(const std::string& from_script); -} // namespace broxygen +} // namespace zeekygen #endif diff --git a/src/zeekygen/zeekygen.bif b/src/zeekygen/zeekygen.bif new file mode 100644 index 0000000000..d97cd782bd --- /dev/null +++ b/src/zeekygen/zeekygen.bif @@ -0,0 +1,97 @@ +# See the file "COPYING" in the main distribution directory for copyright. + +##! Functions for querying script, package, or variable documentation. + +%%{ +#include "zeekygen/Manager.h" +#include "util.h" + +static StringVal* comments_to_val(const vector& comments) + { + return new StringVal(implode_string_vector(comments)); + } +%%} + +## Retrieve the Zeekygen-style comments (``##``) associated with an identifier +## (e.g. a variable or type). +## +## name: a script-level identifier for which to retrieve comments. +## +## Returns: comments associated with *name*. If *name* is not a known +## identifier, an empty string is returned. +function get_identifier_comments%(name: string%): string + %{ + using namespace zeekygen; + IdentifierInfo* d = zeekygen_mgr->GetIdentifierInfo(name->CheckString()); + + if ( ! d ) + return val_mgr->GetEmptyString(); + + return comments_to_val(d->GetComments()); + %} + +## Retrieve the Zeekygen-style summary comments (``##!``) associated with +## a Zeek script. +## +## name: the name of a Zeek script. It must be a relative path to where +## it is located within a particular component of ZEEKPATH and use +## the same file name extension/suffix as the actual file (e.g. ".zeek"). +## +## Returns: summary comments associated with script with *name*. If +## *name* is not a known script, an empty string is returned. +function get_script_comments%(name: string%): string + %{ + using namespace zeekygen; + ScriptInfo* d = zeekygen_mgr->GetScriptInfo(name->CheckString()); + + if ( ! d ) + return val_mgr->GetEmptyString(); + + return comments_to_val(d->GetComments()); + %} + +## Retrieve the contents of a Zeek script package's README file. +## +## name: the name of a Zeek script package. It must be a relative path +## to where it is located within a particular component of ZEEKPATH. +## +## Returns: contents of the package's README file. 
If *name* is not a known +## package, an empty string is returned. +function get_package_readme%(name: string%): string + %{ + using namespace zeekygen; + PackageInfo* d = zeekygen_mgr->GetPackageInfo(name->CheckString()); + + if ( ! d ) + return val_mgr->GetEmptyString(); + + return comments_to_val(d->GetReadme()); + %} + +## Retrieve the Zeekygen-style comments (``##``) associated with a record field. +## +## name: the name of a record type and a field within it formatted like +## a typical record field access: "$". +## +## Returns: comments associated with the record field. If *name* does +## not point to a known record type or a known field within a record +## type, an empty string is returned. +function get_record_field_comments%(name: string%): string + %{ + using namespace zeekygen; + string accessor = name->CheckString(); + size_t i = accessor.find('$'); + + if ( i > accessor.size() - 2 ) + return val_mgr->GetEmptyString(); + + string id = accessor.substr(0, i); + + IdentifierInfo* d = zeekygen_mgr->GetIdentifierInfo(id); + + if ( ! d ) + return val_mgr->GetEmptyString(); + + string field = accessor.substr(i + 1); + return comments_to_val(d->GetFieldComments(field)); + %} diff --git a/testing/README b/testing/README index ba407fcc67..37f8aa9014 100644 --- a/testing/README +++ b/testing/README @@ -1,13 +1,13 @@ -This directory contains suites for testing for Bro's correct +This directory contains suites for testing for Zeek's correct operation: btest/ - An ever-growing set of small unit tests testing Bro's + An ever-growing set of small unit tests testing Zeek's functionality. external/ A framework for downloading additional test sets that run more - complex Bro configuration on larger traces files. Due to their + complex Zeek configuration on larger traces files. Due to their size, these are not included directly. See the README for more information. 
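The functions added in src/zeekygen/zeekygen.bif above are ordinary script-level BIFs, so they can be called directly from a Zeek script. The following is a minimal usage sketch, not part of this change: the identifier, script, and package names are illustrative placeholders, and it assumes Zeekygen comment tracking has not been disabled via the environment.

    event zeek_init()
        {
        # Comments (##) attached to a script-level identifier (placeholder name).
        print get_identifier_comments("Notice::Info");

        # Summary comments (##!) of a script, named relative to a ZEEKPATH component
        # and using the actual file suffix (placeholder path).
        print get_script_comments("base/frameworks/notice/main.zeek");

        # Contents of a script package's README (placeholder package path).
        print get_package_readme("base/frameworks/notice");

        # Comments on a single record field, written in record-field-access form.
        print get_record_field_comments("Notice::Info$note");
        }

Each lookup returns an empty string when the given name is unknown, so scripts can probe for documentation without triggering runtime errors.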
diff --git a/testing/btest/Baseline/bifs.capture_state_updates/out b/testing/btest/Baseline/bifs.capture_state_updates/out deleted file mode 100644 index 62a6e3c9df..0000000000 --- a/testing/btest/Baseline/bifs.capture_state_updates/out +++ /dev/null @@ -1 +0,0 @@ -T diff --git a/testing/btest/Baseline/bifs.cat_string_array/out b/testing/btest/Baseline/bifs.cat_string_array/out deleted file mode 100644 index 963f826db9..0000000000 --- a/testing/btest/Baseline/bifs.cat_string_array/out +++ /dev/null @@ -1,3 +0,0 @@ -isatest -thisisatest -isa diff --git a/testing/btest/Baseline/bifs.decode_base64/out b/testing/btest/Baseline/bifs.decode_base64/out index aa265d2148..bb04766fd8 100644 --- a/testing/btest/Baseline/bifs.decode_base64/out +++ b/testing/btest/Baseline/bifs.decode_base64/out @@ -6,9 +6,3 @@ bro bro bro bro -bro -bro -bro -bro -bro -bro diff --git a/testing/btest/Baseline/bifs.decode_base64_conn/weird.log b/testing/btest/Baseline/bifs.decode_base64_conn/weird.log index 2479b39969..cdee200f0b 100644 --- a/testing/btest/Baseline/bifs.decode_base64_conn/weird.log +++ b/testing/btest/Baseline/bifs.decode_base64_conn/weird.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-12-36 +#open 2019-06-07-01-59-08 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1254722767.875996 ClEkJM2Vm5giqnMf4h 10.10.1.4 1470 74.53.140.153 25 base64_illegal_encoding incomplete base64 group, padding with 12 bits of 0 F bro -1437831787.861602 CmES5u32sYpV7JYN 192.168.133.100 49648 192.168.133.102 25 base64_illegal_encoding incomplete base64 group, padding with 12 bits of 0 F bro -1437831799.610433 C3eiCBGOLw3VtHfOj 192.168.133.100 49655 17.167.150.73 443 base64_illegal_encoding incomplete base64 group, padding with 12 bits of 0 F bro -#close 2016-07-13-16-12-36 +1254722767.875996 ClEkJM2Vm5giqnMf4h 10.10.1.4 1470 74.53.140.153 25 base64_illegal_encoding incomplete base64 group, padding with 12 bits of 0 F zeek +1437831787.861602 CmES5u32sYpV7JYN 192.168.133.100 49648 192.168.133.102 25 base64_illegal_encoding incomplete base64 group, padding with 12 bits of 0 F zeek +1437831799.610433 C3eiCBGOLw3VtHfOj 192.168.133.100 49655 17.167.150.73 443 base64_illegal_encoding incomplete base64 group, padding with 12 bits of 0 F zeek +#close 2019-06-07-01-59-08 diff --git a/testing/btest/Baseline/bifs.encode_base64/out b/testing/btest/Baseline/bifs.encode_base64/out index 3008115853..cacea20cca 100644 --- a/testing/btest/Baseline/bifs.encode_base64/out +++ b/testing/btest/Baseline/bifs.encode_base64/out @@ -2,9 +2,6 @@ YnJv YnJv YnJv }n-v -YnJv -YnJv -}n-v cGFkZGluZw== cGFkZGluZzE= cGFkZGluZzEy diff --git a/testing/btest/Baseline/bifs.global_sizes/out b/testing/btest/Baseline/bifs.global_sizes/out index 76c40b297a..fe0e737de0 100644 --- a/testing/btest/Baseline/bifs.global_sizes/out +++ b/testing/btest/Baseline/bifs.global_sizes/out @@ -1 +1 @@ -found bro_init +found zeek_init diff --git a/testing/btest/Baseline/bifs.join_string/out b/testing/btest/Baseline/bifs.join_string/out index e916fc304a..dbfa4c1e52 100644 --- a/testing/btest/Baseline/bifs.join_string/out +++ b/testing/btest/Baseline/bifs.join_string/out @@ -1,6 +1,3 @@ -this * is * a * test -thisisatest -mytest this__is__another__test thisisanothertest Test diff --git a/testing/btest/Baseline/bifs.lookup_ID/out b/testing/btest/Baseline/bifs.lookup_ID/out index 64b6379deb..40170b1f7c 100644 --- 
a/testing/btest/Baseline/bifs.lookup_ID/out +++ b/testing/btest/Baseline/bifs.lookup_ID/out @@ -1,4 +1,4 @@ -bro test +zeek test diff --git a/testing/btest/Baseline/bifs.merge_pattern/out b/testing/btest/Baseline/bifs.merge_pattern/out deleted file mode 100644 index fe8ebc3c01..0000000000 --- a/testing/btest/Baseline/bifs.merge_pattern/out +++ /dev/null @@ -1,2 +0,0 @@ -match -match diff --git a/testing/btest/Baseline/bifs.sort_string_array/out b/testing/btest/Baseline/bifs.sort_string_array/out deleted file mode 100644 index 533844768d..0000000000 --- a/testing/btest/Baseline/bifs.sort_string_array/out +++ /dev/null @@ -1,4 +0,0 @@ -a -is -test -this diff --git a/testing/btest/Baseline/bifs.split/out b/testing/btest/Baseline/bifs.split/out deleted file mode 100644 index 0ec2541f3d..0000000000 --- a/testing/btest/Baseline/bifs.split/out +++ /dev/null @@ -1,32 +0,0 @@ -t -s is a t -t ---------------------- -t -s is a test ---------------------- -t -hi -s is a t -es -t ---------------------- -t -s is a test ---------------------- -t -hi -s is a test ---------------------- -[, thi, s i, s a tes, t] ---------------------- -X-Mailer -Testing Test (http://www.example.com) ---------------------- -A -= - B -= - C -= - D diff --git a/testing/btest/Baseline/bifs.to_double_from_string/error b/testing/btest/Baseline/bifs.to_double_from_string/error index d6c6c0c75b..ed0ae3a1f9 100644 --- a/testing/btest/Baseline/bifs.to_double_from_string/error +++ b/testing/btest/Baseline/bifs.to_double_from_string/error @@ -1,2 +1,2 @@ -error in /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.bro, line 7 and /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.bro, line 15: bad conversion to double (to_double(d) and NotADouble) -error in /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.bro, line 7 and /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.bro, line 16: bad conversion to double (to_double(d) and ) +error in /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.zeek, line 7 and /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.zeek, line 15: bad conversion to double (to_double(d) and NotADouble) +error in /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.zeek, line 7 and /da/home/robin/bro/master/testing/btest/.tmp/bifs.to_double_from_string/to_double_from_string.zeek, line 16: bad conversion to double (to_double(d) and ) diff --git a/testing/btest/Baseline/broker.opaque/.stderr b/testing/btest/Baseline/broker.opaque/.stderr new file mode 100644 index 0000000000..bf07a71a21 --- /dev/null +++ b/testing/btest/Baseline/broker.opaque/.stderr @@ -0,0 +1 @@ +error: incompatible Bloom filter types diff --git a/testing/btest/Baseline/broker.opaque/out b/testing/btest/Baseline/broker.opaque/out new file mode 100644 index 0000000000..35bf821c47 --- /dev/null +++ b/testing/btest/Baseline/broker.opaque/out @@ -0,0 +1,53 @@ +============ Topk +[b, a, c] +[b, a, c] +============ HLL +3.000069 +3.000069 +3.000069 +============ Bloom +0 +1 +0 +1 +============ Hashes +5b9164ad6f496d9dee12ec7634ce253f +5b9164ad6f496d9dee12ec7634ce253f +30ae97492ce1da88d0e7117ace0a60a6f9e1e0bc +30ae97492ce1da88d0e7117ace0a60a6f9e1e0bc +25b6746d5172ed6352966a013d93ac846e1110d5a25e8f183b5931f4688842a1 
+25b6746d5172ed6352966a013d93ac846e1110d5a25e8f183b5931f4688842a1 +============ X509 +[version=3, serial=040000000001154B5AC394, subject=CN=GlobalSign Root CA,OU=Root CA,O=GlobalSign nv-sa,C=BE, issuer=CN=GlobalSign Root CA,OU=Root CA,O=GlobalSign nv-sa,C=BE, cn=GlobalSign Root CA, not_valid_before=904651200.0, not_valid_after=1832673600.0, key_alg=rsaEncryption, sig_alg=sha1WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=] +[version=3, serial=040000000001154B5AC394, subject=CN=GlobalSign Root CA,OU=Root CA,O=GlobalSign nv-sa,C=BE, issuer=CN=GlobalSign Root CA,OU=Root CA,O=GlobalSign nv-sa,C=BE, cn=GlobalSign Root CA, not_valid_before=904651200.0, not_valid_after=1832673600.0, key_alg=rsaEncryption, sig_alg=sha1WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=] +============ Entropy +[entropy=4.715374, chi_square=591.981818, mean=75.472727, monte_carlo_pi=4.0, serial_correlation=-0.11027] +[entropy=4.715374, chi_square=591.981818, mean=75.472727, monte_carlo_pi=4.0, serial_correlation=-0.11027] +============ broker::Data +broker::data{{hi, there}} +broker::data{{hi, there}} +T +============ broker::Set +| [data=broker::data{!}] | [data=broker::data{!}] + > [data=broker::data{hi}] | [data=broker::data{hi}] +| [data=broker::data{hi}] | [data=broker::data{hi}] + > [data=broker::data{there}] | [data=broker::data{there}] +| [data=broker::data{there}] | [data=broker::data{there}] +============ broker::Table +| [key=[data=broker::data{!}], val=[data=broker::data{30}]] | [key=[data=broker::data{!}], val=[data=broker::data{30}]] + > [key=[data=broker::data{hi}], val=[data=broker::data{10}]] | [key=[data=broker::data{hi}], val=[data=broker::data{10}]] +| [key=[data=broker::data{hi}], val=[data=broker::data{10}]] | [key=[data=broker::data{hi}], val=[data=broker::data{10}]] + > [key=[data=broker::data{there}], val=[data=broker::data{20}]] | [key=[data=broker::data{there}], val=[data=broker::data{20}]] +| [key=[data=broker::data{there}], val=[data=broker::data{20}]] | [key=[data=broker::data{there}], val=[data=broker::data{20}]] +============ broker::Vector +| [data=broker::data{hi}] | [data=broker::data{hi}] + > [data=broker::data{there}] | [data=broker::data{there}] +| [data=broker::data{there}] | [data=broker::data{there}] + > [data=broker::data{!}] | [data=broker::data{!}] +| [data=broker::data{!}] | [data=broker::data{!}] +============ broker::Record +| [data=broker::data{hi}] | [data=broker::data{hi}] + > [data=broker::data{there}] | [data=broker::data{there}] +| [data=broker::data{there}] | [data=broker::data{there}] + > [data=broker::data{!}] | [data=broker::data{!}] +| [data=broker::data{!}] | [data=broker::data{!}] diff --git a/testing/btest/Baseline/broker.store.ops/master.out b/testing/btest/Baseline/broker.store.ops/master.out index afb7c84fb4..80546431b5 100644 --- a/testing/btest/Baseline/broker.store.ops/master.out +++ b/testing/btest/Baseline/broker.store.ops/master.out @@ -6,7 +6,7 @@ [6], { y, x -}, Broker::SUCCESS, [data=broker::data{[1/tcp, 2/tcp, 3/tcp]}] +}, Broker::SUCCESS, [data=broker::data{(1/tcp, 2/tcp, 3/tcp)}] [7], two, Broker::SUCCESS, [data=broker::data{230}] [8], three, Broker::SUCCESS, [data=broker::data{320}] [9], four, Broker::SUCCESS, [data=broker::data{{1, 2, 3}}] @@ -14,7 +14,7 @@ keys, [status=Broker::SUCCESS, result=[data=broker::data{{four, one, set, str, t [11], str, Broker::SUCCESS, [data=broker::data{foobar}] [12], set, Broker::SUCCESS, [data=broker::data{{A, B, C}}] [13], table, Broker::SUCCESS, 
[data=broker::data{{a -> 1, c -> 3}}] -[14], vec, Broker::SUCCESS, [data=broker::data{[1, 2, 3, 4]}] +[14], vec, Broker::SUCCESS, [data=broker::data{(1, 2, 3, 4)}] [15], one, [status=Broker::SUCCESS, result=[data=broker::data{T}]] [16], NOPE, [status=Broker::SUCCESS, result=[data=broker::data{F}]] [17], vec, Broker::SUCCESS, [data=broker::data{2}] diff --git a/testing/btest/Baseline/broker.store.record/master.out b/testing/btest/Baseline/broker.store.record/master.out index 9e82505b41..6af2d5b737 100644 --- a/testing/btest/Baseline/broker.store.record/master.out +++ b/testing/btest/Baseline/broker.store.record/master.out @@ -2,7 +2,7 @@ T T T -[data=broker::data{[hi, hello, 37]}], [s1=hi, s2=hello, c=37] +[data=broker::data{(hi, hello, 37)}], [s1=hi, s2=hello, c=37] [data=broker::data{hi}] [data=broker::data{hello}] @@ -11,7 +11,7 @@ T T 3 [data=broker::data{goodbye}] -[data=broker::data{[hi, goodbye, 37]}], [s1=hi, s2=goodbye, c=37] +[data=broker::data{(hi, goodbye, 37)}], [s1=hi, s2=goodbye, c=37] | [data=broker::data{hi}] | [data=broker::data{goodbye}] diff --git a/testing/btest/Baseline/broker.store.sqlite/out b/testing/btest/Baseline/broker.store.sqlite/out index 621474aef2..00c805d3ba 100644 --- a/testing/btest/Baseline/broker.store.sqlite/out +++ b/testing/btest/Baseline/broker.store.sqlite/out @@ -10,4 +10,4 @@ five, Broker::FAILURE, [data=] { y, x -}, Broker::SUCCESS, [data=broker::data{[1/tcp, 2/tcp, 3/tcp]}] +}, Broker::SUCCESS, [data=broker::data{(1/tcp, 2/tcp, 3/tcp)}] diff --git a/testing/btest/Baseline/broker.store.vector/master.out b/testing/btest/Baseline/broker.store.vector/master.out index e442646af8..173941abf1 100644 --- a/testing/btest/Baseline/broker.store.vector/master.out +++ b/testing/btest/Baseline/broker.store.vector/master.out @@ -4,24 +4,24 @@ T T T 4 -[data=broker::data{[hi, salutations, hello, greetings]}], [hi, salutations, hello, greetings] +[data=broker::data{(hi, salutations, hello, greetings)}], [hi, salutations, hello, greetings] | [data=broker::data{hi}] | [data=broker::data{salutations}] | [data=broker::data{hello}] | [data=broker::data{greetings}] [data=broker::data{hello}] -[data=broker::data{[hi, salutations, bah, greetings]}], [hi, salutations, bah, greetings] +[data=broker::data{(hi, salutations, bah, greetings)}], [hi, salutations, bah, greetings] [data=broker::data{bah}] [data=broker::data{hi}] -[data=broker::data{[hi, salutations, bah, greetings]}], [hi, salutations, bah, greetings] +[data=broker::data{(hi, salutations, bah, greetings)}], [hi, salutations, bah, greetings] [data=broker::data{bah}] -[data=broker::data{[hi, salutations, greetings]}], [hi, salutations, greetings] +[data=broker::data{(hi, salutations, greetings)}], [hi, salutations, greetings] 3 T 0 -[data=broker::data{[]}], [] +[data=broker::data{()}], [] diff --git a/testing/btest/Baseline/core.checksums/bad.out b/testing/btest/Baseline/core.checksums/bad.out index 44ef942ae3..dfa186c419 100644 --- a/testing/btest/Baseline/core.checksums/bad.out +++ b/testing/btest/Baseline/core.checksums/bad.out @@ -3,101 +3,101 @@ #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-12-42 +#open 2019-06-07-02-20-03 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1332784981.078396 - - - - - bad_IP_checksum - F bro -#close 2016-07-13-16-12-42 +1332784981.078396 - - - - - bad_IP_checksum - F zeek +#close 2019-06-07-02-20-03 #separator \x09 #set_separator , #empty_field (empty) 
#unset_field - #path weird -#open 2016-07-13-16-12-42 +#open 2019-06-07-02-20-03 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1332784885.686428 CHhAvVGS1DHFjwGM9 127.0.0.1 30000 127.0.0.1 80 bad_TCP_checksum - F bro -#close 2016-07-13-16-12-42 +1332784885.686428 CHhAvVGS1DHFjwGM9 127.0.0.1 30000 127.0.0.1 80 bad_TCP_checksum - F zeek +#close 2019-06-07-02-20-03 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-12-43 +#open 2019-06-07-02-20-04 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1332784933.501023 CHhAvVGS1DHFjwGM9 127.0.0.1 30000 127.0.0.1 13000 bad_UDP_checksum - F bro -#close 2016-07-13-16-12-43 +1332784933.501023 CHhAvVGS1DHFjwGM9 127.0.0.1 30000 127.0.0.1 13000 bad_UDP_checksum - F zeek +#close 2019-06-07-02-20-04 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-12-43 +#open 2019-06-07-02-20-04 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1334075363.536871 CHhAvVGS1DHFjwGM9 192.168.1.100 8 192.168.1.101 0 bad_ICMP_checksum - F bro -#close 2016-07-13-16-12-43 +1334075363.536871 CHhAvVGS1DHFjwGM9 192.168.1.100 8 192.168.1.101 0 bad_ICMP_checksum - F zeek +#close 2019-06-07-02-20-04 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-12-44 +#open 2019-06-07-02-20-05 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1332785210.013051 - 2001:4f8:4:7:2e0:81ff:fe52:ffff 0 2001:78:1:32::2 0 routing0_hdr - F bro -1332785210.013051 CHhAvVGS1DHFjwGM9 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:78:1:32::2 80 bad_TCP_checksum - F bro -#close 2016-07-13-16-12-44 +1332785210.013051 - 2001:4f8:4:7:2e0:81ff:fe52:ffff 0 2001:78:1:32::2 0 routing0_hdr - F zeek +1332785210.013051 CHhAvVGS1DHFjwGM9 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:78:1:32::2 80 bad_TCP_checksum - F zeek +#close 2019-06-07-02-20-05 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-12-44 +#open 2019-06-07-02-20-05 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1332782580.798420 - 2001:4f8:4:7:2e0:81ff:fe52:ffff 0 2001:78:1:32::2 0 routing0_hdr - F bro -1332782580.798420 CHhAvVGS1DHFjwGM9 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:78:1:32::2 13000 bad_UDP_checksum - F bro -#close 2016-07-13-16-12-44 +1332782580.798420 - 2001:4f8:4:7:2e0:81ff:fe52:ffff 0 2001:78:1:32::2 0 routing0_hdr - F zeek +1332782580.798420 CHhAvVGS1DHFjwGM9 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:78:1:32::2 13000 bad_UDP_checksum - F zeek +#close 2019-06-07-02-20-05 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-12-45 +#open 2019-06-07-02-20-06 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1334075111.800086 - 2001:4f8:4:7:2e0:81ff:fe52:ffff 0 2001:78:1:32::1 0 routing0_hdr - F bro -1334075111.800086 CHhAvVGS1DHFjwGM9 2001:4f8:4:7:2e0:81ff:fe52:ffff 128 2001:78:1:32::1 129 bad_ICMP_checksum - F bro -#close 2016-07-13-16-12-45 
+1334075111.800086 - 2001:4f8:4:7:2e0:81ff:fe52:ffff 0 2001:78:1:32::1 0 routing0_hdr - F zeek +1334075111.800086 CHhAvVGS1DHFjwGM9 2001:4f8:4:7:2e0:81ff:fe52:ffff 128 2001:78:1:32::1 129 bad_ICMP_checksum - F zeek +#close 2019-06-07-02-20-06 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-12-45 +#open 2019-06-07-02-20-06 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1332785250.469132 CHhAvVGS1DHFjwGM9 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:4f8:4:7:2e0:81ff:fe52:9a6b 80 bad_TCP_checksum - F bro -#close 2016-07-13-16-12-45 +1332785250.469132 CHhAvVGS1DHFjwGM9 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:4f8:4:7:2e0:81ff:fe52:9a6b 80 bad_TCP_checksum - F zeek +#close 2019-06-07-02-20-06 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-12-46 +#open 2019-06-07-02-20-06 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1332781342.923813 CHhAvVGS1DHFjwGM9 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:4f8:4:7:2e0:81ff:fe52:9a6b 13000 bad_UDP_checksum - F bro -#close 2016-07-13-16-12-46 +1332781342.923813 CHhAvVGS1DHFjwGM9 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:4f8:4:7:2e0:81ff:fe52:9a6b 13000 bad_UDP_checksum - F zeek +#close 2019-06-07-02-20-07 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-12-46 +#open 2019-06-07-02-20-07 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1334074939.467194 CHhAvVGS1DHFjwGM9 2001:4f8:4:7:2e0:81ff:fe52:ffff 128 2001:4f8:4:7:2e0:81ff:fe52:9a6b 129 bad_ICMP_checksum - F bro -#close 2016-07-13-16-12-47 +1334074939.467194 CHhAvVGS1DHFjwGM9 2001:4f8:4:7:2e0:81ff:fe52:ffff 128 2001:4f8:4:7:2e0:81ff:fe52:9a6b 129 bad_ICMP_checksum - F zeek +#close 2019-06-07-02-20-07 diff --git a/testing/btest/Baseline/core.checksums/good.out b/testing/btest/Baseline/core.checksums/good.out index 5c99e9390a..50619c654f 100644 --- a/testing/btest/Baseline/core.checksums/good.out +++ b/testing/btest/Baseline/core.checksums/good.out @@ -3,68 +3,68 @@ #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-12-46 +#open 2019-06-07-02-20-07 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1334074939.467194 CHhAvVGS1DHFjwGM9 2001:4f8:4:7:2e0:81ff:fe52:ffff 128 2001:4f8:4:7:2e0:81ff:fe52:9a6b 129 bad_ICMP_checksum - F bro -#close 2016-07-13-16-12-47 +1334074939.467194 CHhAvVGS1DHFjwGM9 2001:4f8:4:7:2e0:81ff:fe52:ffff 128 2001:4f8:4:7:2e0:81ff:fe52:9a6b 129 bad_ICMP_checksum - F zeek +#close 2019-06-07-02-20-07 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-12-49 +#open 2019-06-07-02-20-08 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1332785125.596793 - 2001:4f8:4:7:2e0:81ff:fe52:ffff 0 2001:78:1:32::2 0 routing0_hdr - F bro -#close 2016-07-13-16-12-49 +1332785125.596793 - 2001:4f8:4:7:2e0:81ff:fe52:ffff 0 2001:78:1:32::2 0 routing0_hdr - F zeek +#close 2019-06-07-02-20-08 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-12-49 +#open 
2019-06-07-02-20-09 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1332782508.592037 - 2001:4f8:4:7:2e0:81ff:fe52:ffff 0 2001:78:1:32::2 0 routing0_hdr - F bro -#close 2016-07-13-16-12-49 +1332782508.592037 - 2001:4f8:4:7:2e0:81ff:fe52:ffff 0 2001:78:1:32::2 0 routing0_hdr - F zeek +#close 2019-06-07-02-20-09 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-12-50 +#open 2019-06-07-02-20-09 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1334075027.053380 - 2001:4f8:4:7:2e0:81ff:fe52:ffff 0 2001:78:1:32::1 0 routing0_hdr - F bro -#close 2016-07-13-16-12-50 +1334075027.053380 - 2001:4f8:4:7:2e0:81ff:fe52:ffff 0 2001:78:1:32::1 0 routing0_hdr - F zeek +#close 2019-06-07-02-20-09 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-12-50 +#open 2019-06-07-02-20-09 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1334075027.053380 - 2001:4f8:4:7:2e0:81ff:fe52:ffff 0 2001:78:1:32::1 0 routing0_hdr - F bro -#close 2016-07-13-16-12-50 +1334075027.053380 - 2001:4f8:4:7:2e0:81ff:fe52:ffff 0 2001:78:1:32::1 0 routing0_hdr - F zeek +#close 2019-06-07-02-20-09 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-12-50 +#open 2019-06-07-02-20-09 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1334075027.053380 - 2001:4f8:4:7:2e0:81ff:fe52:ffff 0 2001:78:1:32::1 0 routing0_hdr - F bro -#close 2016-07-13-16-12-50 +1334075027.053380 - 2001:4f8:4:7:2e0:81ff:fe52:ffff 0 2001:78:1:32::1 0 routing0_hdr - F zeek +#close 2019-06-07-02-20-09 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-12-50 +#open 2019-06-07-02-20-09 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1334075027.053380 - 2001:4f8:4:7:2e0:81ff:fe52:ffff 0 2001:78:1:32::1 0 routing0_hdr - F bro -#close 2016-07-13-16-12-50 +1334075027.053380 - 2001:4f8:4:7:2e0:81ff:fe52:ffff 0 2001:78:1:32::1 0 routing0_hdr - F zeek +#close 2019-06-07-02-20-09 diff --git a/testing/btest/Baseline/core.disable-mobile-ipv6/weird.log b/testing/btest/Baseline/core.disable-mobile-ipv6/weird.log index ee45663170..ab6fb323d2 100644 --- a/testing/btest/Baseline/core.disable-mobile-ipv6/weird.log +++ b/testing/btest/Baseline/core.disable-mobile-ipv6/weird.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path weird -#open 2012-04-05-21-56-51 +#open 2019-06-07-01-59-20 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1333663011.602839 - - - - - unknown_protocol - F bro -#close 2012-04-05-21-56-51 +1333663011.602839 - - - - - unknown_protocol - F zeek +#close 2019-06-07-01-59-20 diff --git a/testing/btest/Baseline/core.div-by-zero/out b/testing/btest/Baseline/core.div-by-zero/out index dca1894e32..702d00c156 100644 --- a/testing/btest/Baseline/core.div-by-zero/out +++ b/testing/btest/Baseline/core.div-by-zero/out @@ -1,5 +1,5 @@ -expression error in 
/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.div-by-zero/div-by-zero.bro, line 6: division by zero (a / b) -expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.div-by-zero/div-by-zero.bro, line 11: division by zero (a / b) -expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.div-by-zero/div-by-zero.bro, line 16: division by zero (a / b) -expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.div-by-zero/div-by-zero.bro, line 21: modulo by zero (a % b) -expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.div-by-zero/div-by-zero.bro, line 26: modulo by zero (a % b) +expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.div-by-zero/div-by-zero.zeek, line 6: division by zero (a / b) +expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.div-by-zero/div-by-zero.zeek, line 11: division by zero (a / b) +expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.div-by-zero/div-by-zero.zeek, line 16: division by zero (a / b) +expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.div-by-zero/div-by-zero.zeek, line 21: modulo by zero (a % b) +expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.div-by-zero/div-by-zero.zeek, line 26: modulo by zero (a % b) diff --git a/testing/btest/Baseline/core.expr-exception/reporter.log b/testing/btest/Baseline/core.expr-exception/reporter.log index f546142dca..e2e1a4103f 100644 --- a/testing/btest/Baseline/core.expr-exception/reporter.log +++ b/testing/btest/Baseline/core.expr-exception/reporter.log @@ -6,13 +6,13 @@ #open 2011-03-18-19-06-08 #fields ts level message location #types time enum string string -1300475168.783842 Reporter::ERROR field value missing (c$ftp) /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 -1300475168.915940 Reporter::ERROR field value missing (c$ftp) /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 -1300475168.916118 Reporter::ERROR field value missing (c$ftp) /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 -1300475168.918295 Reporter::ERROR field value missing (c$ftp) /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 -1300475168.952193 Reporter::ERROR field value missing (c$ftp) /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 -1300475168.952228 Reporter::ERROR field value missing (c$ftp) /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 -1300475168.954761 Reporter::ERROR field value missing (c$ftp) /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 -1300475168.962628 Reporter::ERROR field value missing (c$ftp) /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 -1300475169.780331 Reporter::ERROR field value missing (c$ftp) /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.bro, line 10 +1300475168.783842 Reporter::ERROR field value missing (c$ftp) /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.zeek, line 10 +1300475168.915940 Reporter::ERROR field value missing (c$ftp) /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.zeek, line 10 +1300475168.916118 Reporter::ERROR field value missing (c$ftp) 
/da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.zeek, line 10 +1300475168.918295 Reporter::ERROR field value missing (c$ftp) /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.zeek, line 10 +1300475168.952193 Reporter::ERROR field value missing (c$ftp) /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.zeek, line 10 +1300475168.952228 Reporter::ERROR field value missing (c$ftp) /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.zeek, line 10 +1300475168.954761 Reporter::ERROR field value missing (c$ftp) /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.zeek, line 10 +1300475168.962628 Reporter::ERROR field value missing (c$ftp) /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.zeek, line 10 +1300475169.780331 Reporter::ERROR field value missing (c$ftp) /da/home/robin/bro/master/testing/btest/.tmp/core.expr-exception/expr-exception.zeek, line 10 #close 2011-03-18-19-06-13 diff --git a/testing/btest/Baseline/core.file-caching-serialization/one0 b/testing/btest/Baseline/core.file-caching-cloning/one0 similarity index 100% rename from testing/btest/Baseline/core.file-caching-serialization/one0 rename to testing/btest/Baseline/core.file-caching-cloning/one0 diff --git a/testing/btest/Baseline/core.file-caching-serialization/one1 b/testing/btest/Baseline/core.file-caching-cloning/one1 similarity index 100% rename from testing/btest/Baseline/core.file-caching-serialization/one1 rename to testing/btest/Baseline/core.file-caching-cloning/one1 diff --git a/testing/btest/Baseline/core.file-caching-serialization/one2 b/testing/btest/Baseline/core.file-caching-cloning/one2 similarity index 100% rename from testing/btest/Baseline/core.file-caching-serialization/one2 rename to testing/btest/Baseline/core.file-caching-cloning/one2 diff --git a/testing/btest/Baseline/core.file-caching-serialization/two0 b/testing/btest/Baseline/core.file-caching-cloning/two0 similarity index 100% rename from testing/btest/Baseline/core.file-caching-serialization/two0 rename to testing/btest/Baseline/core.file-caching-cloning/two0 diff --git a/testing/btest/Baseline/core.file-caching-serialization/two1 b/testing/btest/Baseline/core.file-caching-cloning/two1 similarity index 100% rename from testing/btest/Baseline/core.file-caching-serialization/two1 rename to testing/btest/Baseline/core.file-caching-cloning/two1 diff --git a/testing/btest/Baseline/core.file-caching-serialization/two2 b/testing/btest/Baseline/core.file-caching-cloning/two2 similarity index 100% rename from testing/btest/Baseline/core.file-caching-serialization/two2 rename to testing/btest/Baseline/core.file-caching-cloning/two2 diff --git a/testing/btest/Baseline/core.init-error/out b/testing/btest/Baseline/core.init-error/out index 50aea70a75..3079bdfcbd 100644 --- a/testing/btest/Baseline/core.init-error/out +++ b/testing/btest/Baseline/core.init-error/out @@ -1,4 +1,4 @@ -expression error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/core.init-error/init-error.bro, line 15: no such index (v[10]) +expression error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/core.init-error/init-error.zeek, line 15: no such index (v[10]) fatal error: errors occurred while initializing 1st event 2nd event diff --git a/testing/btest/Baseline/core.ip-broken-header/weird.log b/testing/btest/Baseline/core.ip-broken-header/weird.log index a416f90e66..8aca8dc371 100644 --- 
a/testing/btest/Baseline/core.ip-broken-header/weird.log +++ b/testing/btest/Baseline/core.ip-broken-header/weird.log @@ -3,463 +3,463 @@ #empty_field (empty) #unset_field - #path weird -#open 2017-10-19-17-20-30 +#open 2019-06-07-01-59-22 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1500557630.000000 - b100:7265::6904:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557630.000000 - 9c00:7265:6374:6929::6127:fb 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557630.000000 - ffff:ffff:ffff:ffff::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557630.000000 - b100:7265:6300::8004:ef 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557630.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557630.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff00:40:ff:ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557630.000000 - b100:7265:6374:6929::6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557630.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557630.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557630.000000 - 255.255.0.0 0 255.255.255.223 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300:69:7429:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929::6927:ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3b00:40:ffbf:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:722a:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:722a:6374:6929:1000:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300::8004:ff 0 3bbf:ff00:40:0:ffff:9ff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929::6127:ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6900:0:400:2a29:6aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300:2304:0:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff80:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929::6927:ff 0 0:7265:6374:6929::6904:ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:2a29::6904:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c20:722a:6374:6929:800:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:63ce:69:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:722a:6374:6929:400:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:28fd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 
b100:6500:72:6369:2a29:: 0 0:80:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6900:0:400:2a29:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929::6927:ff 0 3bbf:ff00:40:0:ffff:ffff:fb2a:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff80:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:722a:6374:6929:400:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929::6127:fb 0 3bbf:ff00:40:0:ffff:ffbf:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff80:40:0:ffff:fcff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff02:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff32:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:722a:6374:6929:1000:0:6904:27ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:3afd:ffff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7200:400:65:6327:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:69ff:ffff:ffff:ffff:ffff 0 3b1e:400:ff:0:6929:c200:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300:69:7429:0:6904:ff 0 3bbf:ff00:40:0:ffff:700:fe:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300:69:7429:0:690a:ff 0 40:3bff:bf:0:ffff:ffff:fdff:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300::8004:ff 0 3bbf:ff00:840:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:63ce:69:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:ffe6:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929:100:0:4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929:100:0:4:ff 0 3bbf:ff00:40:0:21ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929:ffff:ffff:4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:ff3a:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:2a29:ffff:ffff:ffff:ffff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300:2704:0:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929:: 0 80:ff00:40:0:ff7f:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300:69:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:ff3a 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:0:ff00:69:2980:0:69 0 c400:ff3b:bfff:0:40ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:e374:6929::6927:ff 0 0:7265:6374:6929::6904:ff 0 invalid_inner_IP_version - F 
bro -1500557631.000000 - b100:7265:6300:2705:0:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:63ce:80:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:2a29:0:4:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:722a:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:ffff:3af7 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929::6127:fb 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7df 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300::8004:ff 0 3bbf:ff00:840:0:ffff:ff01:: 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300:0:100:0:8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:71fd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff00:40:2:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 0:7265:6374:6929:ff:0:27ff:28 0 126:0:143:4f4e:5445:4e54:535f:524c 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff80:fffe:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:69ff:ff00:400:2a29:6aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:fef9:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:722a:6374:6929:400:0:6904:ff 0 3bbf:ff00:40:0:ffff:ff3a:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300:69:7429:0:6904:40 0 bf:ff3b:0:ff00:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::4:ff 0 3bbf:8000::ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929::6927:ff 0 38bf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:69ff:ffff:ffff:ffff:ffff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:40:80:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3b00:40:ffbf:5:1ff:f7ff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:63ce:69:7429:db00:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929:ff:ff00:6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929:180:: 0 bf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:0:ff00:69:2980:0:29 0 c400:ff3b:bfff:0:40ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929:600:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7463:2a72:6929:400:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b000:7265:6374:6929::8004:ff 0 3bbf:ff80:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 255.255.0.0 0 255.255.255.237 0 invalid_inner_IP_version - F bro 
-1500557631.000000 - 0:7265:6374:6929:ff:27:a800:ff 0 100:0:143:4f4e:5445:4e54:535f:524c 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:f9fe:ffbf:ffff:0:ff28:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - - - - - ip_hdr_len_zero - F bro -1500557631.000000 - 0.0.0.0 0 0.0.65.95 0 invalid_IP_header_size - F bro -1500557631.000000 - b100:7265:6374:7129:ffff:ffff:ffff:ffff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b101:0:74:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff80:ffff:0:4000:ffff:fffd:f7fd 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929::6127:fb 0 3bbf:ff00:40:0:ffff:ffff:fb03:12ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 400:fffe:bfff::ecec:ecfc:ecec 0 ecec:ecec:ecec:ec00:ffff:ffff:fffd:ffff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:6500:72:6369:aa29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929:2600:0:8004:ff 0 3bbf:ff80:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:8000:40:0:16ef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929:0:1000:6904:ff 0 3b00:40:ffbf:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::6904:ff 0 ff00:bf3b:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b800:7265:6374:6929::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff00:f2:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:3a40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300:91:8bd6:ff00:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:5445:52ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:8b:0:ffff:ffff:f7fd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300:69:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - ffff:ffff:ffff:ffff::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fff7:820 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:9d8b:d5d5:ffff:fffc:ffff:ffff 0 3bbf:ff00:40:6e:756d:5f70:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b198:7265:6374:2a29::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929:0:100:6127:fb 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300:0:100:0:480:ffbf 0 3bff:0:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:2a29:2:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300:0:100:0:8004:ff 0 3bbf:ff00:40:0:ffff:fff8:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9cc2:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::8004:ff 0 
3bbf:f8fe:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:2a29:ffff:ffff:ff21:ffff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929::6927:ff 0 0:7265:6b74:6929::6904:ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:ffff:6929::6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7229:6374:6929::6927:ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::4:ff 0 3bbf:ff00:40:0:ffff:ffff:f7fd:ffff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b104:7265:6374:2a29::6904:ff 0 3bbf:ff03:40:0:ffff:ffff:f5fd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929:8000:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 0.0.0.0 0 0.0.255.255 0 invalid_IP_header_size - F bro -1500557631.000000 - b100:7265:6374:6900:8000:400:2a29:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::4:ff 0 3bbf:4900:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:636f:6d29::5704:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:723a:6374:6929::6904:ff 0 3b00:40:ffbf:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929:100:0:4:ff 0 3bbf:ff00::ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 0:7265:6374:6929:ff:0:27ff:28 0 100:0:143:4f4e:5445:4e54:535f:524c 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929:100:0:6127:fb 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929:0:ffff:6804:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929::6927:0 0 80bf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929::6827:ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929::6127:ff 0 3bbf:ff00:440:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - ffff:ffff:ffff:ffff::8004:ff 0 3bbf:ff00:40::80ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:908 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300:69:7429:0:690a:ff 0 3bbf:ff00::ffff:ff03:bffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:6500:72:6300:0:8000:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:8e00:2704:0:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:9f74:2a29::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929:: 0 80:ff00:40:0:ffff:ffff:fffd:f701 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300::8004:ff 0 3b3f:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:2a29:ffff:ffff:ffff:ffff 0 3bbf:ff00:40:6e:7d6d:5f70:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff00:40:0:fbff:ffff:fffd:f7ff 0 
invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929::ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3b1e:400:ff:0:9529:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300:0:100:0:8004:ff 0 3bbf:ff01:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7200:400:65:6327:fffe:bfff:ff 0 ffff:0:ffff:ff3a:3600:82b:0:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::8004:ff 0 3bb7:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 0.0.0.0 0 0.53.0.0 0 invalid_IP_header_size - F bro -1500557631.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff00:39:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:722a:6374:6929::6904:ff 0 3bbf:ff00:40:ffff:fbfd:ffff:0:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929:0:8000:6927:ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7228:6374:2a29::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929::6127:ff 0 3bbf:ff80::ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7fc 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c00:7265:6374:6929::6927:ff 0 100:7265:6374:6929::6904:ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7200:6300:4:ff27:65fe:bfff:ff 0 ffff:0:ffff:ff3a:f700:8000:20:8ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:47:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c20:722a:6374:6929:800:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f706 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:6500:72:e369:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:ff3a:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265::6904:2aff 0 c540:ff:ffbf:ffde:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300::8001:0 0 ::40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 0:7265:6374:6929:ff:27:2800:ff 0 100:0:143:4f4e:5445:4e54:535f:524c 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::4:ff 0 3bbf:ff00:40:f8:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300:69:7429:0:690a:ff 0 3bbf:ff00:40:900:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - 9c20:722a:6374:6929:800:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7d8 0 invalid_inner_IP_version - F bro -1500557631.000000 - ffff:ff27:ffff:ffff::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:f7ff:fdff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929:0:3a00:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:0:ff40:ff00:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:63ce:29:69:7400:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:6500:72:6369:2a:2900:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:ff3a:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 
- b100:7265:2100::8004:ef 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:2a29:ffff:ffff:ffff:ffff 0 3bbf:ff00:40:6e:756d:5f70:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6300:69:7429:0:6904:ff 0 3bbf:ff00:40:0:ffff:100:: 0 invalid_inner_IP_version - F bro -1500557631.000000 - 0.0.0.0 0 0.0.0.0 0 invalid_IP_header_size - F bro -1500557631.000000 - b100:7265:6374:6929:1:0:4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:ff:ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929:0:69:4:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557631.000000 - b100:7265:6374:6929::ff:3bff 0 4bf:8080:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3b1e:0:4ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:63f4:6929::8004:ff 0 3bbf:ff80:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6900:0:400:2a29:2aff 0 3bbf:ff00:3a:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:637b:6929::6904:ff 0 3b00:40:ffbf:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:340:80:ffef:ffff:fffd:f7fb 0 invalid_inner_IP_version - F bro -1500557632.000000 - b300:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:ff3a:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 9c00:7265:ae74:6929:ffff:ffff:ffff:ffff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 9c00:7265:6374:6929::6927:ff 0 0:7265:6374:6929::6904:1 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929:ff:ffff:ffff:ffff 0 ffbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:2a29:ffff:ffff:ffff:ffff 0 3bbf:ff00:40:0:ffff:ff01:1:ffff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929:0:4:0:80ff 0 3bbf:ff80:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::4:ff 0 3bbf:0:40ff:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff80:40:0:ffff:ff7a:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:434f:4e54:454e:5453:5f44 0 4ebf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:ff:ff:fff7:ffff:fdff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:0:80::8004:ff 0 3bbf:ff80:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff01:40:0:ffff:ffff:fffd:900 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::8004:ff 0 3b01::ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929:3a00:0:6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::692a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff00:40:0:ffff:ffd8:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6300::8004:ff 0 
3bbf:40:8:ff00:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 9c00:7265:6374:6929::6927:bf 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:69a9::4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:5265:6374:6929::6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::97fb:ff00 0 c440:108:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 9c00:722a:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:ffff:8000 0 invalid_inner_IP_version - F bro -1500557632.000000 - 32.0.8.99 0 0.0.0.0 0 invalid_IP_header_size - F bro -1500557632.000000 - b100:6500:72:6369:2a29:0:6980:ff 0 3bbf:8000:40:0:16ef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::693b:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 0.0.0.0 0 0.255.255.255 0 invalid_IP_header_size - F bro -1500557632.000000 - b100:7265:6374:6929::6928:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:5049:415f:5544:5000:0:6904:5544 0 50bf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929:0:1000:8004:ff 0 3bbf:ff80:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6300::8004:ff 0 3bbf:ff00:3c0:ffff::fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 9c00:7265:6374:6929::6927:ff 0 fe:8d9a:948b:96d6:ff00:21:6904:ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::8014:ff 0 3bbf:ff80:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6301::6904:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:63ce:69:7421:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6300:69:d529:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ff27:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff02:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - ffff:ffff:ffff:ffff::8004:ff 0 ffff:ffff:ffff:ff00:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 7200:65:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 9c00:7263:692a:7429::6904:ff 0 3b:bf00:40ff:0:ffff:ffff:ffff:3af7 0 invalid_inner_IP_version - F bro -1500557632.000000 - 9c00:7265:6306:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffe:1ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 50ff:7265:6374:6929::4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 9c00:7265:6374:6900:2900:0:6927:ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6305:69:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 101.99.116.105 0 41.0.255.0 0 invalid_IP_header_size - F bro -1500557632.000000 - 9c00:7265:6374:6929::6927:ff 0 ::40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 0:7265:6374:6900:0:400:2a29:6aff 0 
3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 2700:7265:6300:0:100:0:8004:ff00 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7200:400:65:6327:101:3ffe:ff 0 ffff:0:ffff:ff3a:2000:f8d4:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 9c00:7265:6374:6929::6127:ff 0 3bbf:ff00:ff:ff00:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:637c:6900:0:400:2a29:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:e374:6929::6904:ff 0 3bbf:ff00:40:a:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929:: 0 80:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::4:ff 0 3bbf:fd00:40:0:fffc:ffff:f720:fd3a 0 invalid_inner_IP_version - F bro -1500557632.000000 - 9c00:722a:2374:6929:400:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:ff3a:f7ef 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:2a29:ffff:ffff:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6300:69:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:ff01:0 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:fff2:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6300:2704:40:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6300::8004:ff 0 6800:f265:6374:6929:11:27:c00:68 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:725f:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7200:400:65:6327:fffe:bfff:0 0 5000:ff:ffff:ffff:fdf7:ff3a:2000:800 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff80:ffff:0:4000:ffff:8000:0 0 invalid_inner_IP_version - F bro -1500557632.000000 - 9c00:722a:6374:6929:400:4:0:ff69 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:2a29:ffff:ffff:ffff:ffff 0 7dbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6300::8084:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929:0:ffff:ffff:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:2a29:100:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7200:400:65:6327:fffe:bfff:ff 0 ffff:0:ff00:ffff:3a20:82b:0:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ff7d:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:6500:72:6369:2a22:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b300:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 9c20:722a:6374:6929:800:0:6904:ff 0 3bbf:ff00:40::ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6300:2704:0:fffe:bfff:ff 0 ffff:0:80:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F bro 
-1500557632.000000 - b100:7265:6300::8004:3a 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ff00:0:8080 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::4:ff 0 3bbf:ff80:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6300:2704:0:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2008:2b:0:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6300:69:7429:0:690a:ff 0 3bbf:ff01:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3b1e:3b00:ff:0:6929:0:f7fd:ffff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929:9:0:9704:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:2a29::6904:2aff 0 3bbf:ff00:40:21:ffff:ffff:80fd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ffcc:c219:aa00:0:c9:640d:eb3c 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:a78b:2a29::6904:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3bff:4000:bf00:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:5265:6300::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7218:400:65:6327:fffe:bfff:ff 0 ffff:20:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 71.97.99.109 0 0.16.0.41 0 invalid_IP_header_size - F bro -1500557632.000000 - b100:7221:6374:2a29::6904:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929:ffff:ffff:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:40:0:7fef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:d0d6:ffff:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff80:40:0:29ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6300::8004:ff 0 3bbf:ff00:40:6:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3b00:40:ffbf:0:ecff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffef:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:e929::8004:ff 0 3bbf:ff80:40:0:ffff:ffff:fffd:27ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 3a00:7265:6374:6929::8004:ff 0 c540:fe:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::4:ff 0 3bbf:ff00:40:40:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f728 0 invalid_inner_IP_version - F bro -1500557632.000000 - 65:63b1:7274:6929::8004:ff 0 3bbf:ff80:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6300::2104:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6328:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 
f100:7265:6374:6929::6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:6500:72:6328:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7200:400:65:ffff:ffff:ffff:ffff 0 ffff:0:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6300:69:7429:0:6904:ff 0 3bbf:ff00:40:0:ffff:fdff:ffff:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 9c00:7265:6374:6929::6127:fb 0 3bbf:6500:6fd:188:4747:4747:61fd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 0.0.0.255 0 11.0.255.0 0 invalid_IP_header_size_in_tunnel - F bro -1500557632.000000 - b100:7265:63ce:69:7429:0:690a:ff 0 3bbf:ff00:40:0:7fff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:2a29::6904:2aff 0 3bbf:ff00:40:21:27ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ff4e:5654:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374::80:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6300::8004:3b 0 ff:ffbf:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:6500:91:6369:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:ff3a:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6300::8004:ff 0 3bbf:ff00:840:ff:ffff:feff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6301::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6300:2704:0:fffe:bfff:ff 0 ffff:ffff:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6300:69:7429:0:690a:ff 0 40:0:ff3b:bf:ffff:ffff:fdff:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 9c00:7265:6374:6929::6927:10ff 0 0:7265:6374:6929::6904:ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6329:ffff:2a74:ffff:ffff:ffff 0 3bbf:ff00:40:6e:756d:3b70:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 143.9.0.0 0 0.98.0.237 0 invalid_IP_header_size - F bro -1500557632.000000 - b100:7265:6374:6929::4:ff 0 3bbf:ff00:40:0:ffff:feff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6300:2704:0:fffe:bfff:ff 0 fffb:0:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7200:6365::8004:ff 0 3bbf:ff00:840:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - 0:7265:6374:6929:ff:27:2800:ff 0 100:0:143:4f4e:5445:4e00:0:704c 0 invalid_inner_IP_version - F bro -1500557632.000000 - 9c00:7265:6374:6929::6927:ff 0 3bbf:ff02:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557632.000000 - b100:7265:6374:6909::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929:100:0:4:ff 0 3bbf:ff00:40:0:feff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:2a29::6904:2a60 0 3bbf:ff00:40:21:ffff:ffff:ffbd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - 9c00:7265:6374:6929::6127:ff 0 3bbf:ff00:8040:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - 2a72:6300:b165:7429:ffff:ffff:ffff:ffff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:639a:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 
0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::ff00:480 0 3bbf:ff80:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929:0:8:: 0 80:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b000:7265:63ce:69:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:21e6:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6301:0:29:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:ff:ff40:0:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::3b04:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::8804:ff 0 3bbf:ff80:40:0:ffff:ffff:102:800 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 33bf:ff00:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff80:60:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929:800:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:2a29::6904:ff 0 3b9f:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b13b:bfff:0:4000:ff:ffff:ffff:fdf7 0 ff3a:2000:800:1e04:ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::6904:0 0 ::80:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b165:6300:7274:6929::400:ff 0 3bbf:ff00:40:0:ffff:ffff:f7fd:ffff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::6904:ff3b 0 0:bfff:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::3b:bfff 0 ff04:0:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6300:69:74a9:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6300:69:7429:0:6904:ff 0 3bbf:ff00:40:0:ffff:2aff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:6374:65:69:7229:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6377:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6300::4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b128:7265:63ce:69:7429:db00:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929:4:0:6904:ff 0 3b1e:400:ff:0:6929:2700:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - 9c00:722a:6374:6929::6904:ff 0 3bbf:fd00:40:0:ffff:ffff:ffff:3af7 0 invalid_inner_IP_version - F bro -1500557633.000000 - 9c00:722a:6374:6929::6968:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6300:69:7429:0:6904:ff 0 3bff:bf00:40:0:ffff:ffff:fffd:e7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7261:6374:6929::6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::6904:ff 0 3b1e:400:ff:0:7929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:2a29::6904:2aff 0 3bbf:df00::80ff:ffff:fffd:f7ff 0 
invalid_inner_IP_version - F bro -1500557633.000000 - b100:7263:65ce:69:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:ffe6:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - ffff:ffff:ffff:ffff::8004:ff 0 3bbf:ff01:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:f8:0:ff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - 9c00:7265:6374:692d::6927:ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::4:fd 0 c3bf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:2a29::6904:3b 0 bf:ffff:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6900:ec00:400:2a29:6aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::6904:ff 0 e21e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6928:ffff:fd00:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ff3b:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::ff00:bfff 0 3b00:400:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::6904:ff 0 3b1e:520:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::6904:ffff 0 ffff:ffff:ffff:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6300:69:7429:0:690a:ff 0 3bbf:ff00:28:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::80fb:ff 0 3bbf:ff80:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - 9c2a:7200:6374:6929:1000:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - 9c00:7265:6374:693a::6127:ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - 9c20:722a:6374:6929:800:0:6904:ff 0 3bbf:ff00:40:0:ffff:ff7f:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - 9c00:7265:6374:6929:0:fffe:bfff:ff 0 ffff:ff68:0:4000:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7200:400:65:6327:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2000:82b:0:f7ef 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::4:ff 0 3bbf:2700:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - 9c00:7265:6374:6929::6904:ff 0 3bbf:ff00:40:27:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::2a:0 0 ::6a:ffff:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6900:a:400:2a29:3b2a 0 ffbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b1ff:7265:6374:2a29:ffff:ffff:ffff:ffff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:6500:72:6369:2a29:3b00:690a:ff 0 3bbf:fb00:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - 9c00:722a:6374:: 0 ffff:ffff:ffff:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - 9c00:722a:6374:6929:1000:0:6904:ff 0 
3bbf:ff00:40:0:ffff:ffff:fffd:2aff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6300:0:100:0:8004:ff 0 3bbf:ff00:60:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:2a29:ffff:ffff:ffff:ffff 0 3bbf:ff00:40:9500:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7200:63:65::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6300:2704:0:fffe:bfff:fc 0 ffff:0:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::6900:0 0 80bf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:63ce:69:2129:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:40:3a:ffef:ff:ffff:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:c1:800:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:9265:6300:69:7429:0:690a:ff 0 40:3bff:bf:0:ffff:ffff:fdff:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6300:0:100:0:8004:ff 0 3bbf:ff00:40:0:ffff:ffff:dffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929:: 0 80:ff00:40:0:1ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:724a:6374:6929:: 0 80:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::6904:f6 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6300:2704:0:fffe:bfff:0 0 ffff:ff:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6500:0:100:0:8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929:0:a:4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6900::2900:0 0 80:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - 68.80.95.104 0 109.115.117.0 0 invalid_IP_header_size - F bro -1500557633.000000 - 9c00:7265:6374:6929::6927:ff 0 0:7265:6374:692b::6904:ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6900:29:0:6914:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:6500:72:e369:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f728 0 invalid_inner_IP_version - F bro -1500557633.000000 - 8:1e:400:ff00:0:3200:8004:ff 0 3bff:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:ffff:f7fd 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6300:2704:0:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2000:8ba:0:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6300::8004:ff 0 48bf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7365:6374:6929::6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6300:2704:0:fffe:bfff:ff 0 ffff:0:ffff:ff3a:5600:800:2b00:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:2a29::6904:2aff 0 3bbf:ff00:40:4021:ffff:ffff:fffd:f7ff 0 
invalid_inner_IP_version - F bro -1500557633.000000 - 0:7265:6374:6929:ff:6:27ff:28 0 100:0:143:4f4e:5445:4e54:535f:524c 0 invalid_inner_IP_version - F bro -1500557633.000000 - 9c00:7265:6374:6929::6927:ff 0 0:7265:6b74:6909::6904:ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::4:ff 0 3bbf:ff00:40:0:ffff:ff48:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6300:7400:2969:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6300:69:7429:0:690a:ff 0 40:3bff:c5:0:ffff:ffff:fdff:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265::6904:2a3a 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::6904:f9ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7261:6374:2a29::6904:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::6904:ff 0 3b1e:400:ff:0:9fd6:ffff:2:800 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6300:69:7429:8000:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - ffff:ffff:ffff:ffff:: 0 ::40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff80:40:400:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - 9c00:7265:6374:6929::ff00:ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:2a29::6904:2aff 0 3bbf:ff00:40:21:fffe:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:ffff::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - 4f00:7265:6374:6929::6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929::6904:ff 0 3b1e:8000::6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929:1:400:8004:ff 0 3bbf:ff80:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - 0.255.255.0 0 0.0.0.0 0 invalid_IP_header_size - F bro -1500557633.000000 - b100:7265:6374:6929:4:0:6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7200:400:65:6327:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2000:342b:0:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:6929:400:0:4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - 9c00:7265:6374:6929::6927:ff 0 3bbf:ffa8:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:ffdd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - b100:7265:1::69 0 c400:ff3b:bfff:0:40ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557633.000000 - 9c00:722a:6374:6929:400:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:ffff:ffff 0 invalid_inner_IP_version - F bro -1500557634.000000 - b100::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - 9c00:722a:6374:6929:1001:900:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff00:40:0:40:0:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - 9c00:722a:6374:6929::6904:eff 0 
3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - ffdb:ffff:3b00::ff:ffff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - b100:7265:63ce:69:7429:db00:690a:ff 0 3bbf:ff00:60:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - b100:7265:6374:6929:ffff:ffff:8004:ff 0 3bbf:ff80:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - b100:7265:6300:669:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - b100:7265:6374:6929::693b:bdff 0 0:4000:ff:ffff:fdff:fff7:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - 0.71.103.97 0 99.116.0.128 0 invalid_IP_header_size - F bro -1500557634.000000 - b100:7265:6300::8004:ff 0 3bbf:ff00:40:ff00:ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - b100:7265:63ce:69:7429:0:690a:b1 0 3bbf:ff00:40:0:ffff:ffff:ffe6:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - b100:7265:63ce:69:7429:db00:690a:ff 0 3bbf:ff00:40:0:29ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - 6500:0:6fd:188:4747:4747:6163:7400 0 0:2c29:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - 9c00:722a:6374:6929:8000:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - b100:6500:72:6369:2900:2a00:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - b100:7265:6374:2a29::6904:ff 0 29bf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - b100:7265:6374:6929::6904:ff 0 3b00:40:ffbf:10:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - 9c00:7265:6374:6929::612f:fb 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - b100:7265:6300:2704:0:fffe:bfff:ff 0 ffff:0:ffff:ffc3:2000:82b:0:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - 9c00:722a:6374:6929:1000:100:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f728 0 invalid_inner_IP_version - F bro -1500557634.000000 - b100:7265:6374:6929:ff:ffff:ff04:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - b100:7265:0:ff00:69:2980:0:69 0 c4ff:bf00:ff00:3b:40ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -1500557634.000000 - 9c00:7265:6374:69d1::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -#close 2017-10-19-17-20-30 +1500557630.000000 - b100:7265::6904:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557630.000000 - 9c00:7265:6374:6929::6127:fb 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557630.000000 - ffff:ffff:ffff:ffff::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557630.000000 - b100:7265:6300::8004:ef 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557630.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557630.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff00:40:ff:ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557630.000000 - b100:7265:6374:6929::6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557630.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557630.000000 - 
b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557630.000000 - 255.255.0.0 0 255.255.255.223 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300:69:7429:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:6374:6929::6927:ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3b00:40:ffbf:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:722a:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:722a:6374:6929:1000:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300::8004:ff 0 3bbf:ff00:40:0:ffff:9ff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:6374:6929::6127:ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6900:0:400:2a29:6aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300:2304:0:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff80:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:6374:6929::6927:ff 0 0:7265:6374:6929::6904:ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:2a29::6904:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c20:722a:6374:6929:800:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:63ce:69:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:722a:6374:6929:400:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:28fd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:6500:72:6369:2a29:: 0 0:80:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6900:0:400:2a29:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:6374:6929::6927:ff 0 3bbf:ff00:40:0:ffff:ffff:fb2a:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff80:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:722a:6374:6929:400:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:6374:6929::6127:fb 0 3bbf:ff00:40:0:ffff:ffbf:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff80:40:0:ffff:fcff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff02:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff32:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:722a:6374:6929:1000:0:6904:27ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek 
+1500557631.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:3afd:ffff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7200:400:65:6327:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:69ff:ffff:ffff:ffff:ffff 0 3b1e:400:ff:0:6929:c200:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300:69:7429:0:6904:ff 0 3bbf:ff00:40:0:ffff:700:fe:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300:69:7429:0:690a:ff 0 40:3bff:bf:0:ffff:ffff:fdff:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300::8004:ff 0 3bbf:ff00:840:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:63ce:69:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:ffe6:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929:100:0:4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929:100:0:4:ff 0 3bbf:ff00:40:0:21ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929:ffff:ffff:4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:ff3a:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:2a29:ffff:ffff:ffff:ffff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300:2704:0:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929:: 0 80:ff00:40:0:ff7f:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300:69:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:ff3a 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:0:ff00:69:2980:0:69 0 c400:ff3b:bfff:0:40ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:e374:6929::6927:ff 0 0:7265:6374:6929::6904:ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300:2705:0:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:63ce:80:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:2a29:0:4:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:722a:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:ffff:3af7 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:6374:6929::6127:fb 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7df 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300::8004:ff 0 3bbf:ff00:840:0:ffff:ff01:: 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300:0:100:0:8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:71fd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff00:40:2:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 0:7265:6374:6929:ff:0:27ff:28 0 126:0:143:4f4e:5445:4e54:535f:524c 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::8004:ff 0 
3bbf:ff80:fffe:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:69ff:ff00:400:2a29:6aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:fef9:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:722a:6374:6929:400:0:6904:ff 0 3bbf:ff00:40:0:ffff:ff3a:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300:69:7429:0:6904:40 0 bf:ff3b:0:ff00:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::4:ff 0 3bbf:8000::ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:6374:6929::6927:ff 0 38bf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:69ff:ffff:ffff:ffff:ffff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:40:80:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3b00:40:ffbf:5:1ff:f7ff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:63ce:69:7429:db00:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929:ff:ff00:6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929:180:: 0 bf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:0:ff00:69:2980:0:29 0 c400:ff3b:bfff:0:40ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:6374:6929:600:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7463:2a72:6929:400:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b000:7265:6374:6929::8004:ff 0 3bbf:ff80:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 255.255.0.0 0 255.255.255.237 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 0:7265:6374:6929:ff:27:a800:ff 0 100:0:143:4f4e:5445:4e54:535f:524c 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:f9fe:ffbf:ffff:0:ff28:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - - - - - ip_hdr_len_zero - F zeek +1500557631.000000 - 0.0.0.0 0 0.0.65.95 0 invalid_IP_header_size - F zeek +1500557631.000000 - b100:7265:6374:7129:ffff:ffff:ffff:ffff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b101:0:74:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff80:ffff:0:4000:ffff:fffd:f7fd 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:6374:6929::6127:fb 0 3bbf:ff00:40:0:ffff:ffff:fb03:12ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 400:fffe:bfff::ecec:ecfc:ecec 0 ecec:ecec:ecec:ec00:ffff:ffff:fffd:ffff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:6500:72:6369:aa29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929:2600:0:8004:ff 0 3bbf:ff80:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F 
zeek +1500557631.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:8000:40:0:16ef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929:0:1000:6904:ff 0 3b00:40:ffbf:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::6904:ff 0 ff00:bf3b:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b800:7265:6374:6929::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff00:f2:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:3a40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300:91:8bd6:ff00:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:5445:52ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:8b:0:ffff:ffff:f7fd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300:69:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - ffff:ffff:ffff:ffff::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fff7:820 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:9d8b:d5d5:ffff:fffc:ffff:ffff 0 3bbf:ff00:40:6e:756d:5f70:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b198:7265:6374:2a29::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:6374:6929:0:100:6127:fb 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300:0:100:0:480:ffbf 0 3bff:0:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:2a29:2:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300:0:100:0:8004:ff 0 3bbf:ff00:40:0:ffff:fff8:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9cc2:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:f8fe:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:2a29:ffff:ffff:ff21:ffff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:6374:6929::6927:ff 0 0:7265:6b74:6929::6904:ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:ffff:6929::6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7229:6374:6929::6927:ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::4:ff 0 3bbf:ff00:40:0:ffff:ffff:f7fd:ffff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b104:7265:6374:2a29::6904:ff 0 3bbf:ff03:40:0:ffff:ffff:f5fd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929:8000:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 0.0.0.0 0 0.0.255.255 0 invalid_IP_header_size - F zeek +1500557631.000000 - b100:7265:6374:6900:8000:400:2a29:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::4:ff 0 3bbf:4900:40:0:ffff:ffff:fffd:f7ff 0 
invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:636f:6d29::5704:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:723a:6374:6929::6904:ff 0 3b00:40:ffbf:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929:100:0:4:ff 0 3bbf:ff00::ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 0:7265:6374:6929:ff:0:27ff:28 0 100:0:143:4f4e:5445:4e54:535f:524c 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:6374:6929:100:0:6127:fb 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929:0:ffff:6804:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:6374:6929::6927:0 0 80bf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:6374:6929::6827:ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:6374:6929::6127:ff 0 3bbf:ff00:440:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - ffff:ffff:ffff:ffff::8004:ff 0 3bbf:ff00:40::80ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:908 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300:69:7429:0:690a:ff 0 3bbf:ff00::ffff:ff03:bffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:6500:72:6300:0:8000:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:8e00:2704:0:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:9f74:2a29::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929:: 0 80:ff00:40:0:ffff:ffff:fffd:f701 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300::8004:ff 0 3b3f:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:2a29:ffff:ffff:ffff:ffff 0 3bbf:ff00:40:6e:7d6d:5f70:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff00:40:0:fbff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:6374:6929::ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3b1e:400:ff:0:9529:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300:0:100:0:8004:ff 0 3bbf:ff01:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7200:400:65:6327:fffe:bfff:ff 0 ffff:0:ffff:ff3a:3600:82b:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::8004:ff 0 3bb7:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 0.0.0.0 0 0.53.0.0 0 invalid_IP_header_size - F zeek +1500557631.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff00:39:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:722a:6374:6929::6904:ff 0 3bbf:ff00:40:ffff:fbfd:ffff:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:6374:6929:0:8000:6927:ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7228:6374:2a29::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek 
+1500557631.000000 - 9c00:7265:6374:6929::6127:ff 0 3bbf:ff80::ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7fc 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c00:7265:6374:6929::6927:ff 0 100:7265:6374:6929::6904:ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7200:6300:4:ff27:65fe:bfff:ff 0 ffff:0:ffff:ff3a:f700:8000:20:8ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:47:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c20:722a:6374:6929:800:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f706 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:6500:72:e369:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:ff3a:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265::6904:2aff 0 c540:ff:ffbf:ffde:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300::8001:0 0 ::40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 0:7265:6374:6929:ff:27:2800:ff 0 100:0:143:4f4e:5445:4e54:535f:524c 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::4:ff 0 3bbf:ff00:40:f8:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300:69:7429:0:690a:ff 0 3bbf:ff00:40:900:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 9c20:722a:6374:6929:800:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7d8 0 invalid_inner_IP_version - F zeek +1500557631.000000 - ffff:ff27:ffff:ffff::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:f7ff:fdff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929:0:3a00:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:0:ff40:ff00:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:63ce:29:69:7400:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:6500:72:6369:2a:2900:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:ff3a:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:2100::8004:ef 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:2a29:ffff:ffff:ffff:ffff 0 3bbf:ff00:40:6e:756d:5f70:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6300:69:7429:0:6904:ff 0 3bbf:ff00:40:0:ffff:100:: 0 invalid_inner_IP_version - F zeek +1500557631.000000 - 0.0.0.0 0 0.0.0.0 0 invalid_IP_header_size - F zeek +1500557631.000000 - b100:7265:6374:6929:1:0:4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:ff:ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929:0:69:4:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557631.000000 - b100:7265:6374:6929::ff:3bff 0 4bf:8080:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3b1e:0:4ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:63f4:6929::8004:ff 0 3bbf:ff80:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 
b100:7265:6374:6900:0:400:2a29:2aff 0 3bbf:ff00:3a:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:637b:6929::6904:ff 0 3b00:40:ffbf:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:340:80:ffef:ffff:fffd:f7fb 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b300:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:ff3a:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 9c00:7265:ae74:6929:ffff:ffff:ffff:ffff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 9c00:7265:6374:6929::6927:ff 0 0:7265:6374:6929::6904:1 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929:ff:ffff:ffff:ffff 0 ffbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:2a29:ffff:ffff:ffff:ffff 0 3bbf:ff00:40:0:ffff:ff01:1:ffff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929:0:4:0:80ff 0 3bbf:ff80:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::4:ff 0 3bbf:0:40ff:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff80:40:0:ffff:ff7a:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:434f:4e54:454e:5453:5f44 0 4ebf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:ff:ff:fff7:ffff:fdff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:0:80::8004:ff 0 3bbf:ff80:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff01:40:0:ffff:ffff:fffd:900 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::8004:ff 0 3b01::ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929:3a00:0:6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::692a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff00:40:0:ffff:ffd8:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6300::8004:ff 0 3bbf:40:8:ff00:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 9c00:7265:6374:6929::6927:bf 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:69a9::4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:5265:6374:6929::6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::97fb:ff00 0 c440:108:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 9c00:722a:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:ffff:8000 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 32.0.8.99 0 0.0.0.0 0 invalid_IP_header_size - F zeek +1500557632.000000 - b100:6500:72:6369:2a29:0:6980:ff 0 3bbf:8000:40:0:16ef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::693b:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 0.0.0.0 0 0.255.255.255 0 invalid_IP_header_size - F zeek +1500557632.000000 - b100:7265:6374:6929::6928:ff 0 
3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:5049:415f:5544:5000:0:6904:5544 0 50bf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929:0:1000:8004:ff 0 3bbf:ff80:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6300::8004:ff 0 3bbf:ff00:3c0:ffff::fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 9c00:7265:6374:6929::6927:ff 0 fe:8d9a:948b:96d6:ff00:21:6904:ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::8014:ff 0 3bbf:ff80:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6301::6904:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:63ce:69:7421:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6300:69:d529:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ff27:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff02:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - ffff:ffff:ffff:ffff::8004:ff 0 ffff:ffff:ffff:ff00:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 7200:65:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 9c00:7263:692a:7429::6904:ff 0 3b:bf00:40ff:0:ffff:ffff:ffff:3af7 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 9c00:7265:6306:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffe:1ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 50ff:7265:6374:6929::4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 9c00:7265:6374:6900:2900:0:6927:ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6305:69:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 101.99.116.105 0 41.0.255.0 0 invalid_IP_header_size - F zeek +1500557632.000000 - 9c00:7265:6374:6929::6927:ff 0 ::40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 0:7265:6374:6900:0:400:2a29:6aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 2700:7265:6300:0:100:0:8004:ff00 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7200:400:65:6327:101:3ffe:ff 0 ffff:0:ffff:ff3a:2000:f8d4:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 9c00:7265:6374:6929::6127:ff 0 3bbf:ff00:ff:ff00:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:637c:6900:0:400:2a29:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:e374:6929::6904:ff 0 3bbf:ff00:40:a:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929:: 0 80:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::4:ff 0 3bbf:fd00:40:0:fffc:ffff:f720:fd3a 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 9c00:722a:2374:6929:400:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 
b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:ff3a:f7ef 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:2a29:ffff:ffff:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6300:69:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:ff01:0 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:fff2:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6300:2704:40:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6300::8004:ff 0 6800:f265:6374:6929:11:27:c00:68 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:725f:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7200:400:65:6327:fffe:bfff:0 0 5000:ff:ffff:ffff:fdf7:ff3a:2000:800 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff80:ffff:0:4000:ffff:8000:0 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 9c00:722a:6374:6929:400:4:0:ff69 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:2a29:ffff:ffff:ffff:ffff 0 7dbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6300::8084:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929:0:ffff:ffff:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:2a29:100:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7200:400:65:6327:fffe:bfff:ff 0 ffff:0:ff00:ffff:3a20:82b:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ff7d:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:6500:72:6369:2a22:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b300:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 9c20:722a:6374:6929:800:0:6904:ff 0 3bbf:ff00:40::ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6300:2704:0:fffe:bfff:ff 0 ffff:0:80:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6300::8004:3a 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ff00:0:8080 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::4:ff 0 3bbf:ff80:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6300:2704:0:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2008:2b:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6300:69:7429:0:690a:ff 0 3bbf:ff01:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3b1e:3b00:ff:0:6929:0:f7fd:ffff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929:9:0:9704:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:2a29::6904:2aff 0 3bbf:ff00:40:21:ffff:ffff:80fd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ffcc:c219:aa00:0:c9:640d:eb3c 0 
invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:a78b:2a29::6904:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3bff:4000:bf00:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:5265:6300::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7218:400:65:6327:fffe:bfff:ff 0 ffff:20:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 71.97.99.109 0 0.16.0.41 0 invalid_IP_header_size - F zeek +1500557632.000000 - b100:7221:6374:2a29::6904:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929:ffff:ffff:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:40:0:7fef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:d0d6:ffff:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff80:40:0:29ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6300::8004:ff 0 3bbf:ff00:40:6:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3b00:40:ffbf:0:ecff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffef:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:e929::8004:ff 0 3bbf:ff80:40:0:ffff:ffff:fffd:27ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 3a00:7265:6374:6929::8004:ff 0 c540:fe:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::4:ff 0 3bbf:ff00:40:40:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f728 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 65:63b1:7274:6929::8004:ff 0 3bbf:ff80:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6300::2104:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6328:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - f100:7265:6374:6929::6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:6500:72:6328:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7200:400:65:ffff:ffff:ffff:ffff 0 ffff:0:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6300:69:7429:0:6904:ff 0 3bbf:ff00:40:0:ffff:fdff:ffff:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 9c00:7265:6374:6929::6127:fb 0 3bbf:6500:6fd:188:4747:4747:61fd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 0.0.0.255 0 11.0.255.0 0 invalid_IP_header_size_in_tunnel - F zeek +1500557632.000000 - b100:7265:63ce:69:7429:0:690a:ff 0 3bbf:ff00:40:0:7fff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:2a29::6904:2aff 0 3bbf:ff00:40:21:27ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ff4e:5654:f7ff 0 invalid_inner_IP_version - F zeek 
+1500557632.000000 - b100:7265:6374::80:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6300::8004:3b 0 ff:ffbf:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:6500:91:6369:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:ff3a:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6300::8004:ff 0 3bbf:ff00:840:ff:ffff:feff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6301::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6300:2704:0:fffe:bfff:ff 0 ffff:ffff:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6300:69:7429:0:690a:ff 0 40:0:ff3b:bf:ffff:ffff:fdff:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 9c00:7265:6374:6929::6927:10ff 0 0:7265:6374:6929::6904:ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6329:ffff:2a74:ffff:ffff:ffff 0 3bbf:ff00:40:6e:756d:3b70:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 143.9.0.0 0 0.98.0.237 0 invalid_IP_header_size - F zeek +1500557632.000000 - b100:7265:6374:6929::4:ff 0 3bbf:ff00:40:0:ffff:feff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6300:2704:0:fffe:bfff:ff 0 fffb:0:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7200:6365::8004:ff 0 3bbf:ff00:840:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 0:7265:6374:6929:ff:27:2800:ff 0 100:0:143:4f4e:5445:4e00:0:704c 0 invalid_inner_IP_version - F zeek +1500557632.000000 - 9c00:7265:6374:6929::6927:ff 0 3bbf:ff02:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557632.000000 - b100:7265:6374:6909::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929:100:0:4:ff 0 3bbf:ff00:40:0:feff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:2a29::6904:2a60 0 3bbf:ff00:40:21:ffff:ffff:ffbd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 9c00:7265:6374:6929::6127:ff 0 3bbf:ff00:8040:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 2a72:6300:b165:7429:ffff:ffff:ffff:ffff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:639a:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::ff00:480 0 3bbf:ff80:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929:0:8:: 0 80:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b000:7265:63ce:69:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:21e6:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6301:0:29:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:ff:ff40:0:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::3b04:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::8804:ff 0 3bbf:ff80:40:0:ffff:ffff:102:800 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 33bf:ff00:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek 
+1500557633.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff80:60:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929:800:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:2a29::6904:ff 0 3b9f:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b13b:bfff:0:4000:ff:ffff:ffff:fdf7 0 ff3a:2000:800:1e04:ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::6904:0 0 ::80:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b165:6300:7274:6929::400:ff 0 3bbf:ff00:40:0:ffff:ffff:f7fd:ffff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::6904:ff3b 0 0:bfff:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::3b:bfff 0 ff04:0:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6300:69:74a9:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6300:69:7429:0:6904:ff 0 3bbf:ff00:40:0:ffff:2aff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:6374:65:69:7229:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6377:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6300::4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b128:7265:63ce:69:7429:db00:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929:4:0:6904:ff 0 3b1e:400:ff:0:6929:2700:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 9c00:722a:6374:6929::6904:ff 0 3bbf:fd00:40:0:ffff:ffff:ffff:3af7 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 9c00:722a:6374:6929::6968:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6300:69:7429:0:6904:ff 0 3bff:bf00:40:0:ffff:ffff:fffd:e7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7261:6374:6929::6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::6904:ff 0 3b1e:400:ff:0:7929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:2a29::6904:2aff 0 3bbf:df00::80ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7263:65ce:69:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:ffe6:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - ffff:ffff:ffff:ffff::8004:ff 0 3bbf:ff01:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:f8:0:ff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 9c00:7265:6374:692d::6927:ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::4:fd 0 c3bf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:2a29::6904:3b 0 bf:ffff:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6900:ec00:400:2a29:6aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::6904:ff 0 e21e:400:ff:0:6929:0:fffd:f7ff 0 
invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6928:ffff:fd00:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:40:0:ffff:ff3b:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::ff00:bfff 0 3b00:400:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::6904:ff 0 3b1e:520:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::6904:ffff 0 ffff:ffff:ffff:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6300:69:7429:0:690a:ff 0 3bbf:ff00:28:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::80fb:ff 0 3bbf:ff80:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 9c2a:7200:6374:6929:1000:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 9c00:7265:6374:693a::6127:ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 9c20:722a:6374:6929:800:0:6904:ff 0 3bbf:ff00:40:0:ffff:ff7f:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 9c00:7265:6374:6929:0:fffe:bfff:ff 0 ffff:ff68:0:4000:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7200:400:65:6327:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2000:82b:0:f7ef 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::4:ff 0 3bbf:2700:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 9c00:7265:6374:6929::6904:ff 0 3bbf:ff00:40:27:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::2a:0 0 ::6a:ffff:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6900:a:400:2a29:3b2a 0 ffbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b1ff:7265:6374:2a29:ffff:ffff:ffff:ffff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:6500:72:6369:2a29:3b00:690a:ff 0 3bbf:fb00:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 9c00:722a:6374:: 0 ffff:ffff:ffff:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 9c00:722a:6374:6929:1000:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:2aff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6300:0:100:0:8004:ff 0 3bbf:ff00:60:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:2a29:ffff:ffff:ffff:ffff 0 3bbf:ff00:40:9500:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7200:63:65::8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6300:2704:0:fffe:bfff:fc 0 ffff:0:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::6900:0 0 80bf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:63ce:69:2129:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:6500:72:6369:2a29:0:690a:ff 0 3bbf:ff00:40:3a:ffef:ff:ffff:f7ff 0 invalid_inner_IP_version - F zeek 
+1500557633.000000 - b100:7265:6374:6929::6904:ff 0 3bbf:ff00:c1:800:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:9265:6300:69:7429:0:690a:ff 0 40:3bff:bf:0:ffff:ffff:fdff:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6300:0:100:0:8004:ff 0 3bbf:ff00:40:0:ffff:ffff:dffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929:: 0 80:ff00:40:0:1ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:724a:6374:6929:: 0 80:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::6904:f6 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6300:2704:0:fffe:bfff:0 0 ffff:ff:ffff:ff3a:2000:82b:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6500:0:100:0:8004:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929:0:a:4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6900::2900:0 0 80:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 68.80.95.104 0 109.115.117.0 0 invalid_IP_header_size - F zeek +1500557633.000000 - 9c00:7265:6374:6929::6927:ff 0 0:7265:6374:692b::6904:ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6900:29:0:6914:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:6500:72:e369:2a29:0:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f728 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 8:1e:400:ff00:0:3200:8004:ff 0 3bff:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:ffff:f7fd 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6300:2704:0:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2000:8ba:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6300::8004:ff 0 48bf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7365:6374:6929::6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6300:2704:0:fffe:bfff:ff 0 ffff:0:ffff:ff3a:5600:800:2b00:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:2a29::6904:2aff 0 3bbf:ff00:40:4021:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 0:7265:6374:6929:ff:6:27ff:28 0 100:0:143:4f4e:5445:4e54:535f:524c 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 9c00:7265:6374:6929::6927:ff 0 0:7265:6b74:6909::6904:ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::4:ff 0 3bbf:ff00:40:0:ffff:ff48:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6300:7400:2969:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6300:69:7429:0:690a:ff 0 40:3bff:c5:0:ffff:ffff:fdff:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265::6904:2a3a 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::6904:f9ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 
b100:7261:6374:2a29::6904:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::6904:ff 0 3b1e:400:ff:0:9fd6:ffff:2:800 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6300:69:7429:8000:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - ffff:ffff:ffff:ffff:: 0 ::40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff80:40:400:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 9c00:7265:6374:6929::ff00:ff 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:2a29::6904:2aff 0 3bbf:ff00:40:21:fffe:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:ffff::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 4f00:7265:6374:6929::6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929::6904:ff 0 3b1e:8000::6929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929:1:400:8004:ff 0 3bbf:ff80:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 0.255.255.0 0 0.0.0.0 0 invalid_IP_header_size - F zeek +1500557633.000000 - b100:7265:6374:6929:4:0:6904:ff 0 3b1e:400:ff:0:6929:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7200:400:65:6327:fffe:bfff:ff 0 ffff:0:ffff:ff3a:2000:342b:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:6929:400:0:4:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 9c00:7265:6374:6929::6927:ff 0 3bbf:ffa8:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:6374:2a29::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:ffdd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - b100:7265:1::69 0 c400:ff3b:bfff:0:40ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557633.000000 - 9c00:722a:6374:6929:400:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:ffff:ffff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - b100::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - 9c00:722a:6374:6929:1001:900:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - b100:7265:6374:6929::8004:ff 0 3bbf:ff00:40:0:40:0:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - 9c00:722a:6374:6929::6904:eff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - ffdb:ffff:3b00::ff:ffff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - b100:7265:63ce:69:7429:db00:690a:ff 0 3bbf:ff00:60:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - b100:7265:6374:6929:ffff:ffff:8004:ff 0 3bbf:ff80:ffff:0:4000:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - b100:7265:6300:669:7429:0:690a:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - b100:7265:6374:6929::693b:bdff 0 0:4000:ff:ffff:fdff:fff7:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - 0.71.103.97 0 99.116.0.128 0 invalid_IP_header_size - F zeek +1500557634.000000 - b100:7265:6300::8004:ff 0 3bbf:ff00:40:ff00:ff:ffff:fffd:f7ff 0 
invalid_inner_IP_version - F zeek +1500557634.000000 - b100:7265:63ce:69:7429:0:690a:b1 0 3bbf:ff00:40:0:ffff:ffff:ffe6:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - b100:7265:63ce:69:7429:db00:690a:ff 0 3bbf:ff00:40:0:29ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - 6500:0:6fd:188:4747:4747:6163:7400 0 0:2c29:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - 9c00:722a:6374:6929:8000:0:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - b100:6500:72:6369:2900:2a00:690a:ff 0 3bbf:ff00:40:0:ffef:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - b100:7265:6374:2a29::6904:ff 0 29bf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - b100:7265:6374:6929::6904:ff 0 3b00:40:ffbf:10:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - 9c00:7265:6374:6929::612f:fb 0 3bbf:ff00:40:0:ffff:ffff:fbfd:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - b100:7265:6300:2704:0:fffe:bfff:ff 0 ffff:0:ffff:ffc3:2000:82b:0:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - 9c00:722a:6374:6929:1000:100:6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f728 0 invalid_inner_IP_version - F zeek +1500557634.000000 - b100:7265:6374:6929:ff:ffff:ff04:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - b100:7265:0:ff00:69:2980:0:69 0 c4ff:bf00:ff00:3b:40ff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +1500557634.000000 - 9c00:7265:6374:69d1::6904:ff 0 3bbf:ff00:40:0:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +#close 2019-06-07-01-59-22 diff --git a/testing/btest/Baseline/core.leaks.broker.data/bro..stdout b/testing/btest/Baseline/core.leaks.broker.data/bro..stdout deleted file mode 100644 index a58dc4a480..0000000000 --- a/testing/btest/Baseline/core.leaks.broker.data/bro..stdout +++ /dev/null @@ -1,118 +0,0 @@ -Broker::BOOL -Broker::INT -Broker::COUNT -Broker::DOUBLE -Broker::STRING -Broker::ADDR -Broker::SUBNET -Broker::PORT -Broker::TIME -Broker::INTERVAL -Broker::ENUM -Broker::SET -Broker::TABLE -Broker::VECTOR -Broker::VECTOR -*************************** -T -F -1 -0 --1 -1 -0 -1.1 --11.1 -hello -1.2.3.4 -192.168.0.0/16 -22/tcp -42.0 -180.0 -Broker::BOOL -{ -two, -one, -three -} -{ -[two] = 2, -[one] = 1, -[three] = 3 -} -[zero, one, two] -[a=, b=bee, c=1] -[a=test, b=bee, c=1] -[a=test, b=testagain, c=1] -*************************** -0 -T -1 -T -F -T -2 -F -2 -T -1 -F -{ -bye -} -T -0 -{ - -} -*************************** -0 -[data=] -1 -T -42 -F -[data=] -2 -[data=broker::data{7}] -2 -37 -[data=broker::data{42}] -1 -[data=] -1 -T -0 -{ - -} -*************************** -0 -T -T -T -T -[hi, salutations, hello, greetings] -4 -[data=broker::data{hello}] -[data=broker::data{bah}] -[data=broker::data{hi}] -[hi, salutations, bah, greetings] -[data=broker::data{bah}] -[hi, salutations, greetings] -3 -T -0 -[] -*************************** -3 -T -T -T -[data=broker::data{hi}] -[data=broker::data{hello}] -[data=broker::data{37}] -3 -T -3 -[data=broker::data{goodbye}] diff --git a/testing/btest/Baseline/core.leaks.broker.data/zeek..stdout b/testing/btest/Baseline/core.leaks.broker.data/zeek..stdout new file mode 100644 index 0000000000..033e8af5f2 --- /dev/null +++ b/testing/btest/Baseline/core.leaks.broker.data/zeek..stdout @@ -0,0 +1,120 @@ +Broker::BOOL +Broker::INT +Broker::COUNT +Broker::DOUBLE +Broker::STRING +Broker::ADDR +Broker::SUBNET 
+Broker::PORT +Broker::TIME +Broker::INTERVAL +Broker::ENUM +Broker::SET +Broker::TABLE +Broker::VECTOR +Broker::VECTOR +*************************** +T +F +1 +0 +-1 +1 +0 +1.1 +-11.1 +hello +1.2.3.4 +192.168.0.0/16 +22/tcp +42.0 +180.0 +Broker::BOOL +{ +two, +one, +three +} +{ +[two] = 2, +[one] = 1, +[three] = 3 +} +[zero, one, two] +[a=, b=bee, c=1] +[a=test, b=bee, c=1] +[a=test, b=testagain, c=1] +*************************** +0 +T +1 +T +F +T +2 +F +2 +T +1 +F +{ +bye +} +T +0 +{ + +} +*************************** +0 +[data=] +1 +T +42 +F +[data=] +2 +[data=broker::data{7}] +2 +37 +[data=broker::data{42}] +1 +[data=] +1 +T +0 +{ + +} +*************************** +0 +T +T +T +T +[hi, salutations, hello, greetings] +4 +[data=broker::data{hello}] +[data=broker::data{bah}] +[data=broker::data{hi}] +[hi, salutations, bah, greetings] +[data=broker::data{bah}] +[hi, salutations, greetings] +3 +T +0 +[] +*************************** +3 +T +T +T +[data=broker::data{hi}] +[data=broker::data{hello}] +[data=broker::data{37}] +3 +T +3 +[data=broker::data{goodbye}] +*************************** +T diff --git a/testing/btest/Baseline/core.load-explicit-bro-suffix-fallback/out b/testing/btest/Baseline/core.load-explicit-bro-suffix-fallback/out new file mode 100644 index 0000000000..c67eefbfc1 --- /dev/null +++ b/testing/btest/Baseline/core.load-explicit-bro-suffix-fallback/out @@ -0,0 +1 @@ +loaded foo.zeek diff --git a/testing/btest/Baseline/core.load-file-extension/bro_only b/testing/btest/Baseline/core.load-file-extension/bro_only new file mode 100644 index 0000000000..bb2333014b --- /dev/null +++ b/testing/btest/Baseline/core.load-file-extension/bro_only @@ -0,0 +1 @@ +Bro script loaded diff --git a/testing/btest/Baseline/core.load-file-extension/bro_preferred b/testing/btest/Baseline/core.load-file-extension/bro_preferred new file mode 100644 index 0000000000..bb2333014b --- /dev/null +++ b/testing/btest/Baseline/core.load-file-extension/bro_preferred @@ -0,0 +1 @@ +Bro script loaded diff --git a/testing/btest/Baseline/core.load-file-extension/bro_preferred_2 b/testing/btest/Baseline/core.load-file-extension/bro_preferred_2 new file mode 100644 index 0000000000..bb2333014b --- /dev/null +++ b/testing/btest/Baseline/core.load-file-extension/bro_preferred_2 @@ -0,0 +1 @@ +Bro script loaded diff --git a/testing/btest/Baseline/core.load-file-extension/no_extension b/testing/btest/Baseline/core.load-file-extension/no_extension new file mode 100644 index 0000000000..b9cfe8016f --- /dev/null +++ b/testing/btest/Baseline/core.load-file-extension/no_extension @@ -0,0 +1 @@ +No file extension script loaded diff --git a/testing/btest/Baseline/core.load-file-extension/xyz_preferred b/testing/btest/Baseline/core.load-file-extension/xyz_preferred new file mode 100644 index 0000000000..8883b557a3 --- /dev/null +++ b/testing/btest/Baseline/core.load-file-extension/xyz_preferred @@ -0,0 +1 @@ +Non-standard file extension script loaded diff --git a/testing/btest/Baseline/core.load-file-extension/zeek_only b/testing/btest/Baseline/core.load-file-extension/zeek_only new file mode 100644 index 0000000000..129000059a --- /dev/null +++ b/testing/btest/Baseline/core.load-file-extension/zeek_only @@ -0,0 +1 @@ +Zeek script loaded diff --git a/testing/btest/Baseline/core.load-file-extension/zeek_preferred b/testing/btest/Baseline/core.load-file-extension/zeek_preferred new file mode 100644 index 0000000000..129000059a --- /dev/null +++ b/testing/btest/Baseline/core.load-file-extension/zeek_preferred @@ -0,0 +1 @@ +Zeek 
script loaded diff --git a/testing/btest/Baseline/core.load-file-extension/zeek_script_preferred b/testing/btest/Baseline/core.load-file-extension/zeek_script_preferred new file mode 100644 index 0000000000..129000059a --- /dev/null +++ b/testing/btest/Baseline/core.load-file-extension/zeek_script_preferred @@ -0,0 +1 @@ +Zeek script loaded diff --git a/testing/btest/Baseline/core.load-pkg/output b/testing/btest/Baseline/core.load-pkg/output index 119b2f9a18..ab438bfe3b 100644 --- a/testing/btest/Baseline/core.load-pkg/output +++ b/testing/btest/Baseline/core.load-pkg/output @@ -1 +1,2 @@ -Foo loaded +test.zeek loaded +__load__.zeek loaded diff --git a/testing/btest/Baseline/core.load-pkg/output2 b/testing/btest/Baseline/core.load-pkg/output2 new file mode 100644 index 0000000000..1021a36092 --- /dev/null +++ b/testing/btest/Baseline/core.load-pkg/output2 @@ -0,0 +1,2 @@ +test.zeek loaded +__load__.bro loaded diff --git a/testing/btest/Baseline/core.load-prefixes/output b/testing/btest/Baseline/core.load-prefixes/output index ea35b3a8c0..05e54cb3b9 100644 --- a/testing/btest/Baseline/core.load-prefixes/output +++ b/testing/btest/Baseline/core.load-prefixes/output @@ -1,4 +1,4 @@ -loaded lcl2.base.utils.site.bro -loaded lcl.base.utils.site.bro +loaded lcl2.base.utils.site.zeek +loaded lcl.base.utils.site.zeek loaded lcl2.base.protocols.http.bro -loaded lcl.base.protocols.http.bro +loaded lcl.base.protocols.http.zeek diff --git a/testing/btest/Baseline/core.load-unload/output2 b/testing/btest/Baseline/core.load-unload/output2 new file mode 100644 index 0000000000..bd327f15d4 --- /dev/null +++ b/testing/btest/Baseline/core.load-unload/output2 @@ -0,0 +1 @@ +Loaded: dontloadme.bro diff --git a/testing/btest/Baseline/core.negative-time/weird.log b/testing/btest/Baseline/core.negative-time/weird.log index 6c88ea26ef..ccc9a520af 100644 --- a/testing/btest/Baseline/core.negative-time/weird.log +++ b/testing/btest/Baseline/core.negative-time/weird.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path weird -#open 2016-05-23-20-20-21 +#open 2019-06-07-01-59-25 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1425182592.408334 - - - - - negative_packet_timestamp - F bro -#close 2016-05-23-20-20-21 +1425182592.408334 - - - - - negative_packet_timestamp - F zeek +#close 2019-06-07-01-59-25 diff --git a/testing/btest/Baseline/core.old_comm_usage/out b/testing/btest/Baseline/core.old_comm_usage/out deleted file mode 100644 index 219a2f5620..0000000000 --- a/testing/btest/Baseline/core.old_comm_usage/out +++ /dev/null @@ -1,2 +0,0 @@ -warning in /Users/jon/projects/bro/bro/testing/btest/.tmp/core.old_comm_usage/old_comm_usage.bro, line 6: deprecated (terminate_communication) -fatal error: Detected old, deprecated communication system usages that will not work unless you explicitly take action to initizialize and set up the old comm. system. Set the 'old_comm_usage_is_ok' flag to bypass this error if you've taken such actions, but the suggested solution is to port scripts to use the new Broker API. 
diff --git a/testing/btest/Baseline/core.option-errors-2/.stderr b/testing/btest/Baseline/core.option-errors-2/.stderr index 90011d5c85..ef9fb3ae4e 100644 --- a/testing/btest/Baseline/core.option-errors-2/.stderr +++ b/testing/btest/Baseline/core.option-errors-2/.stderr @@ -1 +1 @@ -error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-errors-2/option-errors.bro, line 2: option variable must be initialized (testbool) +error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-errors-2/option-errors.zeek, line 2: option variable must be initialized (testbool) diff --git a/testing/btest/Baseline/core.option-errors-3/.stderr b/testing/btest/Baseline/core.option-errors-3/.stderr index ffe699c739..a3c52db614 100644 --- a/testing/btest/Baseline/core.option-errors-3/.stderr +++ b/testing/btest/Baseline/core.option-errors-3/.stderr @@ -1 +1 @@ -error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-errors-3/option-errors.bro, line 3: option is not a modifiable lvalue (testopt) +error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-errors-3/option-errors.zeek, line 3: option is not a modifiable lvalue (testopt) diff --git a/testing/btest/Baseline/core.option-errors/.stderr b/testing/btest/Baseline/core.option-errors/.stderr index 27a73e180d..3e5dc6c86c 100644 --- a/testing/btest/Baseline/core.option-errors/.stderr +++ b/testing/btest/Baseline/core.option-errors/.stderr @@ -1 +1 @@ -error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-errors/option-errors.bro, line 4: no type given (testbool) +error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-errors/option-errors.zeek, line 4: no type given (testbool) diff --git a/testing/btest/Baseline/core.option-runtime-errors-10/.stderr b/testing/btest/Baseline/core.option-runtime-errors-10/.stderr index 3b4cf422f5..6f385fbb29 100644 --- a/testing/btest/Baseline/core.option-runtime-errors-10/.stderr +++ b/testing/btest/Baseline/core.option-runtime-errors-10/.stderr @@ -1 +1 @@ -error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-10/option-runtime-errors.bro, line 7: ID 'A' is not an option (Option::set_change_handler(A, option_changed, (coerce 0 to int))) +error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-10/option-runtime-errors.zeek, line 7: ID 'A' is not an option (Option::set_change_handler(A, option_changed, (coerce 0 to int))) diff --git a/testing/btest/Baseline/core.option-runtime-errors-11/.stderr b/testing/btest/Baseline/core.option-runtime-errors-11/.stderr index 8fd7de5d2e..b0f531df70 100644 --- a/testing/btest/Baseline/core.option-runtime-errors-11/.stderr +++ b/testing/btest/Baseline/core.option-runtime-errors-11/.stderr @@ -1 +1 @@ -error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-11/option-runtime-errors.bro, line 4: Option::on_change needs function argument; got 'count' for ID 'A' (Option::set_change_handler(A, A, (coerce 0 to int))) +error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-11/option-runtime-errors.zeek, line 4: Option::on_change needs function argument; got 'count' for ID 'A' (Option::set_change_handler(A, A, (coerce 0 to int))) diff --git a/testing/btest/Baseline/core.option-runtime-errors-12/.stderr b/testing/btest/Baseline/core.option-runtime-errors-12/.stderr index 635b287c6b..bd38eea092 100644 --- a/testing/btest/Baseline/core.option-runtime-errors-12/.stderr +++ 
b/testing/btest/Baseline/core.option-runtime-errors-12/.stderr @@ -1 +1 @@ -error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-12/option-runtime-errors.bro, line 7: Third argument of passed function has to be string in Option::on_change for ID 'A'; got 'count' (Option::set_change_handler(A, option_changed, (coerce 0 to int))) +error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-12/option-runtime-errors.zeek, line 7: Third argument of passed function has to be string in Option::on_change for ID 'A'; got 'count' (Option::set_change_handler(A, option_changed, (coerce 0 to int))) diff --git a/testing/btest/Baseline/core.option-runtime-errors-13/.stderr b/testing/btest/Baseline/core.option-runtime-errors-13/.stderr index 7b58339d8b..738cfff6e5 100644 --- a/testing/btest/Baseline/core.option-runtime-errors-13/.stderr +++ b/testing/btest/Baseline/core.option-runtime-errors-13/.stderr @@ -1 +1 @@ -error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-13/option-runtime-errors.bro, line 7: Wrong number of arguments for passed function in Option::on_change for ID 'A'; expected 2 or 3, got 4 (Option::set_change_handler(A, option_changed, (coerce 0 to int))) +error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-13/option-runtime-errors.zeek, line 7: Wrong number of arguments for passed function in Option::on_change for ID 'A'; expected 2 or 3, got 4 (Option::set_change_handler(A, option_changed, (coerce 0 to int))) diff --git a/testing/btest/Baseline/core.option-runtime-errors-2/.stderr b/testing/btest/Baseline/core.option-runtime-errors-2/.stderr index ad027f69db..25d102b9f7 100644 --- a/testing/btest/Baseline/core.option-runtime-errors-2/.stderr +++ b/testing/btest/Baseline/core.option-runtime-errors-2/.stderr @@ -1 +1 @@ -error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-2/option-runtime-errors.bro, line 3: Incompatible type for set of ID 'A': got 'string', need 'count' (Option::set(A, hi, )) +error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-2/option-runtime-errors.zeek, line 3: Incompatible type for set of ID 'A': got 'string', need 'count' (Option::set(A, hi, )) diff --git a/testing/btest/Baseline/core.option-runtime-errors-3/.stderr b/testing/btest/Baseline/core.option-runtime-errors-3/.stderr index 2c98b170b7..d784841888 100644 --- a/testing/btest/Baseline/core.option-runtime-errors-3/.stderr +++ b/testing/btest/Baseline/core.option-runtime-errors-3/.stderr @@ -1 +1 @@ -error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-3/option-runtime-errors.bro, line 3: ID 'A' is not an option (Option::set(A, 6, )) +error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-3/option-runtime-errors.zeek, line 3: ID 'A' is not an option (Option::set(A, 6, )) diff --git a/testing/btest/Baseline/core.option-runtime-errors-4/.stderr b/testing/btest/Baseline/core.option-runtime-errors-4/.stderr index a965ddd3ae..ec76dc4be4 100644 --- a/testing/btest/Baseline/core.option-runtime-errors-4/.stderr +++ b/testing/btest/Baseline/core.option-runtime-errors-4/.stderr @@ -1 +1 @@ -error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-4/option-runtime-errors.bro, line 7: Second argument of passed function has to be count in Option::on_change for ID 'A'; got 'bool' (Option::set_change_handler(A, option_changed, (coerce 0 to int))) +error in 
/Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-4/option-runtime-errors.zeek, line 7: Second argument of passed function has to be count in Option::on_change for ID 'A'; got 'bool' (Option::set_change_handler(A, option_changed, (coerce 0 to int))) diff --git a/testing/btest/Baseline/core.option-runtime-errors-5/.stderr b/testing/btest/Baseline/core.option-runtime-errors-5/.stderr index d931ff062a..4130f865d6 100644 --- a/testing/btest/Baseline/core.option-runtime-errors-5/.stderr +++ b/testing/btest/Baseline/core.option-runtime-errors-5/.stderr @@ -1 +1 @@ -error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-5/option-runtime-errors.bro, line 7: Wrong number of arguments for passed function in Option::on_change for ID 'A'; expected 2 or 3, got 1 (Option::set_change_handler(A, option_changed, (coerce 0 to int))) +error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-5/option-runtime-errors.zeek, line 7: Wrong number of arguments for passed function in Option::on_change for ID 'A'; expected 2 or 3, got 1 (Option::set_change_handler(A, option_changed, (coerce 0 to int))) diff --git a/testing/btest/Baseline/core.option-runtime-errors-6/.stderr b/testing/btest/Baseline/core.option-runtime-errors-6/.stderr index 593c239155..ee01ccfb1f 100644 --- a/testing/btest/Baseline/core.option-runtime-errors-6/.stderr +++ b/testing/btest/Baseline/core.option-runtime-errors-6/.stderr @@ -1 +1 @@ -error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-6/option-runtime-errors.bro, line 7: Passed function needs to return type 'count' for ID 'A'; got 'bool' (Option::set_change_handler(A, option_changed, (coerce 0 to int))) +error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-6/option-runtime-errors.zeek, line 7: Passed function needs to return type 'count' for ID 'A'; got 'bool' (Option::set_change_handler(A, option_changed, (coerce 0 to int))) diff --git a/testing/btest/Baseline/core.option-runtime-errors-7/.stderr b/testing/btest/Baseline/core.option-runtime-errors-7/.stderr index 57f7b5c21b..6d5f9f4595 100644 --- a/testing/btest/Baseline/core.option-runtime-errors-7/.stderr +++ b/testing/btest/Baseline/core.option-runtime-errors-7/.stderr @@ -1 +1 @@ -error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-7/option-runtime-errors.bro, line 7: Option::on_change needs function argument; not hook or event (Option::set_change_handler(A, option_changed, (coerce 0 to int))) +error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-7/option-runtime-errors.zeek, line 7: Option::on_change needs function argument; not hook or event (Option::set_change_handler(A, option_changed, (coerce 0 to int))) diff --git a/testing/btest/Baseline/core.option-runtime-errors-8/.stderr b/testing/btest/Baseline/core.option-runtime-errors-8/.stderr index 2e7735f433..90cec05f47 100644 --- a/testing/btest/Baseline/core.option-runtime-errors-8/.stderr +++ b/testing/btest/Baseline/core.option-runtime-errors-8/.stderr @@ -1 +1 @@ -error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-8/option-runtime-errors.bro, line 7: Option::on_change needs function argument; not hook or event (Option::set_change_handler(A, option_changed, (coerce 0 to int))) +error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-8/option-runtime-errors.zeek, line 7: Option::on_change needs function argument; not 
hook or event (Option::set_change_handler(A, option_changed, (coerce 0 to int))) diff --git a/testing/btest/Baseline/core.option-runtime-errors-9/.stderr b/testing/btest/Baseline/core.option-runtime-errors-9/.stderr index a95196eef7..f2ce6efd83 100644 --- a/testing/btest/Baseline/core.option-runtime-errors-9/.stderr +++ b/testing/btest/Baseline/core.option-runtime-errors-9/.stderr @@ -1 +1 @@ -error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-9/option-runtime-errors.bro, line 5: Could not find ID named 'A' (Option::set_change_handler(A, option_changed, (coerce 0 to int))) +error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors-9/option-runtime-errors.zeek, line 5: Could not find ID named 'A' (Option::set_change_handler(A, option_changed, (coerce 0 to int))) diff --git a/testing/btest/Baseline/core.option-runtime-errors/.stderr b/testing/btest/Baseline/core.option-runtime-errors/.stderr index f3ad46d382..a8362f52c0 100644 --- a/testing/btest/Baseline/core.option-runtime-errors/.stderr +++ b/testing/btest/Baseline/core.option-runtime-errors/.stderr @@ -1 +1 @@ -error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors/option-runtime-errors.bro, line 8: Could not find ID named 'B' (Option::set(B, 6, )) +error in /Users/johanna/corelight/bro/testing/btest/.tmp/core.option-runtime-errors/option-runtime-errors.zeek, line 9: Could not find ID named 'B' (Option::set(B, 6, )) diff --git a/testing/btest/Baseline/core.pcap.filter-error/output b/testing/btest/Baseline/core.pcap.filter-error/output index 82804bb483..f52fdf7e0a 100644 --- a/testing/btest/Baseline/core.pcap.filter-error/output +++ b/testing/btest/Baseline/core.pcap.filter-error/output @@ -1,3 +1,3 @@ -fatal error in /home/robin/bro/master/scripts/base/frameworks/packet-filter/./main.bro, line 282: Bad pcap filter 'kaputt' +fatal error in /home/robin/bro/master/scripts/base/frameworks/packet-filter/./main.zeek, line 282: Bad pcap filter 'kaputt' ---- error, cannot compile BPF filter "kaputt, too" diff --git a/testing/btest/Baseline/core.pcap.read-trace-with-filter/packet_filter.log b/testing/btest/Baseline/core.pcap.read-trace-with-filter/packet_filter.log index 3c442060c0..04e2ae193e 100644 --- a/testing/btest/Baseline/core.pcap.read-trace-with-filter/packet_filter.log +++ b/testing/btest/Baseline/core.pcap.read-trace-with-filter/packet_filter.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path packet_filter -#open 2016-07-13-16-12-56 +#open 2019-06-07-01-59-28 #fields ts node filter init success #types time string string bool bool -1468426376.541368 bro port 50000 T T -#close 2016-07-13-16-12-56 +1559872768.563861 zeek port 50000 T T +#close 2019-06-07-01-59-28 diff --git a/testing/btest/Baseline/core.plugins.hooks/output b/testing/btest/Baseline/core.plugins.hooks/output deleted file mode 100644 index 87f20f8512..0000000000 --- a/testing/btest/Baseline/core.plugins.hooks/output +++ /dev/null @@ -1,2289 +0,0 @@ -0.000000 MetaHookPost CallFunction(Analyzer::disable_analyzer, (Analyzer::ANALYZER_BACKDOOR)) -> -0.000000 MetaHookPost CallFunction(Analyzer::disable_analyzer, (Analyzer::ANALYZER_INTERCONN)) -> -0.000000 MetaHookPost CallFunction(Analyzer::disable_analyzer, (Analyzer::ANALYZER_STEPPINGSTONE)) -> -0.000000 MetaHookPost CallFunction(Analyzer::disable_analyzer, (Analyzer::ANALYZER_TCPSTATS)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_AYIYA, 5072/udp)) -> -0.000000 MetaHookPost 
CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_DHCP, 67/udp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_DHCP, 68/udp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_DNP3, 20000/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_DNS, 137/udp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_DNS, 53/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_DNS, 53/udp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_DNS, 5353/udp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_DNS, 5355/udp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_FTP, 21/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_FTP, 2811/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_GTPV1, 2123/udp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_GTPV1, 2152/udp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_HTTP, 1080/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_HTTP, 3128/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_HTTP, 631/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_HTTP, 80/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_HTTP, 8000/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_HTTP, 8080/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_HTTP, 81/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_HTTP, 8888/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_IRC, 6666/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_IRC, 6667/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_IRC, 6668/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_IRC, 6669/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_MODBUS, 502/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_RADIUS, 1812/udp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SMTP, 25/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SMTP, 587/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SNMP, 161/udp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SNMP, 162/udp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SOCKS, 1080/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSH, 22/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 443/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 5223/tcp)) -> -0.000000 MetaHookPost 
CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 563/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 585/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 614/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 636/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 989/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 990/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 992/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 993/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 995/tcp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SYSLOG, 514/udp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_TEREDO, 3544/udp)) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_AYIYA, {5072/udp})) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_DHCP, {67/udp,68/udp})) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_DNP3, {20000/tcp})) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_DNS, {5355/udp,53/tcp,5353/udp,137/udp,53/udp})) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_FTP, {2811/tcp,21/tcp})) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_GTPV1, {2152/udp,2123/udp})) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_HTTP, {631/tcp,8888/tcp,3128/tcp,80/tcp,1080/tcp,8000/tcp,81/tcp,8080/tcp})) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_IRC, {6669/tcp,6666/tcp,6668/tcp,6667/tcp})) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_MODBUS, {502/tcp})) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_RADIUS, {1812/udp})) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_SMTP, {25/tcp,587/tcp})) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_SNMP, {162/udp,161/udp})) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_SOCKS, {1080/tcp})) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_SSH, {22/tcp})) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_SSL, {5223/tcp,585/tcp,614/tcp,993/tcp,636/tcp,989/tcp,995/tcp,443/tcp,563/tcp,990/tcp,992/tcp})) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_SYSLOG, {514/udp})) -> -0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_TEREDO, {3544/udp})) -> -0.000000 MetaHookPost CallFunction(Cluster::is_enabled, ()) -> -0.000000 MetaHookPost CallFunction(Cluster::is_enabled, ()) -> -0.000000 MetaHookPost CallFunction(Cluster::is_enabled, ()) -> -0.000000 MetaHookPost CallFunction(Cluster::is_enabled, ()) -> -0.000000 MetaHookPost CallFunction(Cluster::is_enabled, ()) -> -0.000000 MetaHookPost 
CallFunction(Cluster::is_enabled, ()) -> -0.000000 MetaHookPost CallFunction(Cluster::is_enabled, ()) -> -0.000000 MetaHookPost CallFunction(Files::register_analyzer_add_callback, (Files::ANALYZER_EXTRACT, FileExtract::on_add{ if (!FileExtract::args?$extract_filename) FileExtract::args$extract_filename = cat(extract-, FileExtract::f$source, -, FileExtract::f$id)FileExtract::f$info$extracted = FileExtract::args$extract_filenameFileExtract::args$extract_filename = build_path_compressed(FileExtract::prefix, FileExtract::args$extract_filename)mkdir(FileExtract::prefix)})) -> -0.000000 MetaHookPost CallFunction(Files::register_protocol, (Analyzer::ANALYZER_FTP_DATA, [get_file_handle=FTP::get_file_handle{ if (!FTP::c$id$resp_h, FTP::c$id$resp_p in FTP::ftp_data_expected) return ()return (cat(Analyzer::ANALYZER_FTP_DATA, FTP::c$start_time, FTP::c$id, FTP::is_orig))}, describe=FTP::describe_file{ FTP::cid{ if (FTP::f$source != FTP) return ()for ([FTP::cid] in FTP::f$conns) { if (FTP::f$conns[FTP::cid]?$ftp) return (FTP::describe(FTP::f$conns[FTP::cid]$ftp))}return ()}}])) -> -0.000000 MetaHookPost CallFunction(Files::register_protocol, (Analyzer::ANALYZER_HTTP, [get_file_handle=HTTP::get_file_handle{ if (!HTTP::c?$http) return ()if (HTTP::c$http$range_request && !HTTP::is_orig) { return (cat(Analyzer::ANALYZER_HTTP, HTTP::is_orig, HTTP::c$id$orig_h, HTTP::build_url(HTTP::c$http)))}else{ HTTP::mime_depth = HTTP::is_orig ? HTTP::c$http$orig_mime_depth : HTTP::c$http$resp_mime_depthreturn (cat(Analyzer::ANALYZER_HTTP, HTTP::c$start_time, HTTP::is_orig, HTTP::c$http$trans_depth, HTTP::mime_depth, id_string(HTTP::c$id)))}}, describe=HTTP::describe_file{ HTTP::cid{ if (HTTP::f$source != HTTP) return ()for ([HTTP::cid] in HTTP::f$conns) { if (HTTP::f$conns[HTTP::cid]?$http) return (HTTP::build_url_http(HTTP::f$conns[HTTP::cid]$http))}return ()}}])) -> -0.000000 MetaHookPost CallFunction(Files::register_protocol, (Analyzer::ANALYZER_IRC_DATA, [get_file_handle=IRC::get_file_handle{ return (cat(Analyzer::ANALYZER_IRC_DATA, IRC::c$start_time, IRC::c$id, IRC::is_orig))}, describe=anonymous-function{ return ()}])) -> -0.000000 MetaHookPost CallFunction(Files::register_protocol, (Analyzer::ANALYZER_SMTP, [get_file_handle=SMTP::get_file_handle{ return (cat(Analyzer::ANALYZER_SMTP, SMTP::c$start_time, SMTP::c$smtp$trans_depth, SMTP::c$smtp_state$mime_depth))}, describe=SMTP::describe_file{ SMTP::cid{ if (SMTP::f$source != SMTP) return ()for ([SMTP::cid] in SMTP::f$conns) { SMTP::c = SMTP::f$conns[SMTP::cid]return (SMTP::describe(SMTP::c$smtp))}return ()}}])) -> -0.000000 MetaHookPost CallFunction(Files::register_protocol, (Analyzer::ANALYZER_SSL, [get_file_handle=SSL::get_file_handle{ return ()}, describe=SSL::describe_file{ SSL::cid{ if (SSL::f$source != SSL || !SSL::f?$info || !SSL::f$info?$x509 || !SSL::f$info$x509?$certificate) return ()for ([SSL::cid] in SSL::f$conns) { if (SSL::f$conns[SSL::cid]?$ssl) { SSL::c = SSL::f$conns[SSL::cid]return (cat(SSL::c$id$resp_h, :, SSL::c$id$resp_p))}}return (cat(Serial: , SSL::f$info$x509$certificate$serial, Subject: , SSL::f$info$x509$certificate$subject, Issuer: , SSL::f$info$x509$certificate$issuer))}}])) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Cluster::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Communication::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Conn::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (DHCP::LOG)) -> -0.000000 MetaHookPost 
CallFunction(Log::add_default_filter, (DNP3::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (DNS::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (DPD::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (FTP::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Files::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (HTTP::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (IRC::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Intel::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Modbus::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Notice::ALARM_LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Notice::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (PacketFilter::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (RADIUS::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Reporter::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (SMTP::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (SNMP::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (SOCKS::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (SSH::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (SSL::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Signatures::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Software::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Syslog::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Tunnel::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Unified2::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Weird::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, (X509::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (Cluster::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (Communication::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (Conn::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (DHCP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (DNP3::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (DNS::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (DPD::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, 
log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (FTP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (Files::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (HTTP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (IRC::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (Intel::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (Modbus::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (Notice::ALARM_LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (Notice::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (PacketFilter::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (RADIUS::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (Reporter::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (SMTP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (SNMP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (SOCKS::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (SSH::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (SSL::LOG, 
[name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (Signatures::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (Software::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (Syslog::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (Tunnel::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (Unified2::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (Weird::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, (X509::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (Cluster::LOG, [columns=, ev=])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (Communication::LOG, [columns=, ev=])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (Conn::LOG, [columns=, ev=Conn::log_conn])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (DHCP::LOG, [columns=, ev=DHCP::log_dhcp])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (DNP3::LOG, [columns=, ev=DNP3::log_dnp3])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (DNS::LOG, [columns=, ev=DNS::log_dns])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (DPD::LOG, [columns=, ev=])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (FTP::LOG, [columns=, ev=FTP::log_ftp])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (Files::LOG, [columns=, ev=Files::log_files])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (HTTP::LOG, [columns=, ev=HTTP::log_http])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (IRC::LOG, [columns=, ev=IRC::irc_log])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (Intel::LOG, [columns=, ev=Intel::log_intel])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (Modbus::LOG, [columns=, ev=Modbus::log_modbus])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (Notice::ALARM_LOG, [columns=, ev=])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (Notice::LOG, [columns=, ev=Notice::log_notice])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (PacketFilter::LOG, [columns=, ev=])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (RADIUS::LOG, [columns=, ev=RADIUS::log_radius])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, 
(Reporter::LOG, [columns=, ev=])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (SMTP::LOG, [columns=, ev=SMTP::log_smtp])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (SNMP::LOG, [columns=, ev=SNMP::log_snmp])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (SOCKS::LOG, [columns=, ev=SOCKS::log_socks])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (SSH::LOG, [columns=, ev=SSH::log_ssh])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (SSL::LOG, [columns=, ev=SSL::log_ssl])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (Signatures::LOG, [columns=, ev=Signatures::log_signature])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (Software::LOG, [columns=, ev=Software::log_software])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (Syslog::LOG, [columns=, ev=])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (Tunnel::LOG, [columns=, ev=])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (Unified2::LOG, [columns=, ev=Unified2::log_unified2])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (Weird::LOG, [columns=, ev=Weird::log_weird])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, (X509::LOG, [columns=, ev=X509::log_x509])) -> -0.000000 MetaHookPost CallFunction(Log::default_path_func, (PacketFilter::LOG, , [ts=1402676765.077455, node=bro, filter=ip or not ip, init=T, success=T])) -> -0.000000 MetaHookPost CallFunction(Log::write, (PacketFilter::LOG, [ts=1402676765.077455, node=bro, filter=ip or not ip, init=T, success=T])) -> -0.000000 MetaHookPost CallFunction(Notice::want_pp, ()) -> -0.000000 MetaHookPost CallFunction(PacketFilter::build, ()) -> -0.000000 MetaHookPost CallFunction(PacketFilter::combine_filters, (ip or not ip, and, )) -> -0.000000 MetaHookPost CallFunction(PacketFilter::install, ()) -> -0.000000 MetaHookPost CallFunction(SumStats::add_observe_plugin_dependency, (SumStats::STD_DEV, SumStats::VARIANCE)) -> -0.000000 MetaHookPost CallFunction(SumStats::add_observe_plugin_dependency, (SumStats::VARIANCE, SumStats::AVERAGE)) -> -0.000000 MetaHookPost CallFunction(SumStats::register_observe_plugin, (SumStats::AVERAGE, anonymous-function{ if (!SumStats::rv?$average) SumStats::rv$average = SumStats::valelseSumStats::rv$average += (SumStats::val - SumStats::rv$average) / (coerce SumStats::rv$num to double)})) -> -0.000000 MetaHookPost CallFunction(SumStats::register_observe_plugin, (SumStats::HLL_UNIQUE, anonymous-function{ if (!SumStats::rv?$card) { SumStats::rv$card = hll_cardinality_init(SumStats::r$hll_error_margin, SumStats::r$hll_confidence)SumStats::rv$hll_error_margin = SumStats::r$hll_error_marginSumStats::rv$hll_confidence = SumStats::r$hll_confidence}hll_cardinality_add(SumStats::rv$card, SumStats::obs)SumStats::rv$hll_unique = double_to_count(hll_cardinality_estimate(SumStats::rv$card))})) -> -0.000000 MetaHookPost CallFunction(SumStats::register_observe_plugin, (SumStats::LAST, anonymous-function{ if (0 < SumStats::r$num_last_elements) { if (!SumStats::rv?$last_elements) SumStats::rv$last_elements = Queue::init((coerce [$max_len=SumStats::r$num_last_elements] to Queue::Settings))Queue::put(SumStats::rv$last_elements, SumStats::obs)}})) -> -0.000000 MetaHookPost CallFunction(SumStats::register_observe_plugin, (SumStats::MAX, anonymous-function{ if (!SumStats::rv?$max) SumStats::rv$max = SumStats::valelseif (SumStats::rv$max < SumStats::val) SumStats::rv$max = SumStats::val})) -> -0.000000 MetaHookPost 
CallFunction(SumStats::register_observe_plugin, (SumStats::MIN, anonymous-function{ if (!SumStats::rv?$min) SumStats::rv$min = SumStats::valelseif (SumStats::val < SumStats::rv$min) SumStats::rv$min = SumStats::val})) -> -0.000000 MetaHookPost CallFunction(SumStats::register_observe_plugin, (SumStats::SAMPLE, anonymous-function{ SumStats::sample_add_sample(SumStats::obs, SumStats::rv)})) -> -0.000000 MetaHookPost CallFunction(SumStats::register_observe_plugin, (SumStats::STD_DEV, anonymous-function{ SumStats::calc_std_dev(SumStats::rv)})) -> -0.000000 MetaHookPost CallFunction(SumStats::register_observe_plugin, (SumStats::SUM, anonymous-function{ SumStats::rv$sum += SumStats::val})) -> -0.000000 MetaHookPost CallFunction(SumStats::register_observe_plugin, (SumStats::TOPK, anonymous-function{ topk_add(SumStats::rv$topk, SumStats::obs)})) -> -0.000000 MetaHookPost CallFunction(SumStats::register_observe_plugin, (SumStats::UNIQUE, anonymous-function{ if (!SumStats::rv?$unique_vals) SumStats::rv$unique_vals = (coerce set() to set[SumStats::Observation])if (SumStats::r?$unique_max) SumStats::rv$unique_max = SumStats::r$unique_maxif (!SumStats::r?$unique_max || flattenSumStats::rv$unique_vals <= SumStats::r$unique_max) add SumStats::rv$unique_vals[SumStats::obs]SumStats::rv$unique = flattenSumStats::rv$unique_vals})) -> -0.000000 MetaHookPost CallFunction(SumStats::register_observe_plugin, (SumStats::VARIANCE, anonymous-function{ if (1 < SumStats::rv$num) SumStats::rv$var_s += ((SumStats::val - SumStats::rv$prev_avg) * (SumStats::val - SumStats::rv$average))SumStats::calc_variance(SumStats::rv)SumStats::rv$prev_avg = SumStats::rv$average})) -> -0.000000 MetaHookPost CallFunction(SumStats::register_observe_plugins, ()) -> -0.000000 MetaHookPost CallFunction(bro_init, ()) -> -0.000000 MetaHookPost CallFunction(filter_change_tracking, ()) -> -0.000000 MetaHookPost CallFunction(set_to_regex, ({}, (^\.?|\.)(~~)$)) -> -0.000000 MetaHookPost CallFunction(set_to_regex, ({}, (^\.?|\.)(~~)$)) -> -0.000000 MetaHookPost DrainEvents() -> -0.000000 MetaHookPost DrainEvents() -> -0.000000 MetaHookPost LoadFile(../main) -> -1 -0.000000 MetaHookPost LoadFile(../main) -> -1 -0.000000 MetaHookPost LoadFile(../main) -> -1 -0.000000 MetaHookPost LoadFile(../main) -> -1 -0.000000 MetaHookPost LoadFile(../main) -> -1 -0.000000 MetaHookPost LoadFile(../main) -> -1 -0.000000 MetaHookPost LoadFile(../main) -> -1 -0.000000 MetaHookPost LoadFile(../main) -> -1 -0.000000 MetaHookPost LoadFile(../main) -> -1 -0.000000 MetaHookPost LoadFile(../main) -> -1 -0.000000 MetaHookPost LoadFile(../main) -> -1 -0.000000 MetaHookPost LoadFile(../main) -> -1 -0.000000 MetaHookPost LoadFile(../main) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_ARP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_AYIYA.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_BackDoor.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_BitTorrent.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_ConnSize.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_DCE_RPC.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_DHCP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_DNP3.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_DNS.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_FTP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_FTP.functions.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_File.events.bif.bro) -> -1 -0.000000 MetaHookPost 
LoadFile(./Bro_FileExtract.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_FileExtract.functions.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_FileHash.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_Finger.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_GTPv1.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_Gnutella.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_HTTP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_HTTP.functions.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_ICMP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_IRC.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_Ident.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_InterConn.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_Login.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_Login.functions.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_MIME.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_Modbus.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_NCP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_NTP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_NetBIOS.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_NetBIOS.functions.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_NetFlow.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_PIA.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_POP3.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_RADIUS.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_RPC.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_SMB.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_SMTP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_SMTP.functions.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_SNMP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_SNMP.types.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_SOCKS.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_SSH.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_SSL.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_SteppingStone.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_Syslog.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_TCP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_TCP.functions.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_Teredo.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_UDP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_Unified2.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_Unified2.types.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_X509.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_X509.functions.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_X509.types.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./Bro_ZIP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./actions/add-geodata) -> -1 -0.000000 MetaHookPost LoadFile(./actions/drop) -> -1 -0.000000 MetaHookPost LoadFile(./actions/email_admin) -> -1 -0.000000 MetaHookPost LoadFile(./actions/page) -> -1 -0.000000 MetaHookPost LoadFile(./actions/pp-alarms) -> -1 -0.000000 MetaHookPost LoadFile(./addrs) -> -1 -0.000000 MetaHookPost LoadFile(./analyzer.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./average) -> -1 -0.000000 MetaHookPost LoadFile(./average) -> -1 -0.000000 MetaHookPost LoadFile(./bloom-filter.bif.bro) -> -1 -0.000000 MetaHookPost 
LoadFile(./bro.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./broxygen.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./cardinality-counter.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./const.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./consts) -> -1 -0.000000 MetaHookPost LoadFile(./consts) -> -1 -0.000000 MetaHookPost LoadFile(./consts) -> -1 -0.000000 MetaHookPost LoadFile(./consts) -> -1 -0.000000 MetaHookPost LoadFile(./consts) -> -1 -0.000000 MetaHookPost LoadFile(./consts) -> -1 -0.000000 MetaHookPost LoadFile(./consts) -> -1 -0.000000 MetaHookPost LoadFile(./consts) -> -1 -0.000000 MetaHookPost LoadFile(./consts) -> -1 -0.000000 MetaHookPost LoadFile(./consts) -> -1 -0.000000 MetaHookPost LoadFile(./consts) -> -1 -0.000000 MetaHookPost LoadFile(./consts) -> -1 -0.000000 MetaHookPost LoadFile(./consts.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./consts.bro) -> -1 -0.000000 MetaHookPost LoadFile(./contents) -> -1 -0.000000 MetaHookPost LoadFile(./dcc-send) -> -1 -0.000000 MetaHookPost LoadFile(./dcc-send) -> -1 -0.000000 MetaHookPost LoadFile(./entities) -> -1 -0.000000 MetaHookPost LoadFile(./entities) -> -1 -0.000000 MetaHookPost LoadFile(./entities) -> -1 -0.000000 MetaHookPost LoadFile(./entities) -> -1 -0.000000 MetaHookPost LoadFile(./event.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./exec) -> -1 -0.000000 MetaHookPost LoadFile(./extend-email/hostnames) -> -1 -0.000000 MetaHookPost LoadFile(./file_analysis.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./files) -> -1 -0.000000 MetaHookPost LoadFile(./files) -> -1 -0.000000 MetaHookPost LoadFile(./files) -> -1 -0.000000 MetaHookPost LoadFile(./files) -> -1 -0.000000 MetaHookPost LoadFile(./files) -> -1 -0.000000 MetaHookPost LoadFile(./functions.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./gridftp) -> -1 -0.000000 MetaHookPost LoadFile(./hll_unique) -> -1 -0.000000 MetaHookPost LoadFile(./inactivity) -> -1 -0.000000 MetaHookPost LoadFile(./info) -> -1 -0.000000 MetaHookPost LoadFile(./info) -> -1 -0.000000 MetaHookPost LoadFile(./info) -> -1 -0.000000 MetaHookPost LoadFile(./info) -> -1 -0.000000 MetaHookPost LoadFile(./info) -> -1 -0.000000 MetaHookPost LoadFile(./input) -> -1 -0.000000 MetaHookPost LoadFile(./input.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./last) -> -1 -0.000000 MetaHookPost LoadFile(./logging.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./magic) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost 
LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main) -> -1 -0.000000 MetaHookPost LoadFile(./main.bro) -> -1 -0.000000 MetaHookPost LoadFile(./max) -> -1 -0.000000 MetaHookPost LoadFile(./min) -> -1 -0.000000 MetaHookPost LoadFile(./mozilla-ca-list) -> -1 -0.000000 MetaHookPost LoadFile(./netstats) -> -1 -0.000000 MetaHookPost LoadFile(./non-cluster) -> -1 -0.000000 MetaHookPost LoadFile(./non-cluster) -> -1 -0.000000 MetaHookPost LoadFile(./patterns) -> -1 -0.000000 MetaHookPost LoadFile(./plugins) -> -1 -0.000000 MetaHookPost LoadFile(./polling) -> -1 -0.000000 MetaHookPost LoadFile(./postprocessors) -> -1 -0.000000 MetaHookPost LoadFile(./readers/ascii) -> -1 -0.000000 MetaHookPost LoadFile(./readers/benchmark) -> -1 -0.000000 MetaHookPost LoadFile(./readers/binary) -> -1 -0.000000 MetaHookPost LoadFile(./readers/raw) -> -1 -0.000000 MetaHookPost LoadFile(./readers/sqlite) -> -1 -0.000000 MetaHookPost LoadFile(./reporter.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./sample) -> -1 -0.000000 MetaHookPost LoadFile(./scp) -> -1 -0.000000 MetaHookPost LoadFile(./sftp) -> -1 -0.000000 MetaHookPost LoadFile(./site) -> -1 -0.000000 MetaHookPost LoadFile(./std-dev) -> -1 -0.000000 MetaHookPost LoadFile(./strings.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./sum) -> -1 -0.000000 MetaHookPost LoadFile(./top-k.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./topk) -> -1 -0.000000 MetaHookPost LoadFile(./types.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./unique) -> -1 -0.000000 MetaHookPost LoadFile(./utils) -> -1 -0.000000 MetaHookPost LoadFile(./utils) -> -1 -0.000000 MetaHookPost LoadFile(./utils) -> -1 -0.000000 MetaHookPost LoadFile(./utils) -> -1 -0.000000 MetaHookPost LoadFile(./utils) -> -1 -0.000000 MetaHookPost LoadFile(./utils) -> -1 -0.000000 MetaHookPost LoadFile(./utils) -> -1 -0.000000 MetaHookPost LoadFile(./utils-commands) -> -1 -0.000000 MetaHookPost LoadFile(./utils-commands) -> -1 -0.000000 MetaHookPost LoadFile(./utils-commands) -> -1 -0.000000 MetaHookPost LoadFile(./utils.bro) -> -1 -0.000000 MetaHookPost LoadFile(./variance) -> -1 -0.000000 MetaHookPost LoadFile(./variance) -> -1 -0.000000 MetaHookPost LoadFile(./weird) -> -1 -0.000000 MetaHookPost LoadFile(./writers/ascii) -> -1 -0.000000 MetaHookPost LoadFile(./writers/dataseries) -> -1 -0.000000 MetaHookPost LoadFile(./writers/elasticsearch) -> -1 -0.000000 MetaHookPost LoadFile(./writers/none) -> -1 -0.000000 MetaHookPost LoadFile(./writers/sqlite) -> -1 -0.000000 MetaHookPost 
LoadFile(/Users/robin/bro/dynamic-plugins-2.3/testing/btest/.tmp/core.plugins.hooks/hooks.bro) -> -1 -0.000000 MetaHookPost LoadFile(/Users/robin/bro/dynamic-plugins-2.3/testing/btest/.tmp/core.plugins.hooks/lib/bif/__load__.bro) -> -1 -0.000000 MetaHookPost LoadFile(/Users/robin/bro/dynamic-plugins-2.3/testing/btest/.tmp/core.plugins.hooks/scripts/__load__.bro) -> -1 -0.000000 MetaHookPost LoadFile(base/bif) -> -1 -0.000000 MetaHookPost LoadFile(base/bif/analyzer.bif) -> -1 -0.000000 MetaHookPost LoadFile(base/bif/bro.bif) -> -1 -0.000000 MetaHookPost LoadFile(base/bif/const.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(base/bif/event.bif) -> -1 -0.000000 MetaHookPost LoadFile(base/bif/file_analysis.bif) -> -1 -0.000000 MetaHookPost LoadFile(base/bif/input.bif) -> -1 -0.000000 MetaHookPost LoadFile(base/bif/logging.bif) -> -1 -0.000000 MetaHookPost LoadFile(base/bif/plugins) -> -1 -0.000000 MetaHookPost LoadFile(base/bif/plugins/Bro_SNMP.types.bif) -> -1 -0.000000 MetaHookPost LoadFile(base/bif/reporter.bif) -> -1 -0.000000 MetaHookPost LoadFile(base/bif/strings.bif) -> -1 -0.000000 MetaHookPost LoadFile(base/bif/types.bif) -> -1 -0.000000 MetaHookPost LoadFile(base/files/extract) -> -1 -0.000000 MetaHookPost LoadFile(base/files/hash) -> -1 -0.000000 MetaHookPost LoadFile(base/files/hash) -> -1 -0.000000 MetaHookPost LoadFile(base/files/unified2) -> -1 -0.000000 MetaHookPost LoadFile(base/files/x509) -> -1 -0.000000 MetaHookPost LoadFile(base/files/x509) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/analyzer) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/analyzer) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/analyzer) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/analyzer) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/cluster) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/cluster) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/cluster) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/cluster) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/cluster) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/cluster) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/communication) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/control) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/control) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/dpd) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/files) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/files) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/files) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/files) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/files) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/files) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/files) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/files) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/files) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/files) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/files) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/input) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/input) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/intel) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/logging) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/logging) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/notice) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/notice) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/notice) -> -1 -0.000000 
MetaHookPost LoadFile(base/frameworks/notice) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/notice) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/notice) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/notice) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/notice) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/notice) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/notice) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/packet-filter) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/packet-filter) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/packet-filter/utils) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/reporter) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/reporter) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/signatures) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/software) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/sumstats) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/sumstats) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/sumstats) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/sumstats) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/sumstats/main) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/tunnels) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/tunnels) -> -1 -0.000000 MetaHookPost LoadFile(base/frameworks/tunnels) -> -1 -0.000000 MetaHookPost LoadFile(base/init-default.bro) -> -1 -0.000000 MetaHookPost LoadFile(base/misc/find-checksum-offloading) -> -1 -0.000000 MetaHookPost LoadFile(base/misc/find-filtered-trace) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/conn) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/conn) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/conn) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/dhcp) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/dnp3) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/dns) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/ftp) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/http) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/irc) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/modbus) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/pop3) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/radius) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/smtp) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/snmp) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/socks) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/ssh) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/ssl) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/ssl) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/ssl) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/syslog) -> -1 -0.000000 MetaHookPost LoadFile(base/protocols/tunnels) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/active-http) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/addrs) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/addrs) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/addrs) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/addrs) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/addrs) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/addrs) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/addrs) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/conn-ids) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/conn-ids) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/conn-ids) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/conn-ids) -> -1 -0.000000 
MetaHookPost LoadFile(base/utils/conn-ids) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/conn-ids) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/conn-ids) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/conn-ids) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/dir) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/dir) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/directions-and-hosts) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/directions-and-hosts) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/directions-and-hosts) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/directions-and-hosts) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/exec) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/exec) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/files) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/files) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/files) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/files) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/files) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/files) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/numbers) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/numbers) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/numbers) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/numbers) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/paths) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/paths) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/paths) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/paths) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/paths) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/paths) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/patterns) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/queue) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/queue) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/queue) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/site) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/site) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/site) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/site) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/site) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/site) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/site) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/strings) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/strings) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/strings) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/thresholds) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/thresholds) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/time) -> -1 -0.000000 MetaHookPost LoadFile(base/utils/urls) -> -1 -0.000000 MetaHookPost QueueEvent(bro_init()) -> false -0.000000 MetaHookPost QueueEvent(filter_change_tracking()) -> false -0.000000 MetaHookPre CallFunction(Analyzer::disable_analyzer, (Analyzer::ANALYZER_BACKDOOR)) -0.000000 MetaHookPre CallFunction(Analyzer::disable_analyzer, (Analyzer::ANALYZER_INTERCONN)) -0.000000 MetaHookPre CallFunction(Analyzer::disable_analyzer, (Analyzer::ANALYZER_STEPPINGSTONE)) -0.000000 MetaHookPre CallFunction(Analyzer::disable_analyzer, (Analyzer::ANALYZER_TCPSTATS)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_AYIYA, 5072/udp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_DHCP, 67/udp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_DHCP, 68/udp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_DNP3, 
20000/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_DNS, 137/udp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_DNS, 53/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_DNS, 53/udp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_DNS, 5353/udp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_DNS, 5355/udp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_FTP, 21/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_FTP, 2811/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_GTPV1, 2123/udp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_GTPV1, 2152/udp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_HTTP, 1080/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_HTTP, 3128/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_HTTP, 631/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_HTTP, 80/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_HTTP, 8000/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_HTTP, 8080/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_HTTP, 81/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_HTTP, 8888/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_IRC, 6666/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_IRC, 6667/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_IRC, 6668/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_IRC, 6669/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_MODBUS, 502/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_RADIUS, 1812/udp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SMTP, 25/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SMTP, 587/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SNMP, 161/udp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SNMP, 162/udp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SOCKS, 1080/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSH, 22/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 443/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 5223/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 563/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 585/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 614/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 636/tcp)) -0.000000 MetaHookPre 
CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 989/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 990/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 992/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 993/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SSL, 995/tcp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_SYSLOG, 514/udp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, (Analyzer::ANALYZER_TEREDO, 3544/udp)) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_AYIYA, {5072/udp})) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_DHCP, {67/udp,68/udp})) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_DNP3, {20000/tcp})) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_DNS, {5355/udp,53/tcp,5353/udp,137/udp,53/udp})) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_FTP, {2811/tcp,21/tcp})) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_GTPV1, {2152/udp,2123/udp})) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_HTTP, {631/tcp,8888/tcp,3128/tcp,80/tcp,1080/tcp,8000/tcp,81/tcp,8080/tcp})) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_IRC, {6669/tcp,6666/tcp,6668/tcp,6667/tcp})) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_MODBUS, {502/tcp})) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_RADIUS, {1812/udp})) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_SMTP, {25/tcp,587/tcp})) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_SNMP, {162/udp,161/udp})) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_SOCKS, {1080/tcp})) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_SSH, {22/tcp})) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_SSL, {5223/tcp,585/tcp,614/tcp,993/tcp,636/tcp,989/tcp,995/tcp,443/tcp,563/tcp,990/tcp,992/tcp})) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_SYSLOG, {514/udp})) -0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, (Analyzer::ANALYZER_TEREDO, {3544/udp})) -0.000000 MetaHookPre CallFunction(Cluster::is_enabled, ()) -0.000000 MetaHookPre CallFunction(Cluster::is_enabled, ()) -0.000000 MetaHookPre CallFunction(Cluster::is_enabled, ()) -0.000000 MetaHookPre CallFunction(Cluster::is_enabled, ()) -0.000000 MetaHookPre CallFunction(Cluster::is_enabled, ()) -0.000000 MetaHookPre CallFunction(Cluster::is_enabled, ()) -0.000000 MetaHookPre CallFunction(Cluster::is_enabled, ()) -0.000000 MetaHookPre CallFunction(Files::register_analyzer_add_callback, (Files::ANALYZER_EXTRACT, FileExtract::on_add{ if (!FileExtract::args?$extract_filename) FileExtract::args$extract_filename = cat(extract-, FileExtract::f$source, -, FileExtract::f$id)FileExtract::f$info$extracted = FileExtract::args$extract_filenameFileExtract::args$extract_filename = build_path_compressed(FileExtract::prefix, 
FileExtract::args$extract_filename)mkdir(FileExtract::prefix)})) -0.000000 MetaHookPre CallFunction(Files::register_protocol, (Analyzer::ANALYZER_FTP_DATA, [get_file_handle=FTP::get_file_handle{ if (!FTP::c$id$resp_h, FTP::c$id$resp_p in FTP::ftp_data_expected) return ()return (cat(Analyzer::ANALYZER_FTP_DATA, FTP::c$start_time, FTP::c$id, FTP::is_orig))}, describe=FTP::describe_file{ FTP::cid{ if (FTP::f$source != FTP) return ()for ([FTP::cid] in FTP::f$conns) { if (FTP::f$conns[FTP::cid]?$ftp) return (FTP::describe(FTP::f$conns[FTP::cid]$ftp))}return ()}}])) -0.000000 MetaHookPre CallFunction(Files::register_protocol, (Analyzer::ANALYZER_HTTP, [get_file_handle=HTTP::get_file_handle{ if (!HTTP::c?$http) return ()if (HTTP::c$http$range_request && !HTTP::is_orig) { return (cat(Analyzer::ANALYZER_HTTP, HTTP::is_orig, HTTP::c$id$orig_h, HTTP::build_url(HTTP::c$http)))}else{ HTTP::mime_depth = HTTP::is_orig ? HTTP::c$http$orig_mime_depth : HTTP::c$http$resp_mime_depthreturn (cat(Analyzer::ANALYZER_HTTP, HTTP::c$start_time, HTTP::is_orig, HTTP::c$http$trans_depth, HTTP::mime_depth, id_string(HTTP::c$id)))}}, describe=HTTP::describe_file{ HTTP::cid{ if (HTTP::f$source != HTTP) return ()for ([HTTP::cid] in HTTP::f$conns) { if (HTTP::f$conns[HTTP::cid]?$http) return (HTTP::build_url_http(HTTP::f$conns[HTTP::cid]$http))}return ()}}])) -0.000000 MetaHookPre CallFunction(Files::register_protocol, (Analyzer::ANALYZER_IRC_DATA, [get_file_handle=IRC::get_file_handle{ return (cat(Analyzer::ANALYZER_IRC_DATA, IRC::c$start_time, IRC::c$id, IRC::is_orig))}, describe=anonymous-function{ return ()}])) -0.000000 MetaHookPre CallFunction(Files::register_protocol, (Analyzer::ANALYZER_SMTP, [get_file_handle=SMTP::get_file_handle{ return (cat(Analyzer::ANALYZER_SMTP, SMTP::c$start_time, SMTP::c$smtp$trans_depth, SMTP::c$smtp_state$mime_depth))}, describe=SMTP::describe_file{ SMTP::cid{ if (SMTP::f$source != SMTP) return ()for ([SMTP::cid] in SMTP::f$conns) { SMTP::c = SMTP::f$conns[SMTP::cid]return (SMTP::describe(SMTP::c$smtp))}return ()}}])) -0.000000 MetaHookPre CallFunction(Files::register_protocol, (Analyzer::ANALYZER_SSL, [get_file_handle=SSL::get_file_handle{ return ()}, describe=SSL::describe_file{ SSL::cid{ if (SSL::f$source != SSL || !SSL::f?$info || !SSL::f$info?$x509 || !SSL::f$info$x509?$certificate) return ()for ([SSL::cid] in SSL::f$conns) { if (SSL::f$conns[SSL::cid]?$ssl) { SSL::c = SSL::f$conns[SSL::cid]return (cat(SSL::c$id$resp_h, :, SSL::c$id$resp_p))}}return (cat(Serial: , SSL::f$info$x509$certificate$serial, Subject: , SSL::f$info$x509$certificate$subject, Issuer: , SSL::f$info$x509$certificate$issuer))}}])) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (Cluster::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (Communication::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (Conn::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (DHCP::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (DNP3::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (DNS::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (DPD::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (FTP::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (Files::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (HTTP::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (IRC::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (Intel::LOG)) -0.000000 
MetaHookPre CallFunction(Log::add_default_filter, (Modbus::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (Notice::ALARM_LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (Notice::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (PacketFilter::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (RADIUS::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (Reporter::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (SMTP::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (SNMP::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (SOCKS::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (SSH::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (SSL::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (Signatures::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (Software::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (Syslog::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (Tunnel::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (Unified2::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (Weird::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, (X509::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_filter, (Cluster::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (Communication::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (Conn::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (DHCP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (DNP3::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (DNS::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (DPD::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (FTP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (Files::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (HTTP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, 
postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (IRC::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (Intel::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (Modbus::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (Notice::ALARM_LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (Notice::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (PacketFilter::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (RADIUS::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (Reporter::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (SMTP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (SNMP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (SOCKS::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (SSH::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (SSL::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (Signatures::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (Software::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (Syslog::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, 
log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (Tunnel::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (Unified2::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (Weird::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, (X509::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (Cluster::LOG, [columns=, ev=])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (Communication::LOG, [columns=, ev=])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (Conn::LOG, [columns=, ev=Conn::log_conn])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (DHCP::LOG, [columns=, ev=DHCP::log_dhcp])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (DNP3::LOG, [columns=, ev=DNP3::log_dnp3])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (DNS::LOG, [columns=, ev=DNS::log_dns])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (DPD::LOG, [columns=, ev=])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (FTP::LOG, [columns=, ev=FTP::log_ftp])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (Files::LOG, [columns=, ev=Files::log_files])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (HTTP::LOG, [columns=, ev=HTTP::log_http])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (IRC::LOG, [columns=, ev=IRC::irc_log])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (Intel::LOG, [columns=, ev=Intel::log_intel])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (Modbus::LOG, [columns=, ev=Modbus::log_modbus])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (Notice::ALARM_LOG, [columns=, ev=])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (Notice::LOG, [columns=, ev=Notice::log_notice])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (PacketFilter::LOG, [columns=, ev=])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (RADIUS::LOG, [columns=, ev=RADIUS::log_radius])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (Reporter::LOG, [columns=, ev=])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (SMTP::LOG, [columns=, ev=SMTP::log_smtp])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (SNMP::LOG, [columns=, ev=SNMP::log_snmp])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (SOCKS::LOG, [columns=, ev=SOCKS::log_socks])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (SSH::LOG, [columns=, ev=SSH::log_ssh])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (SSL::LOG, [columns=, ev=SSL::log_ssl])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (Signatures::LOG, [columns=, ev=Signatures::log_signature])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (Software::LOG, [columns=, ev=Software::log_software])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (Syslog::LOG, [columns=, ev=])) -0.000000 MetaHookPre 
CallFunction(Log::create_stream, (Tunnel::LOG, [columns=, ev=])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (Unified2::LOG, [columns=, ev=Unified2::log_unified2])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (Weird::LOG, [columns=, ev=Weird::log_weird])) -0.000000 MetaHookPre CallFunction(Log::create_stream, (X509::LOG, [columns=, ev=X509::log_x509])) -0.000000 MetaHookPre CallFunction(Log::default_path_func, (PacketFilter::LOG, , [ts=1402676765.077455, node=bro, filter=ip or not ip, init=T, success=T])) -0.000000 MetaHookPre CallFunction(Log::write, (PacketFilter::LOG, [ts=1402676765.077455, node=bro, filter=ip or not ip, init=T, success=T])) -0.000000 MetaHookPre CallFunction(Notice::want_pp, ()) -0.000000 MetaHookPre CallFunction(PacketFilter::build, ()) -0.000000 MetaHookPre CallFunction(PacketFilter::combine_filters, (ip or not ip, and, )) -0.000000 MetaHookPre CallFunction(PacketFilter::install, ()) -0.000000 MetaHookPre CallFunction(SumStats::add_observe_plugin_dependency, (SumStats::STD_DEV, SumStats::VARIANCE)) -0.000000 MetaHookPre CallFunction(SumStats::add_observe_plugin_dependency, (SumStats::VARIANCE, SumStats::AVERAGE)) -0.000000 MetaHookPre CallFunction(SumStats::register_observe_plugin, (SumStats::AVERAGE, anonymous-function{ if (!SumStats::rv?$average) SumStats::rv$average = SumStats::valelseSumStats::rv$average += (SumStats::val - SumStats::rv$average) / (coerce SumStats::rv$num to double)})) -0.000000 MetaHookPre CallFunction(SumStats::register_observe_plugin, (SumStats::HLL_UNIQUE, anonymous-function{ if (!SumStats::rv?$card) { SumStats::rv$card = hll_cardinality_init(SumStats::r$hll_error_margin, SumStats::r$hll_confidence)SumStats::rv$hll_error_margin = SumStats::r$hll_error_marginSumStats::rv$hll_confidence = SumStats::r$hll_confidence}hll_cardinality_add(SumStats::rv$card, SumStats::obs)SumStats::rv$hll_unique = double_to_count(hll_cardinality_estimate(SumStats::rv$card))})) -0.000000 MetaHookPre CallFunction(SumStats::register_observe_plugin, (SumStats::LAST, anonymous-function{ if (0 < SumStats::r$num_last_elements) { if (!SumStats::rv?$last_elements) SumStats::rv$last_elements = Queue::init((coerce [$max_len=SumStats::r$num_last_elements] to Queue::Settings))Queue::put(SumStats::rv$last_elements, SumStats::obs)}})) -0.000000 MetaHookPre CallFunction(SumStats::register_observe_plugin, (SumStats::MAX, anonymous-function{ if (!SumStats::rv?$max) SumStats::rv$max = SumStats::valelseif (SumStats::rv$max < SumStats::val) SumStats::rv$max = SumStats::val})) -0.000000 MetaHookPre CallFunction(SumStats::register_observe_plugin, (SumStats::MIN, anonymous-function{ if (!SumStats::rv?$min) SumStats::rv$min = SumStats::valelseif (SumStats::val < SumStats::rv$min) SumStats::rv$min = SumStats::val})) -0.000000 MetaHookPre CallFunction(SumStats::register_observe_plugin, (SumStats::SAMPLE, anonymous-function{ SumStats::sample_add_sample(SumStats::obs, SumStats::rv)})) -0.000000 MetaHookPre CallFunction(SumStats::register_observe_plugin, (SumStats::STD_DEV, anonymous-function{ SumStats::calc_std_dev(SumStats::rv)})) -0.000000 MetaHookPre CallFunction(SumStats::register_observe_plugin, (SumStats::SUM, anonymous-function{ SumStats::rv$sum += SumStats::val})) -0.000000 MetaHookPre CallFunction(SumStats::register_observe_plugin, (SumStats::TOPK, anonymous-function{ topk_add(SumStats::rv$topk, SumStats::obs)})) -0.000000 MetaHookPre CallFunction(SumStats::register_observe_plugin, (SumStats::UNIQUE, anonymous-function{ if (!SumStats::rv?$unique_vals) 
SumStats::rv$unique_vals = (coerce set() to set[SumStats::Observation])if (SumStats::r?$unique_max) SumStats::rv$unique_max = SumStats::r$unique_maxif (!SumStats::r?$unique_max || flattenSumStats::rv$unique_vals <= SumStats::r$unique_max) add SumStats::rv$unique_vals[SumStats::obs]SumStats::rv$unique = flattenSumStats::rv$unique_vals})) -0.000000 MetaHookPre CallFunction(SumStats::register_observe_plugin, (SumStats::VARIANCE, anonymous-function{ if (1 < SumStats::rv$num) SumStats::rv$var_s += ((SumStats::val - SumStats::rv$prev_avg) * (SumStats::val - SumStats::rv$average))SumStats::calc_variance(SumStats::rv)SumStats::rv$prev_avg = SumStats::rv$average})) -0.000000 MetaHookPre CallFunction(SumStats::register_observe_plugins, ()) -0.000000 MetaHookPre CallFunction(bro_init, ()) -0.000000 MetaHookPre CallFunction(filter_change_tracking, ()) -0.000000 MetaHookPre CallFunction(set_to_regex, ({}, (^\.?|\.)(~~)$)) -0.000000 MetaHookPre CallFunction(set_to_regex, ({}, (^\.?|\.)(~~)$)) -0.000000 MetaHookPre DrainEvents() -0.000000 MetaHookPre DrainEvents() -0.000000 MetaHookPre LoadFile(../main) -0.000000 MetaHookPre LoadFile(../main) -0.000000 MetaHookPre LoadFile(../main) -0.000000 MetaHookPre LoadFile(../main) -0.000000 MetaHookPre LoadFile(../main) -0.000000 MetaHookPre LoadFile(../main) -0.000000 MetaHookPre LoadFile(../main) -0.000000 MetaHookPre LoadFile(../main) -0.000000 MetaHookPre LoadFile(../main) -0.000000 MetaHookPre LoadFile(../main) -0.000000 MetaHookPre LoadFile(../main) -0.000000 MetaHookPre LoadFile(../main) -0.000000 MetaHookPre LoadFile(../main) -0.000000 MetaHookPre LoadFile(./Bro_ARP.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_AYIYA.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_BackDoor.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_BitTorrent.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_ConnSize.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_DCE_RPC.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_DHCP.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_DNP3.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_DNS.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_FTP.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_FTP.functions.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_File.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_FileExtract.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_FileExtract.functions.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_FileHash.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_Finger.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_GTPv1.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_Gnutella.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_HTTP.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_HTTP.functions.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_ICMP.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_IRC.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_Ident.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_InterConn.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_Login.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_Login.functions.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_MIME.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_Modbus.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_NCP.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_NTP.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_NetBIOS.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_NetBIOS.functions.bif.bro) -0.000000 MetaHookPre 
LoadFile(./Bro_NetFlow.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_PIA.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_POP3.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_RADIUS.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_RPC.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_SMB.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_SMTP.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_SMTP.functions.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_SNMP.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_SNMP.types.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_SOCKS.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_SSH.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_SSL.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_SteppingStone.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_Syslog.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_TCP.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_TCP.functions.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_Teredo.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_UDP.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_Unified2.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_Unified2.types.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_X509.events.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_X509.functions.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_X509.types.bif.bro) -0.000000 MetaHookPre LoadFile(./Bro_ZIP.events.bif.bro) -0.000000 MetaHookPre LoadFile(./actions/add-geodata) -0.000000 MetaHookPre LoadFile(./actions/drop) -0.000000 MetaHookPre LoadFile(./actions/email_admin) -0.000000 MetaHookPre LoadFile(./actions/page) -0.000000 MetaHookPre LoadFile(./actions/pp-alarms) -0.000000 MetaHookPre LoadFile(./addrs) -0.000000 MetaHookPre LoadFile(./analyzer.bif.bro) -0.000000 MetaHookPre LoadFile(./average) -0.000000 MetaHookPre LoadFile(./average) -0.000000 MetaHookPre LoadFile(./bloom-filter.bif.bro) -0.000000 MetaHookPre LoadFile(./bro.bif.bro) -0.000000 MetaHookPre LoadFile(./broxygen.bif.bro) -0.000000 MetaHookPre LoadFile(./cardinality-counter.bif.bro) -0.000000 MetaHookPre LoadFile(./const.bif.bro) -0.000000 MetaHookPre LoadFile(./consts) -0.000000 MetaHookPre LoadFile(./consts) -0.000000 MetaHookPre LoadFile(./consts) -0.000000 MetaHookPre LoadFile(./consts) -0.000000 MetaHookPre LoadFile(./consts) -0.000000 MetaHookPre LoadFile(./consts) -0.000000 MetaHookPre LoadFile(./consts) -0.000000 MetaHookPre LoadFile(./consts) -0.000000 MetaHookPre LoadFile(./consts) -0.000000 MetaHookPre LoadFile(./consts) -0.000000 MetaHookPre LoadFile(./consts) -0.000000 MetaHookPre LoadFile(./consts) -0.000000 MetaHookPre LoadFile(./consts.bif.bro) -0.000000 MetaHookPre LoadFile(./consts.bro) -0.000000 MetaHookPre LoadFile(./contents) -0.000000 MetaHookPre LoadFile(./dcc-send) -0.000000 MetaHookPre LoadFile(./dcc-send) -0.000000 MetaHookPre LoadFile(./entities) -0.000000 MetaHookPre LoadFile(./entities) -0.000000 MetaHookPre LoadFile(./entities) -0.000000 MetaHookPre LoadFile(./entities) -0.000000 MetaHookPre LoadFile(./event.bif.bro) -0.000000 MetaHookPre LoadFile(./events.bif.bro) -0.000000 MetaHookPre LoadFile(./exec) -0.000000 MetaHookPre LoadFile(./extend-email/hostnames) -0.000000 MetaHookPre LoadFile(./file_analysis.bif.bro) -0.000000 MetaHookPre LoadFile(./files) -0.000000 MetaHookPre LoadFile(./files) -0.000000 MetaHookPre LoadFile(./files) -0.000000 MetaHookPre LoadFile(./files) -0.000000 MetaHookPre LoadFile(./files) -0.000000 MetaHookPre LoadFile(./functions.bif.bro) -0.000000 MetaHookPre 
LoadFile(./gridftp) -0.000000 MetaHookPre LoadFile(./hll_unique) -0.000000 MetaHookPre LoadFile(./inactivity) -0.000000 MetaHookPre LoadFile(./info) -0.000000 MetaHookPre LoadFile(./info) -0.000000 MetaHookPre LoadFile(./info) -0.000000 MetaHookPre LoadFile(./info) -0.000000 MetaHookPre LoadFile(./info) -0.000000 MetaHookPre LoadFile(./input) -0.000000 MetaHookPre LoadFile(./input.bif.bro) -0.000000 MetaHookPre LoadFile(./last) -0.000000 MetaHookPre LoadFile(./logging.bif.bro) -0.000000 MetaHookPre LoadFile(./magic) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main) -0.000000 MetaHookPre LoadFile(./main.bro) -0.000000 MetaHookPre LoadFile(./max) -0.000000 MetaHookPre LoadFile(./min) -0.000000 MetaHookPre LoadFile(./mozilla-ca-list) -0.000000 MetaHookPre LoadFile(./netstats) -0.000000 MetaHookPre LoadFile(./non-cluster) -0.000000 MetaHookPre LoadFile(./non-cluster) -0.000000 MetaHookPre LoadFile(./patterns) -0.000000 MetaHookPre LoadFile(./plugins) -0.000000 MetaHookPre LoadFile(./polling) -0.000000 MetaHookPre LoadFile(./postprocessors) -0.000000 MetaHookPre LoadFile(./readers/ascii) -0.000000 MetaHookPre LoadFile(./readers/benchmark) -0.000000 MetaHookPre LoadFile(./readers/binary) -0.000000 MetaHookPre LoadFile(./readers/raw) -0.000000 MetaHookPre LoadFile(./readers/sqlite) -0.000000 MetaHookPre LoadFile(./reporter.bif.bro) -0.000000 MetaHookPre LoadFile(./sample) -0.000000 MetaHookPre LoadFile(./scp) -0.000000 MetaHookPre LoadFile(./sftp) -0.000000 MetaHookPre LoadFile(./site) -0.000000 MetaHookPre LoadFile(./std-dev) -0.000000 MetaHookPre LoadFile(./strings.bif.bro) -0.000000 MetaHookPre LoadFile(./sum) -0.000000 MetaHookPre LoadFile(./top-k.bif.bro) -0.000000 MetaHookPre LoadFile(./topk) -0.000000 MetaHookPre LoadFile(./types.bif.bro) 
-0.000000 MetaHookPre LoadFile(./unique) -0.000000 MetaHookPre LoadFile(./utils) -0.000000 MetaHookPre LoadFile(./utils) -0.000000 MetaHookPre LoadFile(./utils) -0.000000 MetaHookPre LoadFile(./utils) -0.000000 MetaHookPre LoadFile(./utils) -0.000000 MetaHookPre LoadFile(./utils) -0.000000 MetaHookPre LoadFile(./utils) -0.000000 MetaHookPre LoadFile(./utils-commands) -0.000000 MetaHookPre LoadFile(./utils-commands) -0.000000 MetaHookPre LoadFile(./utils-commands) -0.000000 MetaHookPre LoadFile(./utils.bro) -0.000000 MetaHookPre LoadFile(./variance) -0.000000 MetaHookPre LoadFile(./variance) -0.000000 MetaHookPre LoadFile(./weird) -0.000000 MetaHookPre LoadFile(./writers/ascii) -0.000000 MetaHookPre LoadFile(./writers/dataseries) -0.000000 MetaHookPre LoadFile(./writers/elasticsearch) -0.000000 MetaHookPre LoadFile(./writers/none) -0.000000 MetaHookPre LoadFile(./writers/sqlite) -0.000000 MetaHookPre LoadFile(/Users/robin/bro/dynamic-plugins-2.3/testing/btest/.tmp/core.plugins.hooks/hooks.bro) -0.000000 MetaHookPre LoadFile(/Users/robin/bro/dynamic-plugins-2.3/testing/btest/.tmp/core.plugins.hooks/lib/bif/__load__.bro) -0.000000 MetaHookPre LoadFile(/Users/robin/bro/dynamic-plugins-2.3/testing/btest/.tmp/core.plugins.hooks/scripts/__load__.bro) -0.000000 MetaHookPre LoadFile(base/bif) -0.000000 MetaHookPre LoadFile(base/bif/analyzer.bif) -0.000000 MetaHookPre LoadFile(base/bif/bro.bif) -0.000000 MetaHookPre LoadFile(base/bif/const.bif.bro) -0.000000 MetaHookPre LoadFile(base/bif/event.bif) -0.000000 MetaHookPre LoadFile(base/bif/file_analysis.bif) -0.000000 MetaHookPre LoadFile(base/bif/input.bif) -0.000000 MetaHookPre LoadFile(base/bif/logging.bif) -0.000000 MetaHookPre LoadFile(base/bif/plugins) -0.000000 MetaHookPre LoadFile(base/bif/plugins/Bro_SNMP.types.bif) -0.000000 MetaHookPre LoadFile(base/bif/reporter.bif) -0.000000 MetaHookPre LoadFile(base/bif/strings.bif) -0.000000 MetaHookPre LoadFile(base/bif/types.bif) -0.000000 MetaHookPre LoadFile(base/files/extract) -0.000000 MetaHookPre LoadFile(base/files/hash) -0.000000 MetaHookPre LoadFile(base/files/hash) -0.000000 MetaHookPre LoadFile(base/files/unified2) -0.000000 MetaHookPre LoadFile(base/files/x509) -0.000000 MetaHookPre LoadFile(base/files/x509) -0.000000 MetaHookPre LoadFile(base/frameworks/analyzer) -0.000000 MetaHookPre LoadFile(base/frameworks/analyzer) -0.000000 MetaHookPre LoadFile(base/frameworks/analyzer) -0.000000 MetaHookPre LoadFile(base/frameworks/analyzer) -0.000000 MetaHookPre LoadFile(base/frameworks/cluster) -0.000000 MetaHookPre LoadFile(base/frameworks/cluster) -0.000000 MetaHookPre LoadFile(base/frameworks/cluster) -0.000000 MetaHookPre LoadFile(base/frameworks/cluster) -0.000000 MetaHookPre LoadFile(base/frameworks/cluster) -0.000000 MetaHookPre LoadFile(base/frameworks/cluster) -0.000000 MetaHookPre LoadFile(base/frameworks/communication) -0.000000 MetaHookPre LoadFile(base/frameworks/control) -0.000000 MetaHookPre LoadFile(base/frameworks/control) -0.000000 MetaHookPre LoadFile(base/frameworks/dpd) -0.000000 MetaHookPre LoadFile(base/frameworks/files) -0.000000 MetaHookPre LoadFile(base/frameworks/files) -0.000000 MetaHookPre LoadFile(base/frameworks/files) -0.000000 MetaHookPre LoadFile(base/frameworks/files) -0.000000 MetaHookPre LoadFile(base/frameworks/files) -0.000000 MetaHookPre LoadFile(base/frameworks/files) -0.000000 MetaHookPre LoadFile(base/frameworks/files) -0.000000 MetaHookPre LoadFile(base/frameworks/files) -0.000000 MetaHookPre LoadFile(base/frameworks/files) -0.000000 MetaHookPre 
LoadFile(base/frameworks/files) -0.000000 MetaHookPre LoadFile(base/frameworks/files) -0.000000 MetaHookPre LoadFile(base/frameworks/input) -0.000000 MetaHookPre LoadFile(base/frameworks/input) -0.000000 MetaHookPre LoadFile(base/frameworks/intel) -0.000000 MetaHookPre LoadFile(base/frameworks/logging) -0.000000 MetaHookPre LoadFile(base/frameworks/logging) -0.000000 MetaHookPre LoadFile(base/frameworks/notice) -0.000000 MetaHookPre LoadFile(base/frameworks/notice) -0.000000 MetaHookPre LoadFile(base/frameworks/notice) -0.000000 MetaHookPre LoadFile(base/frameworks/notice) -0.000000 MetaHookPre LoadFile(base/frameworks/notice) -0.000000 MetaHookPre LoadFile(base/frameworks/notice) -0.000000 MetaHookPre LoadFile(base/frameworks/notice) -0.000000 MetaHookPre LoadFile(base/frameworks/notice) -0.000000 MetaHookPre LoadFile(base/frameworks/notice) -0.000000 MetaHookPre LoadFile(base/frameworks/notice) -0.000000 MetaHookPre LoadFile(base/frameworks/packet-filter) -0.000000 MetaHookPre LoadFile(base/frameworks/packet-filter) -0.000000 MetaHookPre LoadFile(base/frameworks/packet-filter/utils) -0.000000 MetaHookPre LoadFile(base/frameworks/reporter) -0.000000 MetaHookPre LoadFile(base/frameworks/reporter) -0.000000 MetaHookPre LoadFile(base/frameworks/signatures) -0.000000 MetaHookPre LoadFile(base/frameworks/software) -0.000000 MetaHookPre LoadFile(base/frameworks/sumstats) -0.000000 MetaHookPre LoadFile(base/frameworks/sumstats) -0.000000 MetaHookPre LoadFile(base/frameworks/sumstats) -0.000000 MetaHookPre LoadFile(base/frameworks/sumstats) -0.000000 MetaHookPre LoadFile(base/frameworks/sumstats/main) -0.000000 MetaHookPre LoadFile(base/frameworks/tunnels) -0.000000 MetaHookPre LoadFile(base/frameworks/tunnels) -0.000000 MetaHookPre LoadFile(base/frameworks/tunnels) -0.000000 MetaHookPre LoadFile(base/init-default.bro) -0.000000 MetaHookPre LoadFile(base/misc/find-checksum-offloading) -0.000000 MetaHookPre LoadFile(base/misc/find-filtered-trace) -0.000000 MetaHookPre LoadFile(base/protocols/conn) -0.000000 MetaHookPre LoadFile(base/protocols/conn) -0.000000 MetaHookPre LoadFile(base/protocols/conn) -0.000000 MetaHookPre LoadFile(base/protocols/dhcp) -0.000000 MetaHookPre LoadFile(base/protocols/dnp3) -0.000000 MetaHookPre LoadFile(base/protocols/dns) -0.000000 MetaHookPre LoadFile(base/protocols/ftp) -0.000000 MetaHookPre LoadFile(base/protocols/http) -0.000000 MetaHookPre LoadFile(base/protocols/irc) -0.000000 MetaHookPre LoadFile(base/protocols/modbus) -0.000000 MetaHookPre LoadFile(base/protocols/pop3) -0.000000 MetaHookPre LoadFile(base/protocols/radius) -0.000000 MetaHookPre LoadFile(base/protocols/smtp) -0.000000 MetaHookPre LoadFile(base/protocols/snmp) -0.000000 MetaHookPre LoadFile(base/protocols/socks) -0.000000 MetaHookPre LoadFile(base/protocols/ssh) -0.000000 MetaHookPre LoadFile(base/protocols/ssl) -0.000000 MetaHookPre LoadFile(base/protocols/ssl) -0.000000 MetaHookPre LoadFile(base/protocols/ssl) -0.000000 MetaHookPre LoadFile(base/protocols/syslog) -0.000000 MetaHookPre LoadFile(base/protocols/tunnels) -0.000000 MetaHookPre LoadFile(base/utils/active-http) -0.000000 MetaHookPre LoadFile(base/utils/addrs) -0.000000 MetaHookPre LoadFile(base/utils/addrs) -0.000000 MetaHookPre LoadFile(base/utils/addrs) -0.000000 MetaHookPre LoadFile(base/utils/addrs) -0.000000 MetaHookPre LoadFile(base/utils/addrs) -0.000000 MetaHookPre LoadFile(base/utils/addrs) -0.000000 MetaHookPre LoadFile(base/utils/addrs) -0.000000 MetaHookPre LoadFile(base/utils/conn-ids) -0.000000 MetaHookPre 
LoadFile(base/utils/conn-ids) -0.000000 MetaHookPre LoadFile(base/utils/conn-ids) -0.000000 MetaHookPre LoadFile(base/utils/conn-ids) -0.000000 MetaHookPre LoadFile(base/utils/conn-ids) -0.000000 MetaHookPre LoadFile(base/utils/conn-ids) -0.000000 MetaHookPre LoadFile(base/utils/conn-ids) -0.000000 MetaHookPre LoadFile(base/utils/conn-ids) -0.000000 MetaHookPre LoadFile(base/utils/dir) -0.000000 MetaHookPre LoadFile(base/utils/dir) -0.000000 MetaHookPre LoadFile(base/utils/directions-and-hosts) -0.000000 MetaHookPre LoadFile(base/utils/directions-and-hosts) -0.000000 MetaHookPre LoadFile(base/utils/directions-and-hosts) -0.000000 MetaHookPre LoadFile(base/utils/directions-and-hosts) -0.000000 MetaHookPre LoadFile(base/utils/exec) -0.000000 MetaHookPre LoadFile(base/utils/exec) -0.000000 MetaHookPre LoadFile(base/utils/files) -0.000000 MetaHookPre LoadFile(base/utils/files) -0.000000 MetaHookPre LoadFile(base/utils/files) -0.000000 MetaHookPre LoadFile(base/utils/files) -0.000000 MetaHookPre LoadFile(base/utils/files) -0.000000 MetaHookPre LoadFile(base/utils/files) -0.000000 MetaHookPre LoadFile(base/utils/numbers) -0.000000 MetaHookPre LoadFile(base/utils/numbers) -0.000000 MetaHookPre LoadFile(base/utils/numbers) -0.000000 MetaHookPre LoadFile(base/utils/numbers) -0.000000 MetaHookPre LoadFile(base/utils/paths) -0.000000 MetaHookPre LoadFile(base/utils/paths) -0.000000 MetaHookPre LoadFile(base/utils/paths) -0.000000 MetaHookPre LoadFile(base/utils/paths) -0.000000 MetaHookPre LoadFile(base/utils/paths) -0.000000 MetaHookPre LoadFile(base/utils/paths) -0.000000 MetaHookPre LoadFile(base/utils/patterns) -0.000000 MetaHookPre LoadFile(base/utils/queue) -0.000000 MetaHookPre LoadFile(base/utils/queue) -0.000000 MetaHookPre LoadFile(base/utils/queue) -0.000000 MetaHookPre LoadFile(base/utils/site) -0.000000 MetaHookPre LoadFile(base/utils/site) -0.000000 MetaHookPre LoadFile(base/utils/site) -0.000000 MetaHookPre LoadFile(base/utils/site) -0.000000 MetaHookPre LoadFile(base/utils/site) -0.000000 MetaHookPre LoadFile(base/utils/site) -0.000000 MetaHookPre LoadFile(base/utils/site) -0.000000 MetaHookPre LoadFile(base/utils/strings) -0.000000 MetaHookPre LoadFile(base/utils/strings) -0.000000 MetaHookPre LoadFile(base/utils/strings) -0.000000 MetaHookPre LoadFile(base/utils/thresholds) -0.000000 MetaHookPre LoadFile(base/utils/thresholds) -0.000000 MetaHookPre LoadFile(base/utils/time) -0.000000 MetaHookPre LoadFile(base/utils/urls) -0.000000 MetaHookPre QueueEvent(bro_init()) -0.000000 MetaHookPre QueueEvent(filter_change_tracking()) -0.000000 | HookCallFunction Analyzer::disable_analyzer(Analyzer::ANALYZER_BACKDOOR) -0.000000 | HookCallFunction Analyzer::disable_analyzer(Analyzer::ANALYZER_INTERCONN) -0.000000 | HookCallFunction Analyzer::disable_analyzer(Analyzer::ANALYZER_STEPPINGSTONE) -0.000000 | HookCallFunction Analyzer::disable_analyzer(Analyzer::ANALYZER_TCPSTATS) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_AYIYA, 5072/udp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_DHCP, 67/udp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_DHCP, 68/udp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_DNP3, 20000/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_DNS, 137/udp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_DNS, 53/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_DNS, 
53/udp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_DNS, 5353/udp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_DNS, 5355/udp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_FTP, 21/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_FTP, 2811/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_GTPV1, 2123/udp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_GTPV1, 2152/udp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_HTTP, 1080/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_HTTP, 3128/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_HTTP, 631/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_HTTP, 80/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_HTTP, 8000/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_HTTP, 8080/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_HTTP, 81/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_HTTP, 8888/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_IRC, 6666/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_IRC, 6667/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_IRC, 6668/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_IRC, 6669/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_MODBUS, 502/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_RADIUS, 1812/udp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_SMTP, 25/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_SMTP, 587/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_SNMP, 161/udp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_SNMP, 162/udp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_SOCKS, 1080/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_SSH, 22/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_SSL, 443/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_SSL, 5223/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_SSL, 563/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_SSL, 585/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_SSL, 614/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_SSL, 636/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_SSL, 989/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_SSL, 990/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_SSL, 992/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_SSL, 993/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_SSL, 995/tcp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_SYSLOG, 514/udp) -0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_TEREDO, 
3544/udp) -0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_AYIYA, {5072/udp}) -0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_DHCP, {67/udp,68/udp}) -0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_DNP3, {20000/tcp}) -0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_DNS, {5355/udp,53/tcp,5353/udp,137/udp,53/udp}) -0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_FTP, {2811/tcp,21/tcp}) -0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_GTPV1, {2152/udp,2123/udp}) -0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_HTTP, {631/tcp,8888/tcp,3128/tcp,80/tcp,1080/tcp,8000/tcp,81/tcp,8080/tcp}) -0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_IRC, {6669/tcp,6666/tcp,6668/tcp,6667/tcp}) -0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_MODBUS, {502/tcp}) -0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_RADIUS, {1812/udp}) -0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_SMTP, {25/tcp,587/tcp}) -0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_SNMP, {162/udp,161/udp}) -0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_SOCKS, {1080/tcp}) -0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_SSH, {22/tcp}) -0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_SSL, {5223/tcp,585/tcp,614/tcp,993/tcp,636/tcp,989/tcp,995/tcp,443/tcp,563/tcp,990/tcp,992/tcp}) -0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_SYSLOG, {514/udp}) -0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_TEREDO, {3544/udp}) -0.000000 | HookCallFunction Cluster::is_enabled() -0.000000 | HookCallFunction Cluster::is_enabled() -0.000000 | HookCallFunction Cluster::is_enabled() -0.000000 | HookCallFunction Cluster::is_enabled() -0.000000 | HookCallFunction Cluster::is_enabled() -0.000000 | HookCallFunction Cluster::is_enabled() -0.000000 | HookCallFunction Cluster::is_enabled() -0.000000 | HookCallFunction Files::register_analyzer_add_callback(Files::ANALYZER_EXTRACT, FileExtract::on_add{ if (!FileExtract::args?$extract_filename) FileExtract::args$extract_filename = cat(extract-, FileExtract::f$source, -, FileExtract::f$id)FileExtract::f$info$extracted = FileExtract::args$extract_filenameFileExtract::args$extract_filename = build_path_compressed(FileExtract::prefix, FileExtract::args$extract_filename)mkdir(FileExtract::prefix)}) -0.000000 | HookCallFunction Files::register_protocol(Analyzer::ANALYZER_FTP_DATA, [get_file_handle=FTP::get_file_handle{ if (!FTP::c$id$resp_h, FTP::c$id$resp_p in FTP::ftp_data_expected) return ()return (cat(Analyzer::ANALYZER_FTP_DATA, FTP::c$start_time, FTP::c$id, FTP::is_orig))}, describe=FTP::describe_file{ FTP::cid{ if (FTP::f$source != FTP) return ()for ([FTP::cid] in FTP::f$conns) { if (FTP::f$conns[FTP::cid]?$ftp) return (FTP::describe(FTP::f$conns[FTP::cid]$ftp))}return ()}}]) -0.000000 | HookCallFunction Files::register_protocol(Analyzer::ANALYZER_HTTP, [get_file_handle=HTTP::get_file_handle{ if (!HTTP::c?$http) return ()if (HTTP::c$http$range_request && !HTTP::is_orig) { return (cat(Analyzer::ANALYZER_HTTP, HTTP::is_orig, HTTP::c$id$orig_h, HTTP::build_url(HTTP::c$http)))}else{ HTTP::mime_depth = HTTP::is_orig ? 
HTTP::c$http$orig_mime_depth : HTTP::c$http$resp_mime_depthreturn (cat(Analyzer::ANALYZER_HTTP, HTTP::c$start_time, HTTP::is_orig, HTTP::c$http$trans_depth, HTTP::mime_depth, id_string(HTTP::c$id)))}}, describe=HTTP::describe_file{ HTTP::cid{ if (HTTP::f$source != HTTP) return ()for ([HTTP::cid] in HTTP::f$conns) { if (HTTP::f$conns[HTTP::cid]?$http) return (HTTP::build_url_http(HTTP::f$conns[HTTP::cid]$http))}return ()}}]) -0.000000 | HookCallFunction Files::register_protocol(Analyzer::ANALYZER_IRC_DATA, [get_file_handle=IRC::get_file_handle{ return (cat(Analyzer::ANALYZER_IRC_DATA, IRC::c$start_time, IRC::c$id, IRC::is_orig))}, describe=anonymous-function{ return ()}]) -0.000000 | HookCallFunction Files::register_protocol(Analyzer::ANALYZER_SMTP, [get_file_handle=SMTP::get_file_handle{ return (cat(Analyzer::ANALYZER_SMTP, SMTP::c$start_time, SMTP::c$smtp$trans_depth, SMTP::c$smtp_state$mime_depth))}, describe=SMTP::describe_file{ SMTP::cid{ if (SMTP::f$source != SMTP) return ()for ([SMTP::cid] in SMTP::f$conns) { SMTP::c = SMTP::f$conns[SMTP::cid]return (SMTP::describe(SMTP::c$smtp))}return ()}}]) -0.000000 | HookCallFunction Files::register_protocol(Analyzer::ANALYZER_SSL, [get_file_handle=SSL::get_file_handle{ return ()}, describe=SSL::describe_file{ SSL::cid{ if (SSL::f$source != SSL || !SSL::f?$info || !SSL::f$info?$x509 || !SSL::f$info$x509?$certificate) return ()for ([SSL::cid] in SSL::f$conns) { if (SSL::f$conns[SSL::cid]?$ssl) { SSL::c = SSL::f$conns[SSL::cid]return (cat(SSL::c$id$resp_h, :, SSL::c$id$resp_p))}}return (cat(Serial: , SSL::f$info$x509$certificate$serial, Subject: , SSL::f$info$x509$certificate$subject, Issuer: , SSL::f$info$x509$certificate$issuer))}}]) -0.000000 | HookCallFunction Log::add_default_filter(Cluster::LOG) -0.000000 | HookCallFunction Log::add_default_filter(Communication::LOG) -0.000000 | HookCallFunction Log::add_default_filter(Conn::LOG) -0.000000 | HookCallFunction Log::add_default_filter(DHCP::LOG) -0.000000 | HookCallFunction Log::add_default_filter(DNP3::LOG) -0.000000 | HookCallFunction Log::add_default_filter(DNS::LOG) -0.000000 | HookCallFunction Log::add_default_filter(DPD::LOG) -0.000000 | HookCallFunction Log::add_default_filter(FTP::LOG) -0.000000 | HookCallFunction Log::add_default_filter(Files::LOG) -0.000000 | HookCallFunction Log::add_default_filter(HTTP::LOG) -0.000000 | HookCallFunction Log::add_default_filter(IRC::LOG) -0.000000 | HookCallFunction Log::add_default_filter(Intel::LOG) -0.000000 | HookCallFunction Log::add_default_filter(Modbus::LOG) -0.000000 | HookCallFunction Log::add_default_filter(Notice::ALARM_LOG) -0.000000 | HookCallFunction Log::add_default_filter(Notice::LOG) -0.000000 | HookCallFunction Log::add_default_filter(PacketFilter::LOG) -0.000000 | HookCallFunction Log::add_default_filter(RADIUS::LOG) -0.000000 | HookCallFunction Log::add_default_filter(Reporter::LOG) -0.000000 | HookCallFunction Log::add_default_filter(SMTP::LOG) -0.000000 | HookCallFunction Log::add_default_filter(SNMP::LOG) -0.000000 | HookCallFunction Log::add_default_filter(SOCKS::LOG) -0.000000 | HookCallFunction Log::add_default_filter(SSH::LOG) -0.000000 | HookCallFunction Log::add_default_filter(SSL::LOG) -0.000000 | HookCallFunction Log::add_default_filter(Signatures::LOG) -0.000000 | HookCallFunction Log::add_default_filter(Software::LOG) -0.000000 | HookCallFunction Log::add_default_filter(Syslog::LOG) -0.000000 | HookCallFunction Log::add_default_filter(Tunnel::LOG) -0.000000 | HookCallFunction Log::add_default_filter(Unified2::LOG) 
-0.000000 | HookCallFunction Log::add_default_filter(Weird::LOG) -0.000000 | HookCallFunction Log::add_default_filter(X509::LOG) -0.000000 | HookCallFunction Log::add_filter(Cluster::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(Communication::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(Conn::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(DHCP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(DNP3::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(DNS::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(DPD::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(FTP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(Files::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(HTTP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(IRC::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(Intel::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(Modbus::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(Notice::ALARM_LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(Notice::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(PacketFilter::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, 
config={}]) -0.000000 | HookCallFunction Log::add_filter(RADIUS::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(Reporter::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(SMTP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(SNMP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(SOCKS::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(SSH::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(SSL::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(Signatures::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(Software::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(Syslog::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(Tunnel::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(Unified2::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(Weird::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(X509::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::create_stream(Cluster::LOG, [columns=, ev=]) -0.000000 | HookCallFunction Log::create_stream(Communication::LOG, [columns=, ev=]) -0.000000 | HookCallFunction Log::create_stream(Conn::LOG, [columns=, ev=Conn::log_conn]) -0.000000 | HookCallFunction Log::create_stream(DHCP::LOG, [columns=, ev=DHCP::log_dhcp]) -0.000000 | HookCallFunction Log::create_stream(DNP3::LOG, [columns=, ev=DNP3::log_dnp3]) -0.000000 | HookCallFunction Log::create_stream(DNS::LOG, [columns=, ev=DNS::log_dns]) -0.000000 | 
HookCallFunction Log::create_stream(DPD::LOG, [columns=, ev=]) -0.000000 | HookCallFunction Log::create_stream(FTP::LOG, [columns=, ev=FTP::log_ftp]) -0.000000 | HookCallFunction Log::create_stream(Files::LOG, [columns=, ev=Files::log_files]) -0.000000 | HookCallFunction Log::create_stream(HTTP::LOG, [columns=, ev=HTTP::log_http]) -0.000000 | HookCallFunction Log::create_stream(IRC::LOG, [columns=, ev=IRC::irc_log]) -0.000000 | HookCallFunction Log::create_stream(Intel::LOG, [columns=, ev=Intel::log_intel]) -0.000000 | HookCallFunction Log::create_stream(Modbus::LOG, [columns=, ev=Modbus::log_modbus]) -0.000000 | HookCallFunction Log::create_stream(Notice::ALARM_LOG, [columns=, ev=]) -0.000000 | HookCallFunction Log::create_stream(Notice::LOG, [columns=, ev=Notice::log_notice]) -0.000000 | HookCallFunction Log::create_stream(PacketFilter::LOG, [columns=, ev=]) -0.000000 | HookCallFunction Log::create_stream(RADIUS::LOG, [columns=, ev=RADIUS::log_radius]) -0.000000 | HookCallFunction Log::create_stream(Reporter::LOG, [columns=, ev=]) -0.000000 | HookCallFunction Log::create_stream(SMTP::LOG, [columns=, ev=SMTP::log_smtp]) -0.000000 | HookCallFunction Log::create_stream(SNMP::LOG, [columns=, ev=SNMP::log_snmp]) -0.000000 | HookCallFunction Log::create_stream(SOCKS::LOG, [columns=, ev=SOCKS::log_socks]) -0.000000 | HookCallFunction Log::create_stream(SSH::LOG, [columns=, ev=SSH::log_ssh]) -0.000000 | HookCallFunction Log::create_stream(SSL::LOG, [columns=, ev=SSL::log_ssl]) -0.000000 | HookCallFunction Log::create_stream(Signatures::LOG, [columns=, ev=Signatures::log_signature]) -0.000000 | HookCallFunction Log::create_stream(Software::LOG, [columns=, ev=Software::log_software]) -0.000000 | HookCallFunction Log::create_stream(Syslog::LOG, [columns=, ev=]) -0.000000 | HookCallFunction Log::create_stream(Tunnel::LOG, [columns=, ev=]) -0.000000 | HookCallFunction Log::create_stream(Unified2::LOG, [columns=, ev=Unified2::log_unified2]) -0.000000 | HookCallFunction Log::create_stream(Weird::LOG, [columns=, ev=Weird::log_weird]) -0.000000 | HookCallFunction Log::create_stream(X509::LOG, [columns=, ev=X509::log_x509]) -0.000000 | HookCallFunction Log::default_path_func(PacketFilter::LOG, , [ts=1402676765.077455, node=bro, filter=ip or not ip, init=T, success=T]) -0.000000 | HookCallFunction Log::write(PacketFilter::LOG, [ts=1402676765.077455, node=bro, filter=ip or not ip, init=T, success=T]) -0.000000 | HookCallFunction Notice::want_pp() -0.000000 | HookCallFunction PacketFilter::build() -0.000000 | HookCallFunction PacketFilter::combine_filters(ip or not ip, and, ) -0.000000 | HookCallFunction PacketFilter::install() -0.000000 | HookCallFunction SumStats::add_observe_plugin_dependency(SumStats::STD_DEV, SumStats::VARIANCE) -0.000000 | HookCallFunction SumStats::add_observe_plugin_dependency(SumStats::VARIANCE, SumStats::AVERAGE) -0.000000 | HookCallFunction SumStats::register_observe_plugin(SumStats::AVERAGE, anonymous-function{ if (!SumStats::rv?$average) SumStats::rv$average = SumStats::valelseSumStats::rv$average += (SumStats::val - SumStats::rv$average) / (coerce SumStats::rv$num to double)}) -0.000000 | HookCallFunction SumStats::register_observe_plugin(SumStats::HLL_UNIQUE, anonymous-function{ if (!SumStats::rv?$card) { SumStats::rv$card = hll_cardinality_init(SumStats::r$hll_error_margin, SumStats::r$hll_confidence)SumStats::rv$hll_error_margin = SumStats::r$hll_error_marginSumStats::rv$hll_confidence = SumStats::r$hll_confidence}hll_cardinality_add(SumStats::rv$card, 
SumStats::obs)SumStats::rv$hll_unique = double_to_count(hll_cardinality_estimate(SumStats::rv$card))}) -0.000000 | HookCallFunction SumStats::register_observe_plugin(SumStats::LAST, anonymous-function{ if (0 < SumStats::r$num_last_elements) { if (!SumStats::rv?$last_elements) SumStats::rv$last_elements = Queue::init((coerce [$max_len=SumStats::r$num_last_elements] to Queue::Settings))Queue::put(SumStats::rv$last_elements, SumStats::obs)}}) -0.000000 | HookCallFunction SumStats::register_observe_plugin(SumStats::MAX, anonymous-function{ if (!SumStats::rv?$max) SumStats::rv$max = SumStats::valelseif (SumStats::rv$max < SumStats::val) SumStats::rv$max = SumStats::val}) -0.000000 | HookCallFunction SumStats::register_observe_plugin(SumStats::MIN, anonymous-function{ if (!SumStats::rv?$min) SumStats::rv$min = SumStats::valelseif (SumStats::val < SumStats::rv$min) SumStats::rv$min = SumStats::val}) -0.000000 | HookCallFunction SumStats::register_observe_plugin(SumStats::SAMPLE, anonymous-function{ SumStats::sample_add_sample(SumStats::obs, SumStats::rv)}) -0.000000 | HookCallFunction SumStats::register_observe_plugin(SumStats::STD_DEV, anonymous-function{ SumStats::calc_std_dev(SumStats::rv)}) -0.000000 | HookCallFunction SumStats::register_observe_plugin(SumStats::SUM, anonymous-function{ SumStats::rv$sum += SumStats::val}) -0.000000 | HookCallFunction SumStats::register_observe_plugin(SumStats::TOPK, anonymous-function{ topk_add(SumStats::rv$topk, SumStats::obs)}) -0.000000 | HookCallFunction SumStats::register_observe_plugin(SumStats::UNIQUE, anonymous-function{ if (!SumStats::rv?$unique_vals) SumStats::rv$unique_vals = (coerce set() to set[SumStats::Observation])if (SumStats::r?$unique_max) SumStats::rv$unique_max = SumStats::r$unique_maxif (!SumStats::r?$unique_max || flattenSumStats::rv$unique_vals <= SumStats::r$unique_max) add SumStats::rv$unique_vals[SumStats::obs]SumStats::rv$unique = flattenSumStats::rv$unique_vals}) -0.000000 | HookCallFunction SumStats::register_observe_plugin(SumStats::VARIANCE, anonymous-function{ if (1 < SumStats::rv$num) SumStats::rv$var_s += ((SumStats::val - SumStats::rv$prev_avg) * (SumStats::val - SumStats::rv$average))SumStats::calc_variance(SumStats::rv)SumStats::rv$prev_avg = SumStats::rv$average}) -0.000000 | HookCallFunction SumStats::register_observe_plugins() -0.000000 | HookCallFunction bro_init() -0.000000 | HookCallFunction filter_change_tracking() -0.000000 | HookCallFunction set_to_regex({}, (^\.?|\.)(~~)$) -0.000000 | HookCallFunction set_to_regex({}, (^\.?|\.)(~~)$) -0.000000 | HookDrainEvents -0.000000 | HookDrainEvents -0.000000 | HookLoadFile ../main.bro/bro -0.000000 | HookLoadFile ../main.bro/bro -0.000000 | HookLoadFile ../main.bro/bro -0.000000 | HookLoadFile ../main.bro/bro -0.000000 | HookLoadFile ../main.bro/bro -0.000000 | HookLoadFile ../main.bro/bro -0.000000 | HookLoadFile ../main.bro/bro -0.000000 | HookLoadFile ../main.bro/bro -0.000000 | HookLoadFile ../main.bro/bro -0.000000 | HookLoadFile ../main.bro/bro -0.000000 | HookLoadFile ../main.bro/bro -0.000000 | HookLoadFile ../main.bro/bro -0.000000 | HookLoadFile ../main.bro/bro -0.000000 | HookLoadFile ./Bro_ARP.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_AYIYA.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_BackDoor.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_BitTorrent.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_ConnSize.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_DCE_RPC.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_DHCP.events.bif.bro/bro 
-0.000000 | HookLoadFile ./Bro_DNP3.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_DNS.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_FTP.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_FTP.functions.bif.bro/bro -0.000000 | HookLoadFile ./Bro_File.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_FileExtract.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_FileExtract.functions.bif.bro/bro -0.000000 | HookLoadFile ./Bro_FileHash.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_Finger.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_GTPv1.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_Gnutella.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_HTTP.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_HTTP.functions.bif.bro/bro -0.000000 | HookLoadFile ./Bro_ICMP.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_IRC.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_Ident.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_InterConn.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_Login.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_Login.functions.bif.bro/bro -0.000000 | HookLoadFile ./Bro_MIME.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_Modbus.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_NCP.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_NTP.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_NetBIOS.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_NetBIOS.functions.bif.bro/bro -0.000000 | HookLoadFile ./Bro_NetFlow.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_PIA.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_POP3.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_RADIUS.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_RPC.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_SMB.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_SMTP.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_SMTP.functions.bif.bro/bro -0.000000 | HookLoadFile ./Bro_SNMP.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_SNMP.types.bif.bro/bro -0.000000 | HookLoadFile ./Bro_SOCKS.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_SSH.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_SSL.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_SteppingStone.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_Syslog.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_TCP.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_TCP.functions.bif.bro/bro -0.000000 | HookLoadFile ./Bro_Teredo.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_UDP.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_Unified2.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_Unified2.types.bif.bro/bro -0.000000 | HookLoadFile ./Bro_X509.events.bif.bro/bro -0.000000 | HookLoadFile ./Bro_X509.functions.bif.bro/bro -0.000000 | HookLoadFile ./Bro_X509.types.bif.bro/bro -0.000000 | HookLoadFile ./Bro_ZIP.events.bif.bro/bro -0.000000 | HookLoadFile ./actions/add-geodata.bro/bro -0.000000 | HookLoadFile ./actions/drop.bro/bro -0.000000 | HookLoadFile ./actions/email_admin.bro/bro -0.000000 | HookLoadFile ./actions/page.bro/bro -0.000000 | HookLoadFile ./actions/pp-alarms.bro/bro -0.000000 | HookLoadFile ./addrs.bro/bro -0.000000 | HookLoadFile ./analyzer.bif.bro/bro -0.000000 | HookLoadFile ./average.bro/bro -0.000000 | HookLoadFile ./average.bro/bro -0.000000 | HookLoadFile ./bloom-filter.bif.bro/bro -0.000000 | HookLoadFile ./bro.bif.bro/bro -0.000000 | HookLoadFile ./broxygen.bif.bro/bro -0.000000 | HookLoadFile ./cardinality-counter.bif.bro/bro -0.000000 | HookLoadFile ./const.bif.bro/bro -0.000000 | HookLoadFile ./consts.bif.bro/bro 
-0.000000 | HookLoadFile ./consts.bro/bro -0.000000 | HookLoadFile ./consts.bro/bro -0.000000 | HookLoadFile ./consts.bro/bro -0.000000 | HookLoadFile ./consts.bro/bro -0.000000 | HookLoadFile ./consts.bro/bro -0.000000 | HookLoadFile ./consts.bro/bro -0.000000 | HookLoadFile ./consts.bro/bro -0.000000 | HookLoadFile ./consts.bro/bro -0.000000 | HookLoadFile ./consts.bro/bro -0.000000 | HookLoadFile ./consts.bro/bro -0.000000 | HookLoadFile ./consts.bro/bro -0.000000 | HookLoadFile ./consts.bro/bro -0.000000 | HookLoadFile ./consts.bro/bro -0.000000 | HookLoadFile ./contents.bro/bro -0.000000 | HookLoadFile ./dcc-send.bro/bro -0.000000 | HookLoadFile ./dcc-send.bro/bro -0.000000 | HookLoadFile ./entities.bro/bro -0.000000 | HookLoadFile ./entities.bro/bro -0.000000 | HookLoadFile ./entities.bro/bro -0.000000 | HookLoadFile ./entities.bro/bro -0.000000 | HookLoadFile ./event.bif.bro/bro -0.000000 | HookLoadFile ./events.bif.bro/bro -0.000000 | HookLoadFile ./exec.bro/bro -0.000000 | HookLoadFile ./extend-email/hostnames.bro/bro -0.000000 | HookLoadFile ./file_analysis.bif.bro/bro -0.000000 | HookLoadFile ./files.bro/bro -0.000000 | HookLoadFile ./files.bro/bro -0.000000 | HookLoadFile ./files.bro/bro -0.000000 | HookLoadFile ./files.bro/bro -0.000000 | HookLoadFile ./files.bro/bro -0.000000 | HookLoadFile ./functions.bif.bro/bro -0.000000 | HookLoadFile ./gridftp.bro/bro -0.000000 | HookLoadFile ./hll_unique.bro/bro -0.000000 | HookLoadFile ./inactivity.bro/bro -0.000000 | HookLoadFile ./info.bro/bro -0.000000 | HookLoadFile ./info.bro/bro -0.000000 | HookLoadFile ./info.bro/bro -0.000000 | HookLoadFile ./info.bro/bro -0.000000 | HookLoadFile ./info.bro/bro -0.000000 | HookLoadFile ./input.bif.bro/bro -0.000000 | HookLoadFile ./input.bro/bro -0.000000 | HookLoadFile ./last.bro/bro -0.000000 | HookLoadFile ./logging.bif.bro/bro -0.000000 | HookLoadFile ./magic.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | 
HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./main.bro/bro -0.000000 | HookLoadFile ./max.bro/bro -0.000000 | HookLoadFile ./min.bro/bro -0.000000 | HookLoadFile ./mozilla-ca-list.bro/bro -0.000000 | HookLoadFile ./netstats.bro/bro -0.000000 | HookLoadFile ./non-cluster.bro/bro -0.000000 | HookLoadFile ./non-cluster.bro/bro -0.000000 | HookLoadFile ./patterns.bro/bro -0.000000 | HookLoadFile ./plugins.bro/bro -0.000000 | HookLoadFile ./polling.bro/bro -0.000000 | HookLoadFile ./postprocessors.bro/bro -0.000000 | HookLoadFile ./readers/ascii.bro/bro -0.000000 | HookLoadFile ./readers/benchmark.bro/bro -0.000000 | HookLoadFile ./readers/binary.bro/bro -0.000000 | HookLoadFile ./readers/raw.bro/bro -0.000000 | HookLoadFile ./readers/sqlite.bro/bro -0.000000 | HookLoadFile ./reporter.bif.bro/bro -0.000000 | HookLoadFile ./sample.bro/bro -0.000000 | HookLoadFile ./scp.bro/bro -0.000000 | HookLoadFile ./sftp.bro/bro -0.000000 | HookLoadFile ./site.bro/bro -0.000000 | HookLoadFile ./std-dev.bro/bro -0.000000 | HookLoadFile ./strings.bif.bro/bro -0.000000 | HookLoadFile ./sum.bro/bro -0.000000 | HookLoadFile ./top-k.bif.bro/bro -0.000000 | HookLoadFile ./topk.bro/bro -0.000000 | HookLoadFile ./types.bif.bro/bro -0.000000 | HookLoadFile ./unique.bro/bro -0.000000 | HookLoadFile ./utils-commands.bro/bro -0.000000 | HookLoadFile ./utils-commands.bro/bro -0.000000 | HookLoadFile ./utils-commands.bro/bro -0.000000 | HookLoadFile ./utils.bro/bro -0.000000 | HookLoadFile ./utils.bro/bro -0.000000 | HookLoadFile ./utils.bro/bro -0.000000 | HookLoadFile ./utils.bro/bro -0.000000 | HookLoadFile ./utils.bro/bro -0.000000 | HookLoadFile ./utils.bro/bro -0.000000 | HookLoadFile ./utils.bro/bro -0.000000 | HookLoadFile ./utils.bro/bro -0.000000 | HookLoadFile ./variance.bro/bro -0.000000 | HookLoadFile ./variance.bro/bro -0.000000 | HookLoadFile ./weird.bro/bro -0.000000 | HookLoadFile ./writers/ascii.bro/bro -0.000000 | HookLoadFile ./writers/dataseries.bro/bro -0.000000 | HookLoadFile ./writers/elasticsearch.bro/bro -0.000000 | HookLoadFile ./writers/none.bro/bro -0.000000 | HookLoadFile ./writers/sqlite.bro/bro -0.000000 | HookLoadFile /Users/robin/bro/dynamic-plugins-2.3/testing/btest/.tmp/core.plugins.hooks/hooks.bro/bro -0.000000 | HookLoadFile /Users/robin/bro/dynamic-plugins-2.3/testing/btest/.tmp/core.plugins.hooks/lib/bif/__load__.bro/bro -0.000000 | HookLoadFile /Users/robin/bro/dynamic-plugins-2.3/testing/btest/.tmp/core.plugins.hooks/scripts/__load__.bro/bro -0.000000 | HookLoadFile base/bif.bro/bro -0.000000 | HookLoadFile base/bif/analyzer.bif/bif -0.000000 | HookLoadFile base/bif/bro.bif/bif -0.000000 | HookLoadFile base/bif/const.bif.bro/bro -0.000000 | HookLoadFile base/bif/event.bif/bif -0.000000 | HookLoadFile base/bif/file_analysis.bif/bif -0.000000 | HookLoadFile base/bif/input.bif/bif -0.000000 | HookLoadFile base/bif/logging.bif/bif -0.000000 | HookLoadFile base/bif/plugins.bro/bro -0.000000 | HookLoadFile base/bif/plugins/Bro_SNMP.types.bif/bif -0.000000 | HookLoadFile base/bif/reporter.bif/bif -0.000000 | HookLoadFile base/bif/strings.bif/bif -0.000000 | HookLoadFile base/bif/types.bif/bif -0.000000 | HookLoadFile base/files/extract.bro/bro -0.000000 | HookLoadFile base/files/hash.bro/bro -0.000000 | HookLoadFile base/files/hash.bro/bro -0.000000 | HookLoadFile 
base/files/unified2.bro/bro -0.000000 | HookLoadFile base/files/x509.bro/bro -0.000000 | HookLoadFile base/files/x509.bro/bro -0.000000 | HookLoadFile base/frameworks/analyzer.bro/bro -0.000000 | HookLoadFile base/frameworks/analyzer.bro/bro -0.000000 | HookLoadFile base/frameworks/analyzer.bro/bro -0.000000 | HookLoadFile base/frameworks/analyzer.bro/bro -0.000000 | HookLoadFile base/frameworks/cluster.bro/bro -0.000000 | HookLoadFile base/frameworks/cluster.bro/bro -0.000000 | HookLoadFile base/frameworks/cluster.bro/bro -0.000000 | HookLoadFile base/frameworks/cluster.bro/bro -0.000000 | HookLoadFile base/frameworks/cluster.bro/bro -0.000000 | HookLoadFile base/frameworks/cluster.bro/bro -0.000000 | HookLoadFile base/frameworks/communication.bro/bro -0.000000 | HookLoadFile base/frameworks/control.bro/bro -0.000000 | HookLoadFile base/frameworks/control.bro/bro -0.000000 | HookLoadFile base/frameworks/dpd.bro/bro -0.000000 | HookLoadFile base/frameworks/files.bro/bro -0.000000 | HookLoadFile base/frameworks/files.bro/bro -0.000000 | HookLoadFile base/frameworks/files.bro/bro -0.000000 | HookLoadFile base/frameworks/files.bro/bro -0.000000 | HookLoadFile base/frameworks/files.bro/bro -0.000000 | HookLoadFile base/frameworks/files.bro/bro -0.000000 | HookLoadFile base/frameworks/files.bro/bro -0.000000 | HookLoadFile base/frameworks/files.bro/bro -0.000000 | HookLoadFile base/frameworks/files.bro/bro -0.000000 | HookLoadFile base/frameworks/files.bro/bro -0.000000 | HookLoadFile base/frameworks/files.bro/bro -0.000000 | HookLoadFile base/frameworks/input.bro/bro -0.000000 | HookLoadFile base/frameworks/input.bro/bro -0.000000 | HookLoadFile base/frameworks/intel.bro/bro -0.000000 | HookLoadFile base/frameworks/logging.bro/bro -0.000000 | HookLoadFile base/frameworks/logging.bro/bro -0.000000 | HookLoadFile base/frameworks/notice.bro/bro -0.000000 | HookLoadFile base/frameworks/notice.bro/bro -0.000000 | HookLoadFile base/frameworks/notice.bro/bro -0.000000 | HookLoadFile base/frameworks/notice.bro/bro -0.000000 | HookLoadFile base/frameworks/notice.bro/bro -0.000000 | HookLoadFile base/frameworks/notice.bro/bro -0.000000 | HookLoadFile base/frameworks/notice.bro/bro -0.000000 | HookLoadFile base/frameworks/notice.bro/bro -0.000000 | HookLoadFile base/frameworks/notice.bro/bro -0.000000 | HookLoadFile base/frameworks/notice.bro/bro -0.000000 | HookLoadFile base/frameworks/packet-filter.bro/bro -0.000000 | HookLoadFile base/frameworks/packet-filter.bro/bro -0.000000 | HookLoadFile base/frameworks/packet-filter/utils.bro/bro -0.000000 | HookLoadFile base/frameworks/reporter.bro/bro -0.000000 | HookLoadFile base/frameworks/reporter.bro/bro -0.000000 | HookLoadFile base/frameworks/signatures.bro/bro -0.000000 | HookLoadFile base/frameworks/software.bro/bro -0.000000 | HookLoadFile base/frameworks/sumstats.bro/bro -0.000000 | HookLoadFile base/frameworks/sumstats.bro/bro -0.000000 | HookLoadFile base/frameworks/sumstats.bro/bro -0.000000 | HookLoadFile base/frameworks/sumstats.bro/bro -0.000000 | HookLoadFile base/frameworks/sumstats/main.bro/bro -0.000000 | HookLoadFile base/frameworks/tunnels.bro/bro -0.000000 | HookLoadFile base/frameworks/tunnels.bro/bro -0.000000 | HookLoadFile base/frameworks/tunnels.bro/bro -0.000000 | HookLoadFile base/init-default.bro/bro -0.000000 | HookLoadFile base/misc/find-checksum-offloading.bro/bro -0.000000 | HookLoadFile base/misc/find-filtered-trace.bro/bro -0.000000 | HookLoadFile base/protocols/conn.bro/bro -0.000000 | HookLoadFile 
base/protocols/conn.bro/bro -0.000000 | HookLoadFile base/protocols/conn.bro/bro -0.000000 | HookLoadFile base/protocols/dhcp.bro/bro -0.000000 | HookLoadFile base/protocols/dnp3.bro/bro -0.000000 | HookLoadFile base/protocols/dns.bro/bro -0.000000 | HookLoadFile base/protocols/ftp.bro/bro -0.000000 | HookLoadFile base/protocols/http.bro/bro -0.000000 | HookLoadFile base/protocols/irc.bro/bro -0.000000 | HookLoadFile base/protocols/modbus.bro/bro -0.000000 | HookLoadFile base/protocols/pop3.bro/bro -0.000000 | HookLoadFile base/protocols/radius.bro/bro -0.000000 | HookLoadFile base/protocols/smtp.bro/bro -0.000000 | HookLoadFile base/protocols/snmp.bro/bro -0.000000 | HookLoadFile base/protocols/socks.bro/bro -0.000000 | HookLoadFile base/protocols/ssh.bro/bro -0.000000 | HookLoadFile base/protocols/ssl.bro/bro -0.000000 | HookLoadFile base/protocols/ssl.bro/bro -0.000000 | HookLoadFile base/protocols/ssl.bro/bro -0.000000 | HookLoadFile base/protocols/syslog.bro/bro -0.000000 | HookLoadFile base/protocols/tunnels.bro/bro -0.000000 | HookLoadFile base/utils/active-http.bro/bro -0.000000 | HookLoadFile base/utils/addrs.bro/bro -0.000000 | HookLoadFile base/utils/addrs.bro/bro -0.000000 | HookLoadFile base/utils/addrs.bro/bro -0.000000 | HookLoadFile base/utils/addrs.bro/bro -0.000000 | HookLoadFile base/utils/addrs.bro/bro -0.000000 | HookLoadFile base/utils/addrs.bro/bro -0.000000 | HookLoadFile base/utils/addrs.bro/bro -0.000000 | HookLoadFile base/utils/conn-ids.bro/bro -0.000000 | HookLoadFile base/utils/conn-ids.bro/bro -0.000000 | HookLoadFile base/utils/conn-ids.bro/bro -0.000000 | HookLoadFile base/utils/conn-ids.bro/bro -0.000000 | HookLoadFile base/utils/conn-ids.bro/bro -0.000000 | HookLoadFile base/utils/conn-ids.bro/bro -0.000000 | HookLoadFile base/utils/conn-ids.bro/bro -0.000000 | HookLoadFile base/utils/conn-ids.bro/bro -0.000000 | HookLoadFile base/utils/dir.bro/bro -0.000000 | HookLoadFile base/utils/dir.bro/bro -0.000000 | HookLoadFile base/utils/directions-and-hosts.bro/bro -0.000000 | HookLoadFile base/utils/directions-and-hosts.bro/bro -0.000000 | HookLoadFile base/utils/directions-and-hosts.bro/bro -0.000000 | HookLoadFile base/utils/directions-and-hosts.bro/bro -0.000000 | HookLoadFile base/utils/exec.bro/bro -0.000000 | HookLoadFile base/utils/exec.bro/bro -0.000000 | HookLoadFile base/utils/files.bro/bro -0.000000 | HookLoadFile base/utils/files.bro/bro -0.000000 | HookLoadFile base/utils/files.bro/bro -0.000000 | HookLoadFile base/utils/files.bro/bro -0.000000 | HookLoadFile base/utils/files.bro/bro -0.000000 | HookLoadFile base/utils/files.bro/bro -0.000000 | HookLoadFile base/utils/numbers.bro/bro -0.000000 | HookLoadFile base/utils/numbers.bro/bro -0.000000 | HookLoadFile base/utils/numbers.bro/bro -0.000000 | HookLoadFile base/utils/numbers.bro/bro -0.000000 | HookLoadFile base/utils/paths.bro/bro -0.000000 | HookLoadFile base/utils/paths.bro/bro -0.000000 | HookLoadFile base/utils/paths.bro/bro -0.000000 | HookLoadFile base/utils/paths.bro/bro -0.000000 | HookLoadFile base/utils/paths.bro/bro -0.000000 | HookLoadFile base/utils/paths.bro/bro -0.000000 | HookLoadFile base/utils/patterns.bro/bro -0.000000 | HookLoadFile base/utils/queue.bro/bro -0.000000 | HookLoadFile base/utils/queue.bro/bro -0.000000 | HookLoadFile base/utils/queue.bro/bro -0.000000 | HookLoadFile base/utils/site.bro/bro -0.000000 | HookLoadFile base/utils/site.bro/bro -0.000000 | HookLoadFile base/utils/site.bro/bro -0.000000 | HookLoadFile base/utils/site.bro/bro -0.000000 | HookLoadFile 
base/utils/site.bro/bro -0.000000 | HookLoadFile base/utils/site.bro/bro -0.000000 | HookLoadFile base/utils/site.bro/bro -0.000000 | HookLoadFile base/utils/strings.bro/bro -0.000000 | HookLoadFile base/utils/strings.bro/bro -0.000000 | HookLoadFile base/utils/strings.bro/bro -0.000000 | HookLoadFile base/utils/thresholds.bro/bro -0.000000 | HookLoadFile base/utils/thresholds.bro/bro -0.000000 | HookLoadFile base/utils/time.bro/bro -0.000000 | HookLoadFile base/utils/urls.bro/bro -0.000000 | HookQueueEvent bro_init() -0.000000 | HookQueueEvent filter_change_tracking() -1362692526.869344 MetaHookPost CallFunction(ChecksumOffloading::check, ()) -> -1362692526.869344 MetaHookPost CallFunction(filter_change_tracking, ()) -> -1362692526.869344 MetaHookPost CallFunction(new_connection, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0], start_time=1362692526.869344, duration=0.0, service={}, addl=, hot=0, history=, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> -1362692526.869344 MetaHookPost DrainEvents() -> -1362692526.869344 MetaHookPost DrainEvents() -> -1362692526.869344 MetaHookPost DrainEvents() -> -1362692526.869344 MetaHookPost DrainEvents() -> -1362692526.869344 MetaHookPost DrainEvents() -> -1362692526.869344 MetaHookPost DrainEvents() -> -1362692526.869344 MetaHookPost QueueEvent(ChecksumOffloading::check()) -> false -1362692526.869344 MetaHookPost QueueEvent(filter_change_tracking()) -> false -1362692526.869344 MetaHookPost QueueEvent(new_connection([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0], start_time=1362692526.869344, duration=0.0, service={}, addl=, hot=0, history=, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> false -1362692526.869344 MetaHookPost UpdateNetworkTime(1362692526.869344) -> -1362692526.869344 MetaHookPre CallFunction(ChecksumOffloading::check, ()) -1362692526.869344 MetaHookPre CallFunction(filter_change_tracking, ()) -1362692526.869344 MetaHookPre CallFunction(new_connection, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0], start_time=1362692526.869344, duration=0.0, service={}, addl=, hot=0, history=, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -1362692526.869344 MetaHookPre DrainEvents() -1362692526.869344 MetaHookPre DrainEvents() -1362692526.869344 MetaHookPre DrainEvents() -1362692526.869344 MetaHookPre DrainEvents() -1362692526.869344 MetaHookPre DrainEvents() -1362692526.869344 MetaHookPre DrainEvents() -1362692526.869344 MetaHookPre QueueEvent(ChecksumOffloading::check()) -1362692526.869344 
MetaHookPre QueueEvent(filter_change_tracking()) -1362692526.869344 MetaHookPre QueueEvent(new_connection([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0], start_time=1362692526.869344, duration=0.0, service={}, addl=, hot=0, history=, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -1362692526.869344 MetaHookPre UpdateNetworkTime(1362692526.869344) -1362692526.869344 | HookUpdateNetworkTime 1362692526.869344 -1362692526.869344 | HookCallFunction ChecksumOffloading::check() -1362692526.869344 | HookCallFunction filter_change_tracking() -1362692526.869344 | HookCallFunction new_connection([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0], start_time=1362692526.869344, duration=0.0, service={}, addl=, hot=0, history=, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]) -1362692526.869344 | HookDrainEvents -1362692526.869344 | HookDrainEvents -1362692526.869344 | HookDrainEvents -1362692526.869344 | HookDrainEvents -1362692526.869344 | HookDrainEvents -1362692526.869344 | HookDrainEvents -1362692526.869344 | HookQueueEvent ChecksumOffloading::check() -1362692526.869344 | HookQueueEvent filter_change_tracking() -1362692526.869344 | HookQueueEvent new_connection([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0], start_time=1362692526.869344, duration=0.0, service={}, addl=, hot=0, history=, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]) -1362692526.939084 MetaHookPost CallFunction(connection_established, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0], start_time=1362692526.869344, duration=0.06974, service={}, addl=, hot=0, history=Sh, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> -1362692526.939084 MetaHookPost DrainEvents() -> -1362692526.939084 MetaHookPost DrainEvents() -> -1362692526.939084 MetaHookPost QueueEvent(connection_established([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0], start_time=1362692526.869344, duration=0.06974, service={}, addl=, hot=0, history=Sh, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, 
extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> false -1362692526.939084 MetaHookPost UpdateNetworkTime(1362692526.939084) -> -1362692526.939084 MetaHookPre CallFunction(connection_established, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0], start_time=1362692526.869344, duration=0.06974, service={}, addl=, hot=0, history=Sh, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -1362692526.939084 MetaHookPre DrainEvents() -1362692526.939084 MetaHookPre DrainEvents() -1362692526.939084 MetaHookPre QueueEvent(connection_established([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0], start_time=1362692526.869344, duration=0.06974, service={}, addl=, hot=0, history=Sh, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -1362692526.939084 MetaHookPre UpdateNetworkTime(1362692526.939084) -1362692526.939084 | HookUpdateNetworkTime 1362692526.939084 -1362692526.939084 | HookCallFunction connection_established([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0], start_time=1362692526.869344, duration=0.06974, service={}, addl=, hot=0, history=Sh, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]) -1362692526.939084 | HookDrainEvents -1362692526.939084 | HookDrainEvents -1362692526.939084 | HookQueueEvent connection_established([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0], start_time=1362692526.869344, duration=0.06974, service={}, addl=, hot=0, history=Sh, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]) -1362692526.939378 MetaHookPost DrainEvents() -> -1362692526.939378 MetaHookPost DrainEvents() -> -1362692526.939378 MetaHookPost UpdateNetworkTime(1362692526.939378) -> -1362692526.939378 MetaHookPre DrainEvents() -1362692526.939378 MetaHookPre DrainEvents() -1362692526.939378 MetaHookPre UpdateNetworkTime(1362692526.939378) -1362692526.939378 | HookUpdateNetworkTime 1362692526.939378 -1362692526.939378 | HookDrainEvents -1362692526.939378 | HookDrainEvents -1362692526.939527 MetaHookPost CallFunction(Analyzer::name, (Analyzer::ANALYZER_HTTP)) -> -1362692526.939527 MetaHookPost 
CallFunction(HTTP::get_file_handle, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> -1362692526.939527 MetaHookPost CallFunction(HTTP::new_http_session, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=[pending={}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> -1362692526.939527 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=[pending={}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, T)) -> -1362692526.939527 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, 
dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, T)) -> -1362692526.939527 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, T)) -> -1362692526.939527 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, 
uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, T)) -> -1362692526.939527 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, T)) -> -1362692526.939527 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, 
http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, T)) -> -1362692526.939527 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, T)) -> -1362692526.939527 MetaHookPost CallFunction(get_file_handle, (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, 
dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> -1362692526.939527 MetaHookPost CallFunction(http_begin_entity, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> -1362692526.939527 MetaHookPost CallFunction(http_end_entity, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, 
http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> -1362692526.939527 MetaHookPost CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, USER-AGENT, Wget/1.14 (darwin12.2.0))) -> -1362692526.939527 MetaHookPost CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, 
ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, ACCEPT, */*)) -> -1362692526.939527 MetaHookPost CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, HOST, bro.org)) -> -1362692526.939527 MetaHookPost CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, 
dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, CONNECTION, Keep-Alive)) -> -1362692526.939527 MetaHookPost CallFunction(http_message_done, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, [start=1362692526.939527, interrupted=F, finish_msg=message ends normally, body_length=0, content_gap_length=0, header_length=124])) -> -1362692526.939527 MetaHookPost CallFunction(http_request, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, 
flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], GET, /download/CHANGES.bro-aux.txt, /download/CHANGES.bro-aux.txt, 1.1)) -> -1362692526.939527 MetaHookPost CallFunction(id_string, ([orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp])) -> -1362692526.939527 MetaHookPost CallFunction(protocol_confirmation, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], Analyzer::ANALYZER_HTTP, 3)) -> -1362692526.939527 MetaHookPost DrainEvents() -> -1362692526.939527 MetaHookPost DrainEvents() -> -1362692526.939527 MetaHookPost DrainEvents() -> -1362692526.939527 MetaHookPost QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> false -1362692526.939527 MetaHookPost QueueEvent(http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> false -1362692526.939527 MetaHookPost QueueEvent(http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> false -1362692526.939527 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, 
duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, ACCEPT, */*)) -> false -1362692526.939527 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, CONNECTION, Keep-Alive)) -> false -1362692526.939527 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, HOST, bro.org)) -> false -1362692526.939527 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, USER-AGENT, Wget/1.14 (darwin12.2.0))) -> false -1362692526.939527 MetaHookPost QueueEvent(http_message_done([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, 
uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, [start=1362692526.939527, interrupted=F, finish_msg=message ends normally, body_length=0, content_gap_length=0, header_length=124])) -> false -1362692526.939527 MetaHookPost QueueEvent(http_request([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], GET, /download/CHANGES.bro-aux.txt, /download/CHANGES.bro-aux.txt, 1.1)) -> false -1362692526.939527 MetaHookPost UpdateNetworkTime(1362692526.939527) -> -1362692526.939527 MetaHookPre CallFunction(Analyzer::name, (Analyzer::ANALYZER_HTTP)) -1362692526.939527 MetaHookPre CallFunction(HTTP::get_file_handle, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692526.939527 MetaHookPre CallFunction(HTTP::new_http_session, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, 
flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=[pending={}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -1362692526.939527 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=[pending={}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, T)) -1362692526.939527 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, T)) -1362692526.939527 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, 
response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, T)) -1362692526.939527 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, T)) -1362692526.939527 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, 
response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, T)) -1362692526.939527 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, T)) -1362692526.939527 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), 
request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, T)) -1362692526.939527 MetaHookPre CallFunction(get_file_handle, (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692526.939527 MetaHookPre CallFunction(http_begin_entity, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, 
user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692526.939527 MetaHookPre CallFunction(http_end_entity, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692526.939527 MetaHookPre CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, 
status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, USER-AGENT, Wget/1.14 (darwin12.2.0))) -1362692526.939527 MetaHookPre CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, ACCEPT, */*)) -1362692526.939527 MetaHookPre CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), 
request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, HOST, bro.org)) -1362692526.939527 MetaHookPre CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, CONNECTION, Keep-Alive)) -1362692526.939527 MetaHookPre CallFunction(http_message_done, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, 
uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, [start=1362692526.939527, interrupted=F, finish_msg=message ends normally, body_length=0, content_gap_length=0, header_length=124])) -1362692526.939527 MetaHookPre CallFunction(http_request, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], GET, /download/CHANGES.bro-aux.txt, /download/CHANGES.bro-aux.txt, 1.1)) -1362692526.939527 MetaHookPre CallFunction(id_string, ([orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp])) -1362692526.939527 MetaHookPre CallFunction(protocol_confirmation, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], Analyzer::ANALYZER_HTTP, 3)) -1362692526.939527 MetaHookPre DrainEvents() -1362692526.939527 MetaHookPre DrainEvents() -1362692526.939527 MetaHookPre DrainEvents() -1362692526.939527 MetaHookPre QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692526.939527 MetaHookPre QueueEvent(http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856/tcp, 
resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692526.939527 MetaHookPre QueueEvent(http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692526.939527 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, ACCEPT, */*)) -1362692526.939527 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, CONNECTION, Keep-Alive)) -1362692526.939527 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, HOST, bro.org)) -1362692526.939527 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, 
smtp=, smtp_state=, socks=, ssh=, syslog=], T, USER-AGENT, Wget/1.14 (darwin12.2.0))) -1362692526.939527 MetaHookPre QueueEvent(http_message_done([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, [start=1362692526.939527, interrupted=F, finish_msg=message ends normally, body_length=0, content_gap_length=0, header_length=124])) -1362692526.939527 MetaHookPre QueueEvent(http_request([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], GET, /download/CHANGES.bro-aux.txt, /download/CHANGES.bro-aux.txt, 1.1)) -1362692526.939527 MetaHookPre UpdateNetworkTime(1362692526.939527) -1362692526.939527 | HookUpdateNetworkTime 1362692526.939527 -1362692526.939527 | HookCallFunction Analyzer::name(Analyzer::ANALYZER_HTTP) -1362692526.939527 | HookCallFunction HTTP::get_file_handle([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, 
host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692526.939527 | HookCallFunction HTTP::new_http_session([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=[pending={}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]) -1362692526.939527 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=[pending={}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, T) -1362692526.939527 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, 
orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, T) -1362692526.939527 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, T) -1362692526.939527 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, 
host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, T) -1362692526.939527 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, T) -1362692526.939527 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, 
host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, T) -1362692526.939527 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, T) -1362692526.939527 | HookCallFunction get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], 
trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692526.939527 | HookCallFunction http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692526.939527 | HookCallFunction http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, 
referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692526.939527 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, USER-AGENT, Wget/1.14 (darwin12.2.0)) -1362692526.939527 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, 
user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, ACCEPT, */*) -1362692526.939527 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, HOST, bro.org) -1362692526.939527 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, 
uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, CONNECTION, Keep-Alive) -1362692526.939527 | HookCallFunction http_message_done([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, [start=1362692526.939527, interrupted=F, finish_msg=message ends normally, body_length=0, content_gap_length=0, header_length=124]) -1362692526.939527 | HookCallFunction http_request([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], GET, /download/CHANGES.bro-aux.txt, /download/CHANGES.bro-aux.txt, 1.1) -1362692526.939527 | HookCallFunction id_string([orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]) -1362692526.939527 | HookCallFunction protocol_confirmation([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, 
duration=0.070183, service={}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], Analyzer::ANALYZER_HTTP, 3) -1362692526.939527 | HookDrainEvents -1362692526.939527 | HookDrainEvents -1362692526.939527 | HookDrainEvents -1362692526.939527 | HookQueueEvent get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692526.939527 | HookQueueEvent http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692526.939527 | HookQueueEvent http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692526.939527 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, ACCEPT, */*) -1362692526.939527 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, CONNECTION, Keep-Alive) -1362692526.939527 | HookQueueEvent 
http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, HOST, bro.org) -1362692526.939527 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, USER-AGENT, Wget/1.14 (darwin12.2.0)) -1362692526.939527 | HookQueueEvent http_message_done([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, [start=1362692526.939527, interrupted=F, finish_msg=message ends normally, body_length=0, content_gap_length=0, header_length=124]) -1362692526.939527 | HookQueueEvent http_request([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0], start_time=1362692526.869344, duration=0.070183, service={HTTP}, addl=, hot=0, history=ShAD, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, 
dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], GET, /download/CHANGES.bro-aux.txt, /download/CHANGES.bro-aux.txt, 1.1) -1362692527.008509 MetaHookPost DrainEvents() -> -1362692527.008509 MetaHookPost DrainEvents() -> -1362692527.008509 MetaHookPost UpdateNetworkTime(1362692527.008509) -> -1362692527.008509 MetaHookPre DrainEvents() -1362692527.008509 MetaHookPre DrainEvents() -1362692527.008509 MetaHookPre UpdateNetworkTime(1362692527.008509) -1362692527.008509 | HookUpdateNetworkTime 1362692527.008509 -1362692527.008509 | HookDrainEvents -1362692527.008509 | HookDrainEvents -1362692527.009512 MetaHookPost CallFunction(HTTP::code_in_range, (200, 100, 199)) -> -1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -> -1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, 
orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -> -1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -> -1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, 
username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -> -1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -> -1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, 
response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -> -1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -> -1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, 
uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -> -1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -> -1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, 
resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -> -1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -> -1362692527.009512 MetaHookPost CallFunction(http_begin_entity, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, 
uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> -1362692527.009512 MetaHookPost CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes)) -> -1362692527.009512 MetaHookPost CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, 
dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive)) -> -1362692527.009512 MetaHookPost CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-LENGTH, 4705)) -> -1362692527.009512 MetaHookPost CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, 
history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-TYPE, text/plain; charset=UTF-8)) -> -1362692527.009512 MetaHookPost CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, DATE, Thu, 07 Mar 2013 21:43:07 GMT)) -> -1362692527.009512 MetaHookPost CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], 
resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ETAG, "1261-4c870358a6fc0")) -> -1362692527.009512 MetaHookPost CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, KEEP-ALIVE, timeout=5, max=100)) -> -1362692527.009512 MetaHookPost CallFunction(http_header, ([id=[orig_h=141.142.228.5, 
orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, LAST-MODIFIED, Wed, 29 Aug 2012 23:49:27 GMT)) -> -1362692527.009512 MetaHookPost CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, 
socks=, ssh=, syslog=], F, SERVER, Apache/2.4.3 (Fedora))) -> -1362692527.009512 MetaHookPost CallFunction(http_reply, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK)) -> -1362692527.009512 MetaHookPost DrainEvents() -> -1362692527.009512 MetaHookPost DrainEvents() -> -1362692527.009512 MetaHookPost QueueEvent(http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, 
current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false -1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes)) -> false -1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, 
resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive)) -> false -1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-LENGTH, 4705)) -> false -1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, 
proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-TYPE, text/plain; charset=UTF-8)) -> false -1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, DATE, Thu, 07 Mar 2013 21:43:07 GMT)) -> false -1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, 
info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ETAG, "1261-4c870358a6fc0")) -> false -1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, KEEP-ALIVE, timeout=5, max=100)) -> false -1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), 
request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, LAST-MODIFIED, Wed, 29 Aug 2012 23:49:27 GMT)) -> false -1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, SERVER, Apache/2.4.3 (Fedora))) -> false -1362692527.009512 MetaHookPost QueueEvent(http_reply([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, 
uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK)) -> false -1362692527.009512 MetaHookPost UpdateNetworkTime(1362692527.009512) -> -1362692527.009512 MetaHookPre CallFunction(HTTP::code_in_range, (200, 100, 199)) -1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, 
resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, 
resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, 
capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, 
status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 
(darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -1362692527.009512 MetaHookPre CallFunction(http_begin_entity, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, 
uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009512 MetaHookPre CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes)) -1362692527.009512 MetaHookPre CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, 
resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive)) -1362692527.009512 MetaHookPre CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-LENGTH, 4705)) -1362692527.009512 MetaHookPre CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, 
http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-TYPE, text/plain; charset=UTF-8)) -1362692527.009512 MetaHookPre CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, DATE, Thu, 07 Mar 2013 21:43:07 GMT)) -1362692527.009512 MetaHookPre CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, 
uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ETAG, "1261-4c870358a6fc0")) -1362692527.009512 MetaHookPre CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, KEEP-ALIVE, timeout=5, max=100)) -1362692527.009512 MetaHookPre CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, 
num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, LAST-MODIFIED, Wed, 29 Aug 2012 23:49:27 GMT)) -1362692527.009512 MetaHookPre CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, SERVER, Apache/2.4.3 (Fedora))) -1362692527.009512 MetaHookPre CallFunction(http_reply, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, 
resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK)) -1362692527.009512 MetaHookPre DrainEvents() -1362692527.009512 MetaHookPre DrainEvents() -1362692527.009512 MetaHookPre QueueEvent(http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) 
-1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes)) -1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, 
syslog=], F, CONNECTION, Keep-Alive)) -1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-LENGTH, 4705)) -1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, 
smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-TYPE, text/plain; charset=UTF-8)) -1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, DATE, Thu, 07 Mar 2013 21:43:07 GMT)) -1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, 
current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ETAG, "1261-4c870358a6fc0")) -1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, KEEP-ALIVE, timeout=5, max=100)) -1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, 
current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, LAST-MODIFIED, Wed, 29 Aug 2012 23:49:27 GMT)) -1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, SERVER, Apache/2.4.3 (Fedora))) -1362692527.009512 MetaHookPre QueueEvent(http_reply([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, 
orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK)) -1362692527.009512 MetaHookPre UpdateNetworkTime(1362692527.009512) -1362692527.009512 | HookUpdateNetworkTime 1362692527.009512 -1362692527.009512 | HookCallFunction HTTP::code_in_range(200, 100, 199) -1362692527.009512 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F) -1362692527.009512 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, 
referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F) -1362692527.009512 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F) -1362692527.009512 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, 
method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F) -1362692527.009512 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F) -1362692527.009512 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, 
resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F) -1362692527.009512 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F) -1362692527.009512 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, 
uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F) -1362692527.009512 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F) -1362692527.009512 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, 
resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F) -1362692527.009512 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F) -1362692527.009512 | HookCallFunction http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, 
orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes) -1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, 
resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive) -1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-LENGTH, 4705) -1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, 
password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-TYPE, text/plain; charset=UTF-8) -1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, DATE, Thu, 07 Mar 2013 21:43:07 GMT) -1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), 
request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ETAG, "1261-4c870358a6fc0") -1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, KEEP-ALIVE, timeout=5, max=100) -1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, 
method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, LAST-MODIFIED, Wed, 29 Aug 2012 23:49:27 GMT) -1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, SERVER, Apache/2.4.3 (Fedora)) -1362692527.009512 | HookCallFunction http_reply([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, 
uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK) -1362692527.009512 | HookDrainEvents -1362692527.009512 | HookDrainEvents -1362692527.009512 | HookQueueEvent http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, 
dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes) -1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive) -1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, 
dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-LENGTH, 4705) -1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-TYPE, text/plain; charset=UTF-8) -1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, 
conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, DATE, Thu, 07 Mar 2013 21:43:07 GMT) -1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ETAG, "1261-4c870358a6fc0") -1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, 
uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, KEEP-ALIVE, timeout=5, max=100) -1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, LAST-MODIFIED, Wed, 29 Aug 2012 23:49:27 GMT) -1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, 
duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, SERVER, Apache/2.4.3 (Fedora)) -1362692527.009512 | HookQueueEvent http_reply([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=1448, state=4, num_pkts=2, num_bytes_ip=112, flow_label=0], start_time=1362692526.869344, duration=0.140168, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK) -1362692527.009721 MetaHookPost CallFunction(Files::add_analyzers_for_mime_type, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, 
resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. 
(Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=], text/plain)) -> -1362692527.009721 MetaHookPost CallFunction(Files::set_info, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. 
Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=, u2_events=])) -> -1362692527.009721 MetaHookPost CallFunction(Files::set_info, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. 
(Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=])) -> -1362692527.009721 MetaHookPost CallFunction(HTTP::get_file_handle, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> -1362692527.009721 MetaHookPost CallFunction(file_new, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, 
num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. 
(Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=, u2_events=])) -> -1362692527.009721 MetaHookPost CallFunction(file_over_new_connection, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. 
(Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=], [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> -1362692527.009721 MetaHookPost CallFunction(get_file_handle, (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], 
orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> -1362692527.009721 MetaHookPost CallFunction(id_string, ([orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp])) -> -1362692527.009721 MetaHookPost DrainEvents() -> -1362692527.009721 MetaHookPost DrainEvents() -> -1362692527.009721 MetaHookPost DrainEvents() -> -1362692527.009721 MetaHookPost DrainEvents() -> -1362692527.009721 MetaHookPost QueueEvent(file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. 
(Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=, u2_events=])) -> false -1362692527.009721 MetaHookPost QueueEvent(file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, 
bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=, u2_events=], [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false -1362692527.009721 MetaHookPost QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, 
resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false -1362692527.009721 MetaHookPost UpdateNetworkTime(1362692527.009721) -> -1362692527.009721 MetaHookPre CallFunction(Files::add_analyzers_for_mime_type, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, 
current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=], text/plain)) -1362692527.009721 MetaHookPre CallFunction(Files::set_info, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], 
http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. 
(Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=, u2_events=])) -1362692527.009721 MetaHookPre CallFunction(Files::set_info, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. 
(Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=])) -1362692527.009721 MetaHookPre CallFunction(HTTP::get_file_handle, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009721 MetaHookPre CallFunction(file_new, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, 
filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. 
(Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=, u2_events=])) -1362692527.009721 MetaHookPre CallFunction(file_over_new_connection, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. 
(Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=], [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009721 MetaHookPre CallFunction(get_file_handle, (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], 
orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009721 MetaHookPre CallFunction(id_string, ([orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp])) -1362692527.009721 MetaHookPre DrainEvents() -1362692527.009721 MetaHookPre DrainEvents() -1362692527.009721 MetaHookPre DrainEvents() -1362692527.009721 MetaHookPre DrainEvents() -1362692527.009721 MetaHookPre QueueEvent(file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. 
(Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=, u2_events=])) -1362692527.009721 MetaHookPre QueueEvent(file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, 
bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=, u2_events=], [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009721 MetaHookPre QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], 
orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009721 MetaHookPre UpdateNetworkTime(1362692527.009721) -1362692527.009721 | HookUpdateNetworkTime 1362692527.009721 -1362692527.009721 | HookCallFunction Files::add_analyzers_for_mime_type([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, 
resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. 
(Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=], text/plain) -1362692527.009721 | HookCallFunction Files::set_info([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. 
Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=, u2_events=]) -1362692527.009721 | HookCallFunction Files::set_info([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. 
(Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=]) -1362692527.009721 | HookCallFunction HTTP::get_file_handle([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009721 | HookCallFunction file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, 
tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. 
(Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=, u2_events=]) -1362692527.009721 | HookCallFunction file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. 
(Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=], [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009721 | HookCallFunction get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, 
resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009721 | HookCallFunction id_string([orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]) -1362692527.009721 | HookDrainEvents -1362692527.009721 | HookDrainEvents -1362692527.009721 | HookDrainEvents -1362692527.009721 | HookDrainEvents -1362692527.009721 | HookQueueEvent file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. 
(Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=, u2_events=]) -1362692527.009721 | HookQueueEvent file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009721, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. 
(Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=, u2_events=], [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009721 | HookQueueEvent get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, 
service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009765 MetaHookPost DrainEvents() -> -1362692527.009765 MetaHookPost DrainEvents() -> -1362692527.009765 MetaHookPost UpdateNetworkTime(1362692527.009765) -> -1362692527.009765 MetaHookPre DrainEvents() -1362692527.009765 MetaHookPre DrainEvents() -1362692527.009765 MetaHookPre UpdateNetworkTime(1362692527.009765) -1362692527.009765 | HookUpdateNetworkTime 1362692527.009765 -1362692527.009765 | HookDrainEvents -1362692527.009765 | HookDrainEvents -1362692527.009775 MetaHookPost CallFunction(Files::set_info, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, 
status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009775, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. 
(Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=])) -> -1362692527.009775 MetaHookPost CallFunction(HTTP::code_in_range, (200, 100, 199)) -> -1362692527.009775 MetaHookPost CallFunction(HTTP::get_file_handle, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> -1362692527.009775 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], 
resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -> -1362692527.009775 MetaHookPost CallFunction(Log::default_path_func, (Files::LOG, , [ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=53.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=])) -> -1362692527.009775 MetaHookPost CallFunction(Log::default_path_func, (HTTP::LOG, , [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1])) -> -1362692527.009775 MetaHookPost CallFunction(Log::write, (Files::LOG, [ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=53.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=])) -> -1362692527.009775 MetaHookPost CallFunction(Log::write, (HTTP::LOG, [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1])) -> -1362692527.009775 MetaHookPost CallFunction(file_state_remove, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], 
start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009775, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. 
(Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=])) -> -1362692527.009775 MetaHookPost CallFunction(get_file_handle, (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> -1362692527.009775 MetaHookPost CallFunction(http_end_entity, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, 
resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> -1362692527.009775 MetaHookPost CallFunction(http_message_done, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, [start=1362692527.009512, interrupted=F, finish_msg=message ends normally, body_length=4705, content_gap_length=0, header_length=280])) -> -1362692527.009775 MetaHookPost CallFunction(id_string, ([orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp])) -> -1362692527.009775 MetaHookPost DrainEvents() -> -1362692527.009775 MetaHookPost DrainEvents() -> -1362692527.009775 MetaHookPost DrainEvents() -> -1362692527.009775 MetaHookPost DrainEvents() -> -1362692527.009775 MetaHookPost DrainEvents() -> -1362692527.009775 MetaHookPost QueueEvent(file_state_remove([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, 
num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009775, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. 
(Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=])) -> false -1362692527.009775 MetaHookPost QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false -1362692527.009775 MetaHookPost QueueEvent(http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], 
orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false -1362692527.009775 MetaHookPost QueueEvent(http_message_done([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, [start=1362692527.009512, interrupted=F, finish_msg=message ends normally, body_length=4705, content_gap_length=0, header_length=280])) -> false -1362692527.009775 MetaHookPost UpdateNetworkTime(1362692527.009775) -> -1362692527.009775 MetaHookPre CallFunction(Files::set_info, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, 
http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009775, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. 
(Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=])) -1362692527.009775 MetaHookPre CallFunction(HTTP::code_in_range, (200, 100, 199)) -1362692527.009775 MetaHookPre CallFunction(HTTP::get_file_handle, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009775 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], 
current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -1362692527.009775 MetaHookPre CallFunction(Log::default_path_func, (Files::LOG, , [ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=53.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=])) -1362692527.009775 MetaHookPre CallFunction(Log::default_path_func, (HTTP::LOG, , [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1])) -1362692527.009775 MetaHookPre CallFunction(Log::write, (Files::LOG, [ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=53.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=])) -1362692527.009775 MetaHookPre CallFunction(Log::write, (HTTP::LOG, [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1])) -1362692527.009775 MetaHookPre CallFunction(file_state_remove, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, 
addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009775, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. 
(Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=])) -1362692527.009775 MetaHookPre CallFunction(get_file_handle, (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009775 MetaHookPre CallFunction(http_end_entity, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], 
http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009775 MetaHookPre CallFunction(http_message_done, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, [start=1362692527.009512, interrupted=F, finish_msg=message ends normally, body_length=4705, content_gap_length=0, header_length=280])) -1362692527.009775 MetaHookPre CallFunction(id_string, ([orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp])) -1362692527.009775 MetaHookPre DrainEvents() -1362692527.009775 MetaHookPre DrainEvents() -1362692527.009775 MetaHookPre DrainEvents() -1362692527.009775 MetaHookPre DrainEvents() -1362692527.009775 MetaHookPre DrainEvents() -1362692527.009775 MetaHookPre QueueEvent(file_state_remove([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], 
start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009775, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. 
(Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=])) -1362692527.009775 MetaHookPre QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009775 MetaHookPre QueueEvent(http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, 
resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009775 MetaHookPre QueueEvent(http_message_done([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, [start=1362692527.009512, interrupted=F, finish_msg=message ends normally, body_length=4705, content_gap_length=0, header_length=280])) -1362692527.009775 MetaHookPre UpdateNetworkTime(1362692527.009775) -1362692527.009775 | HookUpdateNetworkTime 1362692527.009775 -1362692527.009775 | HookCallFunction Files::set_info([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, 
http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009775, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. 
(Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=]) -1362692527.009775 | HookCallFunction HTTP::code_in_range(200, 100, 199) -1362692527.009775 | HookCallFunction HTTP::get_file_handle([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009775 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, 
orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F) -1362692527.009775 | HookCallFunction Log::default_path_func(Files::LOG, , [ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=53.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=]) -1362692527.009775 | HookCallFunction Log::default_path_func(HTTP::LOG, , [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]) -1362692527.009775 | HookCallFunction Log::write(Files::LOG, [ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=53.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=]) -1362692527.009775 | HookCallFunction Log::write(HTTP::LOG, [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]) -1362692527.009775 | HookCallFunction file_state_remove([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, 
dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009775, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. 
(Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=]) -1362692527.009775 | HookCallFunction get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009775 | HookCallFunction http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], 
http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009775 | HookCallFunction http_message_done([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, [start=1362692527.009512, interrupted=F, finish_msg=message ends normally, body_length=4705, content_gap_length=0, header_length=280]) -1362692527.009775 | HookCallFunction id_string([orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]) -1362692527.009775 | HookDrainEvents -1362692527.009775 | HookDrainEvents -1362692527.009775 | HookDrainEvents -1362692527.009775 | HookDrainEvents -1362692527.009775 | HookDrainEvents -1362692527.009775 | HookQueueEvent file_state_remove([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]] = [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=2896, state=4, num_pkts=3, num_bytes_ip=1612, flow_label=0], start_time=1362692526.869344, duration=0.140377, service={HTTP}, addl=, hot=0, 
history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009775, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. 
(Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=]) -1362692527.009775 | HookQueueEvent get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009775 | HookQueueEvent http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], 
http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009775 | HookQueueEvent http_message_done([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=4, num_pkts=3, num_bytes_ip=304, flow_label=0], resp=[size=5007, state=4, num_pkts=5, num_bytes_ip=4612, flow_label=0], start_time=1362692526.869344, duration=0.140431, service={HTTP}, addl=, hot=0, history=ShADad, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={[1] = [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, [start=1362692527.009512, interrupted=F, finish_msg=message ends normally, body_length=4705, content_gap_length=0, header_length=280]) -1362692527.009855 MetaHookPost DrainEvents() -> -1362692527.009855 MetaHookPost DrainEvents() -> -1362692527.009855 MetaHookPost UpdateNetworkTime(1362692527.009855) -> -1362692527.009855 MetaHookPre DrainEvents() -1362692527.009855 MetaHookPre DrainEvents() -1362692527.009855 MetaHookPre UpdateNetworkTime(1362692527.009855) -1362692527.009855 | HookUpdateNetworkTime 1362692527.009855 -1362692527.009855 | HookDrainEvents -1362692527.009855 | HookDrainEvents -1362692527.009887 MetaHookPost DrainEvents() -> -1362692527.009887 MetaHookPost DrainEvents() -> -1362692527.009887 MetaHookPost UpdateNetworkTime(1362692527.009887) -> -1362692527.009887 MetaHookPre DrainEvents() -1362692527.009887 MetaHookPre DrainEvents() -1362692527.009887 MetaHookPre UpdateNetworkTime(1362692527.009887) -1362692527.009887 | 
HookUpdateNetworkTime 1362692527.009887 -1362692527.009887 | HookDrainEvents -1362692527.009887 | HookDrainEvents -1362692527.011846 MetaHookPost DrainEvents() -> -1362692527.011846 MetaHookPost DrainEvents() -> -1362692527.011846 MetaHookPost UpdateNetworkTime(1362692527.011846) -> -1362692527.011846 MetaHookPre DrainEvents() -1362692527.011846 MetaHookPre DrainEvents() -1362692527.011846 MetaHookPre UpdateNetworkTime(1362692527.011846) -1362692527.011846 | HookUpdateNetworkTime 1362692527.011846 -1362692527.011846 | HookDrainEvents -1362692527.011846 | HookDrainEvents -1362692527.080828 MetaHookPost DrainEvents() -> -1362692527.080828 MetaHookPost DrainEvents() -> -1362692527.080828 MetaHookPost UpdateNetworkTime(1362692527.080828) -> -1362692527.080828 MetaHookPre DrainEvents() -1362692527.080828 MetaHookPre DrainEvents() -1362692527.080828 MetaHookPre UpdateNetworkTime(1362692527.080828) -1362692527.080828 | HookUpdateNetworkTime 1362692527.080828 -1362692527.080828 | HookDrainEvents -1362692527.080828 | HookDrainEvents -1362692527.080972 MetaHookPost CallFunction(ChecksumOffloading::check, ()) -> -1362692527.080972 MetaHookPost CallFunction(ChecksumOffloading::check, ()) -> -1362692527.080972 MetaHookPost CallFunction(Conn::conn_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=[ts=1362692526.869344, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], proto=tcp, service=http, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=, local_orig=, missed_bytes=0, history=, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, tunnel_parents={}], extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], tcp)) -> -1362692527.080972 MetaHookPost CallFunction(Conn::determine_service, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=[ts=1362692526.869344, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], proto=tcp, service=, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=, local_orig=, missed_bytes=0, history=, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, 
resp_ip_bytes=5379, tunnel_parents={}], extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> -1362692527.080972 MetaHookPost CallFunction(Conn::set_conn, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> -1362692527.080972 MetaHookPost CallFunction(HTTP::get_file_handle, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> -1362692527.080972 MetaHookPost CallFunction(Log::default_path_func, (Conn::LOG, , [ts=1362692526.869344, uid=CXWv6p3arKYeMETxOg, 
id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], proto=tcp, service=http, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=SF, local_orig=, missed_bytes=0, history=ShADadFf, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, tunnel_parents={}])) -> -1362692527.080972 MetaHookPost CallFunction(Log::write, (Conn::LOG, [ts=1362692526.869344, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], proto=tcp, service=http, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=SF, local_orig=, missed_bytes=0, history=ShADadFf, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, tunnel_parents={}])) -> -1362692527.080972 MetaHookPost CallFunction(bro_done, ()) -> -1362692527.080972 MetaHookPost CallFunction(connection_state_remove, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> -1362692527.080972 MetaHookPost CallFunction(filter_change_tracking, ()) -> -1362692527.080972 MetaHookPost CallFunction(get_file_handle, (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> -1362692527.080972 MetaHookPost CallFunction(id_string, ([orig_h=141.142.228.5, orig_p=59856/tcp, 
resp_h=192.150.187.43, resp_p=80/tcp])) -> -1362692527.080972 MetaHookPost CallFunction(net_done, (1362692527.080972)) -> -1362692527.080972 MetaHookPost DrainEvents() -> -1362692527.080972 MetaHookPost DrainEvents() -> -1362692527.080972 MetaHookPost DrainEvents() -> -1362692527.080972 MetaHookPost DrainEvents() -> -1362692527.080972 MetaHookPost DrainEvents() -> -1362692527.080972 MetaHookPost DrainEvents() -> -1362692527.080972 MetaHookPost DrainEvents() -> -1362692527.080972 MetaHookPost DrainEvents() -> -1362692527.080972 MetaHookPost DrainEvents() -> -1362692527.080972 MetaHookPost DrainEvents() -> -1362692527.080972 MetaHookPost DrainEvents() -> -1362692527.080972 MetaHookPost DrainEvents() -> -1362692527.080972 MetaHookPost DrainEvents() -> -1362692527.080972 MetaHookPost QueueEvent(ChecksumOffloading::check()) -> false -1362692527.080972 MetaHookPost QueueEvent(ChecksumOffloading::check()) -> false -1362692527.080972 MetaHookPost QueueEvent(bro_done()) -> false -1362692527.080972 MetaHookPost QueueEvent(connection_state_remove([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> false -1362692527.080972 MetaHookPost QueueEvent(filter_change_tracking()) -> false -1362692527.080972 MetaHookPost QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, 
modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> false -1362692527.080972 MetaHookPost UpdateNetworkTime(1362692527.080972) -> -1362692527.080972 MetaHookPre CallFunction(ChecksumOffloading::check, ()) -1362692527.080972 MetaHookPre CallFunction(ChecksumOffloading::check, ()) -1362692527.080972 MetaHookPre CallFunction(Conn::conn_state, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=[ts=1362692526.869344, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], proto=tcp, service=http, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=, local_orig=, missed_bytes=0, history=, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, tunnel_parents={}], extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], tcp)) -1362692527.080972 MetaHookPre CallFunction(Conn::determine_service, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=[ts=1362692526.869344, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], proto=tcp, service=, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=, local_orig=, missed_bytes=0, history=, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, tunnel_parents={}], extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -1362692527.080972 MetaHookPre 
CallFunction(Conn::set_conn, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692527.080972 MetaHookPre CallFunction(HTTP::get_file_handle, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692527.080972 MetaHookPre CallFunction(Log::default_path_func, (Conn::LOG, , [ts=1362692526.869344, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], proto=tcp, service=http, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=SF, local_orig=, missed_bytes=0, history=ShADadFf, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, tunnel_parents={}])) -1362692527.080972 MetaHookPre CallFunction(Log::write, (Conn::LOG, [ts=1362692526.869344, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], proto=tcp, service=http, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=SF, local_orig=, missed_bytes=0, history=ShADadFf, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, tunnel_parents={}])) -1362692527.080972 MetaHookPre CallFunction(bro_done, ()) -1362692527.080972 MetaHookPre CallFunction(connection_state_remove, ([id=[orig_h=141.142.228.5, orig_p=59856/tcp, 
resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -1362692527.080972 MetaHookPre CallFunction(filter_change_tracking, ()) -1362692527.080972 MetaHookPre CallFunction(get_file_handle, (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692527.080972 MetaHookPre CallFunction(id_string, ([orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp])) -1362692527.080972 MetaHookPre CallFunction(net_done, (1362692527.080972)) -1362692527.080972 MetaHookPre DrainEvents() -1362692527.080972 MetaHookPre DrainEvents() -1362692527.080972 MetaHookPre DrainEvents() -1362692527.080972 MetaHookPre DrainEvents() -1362692527.080972 MetaHookPre DrainEvents() -1362692527.080972 MetaHookPre DrainEvents() -1362692527.080972 MetaHookPre DrainEvents() -1362692527.080972 MetaHookPre DrainEvents() -1362692527.080972 MetaHookPre DrainEvents() -1362692527.080972 MetaHookPre DrainEvents() -1362692527.080972 MetaHookPre DrainEvents() -1362692527.080972 MetaHookPre DrainEvents() -1362692527.080972 MetaHookPre DrainEvents() -1362692527.080972 MetaHookPre QueueEvent(ChecksumOffloading::check()) -1362692527.080972 MetaHookPre QueueEvent(ChecksumOffloading::check()) -1362692527.080972 MetaHookPre QueueEvent(bro_done()) -1362692527.080972 MetaHookPre 
QueueEvent(connection_state_remove([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -1362692527.080972 MetaHookPre QueueEvent(filter_change_tracking()) -1362692527.080972 MetaHookPre QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692527.080972 MetaHookPre UpdateNetworkTime(1362692527.080972) -1362692527.080972 | HookUpdateNetworkTime 1362692527.080972 -1362692527.080972 | HookCallFunction ChecksumOffloading::check() -1362692527.080972 | HookCallFunction ChecksumOffloading::check() -1362692527.080972 | HookCallFunction Conn::conn_state([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=[ts=1362692526.869344, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], proto=tcp, service=http, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=, local_orig=, missed_bytes=0, history=, orig_pkts=7, 
orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, tunnel_parents={}], extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], tcp) -1362692527.080972 | HookCallFunction Conn::determine_service([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=[ts=1362692526.869344, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], proto=tcp, service=, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=, local_orig=, missed_bytes=0, history=, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, tunnel_parents={}], extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]) -1362692527.080972 | HookCallFunction Conn::set_conn([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], 
resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692527.080972 | HookCallFunction HTTP::get_file_handle([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692527.080972 | HookCallFunction Log::default_path_func(Conn::LOG, , [ts=1362692526.869344, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], proto=tcp, service=http, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=SF, local_orig=, missed_bytes=0, history=ShADadFf, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, tunnel_parents={}]) -1362692527.080972 | HookCallFunction Log::write(Conn::LOG, [ts=1362692526.869344, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], proto=tcp, service=http, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=SF, local_orig=, missed_bytes=0, history=ShADadFf, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, tunnel_parents={}]) -1362692527.080972 | HookCallFunction bro_done() -1362692527.080972 | HookCallFunction connection_state_remove([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, 
current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]) -1362692527.080972 | HookCallFunction filter_change_tracking() -1362692527.080972 | HookCallFunction get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692527.080972 | HookCallFunction id_string([orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]) -1362692527.080972 | HookCallFunction net_done(1362692527.080972) -1362692527.080972 | HookDrainEvents -1362692527.080972 | HookDrainEvents -1362692527.080972 | HookDrainEvents -1362692527.080972 | HookDrainEvents -1362692527.080972 | HookDrainEvents -1362692527.080972 | HookDrainEvents -1362692527.080972 | HookDrainEvents -1362692527.080972 | HookDrainEvents -1362692527.080972 | HookDrainEvents -1362692527.080972 | HookDrainEvents -1362692527.080972 | HookDrainEvents -1362692527.080972 | HookDrainEvents -1362692527.080972 | HookDrainEvents -1362692527.080972 | HookQueueEvent ChecksumOffloading::check() -1362692527.080972 | HookQueueEvent ChecksumOffloading::check() -1362692527.080972 | HookQueueEvent bro_done() -1362692527.080972 | HookQueueEvent connection_state_remove([id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=]) 
-1362692527.080972 | HookQueueEvent filter_change_tracking() -1362692527.080972 | HookQueueEvent get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], orig=[size=136, state=5, num_pkts=7, num_bytes_ip=512, flow_label=0], resp=[size=5007, state=5, num_pkts=7, num_bytes_ip=5379, flow_label=0], start_time=1362692526.869344, duration=0.211484, service={HTTP}, addl=, hot=0, history=ShADadFf, uid=CXWv6p3arKYeMETxOg, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=[ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp], trans_depth=1, method=GET, host=bro.org, uri=/download/CHANGES.bro-aux.txt, referrer=, user_agent=Wget/1.14 (darwin12.2.0), request_body_len=0, response_body_len=4705, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_mime_types=[text/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) diff --git a/testing/btest/Baseline/core.print-bpf-filters/conn.log b/testing/btest/Baseline/core.print-bpf-filters/conn.log index f14621c261..7f4eaf9740 100644 --- a/testing/btest/Baseline/core.print-bpf-filters/conn.log +++ b/testing/btest/Baseline/core.print-bpf-filters/conn.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path conn -#open 2019-03-12-03-25-14 +#open 2019-06-07-02-20-04 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig local_resp missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool bool count string count count count count set[string] 1278600802.069419 CHhAvVGS1DHFjwGM9 10.20.80.1 50343 10.0.0.15 80 tcp - 0.004152 9 3429 SF - - 0 ShADadfF 7 381 7 3801 - -#close 2019-03-12-03-25-14 +#close 2019-06-07-02-20-04 diff --git a/testing/btest/Baseline/core.print-bpf-filters/output b/testing/btest/Baseline/core.print-bpf-filters/output index d8067da821..84caba4d8c 100644 --- a/testing/btest/Baseline/core.print-bpf-filters/output +++ b/testing/btest/Baseline/core.print-bpf-filters/output @@ -3,28 +3,28 @@ #empty_field (empty) #unset_field - #path packet_filter -#open 2019-03-12-03-25-12 +#open 2019-06-07-02-20-03 #fields ts node filter init success #types time string string bool bool -1552361112.763592 bro ip or not ip T T -#close 2019-03-12-03-25-12 +1559874003.309984 zeek ip or not ip T T +#close 2019-06-07-02-20-03 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path packet_filter -#open 2019-03-12-03-25-13 +#open 2019-06-07-02-20-03 #fields ts node filter init success #types time string string bool bool -1552361113.442916 bro port 42 T T -#close 2019-03-12-03-25-13 +1559874003.872388 zeek port 42 T T +#close 2019-06-07-02-20-03 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path packet_filter -#open 2019-03-12-03-25-14 +#open 2019-06-07-02-20-04 #fields ts node filter init success #types time string string bool bool -1552361114.111534 bro (vlan) and (ip or not ip) T T -#close 2019-03-12-03-25-14 +1559874004.312190 zeek (vlan) 
and (ip or not ip) T T +#close 2019-06-07-02-20-04 diff --git a/testing/btest/Baseline/core.print-bpf-filters/output2 b/testing/btest/Baseline/core.print-bpf-filters/output2 index d46c6b1f1f..9f2e8a5002 100644 --- a/testing/btest/Baseline/core.print-bpf-filters/output2 +++ b/testing/btest/Baseline/core.print-bpf-filters/output2 @@ -1,4 +1,5 @@ 2 1080 +1 123 1 135 1 137 1 139 @@ -55,8 +56,8 @@ 1 992 1 993 1 995 -62 and -61 or -62 port +63 and +62 or +63 port 42 tcp -20 udp +21 udp diff --git a/testing/btest/Baseline/core.reporter-error-in-handler/output b/testing/btest/Baseline/core.reporter-error-in-handler/output index ab5309b659..85014657a3 100644 --- a/testing/btest/Baseline/core.reporter-error-in-handler/output +++ b/testing/btest/Baseline/core.reporter-error-in-handler/output @@ -1,3 +1,3 @@ -expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter-error-in-handler/reporter-error-in-handler.bro, line 28: no such index (a[1]) -expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter-error-in-handler/reporter-error-in-handler.bro, line 22: no such index (a[2]) +expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter-error-in-handler/reporter-error-in-handler.zeek, line 28: no such index (a[1]) +expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter-error-in-handler/reporter-error-in-handler.zeek, line 22: no such index (a[2]) 1st error printed on script level diff --git a/testing/btest/Baseline/core.reporter-fmt-strings/output b/testing/btest/Baseline/core.reporter-fmt-strings/output index bbd76f3447..4e31478caa 100644 --- a/testing/btest/Baseline/core.reporter-fmt-strings/output +++ b/testing/btest/Baseline/core.reporter-fmt-strings/output @@ -1 +1 @@ -error in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter-fmt-strings/reporter-fmt-strings.bro, line 9: not an event (dont_interpret_this(%s)) +error in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter-fmt-strings/reporter-fmt-strings.zeek, line 9: not an event (dont_interpret_this(%s)) diff --git a/testing/btest/Baseline/core.reporter-parse-error/output b/testing/btest/Baseline/core.reporter-parse-error/output index 76535f75d1..4dd922fd24 100644 --- a/testing/btest/Baseline/core.reporter-parse-error/output +++ b/testing/btest/Baseline/core.reporter-parse-error/output @@ -1 +1 @@ -error in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter-parse-error/reporter-parse-error.bro, line 7: unknown identifier TESTFAILURE, at or near "TESTFAILURE" +error in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter-parse-error/reporter-parse-error.zeek, line 7: unknown identifier TESTFAILURE, at or near "TESTFAILURE" diff --git a/testing/btest/Baseline/core.reporter-runtime-error/output b/testing/btest/Baseline/core.reporter-runtime-error/output index 695e2e2f81..7e0ab11845 100644 --- a/testing/btest/Baseline/core.reporter-runtime-error/output +++ b/testing/btest/Baseline/core.reporter-runtime-error/output @@ -1,2 +1,2 @@ -expression error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/core.reporter-runtime-error/reporter-runtime-error.bro, line 12: no such index (a[1]) +expression error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/core.reporter-runtime-error/reporter-runtime-error.zeek, line 12: no such index (a[1]) fatal error: failed to execute script statements at top-level scope diff --git a/testing/btest/Baseline/core.reporter-type-mismatch/output 
b/testing/btest/Baseline/core.reporter-type-mismatch/output index 23eefd13e8..d54e6e2b9b 100644 --- a/testing/btest/Baseline/core.reporter-type-mismatch/output +++ b/testing/btest/Baseline/core.reporter-type-mismatch/output @@ -1,3 +1,3 @@ -error in string and /da/home/robin/bro/master/testing/btest/.tmp/core.reporter-type-mismatch/reporter-type-mismatch.bro, line 11: arithmetic mixed with non-arithmetic (string and 42) -error in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter-type-mismatch/reporter-type-mismatch.bro, line 11 and string: type mismatch (42 and string) -error in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter-type-mismatch/reporter-type-mismatch.bro, line 11: argument type mismatch in event invocation (foo(42)) +error in string and /da/home/robin/bro/master/testing/btest/.tmp/core.reporter-type-mismatch/reporter-type-mismatch.zeek, line 11: arithmetic mixed with non-arithmetic (string and 42) +error in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter-type-mismatch/reporter-type-mismatch.zeek, line 11 and string: type mismatch (42 and string) +error in /da/home/robin/bro/master/testing/btest/.tmp/core.reporter-type-mismatch/reporter-type-mismatch.zeek, line 11: argument type mismatch in event invocation (foo(42)) diff --git a/testing/btest/Baseline/core.reporter/logger-test.log b/testing/btest/Baseline/core.reporter/logger-test.log index 4ee0d03341..1dc58b65cd 100644 --- a/testing/btest/Baseline/core.reporter/logger-test.log +++ b/testing/btest/Baseline/core.reporter/logger-test.log @@ -1,6 +1,6 @@ -reporter_info|init test-info|/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 8|0.000000 -reporter_warning|init test-warning|/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 9|0.000000 -reporter_error|init test-error|/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 10|0.000000 -reporter_info|done test-info|/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 15|0.000000 -reporter_warning|done test-warning|/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 16|0.000000 -reporter_error|done test-error|/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 17|0.000000 +reporter_info|init test-info|/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.zeek, line 8|0.000000 +reporter_warning|init test-warning|/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.zeek, line 9|0.000000 +reporter_error|init test-error|/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.zeek, line 10|0.000000 +reporter_info|done test-info|/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.zeek, line 15|0.000000 +reporter_warning|done test-warning|/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.zeek, line 16|0.000000 +reporter_error|done test-error|/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.zeek, line 17|0.000000 diff --git a/testing/btest/Baseline/core.reporter/output b/testing/btest/Baseline/core.reporter/output index 24a12f9679..12069545ba 100644 --- a/testing/btest/Baseline/core.reporter/output +++ b/testing/btest/Baseline/core.reporter/output @@ -1,9 +1,9 @@ -/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 52: pre test-info -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.bro, 
line 53: pre test-warning -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 54: pre test-error -/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 8: init test-info -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 9: init test-warning -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 10: init test-error -/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 15: done test-info -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 16: done test-warning -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.bro, line 17: done test-error +/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.zeek, line 52: pre test-info +warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.zeek, line 53: pre test-warning +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.zeek, line 54: pre test-error +/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.zeek, line 8: init test-info +warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.zeek, line 9: init test-warning +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.zeek, line 10: init test-error +/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.zeek, line 15: done test-info +warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.zeek, line 16: done test-warning +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/core.reporter/reporter.zeek, line 17: done test-error diff --git a/testing/btest/Baseline/core.tcp.large-file-reassembly/conn.log b/testing/btest/Baseline/core.tcp.large-file-reassembly/conn.log index 8da44df913..fbb4a71369 100644 --- a/testing/btest/Baseline/core.tcp.large-file-reassembly/conn.log +++ b/testing/btest/Baseline/core.tcp.large-file-reassembly/conn.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path conn -#open 2016-07-13-16-13-01 +#open 2019-04-19-18-10-57 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig local_resp missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool bool count string count count count count set[string] 1395939406.175845 ClEkJM2Vm5giqnMf4h 192.168.56.1 59763 192.168.56.101 63988 tcp ftp-data 0.001676 0 270 SF - - 0 ShAdfFa 5 272 4 486 - -1395939411.361078 C4J4Th3PJpwUYZZ6gc 192.168.56.1 59764 192.168.56.101 37150 tcp ftp-data 150.496065 0 5416666670 SF - - 4675708816 ShAdfFa 13 688 12 24454 - +1395939411.361078 C4J4Th3PJpwUYZZ6gc 192.168.56.1 59764 192.168.56.101 37150 tcp ftp-data 150.496065 0 5416666670 SF - - 5416642848 ShAdgfFa 13 688 12 24454 - 1395939399.984671 CHhAvVGS1DHFjwGM9 192.168.56.1 59762 192.168.56.101 21 tcp ftp 169.634297 104 1041 SF - - 0 ShAdDaFf 31 1728 18 1985 - -#close 2016-07-13-16-13-01 +#close 2019-04-19-18-10-57 diff --git a/testing/btest/Baseline/core.tcp.large-file-reassembly/files.log b/testing/btest/Baseline/core.tcp.large-file-reassembly/files.log index 31087d58cc..15de6047b6 100644 --- a/testing/btest/Baseline/core.tcp.large-file-reassembly/files.log +++ 
b/testing/btest/Baseline/core.tcp.large-file-reassembly/files.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path files -#open 2017-01-25-07-03-11 +#open 2019-04-17-20-41-29 #fields ts fuid tx_hosts rx_hosts conn_uids source depth analyzers mime_type filename duration local_orig is_orig seen_bytes total_bytes missing_bytes overflow_bytes timedout parent_fuid md5 sha1 sha256 extracted extracted_cutoff extracted_size #types time string set[addr] set[addr] set[string] string count set[string] string string interval bool bool count count count count bool string string string string string bool count 1395939406.177079 FAb5m22Dhe2Zi95anf 192.168.56.101 192.168.56.1 ClEkJM2Vm5giqnMf4h FTP_DATA 0 DATA_EVENT text/plain - 0.000000 - F 270 - 0 0 F - - - - - - - 1395939411.364462 FhI0ao2FNTjabdfSBd 192.168.56.101 192.168.56.1 C4J4Th3PJpwUYZZ6gc FTP_DATA 0 DATA_EVENT text/plain - 150.490904 - F 23822 - 5416642848 0 F - - - - - - - -#close 2017-01-25-07-03-11 +#close 2019-04-17-20-41-29 diff --git a/testing/btest/Baseline/core.tcp.miss-end-data/conn.log b/testing/btest/Baseline/core.tcp.miss-end-data/conn.log index b33aec3366..e8d6102398 100644 --- a/testing/btest/Baseline/core.tcp.miss-end-data/conn.log +++ b/testing/btest/Baseline/core.tcp.miss-end-data/conn.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path conn -#open 2016-07-13-16-13-02 +#open 2019-04-19-18-11-06 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig local_resp missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool bool count string count count count count set[string] -1331764471.664131 CHhAvVGS1DHFjwGM9 192.168.122.230 60648 77.238.160.184 80 tcp http 10.048360 538 2902 SF - - 2902 ShADafF 5 750 4 172 - -#close 2016-07-13-16-13-02 +1331764471.664131 CHhAvVGS1DHFjwGM9 192.168.122.230 60648 77.238.160.184 80 tcp http 10.048360 538 2902 SF - - 2902 ShADafgF 5 750 4 172 - +#close 2019-04-19-18-11-07 diff --git a/testing/btest/Baseline/core.tcp.rxmit-history/conn-1.log b/testing/btest/Baseline/core.tcp.rxmit-history/conn-1.log index 43daf101a3..466f882257 100644 --- a/testing/btest/Baseline/core.tcp.rxmit-history/conn-1.log +++ b/testing/btest/Baseline/core.tcp.rxmit-history/conn-1.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path conn -#open 2018-01-12-21-43-34 +#open 2019-04-17-20-42-43 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig local_resp missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool bool count string count count count count set[string] -1285862902.700271 CHhAvVGS1DHFjwGM9 10.0.88.85 50368 192.168.0.27 80 tcp - 60.991770 474 23783 RSTO - - 24257 ShADadtR 17 1250 22 28961 - -#close 2018-01-12-21-43-34 +1285862902.700271 CHhAvVGS1DHFjwGM9 10.0.88.85 50368 192.168.0.27 80 tcp - 60.991770 474 23783 RSTO - - 24257 ShADaGdgtR 17 1250 22 28961 - +#close 2019-04-17-20-42-43 diff --git a/testing/btest/Baseline/core.tcp.rxmit-history/conn-2.log b/testing/btest/Baseline/core.tcp.rxmit-history/conn-2.log index 22d4ec3ab9..e75d9487d0 100644 --- a/testing/btest/Baseline/core.tcp.rxmit-history/conn-2.log +++ b/testing/btest/Baseline/core.tcp.rxmit-history/conn-2.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path conn -#open 
2018-01-12-21-43-35 +#open 2019-04-17-20-42-44 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig local_resp missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool bool count string count count count count set[string] 1300475167.096535 CHhAvVGS1DHFjwGM9 141.142.220.202 5353 224.0.0.251 5353 udp dns - - - S0 - - 0 D 1 73 0 0 - @@ -40,4 +40,4 @@ 1300475168.859163 Ck51lg1bScffFj34Ri 141.142.220.118 49998 208.80.152.3 80 tcp http 0.215893 1130 734 S1 - - 0 ShADad 6 1450 4 950 - 1300475168.892936 CtxTCR2Yer0FR1tIBg 141.142.220.118 50000 208.80.152.3 80 tcp http 0.229603 1148 734 S1 - - 0 ShADad 6 1468 4 950 - 1300475168.895267 CLNN1k2QMum1aexUK7 141.142.220.118 50001 208.80.152.3 80 tcp http 0.227284 1178 734 S1 - - 0 ShADad 6 1498 4 950 - -#close 2018-01-12-21-43-35 +#close 2019-04-17-20-42-44 diff --git a/testing/btest/Baseline/core.truncation/output b/testing/btest/Baseline/core.truncation/output index 85acc259ff..8ef1ff8e9d 100644 --- a/testing/btest/Baseline/core.truncation/output +++ b/testing/btest/Baseline/core.truncation/output @@ -3,78 +3,78 @@ #empty_field (empty) #unset_field - #path weird -#open 2017-10-19-17-18-27 +#open 2019-06-07-02-20-03 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1334160095.895421 - - - - - truncated_IP - F bro -#close 2017-10-19-17-18-28 +1334160095.895421 - - - - - truncated_IP - F zeek +#close 2019-06-07-02-20-03 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2017-10-19-17-18-29 +#open 2019-06-07-02-20-03 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1334156241.519125 - - - - - truncated_IP - F bro -#close 2017-10-19-17-18-30 +1334156241.519125 - - - - - truncated_IP - F zeek +#close 2019-06-07-02-20-03 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2017-10-19-17-18-32 +#open 2019-06-07-02-20-04 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1334094648.590126 - - - - - truncated_IP - F bro -#close 2017-10-19-17-18-32 +1334094648.590126 - - - - - truncated_IP - F zeek +#close 2019-06-07-02-20-04 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2017-10-19-17-18-36 +#open 2019-06-07-02-20-05 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1338328954.078361 - - - - - internally_truncated_header - F bro -#close 2017-10-19-17-18-36 +1338328954.078361 - - - - - internally_truncated_header - F zeek +#close 2019-06-07-02-20-05 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2017-10-19-17-18-37 +#open 2019-06-07-02-20-05 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -0.000000 - - - - - truncated_link_header - F bro -#close 2017-10-19-17-18-38 +0.000000 - - - - - truncated_link_header - F zeek +#close 2019-06-07-02-20-05 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2017-10-19-17-18-39 +#open 2019-06-07-02-20-06 
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1508360735.834163 - 163.253.48.183 0 192.150.187.43 0 invalid_IP_header_size - F bro -#close 2017-10-19-17-18-40 +1508360735.834163 - 163.253.48.183 0 192.150.187.43 0 invalid_IP_header_size - F zeek +#close 2019-06-07-02-20-06 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2017-10-19-17-18-41 +#open 2019-06-07-02-20-06 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1508360735.834163 - 163.253.48.183 0 192.150.187.43 0 internally_truncated_header - F bro -#close 2017-10-19-17-18-42 +1508360735.834163 - 163.253.48.183 0 192.150.187.43 0 internally_truncated_header - F zeek +#close 2019-06-07-02-20-06 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2017-10-19-17-18-43 +#open 2019-06-07-02-20-07 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1500557630.000000 - 0.255.0.255 0 15.254.2.1 0 invalid_IP_header_size_in_tunnel - F bro -#close 2017-10-19-17-18-44 +1500557630.000000 - 0.255.0.255 0 15.254.2.1 0 invalid_IP_header_size_in_tunnel - F zeek +#close 2019-06-07-02-20-07 diff --git a/testing/btest/Baseline/core.tunnels.gre/conn.log b/testing/btest/Baseline/core.tunnels.gre/conn.log index 4ab9714ca7..522d1525fc 100644 --- a/testing/btest/Baseline/core.tunnels.gre/conn.log +++ b/testing/btest/Baseline/core.tunnels.gre/conn.log @@ -3,14 +3,14 @@ #empty_field (empty) #unset_field - #path conn -#open 2016-07-13-16-13-04 +#open 2019-06-15-20-36-46 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig local_resp missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool bool count string count count count count set[string] 1055289978.756932 CtPZjS20MLrsMUOJi2 66.59.111.190 40264 172.28.2.3 22 tcp ssh 3.157831 952 1671 SF - - 0 ShAdDaFf 12 1584 10 2199 CHhAvVGS1DHFjwGM9 1055289987.055189 CUM0KZ3MLUfNB0cl11 66.59.111.190 37675 172.28.2.3 53 udp dns 5.001141 66 0 S0 - - 0 D 2 122 0 0 CHhAvVGS1DHFjwGM9 -1055289973.849878 C4J4Th3PJpwUYZZ6gc 66.59.111.190 123 18.26.4.105 123 udp - 0.074086 48 48 SF - - 0 Dd 1 76 1 76 CHhAvVGS1DHFjwGM9 -1055289992.849231 CP5puj4I8PtEU4qzYg 66.59.111.190 123 66.59.111.182 123 udp - 0.056629 48 48 SF - - 0 Dd 1 76 1 76 CHhAvVGS1DHFjwGM9 -1055289996.849099 C37jN32gN3y3AZzyf6 66.59.111.190 123 129.170.17.4 123 udp - 0.072374 48 48 SF - - 0 Dd 1 76 1 76 CHhAvVGS1DHFjwGM9 +1055289973.849878 C4J4Th3PJpwUYZZ6gc 66.59.111.190 123 18.26.4.105 123 udp ntp 0.074086 48 48 SF - - 0 Dd 1 76 1 76 CHhAvVGS1DHFjwGM9 +1055289992.849231 CP5puj4I8PtEU4qzYg 66.59.111.190 123 66.59.111.182 123 udp ntp 0.056629 48 48 SF - - 0 Dd 1 76 1 76 CHhAvVGS1DHFjwGM9 +1055289996.849099 C37jN32gN3y3AZzyf6 66.59.111.190 123 129.170.17.4 123 udp ntp 0.072374 48 48 SF - - 0 Dd 1 76 1 76 CHhAvVGS1DHFjwGM9 1055289968.793044 ClEkJM2Vm5giqnMf4h 66.59.111.190 8 172.28.2.3 0 icmp - 3.061298 224 224 OTH - - 0 - 4 336 4 336 CHhAvVGS1DHFjwGM9 1055289987.106744 CmES5u32sYpV7JYN 172.28.2.3 3 66.59.111.190 3 icmp - 4.994662 122 0 OTH - - 0 - 2 178 0 0 CHhAvVGS1DHFjwGM9 -#close 2016-07-13-16-13-05 +#close 2019-06-15-20-36-46 diff --git 
a/testing/btest/Baseline/core.tunnels.gtp.outer_ip_frag/conn.log b/testing/btest/Baseline/core.tunnels.gtp.outer_ip_frag/conn.log index 4c598b386d..dfa705f258 100644 --- a/testing/btest/Baseline/core.tunnels.gtp.outer_ip_frag/conn.log +++ b/testing/btest/Baseline/core.tunnels.gtp.outer_ip_frag/conn.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path conn -#open 2016-07-13-16-13-10 +#open 2019-04-19-18-10-49 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig local_resp missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool bool count string count count count count set[string] -1333458850.364667 ClEkJM2Vm5giqnMf4h 10.131.47.185 1923 79.101.110.141 80 tcp http 0.069783 2100 56702 SF - - 0 ShADadfF 27 3204 41 52594 CHhAvVGS1DHFjwGM9 +1333458850.364667 ClEkJM2Vm5giqnMf4h 10.131.47.185 1923 79.101.110.141 80 tcp http 0.069783 2100 56702 SF - - 5760 ShADadfgF 27 3204 41 52594 CHhAvVGS1DHFjwGM9 1333458850.364667 CHhAvVGS1DHFjwGM9 239.114.155.111 2152 63.94.149.181 2152 udp gtpv1 0.069813 3420 52922 SF - - 0 Dd 27 4176 41 54070 - -#close 2016-07-13-16-13-10 +#close 2019-04-19-18-10-49 diff --git a/testing/btest/Baseline/core.tunnels.ip-in-ip-version/output b/testing/btest/Baseline/core.tunnels.ip-in-ip-version/output index 728d8e4793..bf3356a6df 100644 --- a/testing/btest/Baseline/core.tunnels.ip-in-ip-version/output +++ b/testing/btest/Baseline/core.tunnels.ip-in-ip-version/output @@ -3,18 +3,18 @@ #empty_field (empty) #unset_field - #path weird -#open 2017-10-19-17-26-34 +#open 2019-06-07-02-20-03 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1500557630.000000 - ff00:0:6929::6904:ff:3bbf 0 ffff:0:69:2900:0:69:400:ff3b 0 invalid_inner_IP_version_in_tunnel - F bro -#close 2017-10-19-17-26-35 +1500557630.000000 - ff00:0:6929::6904:ff:3bbf 0 ffff:0:69:2900:0:69:400:ff3b 0 invalid_inner_IP_version_in_tunnel - F zeek +#close 2019-06-07-02-20-03 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path weird -#open 2017-10-19-17-26-36 +#open 2019-06-07-02-20-03 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1500557630.000000 - b100:7265::6904:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F bro -#close 2017-10-19-17-26-37 +1500557630.000000 - b100:7265::6904:2aff 0 3bbf:ff00:40:21:ffff:ffff:fffd:f7ff 0 invalid_inner_IP_version - F zeek +#close 2019-06-07-02-20-03 diff --git a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/weird.log b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/weird.log index 5ff4c65292..8c80b270b3 100644 --- a/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/weird.log +++ b/testing/btest/Baseline/core.tunnels.teredo_bubble_with_payload/weird.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-13-14 +#open 2019-06-07-01-59-35 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1340127577.341510 CUM0KZ3MLUfNB0cl11 192.168.2.16 3797 83.170.1.38 32900 Teredo_bubble_with_payload - F bro -1340127577.346849 CHhAvVGS1DHFjwGM9 192.168.2.16 3797 65.55.158.80 3544 Teredo_bubble_with_payload - F bro 
-#close 2016-07-13-16-13-14 +1340127577.341510 CUM0KZ3MLUfNB0cl11 192.168.2.16 3797 83.170.1.38 32900 Teredo_bubble_with_payload - F zeek +1340127577.346849 CHhAvVGS1DHFjwGM9 192.168.2.16 3797 65.55.158.80 3544 Teredo_bubble_with_payload - F zeek +#close 2019-06-07-01-59-35 diff --git a/testing/btest/Baseline/core.when-interpreter-exceptions/bro.output b/testing/btest/Baseline/core.when-interpreter-exceptions/bro.output deleted file mode 100644 index 27a90d137c..0000000000 --- a/testing/btest/Baseline/core.when-interpreter-exceptions/bro.output +++ /dev/null @@ -1,13 +0,0 @@ -expression error in /Users/jon/Projects/bro/bro/testing/btest/.tmp/core.when-interpreter-exceptions/when-interpreter-exceptions.bro, line 47: field value missing (myrecord$notset) -expression error in /Users/jon/Projects/bro/bro/testing/btest/.tmp/core.when-interpreter-exceptions/when-interpreter-exceptions.bro, line 91: field value missing (myrecord$notset) -expression error in /Users/jon/Projects/bro/bro/testing/btest/.tmp/core.when-interpreter-exceptions/when-interpreter-exceptions.bro, line 72: field value missing (myrecord$notset) -expression error in /Users/jon/Projects/bro/bro/testing/btest/.tmp/core.when-interpreter-exceptions/when-interpreter-exceptions.bro, line 103: field value missing (myrecord$notset) -received termination signal -[f(F)] -f() done, no exception, T -[f(T)] -[bro_init()] -timeout g(), T -timeout -timeout g(), F -g() done, no exception, T diff --git a/testing/btest/Baseline/core.when-interpreter-exceptions/zeek.output b/testing/btest/Baseline/core.when-interpreter-exceptions/zeek.output new file mode 100644 index 0000000000..3abe7bcfd0 --- /dev/null +++ b/testing/btest/Baseline/core.when-interpreter-exceptions/zeek.output @@ -0,0 +1,13 @@ +expression error in /Users/jon/Projects/bro/bro/testing/btest/.tmp/core.when-interpreter-exceptions/when-interpreter-exceptions.zeek, line 47: field value missing (myrecord$notset) +expression error in /Users/jon/Projects/bro/bro/testing/btest/.tmp/core.when-interpreter-exceptions/when-interpreter-exceptions.zeek, line 91: field value missing (myrecord$notset) +expression error in /Users/jon/Projects/bro/bro/testing/btest/.tmp/core.when-interpreter-exceptions/when-interpreter-exceptions.zeek, line 72: field value missing (myrecord$notset) +expression error in /Users/jon/Projects/bro/bro/testing/btest/.tmp/core.when-interpreter-exceptions/when-interpreter-exceptions.zeek, line 103: field value missing (myrecord$notset) +received termination signal +[f(F)] +f() done, no exception, T +[f(T)] +[zeek_init()] +timeout g(), T +timeout +timeout g(), F +g() done, no exception, T diff --git a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log index 4eeaa4b07b..8fa1ab560a 100644 --- a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log @@ -3,180 +3,182 @@ #empty_field (empty) #unset_field - #path loaded_scripts -#open 2018-06-08-16-37-15 +#open 2019-06-15-20-36-48 #fields name #types string -scripts/base/init-bare.bro - build/scripts/base/bif/const.bif.bro - build/scripts/base/bif/types.bif.bro - build/scripts/base/bif/bro.bif.bro - build/scripts/base/bif/stats.bif.bro - build/scripts/base/bif/reporter.bif.bro - build/scripts/base/bif/strings.bif.bro - build/scripts/base/bif/option.bif.bro - 
build/scripts/base/bif/plugins/Bro_SNMP.types.bif.bro - build/scripts/base/bif/plugins/Bro_KRB.types.bif.bro - build/scripts/base/bif/event.bif.bro -scripts/base/init-frameworks-and-bifs.bro - scripts/base/frameworks/logging/__load__.bro - scripts/base/frameworks/logging/main.bro - build/scripts/base/bif/logging.bif.bro - scripts/base/frameworks/logging/postprocessors/__load__.bro - scripts/base/frameworks/logging/postprocessors/scp.bro - scripts/base/frameworks/logging/postprocessors/sftp.bro - scripts/base/frameworks/logging/writers/ascii.bro - scripts/base/frameworks/logging/writers/sqlite.bro - scripts/base/frameworks/logging/writers/none.bro - scripts/base/frameworks/broker/__load__.bro - scripts/base/frameworks/broker/main.bro - build/scripts/base/bif/comm.bif.bro - build/scripts/base/bif/messaging.bif.bro - scripts/base/frameworks/broker/store.bro - build/scripts/base/bif/data.bif.bro - build/scripts/base/bif/store.bif.bro - scripts/base/frameworks/broker/log.bro - scripts/base/frameworks/input/__load__.bro - scripts/base/frameworks/input/main.bro - build/scripts/base/bif/input.bif.bro - scripts/base/frameworks/input/readers/ascii.bro - scripts/base/frameworks/input/readers/raw.bro - scripts/base/frameworks/input/readers/benchmark.bro - scripts/base/frameworks/input/readers/binary.bro - scripts/base/frameworks/input/readers/config.bro - scripts/base/frameworks/input/readers/sqlite.bro - scripts/base/frameworks/analyzer/__load__.bro - scripts/base/frameworks/analyzer/main.bro - scripts/base/frameworks/packet-filter/utils.bro - build/scripts/base/bif/analyzer.bif.bro - scripts/base/frameworks/files/__load__.bro - scripts/base/frameworks/files/main.bro - build/scripts/base/bif/file_analysis.bif.bro - scripts/base/utils/site.bro - scripts/base/utils/patterns.bro - scripts/base/frameworks/files/magic/__load__.bro - build/scripts/base/bif/__load__.bro - build/scripts/base/bif/broxygen.bif.bro - build/scripts/base/bif/pcap.bif.bro - build/scripts/base/bif/bloom-filter.bif.bro - build/scripts/base/bif/cardinality-counter.bif.bro - build/scripts/base/bif/top-k.bif.bro - build/scripts/base/bif/plugins/__load__.bro - build/scripts/base/bif/plugins/Bro_ARP.events.bif.bro - build/scripts/base/bif/plugins/Bro_BackDoor.events.bif.bro - build/scripts/base/bif/plugins/Bro_BitTorrent.events.bif.bro - build/scripts/base/bif/plugins/Bro_ConnSize.events.bif.bro - build/scripts/base/bif/plugins/Bro_ConnSize.functions.bif.bro - build/scripts/base/bif/plugins/Bro_DCE_RPC.consts.bif.bro - build/scripts/base/bif/plugins/Bro_DCE_RPC.types.bif.bro - build/scripts/base/bif/plugins/Bro_DCE_RPC.events.bif.bro - build/scripts/base/bif/plugins/Bro_DHCP.events.bif.bro - build/scripts/base/bif/plugins/Bro_DHCP.types.bif.bro - build/scripts/base/bif/plugins/Bro_DNP3.events.bif.bro - build/scripts/base/bif/plugins/Bro_DNS.events.bif.bro - build/scripts/base/bif/plugins/Bro_File.events.bif.bro - build/scripts/base/bif/plugins/Bro_Finger.events.bif.bro - build/scripts/base/bif/plugins/Bro_FTP.events.bif.bro - build/scripts/base/bif/plugins/Bro_FTP.functions.bif.bro - build/scripts/base/bif/plugins/Bro_Gnutella.events.bif.bro - build/scripts/base/bif/plugins/Bro_GSSAPI.events.bif.bro - build/scripts/base/bif/plugins/Bro_GTPv1.events.bif.bro - build/scripts/base/bif/plugins/Bro_HTTP.events.bif.bro - build/scripts/base/bif/plugins/Bro_HTTP.functions.bif.bro - build/scripts/base/bif/plugins/Bro_ICMP.events.bif.bro - build/scripts/base/bif/plugins/Bro_Ident.events.bif.bro - 
build/scripts/base/bif/plugins/Bro_IMAP.events.bif.bro - build/scripts/base/bif/plugins/Bro_InterConn.events.bif.bro - build/scripts/base/bif/plugins/Bro_IRC.events.bif.bro - build/scripts/base/bif/plugins/Bro_KRB.events.bif.bro - build/scripts/base/bif/plugins/Bro_Login.events.bif.bro - build/scripts/base/bif/plugins/Bro_Login.functions.bif.bro - build/scripts/base/bif/plugins/Bro_MIME.events.bif.bro - build/scripts/base/bif/plugins/Bro_Modbus.events.bif.bro - build/scripts/base/bif/plugins/Bro_MySQL.events.bif.bro - build/scripts/base/bif/plugins/Bro_NCP.events.bif.bro - build/scripts/base/bif/plugins/Bro_NCP.consts.bif.bro - build/scripts/base/bif/plugins/Bro_NetBIOS.events.bif.bro - build/scripts/base/bif/plugins/Bro_NetBIOS.functions.bif.bro - build/scripts/base/bif/plugins/Bro_NTLM.types.bif.bro - build/scripts/base/bif/plugins/Bro_NTLM.events.bif.bro - build/scripts/base/bif/plugins/Bro_NTP.events.bif.bro - build/scripts/base/bif/plugins/Bro_POP3.events.bif.bro - build/scripts/base/bif/plugins/Bro_RADIUS.events.bif.bro - build/scripts/base/bif/plugins/Bro_RDP.events.bif.bro - build/scripts/base/bif/plugins/Bro_RDP.types.bif.bro - build/scripts/base/bif/plugins/Bro_RFB.events.bif.bro - build/scripts/base/bif/plugins/Bro_RPC.events.bif.bro - build/scripts/base/bif/plugins/Bro_SIP.events.bif.bro - build/scripts/base/bif/plugins/Bro_SNMP.events.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_check_directory.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_close.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_create_directory.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_echo.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_logoff_andx.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_negotiate.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_nt_create_andx.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_nt_cancel.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_query_information.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_read_andx.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_session_setup_andx.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_transaction.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_transaction_secondary.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_transaction2.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_transaction2_secondary.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_tree_connect_andx.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_tree_disconnect.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_write_andx.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_events.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_com_close.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_com_create.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_com_negotiate.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_com_read.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_com_session_setup.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_com_set_info.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_com_tree_connect.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_com_tree_disconnect.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_com_write.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_com_transform_header.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_events.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.events.bif.bro - 
build/scripts/base/bif/plugins/Bro_SMB.consts.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.types.bif.bro - build/scripts/base/bif/plugins/Bro_SMTP.events.bif.bro - build/scripts/base/bif/plugins/Bro_SMTP.functions.bif.bro - build/scripts/base/bif/plugins/Bro_SOCKS.events.bif.bro - build/scripts/base/bif/plugins/Bro_SSH.types.bif.bro - build/scripts/base/bif/plugins/Bro_SSH.events.bif.bro - build/scripts/base/bif/plugins/Bro_SSL.types.bif.bro - build/scripts/base/bif/plugins/Bro_SSL.events.bif.bro - build/scripts/base/bif/plugins/Bro_SSL.functions.bif.bro - build/scripts/base/bif/plugins/Bro_SteppingStone.events.bif.bro - build/scripts/base/bif/plugins/Bro_Syslog.events.bif.bro - build/scripts/base/bif/plugins/Bro_TCP.events.bif.bro - build/scripts/base/bif/plugins/Bro_TCP.functions.bif.bro - build/scripts/base/bif/plugins/Bro_Teredo.events.bif.bro - build/scripts/base/bif/plugins/Bro_UDP.events.bif.bro - build/scripts/base/bif/plugins/Bro_VXLAN.events.bif.bro - build/scripts/base/bif/plugins/Bro_XMPP.events.bif.bro - build/scripts/base/bif/plugins/Bro_FileEntropy.events.bif.bro - build/scripts/base/bif/plugins/Bro_FileExtract.events.bif.bro - build/scripts/base/bif/plugins/Bro_FileExtract.functions.bif.bro - build/scripts/base/bif/plugins/Bro_FileHash.events.bif.bro - build/scripts/base/bif/plugins/Bro_PE.events.bif.bro - build/scripts/base/bif/plugins/Bro_Unified2.events.bif.bro - build/scripts/base/bif/plugins/Bro_Unified2.types.bif.bro - build/scripts/base/bif/plugins/Bro_X509.events.bif.bro - build/scripts/base/bif/plugins/Bro_X509.types.bif.bro - build/scripts/base/bif/plugins/Bro_X509.functions.bif.bro - build/scripts/base/bif/plugins/Bro_X509.ocsp_events.bif.bro - build/scripts/base/bif/plugins/Bro_AsciiReader.ascii.bif.bro - build/scripts/base/bif/plugins/Bro_BenchmarkReader.benchmark.bif.bro - build/scripts/base/bif/plugins/Bro_BinaryReader.binary.bif.bro - build/scripts/base/bif/plugins/Bro_ConfigReader.config.bif.bro - build/scripts/base/bif/plugins/Bro_RawReader.raw.bif.bro - build/scripts/base/bif/plugins/Bro_SQLiteReader.sqlite.bif.bro - build/scripts/base/bif/plugins/Bro_AsciiWriter.ascii.bif.bro - build/scripts/base/bif/plugins/Bro_NoneWriter.none.bif.bro - build/scripts/base/bif/plugins/Bro_SQLiteWriter.sqlite.bif.bro -scripts/policy/misc/loaded-scripts.bro - scripts/base/utils/paths.bro -#close 2018-06-08-16-37-15 +scripts/base/init-bare.zeek + build/scripts/base/bif/const.bif.zeek + build/scripts/base/bif/types.bif.zeek + build/scripts/base/bif/zeek.bif.zeek + build/scripts/base/bif/stats.bif.zeek + build/scripts/base/bif/reporter.bif.zeek + build/scripts/base/bif/strings.bif.zeek + build/scripts/base/bif/option.bif.zeek + build/scripts/base/bif/plugins/Zeek_SNMP.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_KRB.types.bif.zeek + build/scripts/base/bif/event.bif.zeek +scripts/base/init-frameworks-and-bifs.zeek + scripts/base/frameworks/logging/__load__.zeek + scripts/base/frameworks/logging/main.zeek + build/scripts/base/bif/logging.bif.zeek + scripts/base/frameworks/logging/postprocessors/__load__.zeek + scripts/base/frameworks/logging/postprocessors/scp.zeek + scripts/base/frameworks/logging/postprocessors/sftp.zeek + scripts/base/frameworks/logging/writers/ascii.zeek + scripts/base/frameworks/logging/writers/sqlite.zeek + scripts/base/frameworks/logging/writers/none.zeek + scripts/base/frameworks/broker/__load__.zeek + scripts/base/frameworks/broker/main.zeek + build/scripts/base/bif/comm.bif.zeek + build/scripts/base/bif/messaging.bif.zeek + 
scripts/base/frameworks/broker/store.zeek + build/scripts/base/bif/data.bif.zeek + build/scripts/base/bif/store.bif.zeek + scripts/base/frameworks/broker/log.zeek + scripts/base/frameworks/input/__load__.zeek + scripts/base/frameworks/input/main.zeek + build/scripts/base/bif/input.bif.zeek + scripts/base/frameworks/input/readers/ascii.zeek + scripts/base/frameworks/input/readers/raw.zeek + scripts/base/frameworks/input/readers/benchmark.zeek + scripts/base/frameworks/input/readers/binary.zeek + scripts/base/frameworks/input/readers/config.zeek + scripts/base/frameworks/input/readers/sqlite.zeek + scripts/base/frameworks/analyzer/__load__.zeek + scripts/base/frameworks/analyzer/main.zeek + scripts/base/frameworks/packet-filter/utils.zeek + build/scripts/base/bif/analyzer.bif.zeek + scripts/base/frameworks/files/__load__.zeek + scripts/base/frameworks/files/main.zeek + build/scripts/base/bif/file_analysis.bif.zeek + scripts/base/utils/site.zeek + scripts/base/utils/patterns.zeek + scripts/base/frameworks/files/magic/__load__.zeek + build/scripts/base/bif/__load__.zeek + build/scripts/base/bif/zeekygen.bif.zeek + build/scripts/base/bif/pcap.bif.zeek + build/scripts/base/bif/bloom-filter.bif.zeek + build/scripts/base/bif/cardinality-counter.bif.zeek + build/scripts/base/bif/top-k.bif.zeek + build/scripts/base/bif/plugins/__load__.zeek + build/scripts/base/bif/plugins/Zeek_ARP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_BackDoor.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_BitTorrent.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_ConnSize.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_ConnSize.functions.bif.zeek + build/scripts/base/bif/plugins/Zeek_DCE_RPC.consts.bif.zeek + build/scripts/base/bif/plugins/Zeek_DCE_RPC.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_DCE_RPC.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_DHCP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_DHCP.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_DNP3.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_DNS.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_File.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_Finger.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_FTP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_FTP.functions.bif.zeek + build/scripts/base/bif/plugins/Zeek_Gnutella.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_GSSAPI.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_GTPv1.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_HTTP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_HTTP.functions.bif.zeek + build/scripts/base/bif/plugins/Zeek_ICMP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_Ident.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_IMAP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_InterConn.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_IRC.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_KRB.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_Login.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_Login.functions.bif.zeek + build/scripts/base/bif/plugins/Zeek_MIME.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_Modbus.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_MySQL.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_NCP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_NCP.consts.bif.zeek + build/scripts/base/bif/plugins/Zeek_NetBIOS.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_NetBIOS.functions.bif.zeek + 
build/scripts/base/bif/plugins/Zeek_NTLM.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_NTLM.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_NTP.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_NTP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_POP3.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_RADIUS.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_RDP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_RDP.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_RFB.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_RPC.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SIP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_check_directory.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_close.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_create_directory.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_echo.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_logoff_andx.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_negotiate.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_nt_create_andx.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_nt_cancel.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_query_information.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_read_andx.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_session_setup_andx.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_transaction.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_transaction_secondary.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_transaction2.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_transaction2_secondary.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_tree_connect_andx.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_tree_disconnect.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_write_andx.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_close.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_create.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_negotiate.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_read.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_session_setup.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_set_info.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_tree_connect.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_tree_disconnect.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_write.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_transform_header.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.consts.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMTP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMTP.functions.bif.zeek + build/scripts/base/bif/plugins/Zeek_SNMP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SOCKS.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SSH.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_SSH.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SSL.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_SSL.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SSL.functions.bif.zeek + 
build/scripts/base/bif/plugins/Zeek_SSL.consts.bif.zeek + build/scripts/base/bif/plugins/Zeek_SteppingStone.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_Syslog.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_TCP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_TCP.functions.bif.zeek + build/scripts/base/bif/plugins/Zeek_Teredo.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_UDP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_VXLAN.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_XMPP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_FileEntropy.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_FileExtract.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_FileExtract.functions.bif.zeek + build/scripts/base/bif/plugins/Zeek_FileHash.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_PE.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_Unified2.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_Unified2.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_X509.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_X509.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_X509.functions.bif.zeek + build/scripts/base/bif/plugins/Zeek_X509.ocsp_events.bif.zeek + build/scripts/base/bif/plugins/Zeek_AsciiReader.ascii.bif.zeek + build/scripts/base/bif/plugins/Zeek_BenchmarkReader.benchmark.bif.zeek + build/scripts/base/bif/plugins/Zeek_BinaryReader.binary.bif.zeek + build/scripts/base/bif/plugins/Zeek_ConfigReader.config.bif.zeek + build/scripts/base/bif/plugins/Zeek_RawReader.raw.bif.zeek + build/scripts/base/bif/plugins/Zeek_SQLiteReader.sqlite.bif.zeek + build/scripts/base/bif/plugins/Zeek_AsciiWriter.ascii.bif.zeek + build/scripts/base/bif/plugins/Zeek_NoneWriter.none.bif.zeek + build/scripts/base/bif/plugins/Zeek_SQLiteWriter.sqlite.bif.zeek +scripts/policy/misc/loaded-scripts.zeek + scripts/base/utils/paths.zeek +#close 2019-06-15-20-36-48 diff --git a/testing/btest/Baseline/coverage.bare-mode-errors/errors b/testing/btest/Baseline/coverage.bare-mode-errors/errors index e11a4ca00f..a13c8849a1 100644 --- a/testing/btest/Baseline/coverage.bare-mode-errors/errors +++ b/testing/btest/Baseline/coverage.bare-mode-errors/errors @@ -1,18 +1,4 @@ -warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.bro, line 245: deprecated (dhcp_discover) -warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.bro, line 248: deprecated (dhcp_offer) -warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.bro, line 251: deprecated (dhcp_request) -warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.bro, line 254: deprecated (dhcp_decline) -warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.bro, line 257: deprecated (dhcp_ack) -warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.bro, line 260: deprecated (dhcp_nak) -warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.bro, line 263: deprecated (dhcp_release) -warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.bro, line 266: deprecated (dhcp_inform) -warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/smb/__load__.bro, line 1: deprecated script loaded from /Users/jon/projects/bro/bro/testing/btest/../../scripts//broxygen/__load__.bro:10 "Use '@load base/protocols/smb' instead" -warning in 
/Users/jon/projects/bro/bro/testing/btest/../../scripts//policy/protocols/dhcp/deprecated_events.bro, line 245: deprecated (dhcp_discover) -warning in /Users/jon/projects/bro/bro/testing/btest/../../scripts//policy/protocols/dhcp/deprecated_events.bro, line 248: deprecated (dhcp_offer) -warning in /Users/jon/projects/bro/bro/testing/btest/../../scripts//policy/protocols/dhcp/deprecated_events.bro, line 251: deprecated (dhcp_request) -warning in /Users/jon/projects/bro/bro/testing/btest/../../scripts//policy/protocols/dhcp/deprecated_events.bro, line 254: deprecated (dhcp_decline) -warning in /Users/jon/projects/bro/bro/testing/btest/../../scripts//policy/protocols/dhcp/deprecated_events.bro, line 257: deprecated (dhcp_ack) -warning in /Users/jon/projects/bro/bro/testing/btest/../../scripts//policy/protocols/dhcp/deprecated_events.bro, line 260: deprecated (dhcp_nak) -warning in /Users/jon/projects/bro/bro/testing/btest/../../scripts//policy/protocols/dhcp/deprecated_events.bro, line 263: deprecated (dhcp_release) -warning in /Users/jon/projects/bro/bro/testing/btest/../../scripts//policy/protocols/dhcp/deprecated_events.bro, line 266: deprecated (dhcp_inform) -warning in /Users/jon/projects/bro/bro/testing/btest/../../scripts//policy/protocols/smb/__load__.bro, line 1: deprecated script loaded from command line arguments "Use '@load base/protocols/smb' instead" +warning in /Users/johanna/bro/master/scripts/policy/misc/trim-trace-file.zeek, line 25: deprecated (rotate_file_by_name) +warning in /Users/johanna/bro/master/scripts/policy/misc/trim-trace-file.zeek, line 25: deprecated (rotate_file_by_name) +warning in /Users/johanna/bro/master/scripts/policy/misc/trim-trace-file.zeek, line 25: deprecated (rotate_file_by_name) +warning in /Users/johanna/bro/master/testing/btest/../../scripts//policy/misc/trim-trace-file.zeek, line 25: deprecated (rotate_file_by_name) diff --git a/testing/btest/Baseline/coverage.coverage-blacklist/output b/testing/btest/Baseline/coverage.coverage-blacklist/output index c54e4283b2..e27574face 100644 --- a/testing/btest/Baseline/coverage.coverage-blacklist/output +++ b/testing/btest/Baseline/coverage.coverage-blacklist/output @@ -1,5 +1,5 @@ -1 /da/home/robin/bro/master/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.bro, line 13 print cover me; -1 /da/home/robin/bro/master/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.bro, line 17 print always executed; -0 /da/home/robin/bro/master/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.bro, line 26 print also impossible, but included in code coverage analysis; -1 /da/home/robin/bro/master/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.bro, line 29 print success; -1 /da/home/robin/bro/master/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.bro, line 5 print first; +1 /da/home/robin/bro/master/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.zeek, line 13 print cover me; +1 /da/home/robin/bro/master/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.zeek, line 17 print always executed; +0 /da/home/robin/bro/master/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.zeek, line 26 print also impossible, but included in code coverage analysis; +1 /da/home/robin/bro/master/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.zeek, line 29 print success; +1 /da/home/robin/bro/master/testing/btest/.tmp/coverage.coverage-blacklist/coverage-blacklist.zeek, line 5 print first; diff 
--git a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log index eaca1c489a..0b68a0ce83 100644 --- a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log @@ -3,374 +3,375 @@ #empty_field (empty) #unset_field - #path loaded_scripts -#open 2018-09-05-20-33-08 +#open 2019-06-15-20-54-48 #fields name #types string -scripts/base/init-bare.bro - build/scripts/base/bif/const.bif.bro - build/scripts/base/bif/types.bif.bro - build/scripts/base/bif/bro.bif.bro - build/scripts/base/bif/stats.bif.bro - build/scripts/base/bif/reporter.bif.bro - build/scripts/base/bif/strings.bif.bro - build/scripts/base/bif/option.bif.bro - build/scripts/base/bif/plugins/Bro_SNMP.types.bif.bro - build/scripts/base/bif/plugins/Bro_KRB.types.bif.bro - build/scripts/base/bif/event.bif.bro -scripts/base/init-frameworks-and-bifs.bro - scripts/base/frameworks/logging/__load__.bro - scripts/base/frameworks/logging/main.bro - build/scripts/base/bif/logging.bif.bro - scripts/base/frameworks/logging/postprocessors/__load__.bro - scripts/base/frameworks/logging/postprocessors/scp.bro - scripts/base/frameworks/logging/postprocessors/sftp.bro - scripts/base/frameworks/logging/writers/ascii.bro - scripts/base/frameworks/logging/writers/sqlite.bro - scripts/base/frameworks/logging/writers/none.bro - scripts/base/frameworks/broker/__load__.bro - scripts/base/frameworks/broker/main.bro - build/scripts/base/bif/comm.bif.bro - build/scripts/base/bif/messaging.bif.bro - scripts/base/frameworks/broker/store.bro - build/scripts/base/bif/data.bif.bro - build/scripts/base/bif/store.bif.bro - scripts/base/frameworks/broker/log.bro - scripts/base/frameworks/input/__load__.bro - scripts/base/frameworks/input/main.bro - build/scripts/base/bif/input.bif.bro - scripts/base/frameworks/input/readers/ascii.bro - scripts/base/frameworks/input/readers/raw.bro - scripts/base/frameworks/input/readers/benchmark.bro - scripts/base/frameworks/input/readers/binary.bro - scripts/base/frameworks/input/readers/config.bro - scripts/base/frameworks/input/readers/sqlite.bro - scripts/base/frameworks/analyzer/__load__.bro - scripts/base/frameworks/analyzer/main.bro - scripts/base/frameworks/packet-filter/utils.bro - build/scripts/base/bif/analyzer.bif.bro - scripts/base/frameworks/files/__load__.bro - scripts/base/frameworks/files/main.bro - build/scripts/base/bif/file_analysis.bif.bro - scripts/base/utils/site.bro - scripts/base/utils/patterns.bro - scripts/base/frameworks/files/magic/__load__.bro - build/scripts/base/bif/__load__.bro - build/scripts/base/bif/broxygen.bif.bro - build/scripts/base/bif/pcap.bif.bro - build/scripts/base/bif/bloom-filter.bif.bro - build/scripts/base/bif/cardinality-counter.bif.bro - build/scripts/base/bif/top-k.bif.bro - build/scripts/base/bif/plugins/__load__.bro - build/scripts/base/bif/plugins/Bro_ARP.events.bif.bro - build/scripts/base/bif/plugins/Bro_BackDoor.events.bif.bro - build/scripts/base/bif/plugins/Bro_BitTorrent.events.bif.bro - build/scripts/base/bif/plugins/Bro_ConnSize.events.bif.bro - build/scripts/base/bif/plugins/Bro_ConnSize.functions.bif.bro - build/scripts/base/bif/plugins/Bro_DCE_RPC.consts.bif.bro - build/scripts/base/bif/plugins/Bro_DCE_RPC.types.bif.bro - build/scripts/base/bif/plugins/Bro_DCE_RPC.events.bif.bro - build/scripts/base/bif/plugins/Bro_DHCP.events.bif.bro - 
build/scripts/base/bif/plugins/Bro_DHCP.types.bif.bro - build/scripts/base/bif/plugins/Bro_DNP3.events.bif.bro - build/scripts/base/bif/plugins/Bro_DNS.events.bif.bro - build/scripts/base/bif/plugins/Bro_File.events.bif.bro - build/scripts/base/bif/plugins/Bro_Finger.events.bif.bro - build/scripts/base/bif/plugins/Bro_FTP.events.bif.bro - build/scripts/base/bif/plugins/Bro_FTP.functions.bif.bro - build/scripts/base/bif/plugins/Bro_Gnutella.events.bif.bro - build/scripts/base/bif/plugins/Bro_GSSAPI.events.bif.bro - build/scripts/base/bif/plugins/Bro_GTPv1.events.bif.bro - build/scripts/base/bif/plugins/Bro_HTTP.events.bif.bro - build/scripts/base/bif/plugins/Bro_HTTP.functions.bif.bro - build/scripts/base/bif/plugins/Bro_ICMP.events.bif.bro - build/scripts/base/bif/plugins/Bro_Ident.events.bif.bro - build/scripts/base/bif/plugins/Bro_IMAP.events.bif.bro - build/scripts/base/bif/plugins/Bro_InterConn.events.bif.bro - build/scripts/base/bif/plugins/Bro_IRC.events.bif.bro - build/scripts/base/bif/plugins/Bro_KRB.events.bif.bro - build/scripts/base/bif/plugins/Bro_Login.events.bif.bro - build/scripts/base/bif/plugins/Bro_Login.functions.bif.bro - build/scripts/base/bif/plugins/Bro_MIME.events.bif.bro - build/scripts/base/bif/plugins/Bro_Modbus.events.bif.bro - build/scripts/base/bif/plugins/Bro_MySQL.events.bif.bro - build/scripts/base/bif/plugins/Bro_NCP.events.bif.bro - build/scripts/base/bif/plugins/Bro_NCP.consts.bif.bro - build/scripts/base/bif/plugins/Bro_NetBIOS.events.bif.bro - build/scripts/base/bif/plugins/Bro_NetBIOS.functions.bif.bro - build/scripts/base/bif/plugins/Bro_NTLM.types.bif.bro - build/scripts/base/bif/plugins/Bro_NTLM.events.bif.bro - build/scripts/base/bif/plugins/Bro_NTP.events.bif.bro - build/scripts/base/bif/plugins/Bro_POP3.events.bif.bro - build/scripts/base/bif/plugins/Bro_RADIUS.events.bif.bro - build/scripts/base/bif/plugins/Bro_RDP.events.bif.bro - build/scripts/base/bif/plugins/Bro_RDP.types.bif.bro - build/scripts/base/bif/plugins/Bro_RFB.events.bif.bro - build/scripts/base/bif/plugins/Bro_RPC.events.bif.bro - build/scripts/base/bif/plugins/Bro_SIP.events.bif.bro - build/scripts/base/bif/plugins/Bro_SNMP.events.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_check_directory.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_close.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_create_directory.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_echo.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_logoff_andx.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_negotiate.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_nt_create_andx.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_nt_cancel.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_query_information.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_read_andx.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_session_setup_andx.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_transaction.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_transaction_secondary.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_transaction2.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_transaction2_secondary.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_tree_connect_andx.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_tree_disconnect.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_com_write_andx.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb1_events.bif.bro - 
build/scripts/base/bif/plugins/Bro_SMB.smb2_com_close.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_com_create.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_com_negotiate.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_com_read.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_com_session_setup.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_com_set_info.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_com_tree_connect.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_com_tree_disconnect.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_com_write.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_com_transform_header.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.smb2_events.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.events.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.consts.bif.bro - build/scripts/base/bif/plugins/Bro_SMB.types.bif.bro - build/scripts/base/bif/plugins/Bro_SMTP.events.bif.bro - build/scripts/base/bif/plugins/Bro_SMTP.functions.bif.bro - build/scripts/base/bif/plugins/Bro_SOCKS.events.bif.bro - build/scripts/base/bif/plugins/Bro_SSH.types.bif.bro - build/scripts/base/bif/plugins/Bro_SSH.events.bif.bro - build/scripts/base/bif/plugins/Bro_SSL.types.bif.bro - build/scripts/base/bif/plugins/Bro_SSL.events.bif.bro - build/scripts/base/bif/plugins/Bro_SSL.functions.bif.bro - build/scripts/base/bif/plugins/Bro_SteppingStone.events.bif.bro - build/scripts/base/bif/plugins/Bro_Syslog.events.bif.bro - build/scripts/base/bif/plugins/Bro_TCP.events.bif.bro - build/scripts/base/bif/plugins/Bro_TCP.functions.bif.bro - build/scripts/base/bif/plugins/Bro_Teredo.events.bif.bro - build/scripts/base/bif/plugins/Bro_UDP.events.bif.bro - build/scripts/base/bif/plugins/Bro_VXLAN.events.bif.bro - build/scripts/base/bif/plugins/Bro_XMPP.events.bif.bro - build/scripts/base/bif/plugins/Bro_FileEntropy.events.bif.bro - build/scripts/base/bif/plugins/Bro_FileExtract.events.bif.bro - build/scripts/base/bif/plugins/Bro_FileExtract.functions.bif.bro - build/scripts/base/bif/plugins/Bro_FileHash.events.bif.bro - build/scripts/base/bif/plugins/Bro_PE.events.bif.bro - build/scripts/base/bif/plugins/Bro_Unified2.events.bif.bro - build/scripts/base/bif/plugins/Bro_Unified2.types.bif.bro - build/scripts/base/bif/plugins/Bro_X509.events.bif.bro - build/scripts/base/bif/plugins/Bro_X509.types.bif.bro - build/scripts/base/bif/plugins/Bro_X509.functions.bif.bro - build/scripts/base/bif/plugins/Bro_X509.ocsp_events.bif.bro - build/scripts/base/bif/plugins/Bro_AsciiReader.ascii.bif.bro - build/scripts/base/bif/plugins/Bro_BenchmarkReader.benchmark.bif.bro - build/scripts/base/bif/plugins/Bro_BinaryReader.binary.bif.bro - build/scripts/base/bif/plugins/Bro_ConfigReader.config.bif.bro - build/scripts/base/bif/plugins/Bro_RawReader.raw.bif.bro - build/scripts/base/bif/plugins/Bro_SQLiteReader.sqlite.bif.bro - build/scripts/base/bif/plugins/Bro_AsciiWriter.ascii.bif.bro - build/scripts/base/bif/plugins/Bro_NoneWriter.none.bif.bro - build/scripts/base/bif/plugins/Bro_SQLiteWriter.sqlite.bif.bro -scripts/base/init-default.bro - scripts/base/utils/active-http.bro - scripts/base/utils/exec.bro - scripts/base/utils/addrs.bro - scripts/base/utils/conn-ids.bro - scripts/base/utils/dir.bro - scripts/base/frameworks/reporter/__load__.bro - scripts/base/frameworks/reporter/main.bro - scripts/base/utils/paths.bro - scripts/base/utils/directions-and-hosts.bro - scripts/base/utils/email.bro - scripts/base/utils/files.bro - scripts/base/utils/geoip-distance.bro - 
scripts/base/utils/hash_hrw.bro - scripts/base/utils/numbers.bro - scripts/base/utils/queue.bro - scripts/base/utils/strings.bro - scripts/base/utils/thresholds.bro - scripts/base/utils/time.bro - scripts/base/utils/urls.bro - scripts/base/frameworks/notice/__load__.bro - scripts/base/frameworks/notice/main.bro - scripts/base/frameworks/cluster/__load__.bro - scripts/base/frameworks/cluster/main.bro - scripts/base/frameworks/control/__load__.bro - scripts/base/frameworks/control/main.bro - scripts/base/frameworks/cluster/pools.bro - scripts/base/frameworks/notice/weird.bro - scripts/base/frameworks/notice/actions/drop.bro - scripts/base/frameworks/netcontrol/__load__.bro - scripts/base/frameworks/netcontrol/types.bro - scripts/base/frameworks/netcontrol/main.bro - scripts/base/frameworks/netcontrol/plugin.bro - scripts/base/frameworks/netcontrol/plugins/__load__.bro - scripts/base/frameworks/netcontrol/plugins/debug.bro - scripts/base/frameworks/netcontrol/plugins/openflow.bro - scripts/base/frameworks/openflow/__load__.bro - scripts/base/frameworks/openflow/consts.bro - scripts/base/frameworks/openflow/types.bro - scripts/base/frameworks/openflow/main.bro - scripts/base/frameworks/openflow/plugins/__load__.bro - scripts/base/frameworks/openflow/plugins/ryu.bro - scripts/base/utils/json.bro - scripts/base/frameworks/openflow/plugins/log.bro - scripts/base/frameworks/openflow/plugins/broker.bro - scripts/base/frameworks/openflow/non-cluster.bro - scripts/base/frameworks/netcontrol/plugins/packetfilter.bro - scripts/base/frameworks/netcontrol/plugins/broker.bro - scripts/base/frameworks/netcontrol/plugins/acld.bro - scripts/base/frameworks/netcontrol/drop.bro - scripts/base/frameworks/netcontrol/shunt.bro - scripts/base/frameworks/netcontrol/catch-and-release.bro - scripts/base/frameworks/netcontrol/non-cluster.bro - scripts/base/frameworks/notice/actions/email_admin.bro - scripts/base/frameworks/notice/actions/page.bro - scripts/base/frameworks/notice/actions/add-geodata.bro - scripts/base/frameworks/notice/actions/pp-alarms.bro - scripts/base/frameworks/dpd/__load__.bro - scripts/base/frameworks/dpd/main.bro - scripts/base/frameworks/signatures/__load__.bro - scripts/base/frameworks/signatures/main.bro - scripts/base/frameworks/packet-filter/__load__.bro - scripts/base/frameworks/packet-filter/main.bro - scripts/base/frameworks/packet-filter/netstats.bro - scripts/base/frameworks/software/__load__.bro - scripts/base/frameworks/software/main.bro - scripts/base/frameworks/intel/__load__.bro - scripts/base/frameworks/intel/main.bro - scripts/base/frameworks/intel/files.bro - scripts/base/frameworks/intel/input.bro - scripts/base/frameworks/config/__load__.bro - scripts/base/frameworks/config/main.bro - scripts/base/frameworks/config/input.bro - scripts/base/frameworks/config/weird.bro - scripts/base/frameworks/sumstats/__load__.bro - scripts/base/frameworks/sumstats/main.bro - scripts/base/frameworks/sumstats/plugins/__load__.bro - scripts/base/frameworks/sumstats/plugins/average.bro - scripts/base/frameworks/sumstats/plugins/hll_unique.bro - scripts/base/frameworks/sumstats/plugins/last.bro - scripts/base/frameworks/sumstats/plugins/max.bro - scripts/base/frameworks/sumstats/plugins/min.bro - scripts/base/frameworks/sumstats/plugins/sample.bro - scripts/base/frameworks/sumstats/plugins/std-dev.bro - scripts/base/frameworks/sumstats/plugins/variance.bro - scripts/base/frameworks/sumstats/plugins/sum.bro - scripts/base/frameworks/sumstats/plugins/topk.bro - 
scripts/base/frameworks/sumstats/plugins/unique.bro - scripts/base/frameworks/sumstats/non-cluster.bro - scripts/base/frameworks/tunnels/__load__.bro - scripts/base/frameworks/tunnels/main.bro - scripts/base/protocols/conn/__load__.bro - scripts/base/protocols/conn/main.bro - scripts/base/protocols/conn/contents.bro - scripts/base/protocols/conn/inactivity.bro - scripts/base/protocols/conn/polling.bro - scripts/base/protocols/conn/thresholds.bro - scripts/base/protocols/dce-rpc/__load__.bro - scripts/base/protocols/dce-rpc/consts.bro - scripts/base/protocols/dce-rpc/main.bro - scripts/base/protocols/dhcp/__load__.bro - scripts/base/protocols/dhcp/consts.bro - scripts/base/protocols/dhcp/main.bro - scripts/base/protocols/dnp3/__load__.bro - scripts/base/protocols/dnp3/main.bro - scripts/base/protocols/dnp3/consts.bro - scripts/base/protocols/dns/__load__.bro - scripts/base/protocols/dns/consts.bro - scripts/base/protocols/dns/main.bro - scripts/base/protocols/ftp/__load__.bro - scripts/base/protocols/ftp/utils-commands.bro - scripts/base/protocols/ftp/info.bro - scripts/base/protocols/ftp/main.bro - scripts/base/protocols/ftp/utils.bro - scripts/base/protocols/ftp/files.bro - scripts/base/protocols/ftp/gridftp.bro - scripts/base/protocols/ssl/__load__.bro - scripts/base/protocols/ssl/consts.bro - scripts/base/protocols/ssl/main.bro - scripts/base/protocols/ssl/mozilla-ca-list.bro - scripts/base/protocols/ssl/ct-list.bro - scripts/base/protocols/ssl/files.bro - scripts/base/files/x509/__load__.bro - scripts/base/files/x509/main.bro - scripts/base/files/hash/__load__.bro - scripts/base/files/hash/main.bro - scripts/base/protocols/http/__load__.bro - scripts/base/protocols/http/main.bro - scripts/base/protocols/http/entities.bro - scripts/base/protocols/http/utils.bro - scripts/base/protocols/http/files.bro - scripts/base/protocols/imap/__load__.bro - scripts/base/protocols/imap/main.bro - scripts/base/protocols/irc/__load__.bro - scripts/base/protocols/irc/main.bro - scripts/base/protocols/irc/dcc-send.bro - scripts/base/protocols/irc/files.bro - scripts/base/protocols/krb/__load__.bro - scripts/base/protocols/krb/main.bro - scripts/base/protocols/krb/consts.bro - scripts/base/protocols/krb/files.bro - scripts/base/protocols/modbus/__load__.bro - scripts/base/protocols/modbus/consts.bro - scripts/base/protocols/modbus/main.bro - scripts/base/protocols/mysql/__load__.bro - scripts/base/protocols/mysql/main.bro - scripts/base/protocols/mysql/consts.bro - scripts/base/protocols/ntlm/__load__.bro - scripts/base/protocols/ntlm/main.bro - scripts/base/protocols/pop3/__load__.bro - scripts/base/protocols/radius/__load__.bro - scripts/base/protocols/radius/main.bro - scripts/base/protocols/radius/consts.bro - scripts/base/protocols/rdp/__load__.bro - scripts/base/protocols/rdp/consts.bro - scripts/base/protocols/rdp/main.bro - scripts/base/protocols/rfb/__load__.bro - scripts/base/protocols/rfb/main.bro - scripts/base/protocols/sip/__load__.bro - scripts/base/protocols/sip/main.bro - scripts/base/protocols/snmp/__load__.bro - scripts/base/protocols/snmp/main.bro - scripts/base/protocols/smb/__load__.bro - scripts/base/protocols/smb/consts.bro - scripts/base/protocols/smb/const-dos-error.bro - scripts/base/protocols/smb/const-nt-status.bro - scripts/base/protocols/smb/main.bro - scripts/base/protocols/smb/smb1-main.bro - scripts/base/protocols/smb/smb2-main.bro - scripts/base/protocols/smb/files.bro - scripts/base/protocols/smtp/__load__.bro - scripts/base/protocols/smtp/main.bro - 
scripts/base/protocols/smtp/entities.bro - scripts/base/protocols/smtp/files.bro - scripts/base/protocols/socks/__load__.bro - scripts/base/protocols/socks/consts.bro - scripts/base/protocols/socks/main.bro - scripts/base/protocols/ssh/__load__.bro - scripts/base/protocols/ssh/main.bro - scripts/base/protocols/syslog/__load__.bro - scripts/base/protocols/syslog/consts.bro - scripts/base/protocols/syslog/main.bro - scripts/base/protocols/tunnels/__load__.bro - scripts/base/protocols/xmpp/__load__.bro - scripts/base/protocols/xmpp/main.bro - scripts/base/files/pe/__load__.bro - scripts/base/files/pe/consts.bro - scripts/base/files/pe/main.bro - scripts/base/files/extract/__load__.bro - scripts/base/files/extract/main.bro - scripts/base/files/unified2/__load__.bro - scripts/base/files/unified2/main.bro - scripts/base/misc/find-checksum-offloading.bro - scripts/base/misc/find-filtered-trace.bro - scripts/base/misc/version.bro -scripts/policy/misc/loaded-scripts.bro -#close 2018-09-05-20-33-08 +scripts/base/init-bare.zeek + build/scripts/base/bif/const.bif.zeek + build/scripts/base/bif/types.bif.zeek + build/scripts/base/bif/zeek.bif.zeek + build/scripts/base/bif/stats.bif.zeek + build/scripts/base/bif/reporter.bif.zeek + build/scripts/base/bif/strings.bif.zeek + build/scripts/base/bif/option.bif.zeek + build/scripts/base/bif/plugins/Zeek_SNMP.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_KRB.types.bif.zeek + build/scripts/base/bif/event.bif.zeek +scripts/base/init-frameworks-and-bifs.zeek + scripts/base/frameworks/logging/__load__.zeek + scripts/base/frameworks/logging/main.zeek + build/scripts/base/bif/logging.bif.zeek + scripts/base/frameworks/logging/postprocessors/__load__.zeek + scripts/base/frameworks/logging/postprocessors/scp.zeek + scripts/base/frameworks/logging/postprocessors/sftp.zeek + scripts/base/frameworks/logging/writers/ascii.zeek + scripts/base/frameworks/logging/writers/sqlite.zeek + scripts/base/frameworks/logging/writers/none.zeek + scripts/base/frameworks/broker/__load__.zeek + scripts/base/frameworks/broker/main.zeek + build/scripts/base/bif/comm.bif.zeek + build/scripts/base/bif/messaging.bif.zeek + scripts/base/frameworks/broker/store.zeek + build/scripts/base/bif/data.bif.zeek + build/scripts/base/bif/store.bif.zeek + scripts/base/frameworks/broker/log.zeek + scripts/base/frameworks/input/__load__.zeek + scripts/base/frameworks/input/main.zeek + build/scripts/base/bif/input.bif.zeek + scripts/base/frameworks/input/readers/ascii.zeek + scripts/base/frameworks/input/readers/raw.zeek + scripts/base/frameworks/input/readers/benchmark.zeek + scripts/base/frameworks/input/readers/binary.zeek + scripts/base/frameworks/input/readers/config.zeek + scripts/base/frameworks/input/readers/sqlite.zeek + scripts/base/frameworks/analyzer/__load__.zeek + scripts/base/frameworks/analyzer/main.zeek + scripts/base/frameworks/packet-filter/utils.zeek + build/scripts/base/bif/analyzer.bif.zeek + scripts/base/frameworks/files/__load__.zeek + scripts/base/frameworks/files/main.zeek + build/scripts/base/bif/file_analysis.bif.zeek + scripts/base/utils/site.zeek + scripts/base/utils/patterns.zeek + scripts/base/frameworks/files/magic/__load__.zeek + build/scripts/base/bif/__load__.zeek + build/scripts/base/bif/zeekygen.bif.zeek + build/scripts/base/bif/pcap.bif.zeek + build/scripts/base/bif/bloom-filter.bif.zeek + build/scripts/base/bif/cardinality-counter.bif.zeek + build/scripts/base/bif/top-k.bif.zeek + build/scripts/base/bif/plugins/__load__.zeek + 
build/scripts/base/bif/plugins/Zeek_ARP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_BackDoor.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_BitTorrent.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_ConnSize.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_ConnSize.functions.bif.zeek + build/scripts/base/bif/plugins/Zeek_DCE_RPC.consts.bif.zeek + build/scripts/base/bif/plugins/Zeek_DCE_RPC.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_DCE_RPC.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_DHCP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_DHCP.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_DNP3.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_DNS.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_File.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_Finger.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_FTP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_FTP.functions.bif.zeek + build/scripts/base/bif/plugins/Zeek_Gnutella.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_GSSAPI.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_GTPv1.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_HTTP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_HTTP.functions.bif.zeek + build/scripts/base/bif/plugins/Zeek_ICMP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_Ident.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_IMAP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_InterConn.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_IRC.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_KRB.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_Login.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_Login.functions.bif.zeek + build/scripts/base/bif/plugins/Zeek_MIME.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_Modbus.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_MySQL.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_NCP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_NCP.consts.bif.zeek + build/scripts/base/bif/plugins/Zeek_NetBIOS.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_NetBIOS.functions.bif.zeek + build/scripts/base/bif/plugins/Zeek_NTLM.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_NTLM.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_NTP.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_NTP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_POP3.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_RADIUS.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_RDP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_RDP.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_RFB.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_RPC.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SIP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_check_directory.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_close.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_create_directory.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_echo.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_logoff_andx.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_negotiate.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_nt_create_andx.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_nt_cancel.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_query_information.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_read_andx.bif.zeek + 
build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_session_setup_andx.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_transaction.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_transaction_secondary.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_transaction2.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_transaction2_secondary.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_tree_connect_andx.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_tree_disconnect.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_com_write_andx.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb1_events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_close.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_create.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_negotiate.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_read.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_session_setup.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_set_info.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_tree_connect.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_tree_disconnect.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_write.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_com_transform_header.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.smb2_events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.consts.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMB.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMTP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SMTP.functions.bif.zeek + build/scripts/base/bif/plugins/Zeek_SNMP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SOCKS.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SSH.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_SSH.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SSL.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_SSL.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_SSL.functions.bif.zeek + build/scripts/base/bif/plugins/Zeek_SSL.consts.bif.zeek + build/scripts/base/bif/plugins/Zeek_SteppingStone.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_Syslog.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_TCP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_TCP.functions.bif.zeek + build/scripts/base/bif/plugins/Zeek_Teredo.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_UDP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_VXLAN.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_XMPP.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_FileEntropy.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_FileExtract.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_FileExtract.functions.bif.zeek + build/scripts/base/bif/plugins/Zeek_FileHash.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_PE.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_Unified2.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_Unified2.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_X509.events.bif.zeek + build/scripts/base/bif/plugins/Zeek_X509.types.bif.zeek + build/scripts/base/bif/plugins/Zeek_X509.functions.bif.zeek + build/scripts/base/bif/plugins/Zeek_X509.ocsp_events.bif.zeek + build/scripts/base/bif/plugins/Zeek_AsciiReader.ascii.bif.zeek + build/scripts/base/bif/plugins/Zeek_BenchmarkReader.benchmark.bif.zeek + 
build/scripts/base/bif/plugins/Zeek_BinaryReader.binary.bif.zeek + build/scripts/base/bif/plugins/Zeek_ConfigReader.config.bif.zeek + build/scripts/base/bif/plugins/Zeek_RawReader.raw.bif.zeek + build/scripts/base/bif/plugins/Zeek_SQLiteReader.sqlite.bif.zeek + build/scripts/base/bif/plugins/Zeek_AsciiWriter.ascii.bif.zeek + build/scripts/base/bif/plugins/Zeek_NoneWriter.none.bif.zeek + build/scripts/base/bif/plugins/Zeek_SQLiteWriter.sqlite.bif.zeek +scripts/base/init-default.zeek + scripts/base/utils/active-http.zeek + scripts/base/utils/exec.zeek + scripts/base/utils/addrs.zeek + scripts/base/utils/conn-ids.zeek + scripts/base/utils/dir.zeek + scripts/base/frameworks/reporter/__load__.zeek + scripts/base/frameworks/reporter/main.zeek + scripts/base/utils/paths.zeek + scripts/base/utils/directions-and-hosts.zeek + scripts/base/utils/email.zeek + scripts/base/utils/files.zeek + scripts/base/utils/geoip-distance.zeek + scripts/base/utils/hash_hrw.zeek + scripts/base/utils/numbers.zeek + scripts/base/utils/queue.zeek + scripts/base/utils/strings.zeek + scripts/base/utils/thresholds.zeek + scripts/base/utils/time.zeek + scripts/base/utils/urls.zeek + scripts/base/frameworks/notice/__load__.zeek + scripts/base/frameworks/notice/main.zeek + scripts/base/frameworks/cluster/__load__.zeek + scripts/base/frameworks/cluster/main.zeek + scripts/base/frameworks/control/__load__.zeek + scripts/base/frameworks/control/main.zeek + scripts/base/frameworks/cluster/pools.zeek + scripts/base/frameworks/notice/weird.zeek + scripts/base/frameworks/notice/actions/email_admin.zeek + scripts/base/frameworks/notice/actions/page.zeek + scripts/base/frameworks/notice/actions/add-geodata.zeek + scripts/base/frameworks/notice/actions/pp-alarms.zeek + scripts/base/frameworks/dpd/__load__.zeek + scripts/base/frameworks/dpd/main.zeek + scripts/base/frameworks/signatures/__load__.zeek + scripts/base/frameworks/signatures/main.zeek + scripts/base/frameworks/packet-filter/__load__.zeek + scripts/base/frameworks/packet-filter/main.zeek + scripts/base/frameworks/packet-filter/netstats.zeek + scripts/base/frameworks/software/__load__.zeek + scripts/base/frameworks/software/main.zeek + scripts/base/frameworks/intel/__load__.zeek + scripts/base/frameworks/intel/main.zeek + scripts/base/frameworks/intel/files.zeek + scripts/base/frameworks/intel/input.zeek + scripts/base/frameworks/config/__load__.zeek + scripts/base/frameworks/config/main.zeek + scripts/base/frameworks/config/input.zeek + scripts/base/frameworks/config/weird.zeek + scripts/base/frameworks/sumstats/__load__.zeek + scripts/base/frameworks/sumstats/main.zeek + scripts/base/frameworks/sumstats/plugins/__load__.zeek + scripts/base/frameworks/sumstats/plugins/average.zeek + scripts/base/frameworks/sumstats/plugins/hll_unique.zeek + scripts/base/frameworks/sumstats/plugins/last.zeek + scripts/base/frameworks/sumstats/plugins/max.zeek + scripts/base/frameworks/sumstats/plugins/min.zeek + scripts/base/frameworks/sumstats/plugins/sample.zeek + scripts/base/frameworks/sumstats/plugins/std-dev.zeek + scripts/base/frameworks/sumstats/plugins/variance.zeek + scripts/base/frameworks/sumstats/plugins/sum.zeek + scripts/base/frameworks/sumstats/plugins/topk.zeek + scripts/base/frameworks/sumstats/plugins/unique.zeek + scripts/base/frameworks/sumstats/non-cluster.zeek + scripts/base/frameworks/tunnels/__load__.zeek + scripts/base/frameworks/tunnels/main.zeek + scripts/base/frameworks/openflow/__load__.zeek + scripts/base/frameworks/openflow/consts.zeek + 
scripts/base/frameworks/openflow/types.zeek + scripts/base/frameworks/openflow/main.zeek + scripts/base/frameworks/openflow/plugins/__load__.zeek + scripts/base/frameworks/openflow/plugins/ryu.zeek + scripts/base/utils/json.zeek + scripts/base/frameworks/openflow/plugins/log.zeek + scripts/base/frameworks/openflow/plugins/broker.zeek + scripts/base/frameworks/openflow/non-cluster.zeek + scripts/base/frameworks/netcontrol/__load__.zeek + scripts/base/frameworks/netcontrol/types.zeek + scripts/base/frameworks/netcontrol/main.zeek + scripts/base/frameworks/netcontrol/plugin.zeek + scripts/base/frameworks/netcontrol/plugins/__load__.zeek + scripts/base/frameworks/netcontrol/plugins/debug.zeek + scripts/base/frameworks/netcontrol/plugins/openflow.zeek + scripts/base/frameworks/netcontrol/plugins/packetfilter.zeek + scripts/base/frameworks/netcontrol/plugins/broker.zeek + scripts/base/frameworks/netcontrol/plugins/acld.zeek + scripts/base/frameworks/netcontrol/drop.zeek + scripts/base/frameworks/netcontrol/shunt.zeek + scripts/base/frameworks/netcontrol/non-cluster.zeek + scripts/base/protocols/conn/__load__.zeek + scripts/base/protocols/conn/main.zeek + scripts/base/protocols/conn/contents.zeek + scripts/base/protocols/conn/inactivity.zeek + scripts/base/protocols/conn/polling.zeek + scripts/base/protocols/conn/thresholds.zeek + scripts/base/protocols/dce-rpc/__load__.zeek + scripts/base/protocols/dce-rpc/consts.zeek + scripts/base/protocols/dce-rpc/main.zeek + scripts/base/protocols/dhcp/__load__.zeek + scripts/base/protocols/dhcp/consts.zeek + scripts/base/protocols/dhcp/main.zeek + scripts/base/protocols/dnp3/__load__.zeek + scripts/base/protocols/dnp3/main.zeek + scripts/base/protocols/dnp3/consts.zeek + scripts/base/protocols/dns/__load__.zeek + scripts/base/protocols/dns/consts.zeek + scripts/base/protocols/dns/main.zeek + scripts/base/protocols/ftp/__load__.zeek + scripts/base/protocols/ftp/utils-commands.zeek + scripts/base/protocols/ftp/info.zeek + scripts/base/protocols/ftp/main.zeek + scripts/base/protocols/ftp/utils.zeek + scripts/base/protocols/ftp/files.zeek + scripts/base/protocols/ftp/gridftp.zeek + scripts/base/protocols/ssl/__load__.zeek + scripts/base/protocols/ssl/consts.zeek + scripts/base/protocols/ssl/main.zeek + scripts/base/protocols/ssl/mozilla-ca-list.zeek + scripts/base/protocols/ssl/ct-list.zeek + scripts/base/protocols/ssl/files.zeek + scripts/base/files/x509/__load__.zeek + scripts/base/files/x509/main.zeek + scripts/base/files/hash/__load__.zeek + scripts/base/files/hash/main.zeek + scripts/base/protocols/http/__load__.zeek + scripts/base/protocols/http/main.zeek + scripts/base/protocols/http/entities.zeek + scripts/base/protocols/http/utils.zeek + scripts/base/protocols/http/files.zeek + scripts/base/protocols/imap/__load__.zeek + scripts/base/protocols/imap/main.zeek + scripts/base/protocols/irc/__load__.zeek + scripts/base/protocols/irc/main.zeek + scripts/base/protocols/irc/dcc-send.zeek + scripts/base/protocols/irc/files.zeek + scripts/base/protocols/krb/__load__.zeek + scripts/base/protocols/krb/main.zeek + scripts/base/protocols/krb/consts.zeek + scripts/base/protocols/krb/files.zeek + scripts/base/protocols/modbus/__load__.zeek + scripts/base/protocols/modbus/consts.zeek + scripts/base/protocols/modbus/main.zeek + scripts/base/protocols/mysql/__load__.zeek + scripts/base/protocols/mysql/main.zeek + scripts/base/protocols/mysql/consts.zeek + scripts/base/protocols/ntlm/__load__.zeek + scripts/base/protocols/ntlm/main.zeek + 
scripts/base/protocols/ntp/__load__.zeek + scripts/base/protocols/ntp/main.zeek + scripts/base/protocols/ntp/consts.zeek + scripts/base/protocols/pop3/__load__.zeek + scripts/base/protocols/radius/__load__.zeek + scripts/base/protocols/radius/main.zeek + scripts/base/protocols/radius/consts.zeek + scripts/base/protocols/rdp/__load__.zeek + scripts/base/protocols/rdp/consts.zeek + scripts/base/protocols/rdp/main.zeek + scripts/base/protocols/rfb/__load__.zeek + scripts/base/protocols/rfb/main.zeek + scripts/base/protocols/sip/__load__.zeek + scripts/base/protocols/sip/main.zeek + scripts/base/protocols/snmp/__load__.zeek + scripts/base/protocols/snmp/main.zeek + scripts/base/protocols/smb/__load__.zeek + scripts/base/protocols/smb/consts.zeek + scripts/base/protocols/smb/const-dos-error.zeek + scripts/base/protocols/smb/const-nt-status.zeek + scripts/base/protocols/smb/main.zeek + scripts/base/protocols/smb/smb1-main.zeek + scripts/base/protocols/smb/smb2-main.zeek + scripts/base/protocols/smb/files.zeek + scripts/base/protocols/smtp/__load__.zeek + scripts/base/protocols/smtp/main.zeek + scripts/base/protocols/smtp/entities.zeek + scripts/base/protocols/smtp/files.zeek + scripts/base/protocols/socks/__load__.zeek + scripts/base/protocols/socks/consts.zeek + scripts/base/protocols/socks/main.zeek + scripts/base/protocols/ssh/__load__.zeek + scripts/base/protocols/ssh/main.zeek + scripts/base/protocols/syslog/__load__.zeek + scripts/base/protocols/syslog/consts.zeek + scripts/base/protocols/syslog/main.zeek + scripts/base/protocols/tunnels/__load__.zeek + scripts/base/protocols/xmpp/__load__.zeek + scripts/base/protocols/xmpp/main.zeek + scripts/base/files/pe/__load__.zeek + scripts/base/files/pe/consts.zeek + scripts/base/files/pe/main.zeek + scripts/base/files/extract/__load__.zeek + scripts/base/files/extract/main.zeek + scripts/base/misc/find-checksum-offloading.zeek + scripts/base/misc/find-filtered-trace.zeek + scripts/base/misc/version.zeek +scripts/policy/misc/loaded-scripts.zeek +#close 2019-06-15-20-54-48 diff --git a/testing/btest/Baseline/coverage.init-default/missing_loads b/testing/btest/Baseline/coverage.init-default/missing_loads index 31966f11c1..893a603972 100644 --- a/testing/btest/Baseline/coverage.init-default/missing_loads +++ b/testing/btest/Baseline/coverage.init-default/missing_loads @@ -1,10 +1,10 @@ --./frameworks/cluster/nodes/logger.bro --./frameworks/cluster/nodes/manager.bro --./frameworks/cluster/nodes/proxy.bro --./frameworks/cluster/nodes/worker.bro --./frameworks/cluster/setup-connections.bro --./frameworks/intel/cluster.bro --./frameworks/netcontrol/cluster.bro --./frameworks/openflow/cluster.bro --./frameworks/packet-filter/cluster.bro --./frameworks/sumstats/cluster.bro +-./frameworks/cluster/nodes/logger.zeek +-./frameworks/cluster/nodes/manager.zeek +-./frameworks/cluster/nodes/proxy.zeek +-./frameworks/cluster/nodes/worker.zeek +-./frameworks/cluster/setup-connections.zeek +-./frameworks/intel/cluster.zeek +-./frameworks/netcontrol/cluster.zeek +-./frameworks/openflow/cluster.zeek +-./frameworks/packet-filter/cluster.zeek +-./frameworks/sumstats/cluster.zeek diff --git a/testing/btest/Baseline/doc.broxygen.all_scripts/.stderr b/testing/btest/Baseline/doc.broxygen.all_scripts/.stderr deleted file mode 100644 index da6c357abf..0000000000 --- a/testing/btest/Baseline/doc.broxygen.all_scripts/.stderr +++ /dev/null @@ -1,11 +0,0 @@ -warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.bro, line 245: deprecated 
(dhcp_discover) -warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.bro, line 248: deprecated (dhcp_offer) -warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.bro, line 251: deprecated (dhcp_request) -warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.bro, line 254: deprecated (dhcp_decline) -warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.bro, line 257: deprecated (dhcp_ack) -warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.bro, line 260: deprecated (dhcp_nak) -warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.bro, line 263: deprecated (dhcp_release) -warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.bro, line 266: deprecated (dhcp_inform) -warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/smb/__load__.bro, line 1: deprecated script loaded from /Users/jon/projects/bro/bro/scripts/broxygen/__load__.bro:10 "Use '@load base/protocols/smb' instead" -error in /Users/jon/projects/bro/bro/scripts/policy/frameworks/control/controller.bro, line 22: The '' control command is unknown. -, line 1: received termination signal diff --git a/testing/btest/Baseline/doc.broxygen.enums/autogen-reST-enums.rst b/testing/btest/Baseline/doc.broxygen.enums/autogen-reST-enums.rst deleted file mode 100644 index c98d2792df..0000000000 --- a/testing/btest/Baseline/doc.broxygen.enums/autogen-reST-enums.rst +++ /dev/null @@ -1,60 +0,0 @@ -.. bro:type:: TestEnum1 - - :Type: :bro:type:`enum` - - .. bro:enum:: ONE TestEnum1 - - like this - - .. bro:enum:: TWO TestEnum1 - - or like this - - .. bro:enum:: THREE TestEnum1 - - multiple - comments - and even - more comments - - .. bro:enum:: FOUR TestEnum1 - - adding another - value - - .. bro:enum:: FIVE TestEnum1 - - adding another - value - - There's tons of ways an enum can look... - -.. bro:type:: TestEnum2 - - :Type: :bro:type:`enum` - - .. bro:enum:: A TestEnum2 - - like this - - .. bro:enum:: B TestEnum2 - - or like this - - .. bro:enum:: C TestEnum2 - - multiple - comments - and even - more comments - - The final comma is optional - -.. bro:id:: TestEnumVal - - :Type: :bro:type:`TestEnum1` - :Attributes: :bro:attr:`&redef` - :Default: ``ONE`` - - this should reference the TestEnum1 type and not a generic "enum" type - diff --git a/testing/btest/Baseline/doc.broxygen.example/example.rst b/testing/btest/Baseline/doc.broxygen.example/example.rst deleted file mode 100644 index d729ab85ee..0000000000 --- a/testing/btest/Baseline/doc.broxygen.example/example.rst +++ /dev/null @@ -1,248 +0,0 @@ -:tocdepth: 3 - -broxygen/example.bro -==================== -.. bro:namespace:: BroxygenExample - -This is an example script that demonstrates Broxygen-style -documentation. It generally will make most sense when viewing -the script's raw source code and comparing to the HTML-rendered -version. - -Comments in the from ``##!`` are meant to summarize the script's -purpose. They are transferred directly in to the generated -`reStructuredText `_ -(reST) document associated with the script. - -.. tip:: You can embed directives and roles within ``##``-stylized comments. - -There's also a custom role to reference any identifier node in -the Bro Sphinx domain that's good for "see alsos", e.g. 
- -See also: :bro:see:`BroxygenExample::a_var`, -:bro:see:`BroxygenExample::ONE`, :bro:see:`SSH::Info` - -And a custom directive does the equivalent references: - -.. bro:see:: BroxygenExample::a_var BroxygenExample::ONE SSH::Info - -:Namespace: BroxygenExample -:Imports: :doc:`base/frameworks/notice `, :doc:`base/protocols/http `, :doc:`policy/frameworks/software/vulnerable.bro ` - -Summary -~~~~~~~ -Redefinable Options -################### -==================================================================================== ======================================================= -:bro:id:`BroxygenExample::an_option`: :bro:type:`set` :bro:attr:`&redef` Add documentation for "an_option" here. -:bro:id:`BroxygenExample::option_with_init`: :bro:type:`interval` :bro:attr:`&redef` Default initialization will be generated automatically. -==================================================================================== ======================================================= - -State Variables -############### -======================================================================== ======================================================================== -:bro:id:`BroxygenExample::a_var`: :bro:type:`bool` Put some documentation for "a_var" here. -:bro:id:`BroxygenExample::summary_test`: :bro:type:`string` The first sentence for a particular identifier's summary text ends here. -:bro:id:`BroxygenExample::var_without_explicit_type`: :bro:type:`string` Types are inferred, that information is self-documenting. -======================================================================== ======================================================================== - -Types -##### -================================================================================= =========================================================== -:bro:type:`BroxygenExample::ComplexRecord`: :bro:type:`record` :bro:attr:`&redef` General documentation for a type "ComplexRecord" goes here. -:bro:type:`BroxygenExample::Info`: :bro:type:`record` An example record to be used with a logging stream. -:bro:type:`BroxygenExample::SimpleEnum`: :bro:type:`enum` Documentation for the "SimpleEnum" type goes here. -:bro:type:`BroxygenExample::SimpleRecord`: :bro:type:`record` General documentation for a type "SimpleRecord" goes here. -================================================================================= =========================================================== - -Redefinitions -############# -============================================================= ==================================================================== -:bro:type:`BroxygenExample::SimpleEnum`: :bro:type:`enum` Document the "SimpleEnum" redef here with any special info regarding - the *redef* itself. -:bro:type:`BroxygenExample::SimpleRecord`: :bro:type:`record` Document the record extension *redef* itself here. -:bro:type:`Log::ID`: :bro:type:`enum` -:bro:type:`Notice::Type`: :bro:type:`enum` -============================================================= ==================================================================== - -Events -###### -====================================================== ========================== -:bro:id:`BroxygenExample::an_event`: :bro:type:`event` Summarize "an_event" here. 
-====================================================== ========================== - -Functions -######### -=========================================================== ======================================= -:bro:id:`BroxygenExample::a_function`: :bro:type:`function` Summarize purpose of "a_function" here. -=========================================================== ======================================= - - -Detailed Interface -~~~~~~~~~~~~~~~~~~ -Redefinable Options -################### -.. bro:id:: BroxygenExample::an_option - - :Type: :bro:type:`set` [:bro:type:`addr`, :bro:type:`addr`, :bro:type:`string`] - :Attributes: :bro:attr:`&redef` - :Default: ``{}`` - - Add documentation for "an_option" here. - The type/attribute information is all generated automatically. - -.. bro:id:: BroxygenExample::option_with_init - - :Type: :bro:type:`interval` - :Attributes: :bro:attr:`&redef` - :Default: ``10.0 msecs`` - - Default initialization will be generated automatically. - More docs can be added here. - -State Variables -############### -.. bro:id:: BroxygenExample::a_var - - :Type: :bro:type:`bool` - - Put some documentation for "a_var" here. Any global/non-const that - isn't a function/event/hook is classified as a "state variable" - in the generated docs. - -.. bro:id:: BroxygenExample::summary_test - - :Type: :bro:type:`string` - - The first sentence for a particular identifier's summary text ends here. - And this second sentence doesn't show in the short description provided - by the table of all identifiers declared by this script. - -.. bro:id:: BroxygenExample::var_without_explicit_type - - :Type: :bro:type:`string` - :Default: ``"this works"`` - - Types are inferred, that information is self-documenting. - -Types -##### -.. bro:type:: BroxygenExample::ComplexRecord - - :Type: :bro:type:`record` - - field1: :bro:type:`count` - Counts something. - - field2: :bro:type:`bool` - Toggles something. - - field3: :bro:type:`BroxygenExample::SimpleRecord` - Broxygen automatically tracks types - and cross-references are automatically - inserted in to generated docs. - - msg: :bro:type:`string` :bro:attr:`&default` = ``"blah"`` :bro:attr:`&optional` - Attributes are self-documenting. - :Attributes: :bro:attr:`&redef` - - General documentation for a type "ComplexRecord" goes here. - -.. bro:type:: BroxygenExample::Info - - :Type: :bro:type:`record` - - ts: :bro:type:`time` :bro:attr:`&log` - - uid: :bro:type:`string` :bro:attr:`&log` - - status: :bro:type:`count` :bro:attr:`&log` :bro:attr:`&optional` - - An example record to be used with a logging stream. - Nothing special about it. If another script redefs this type - to add fields, the generated documentation will show all original - fields plus the extensions and the scripts which contributed to it - (provided they are also @load'ed). - -.. bro:type:: BroxygenExample::SimpleEnum - - :Type: :bro:type:`enum` - - .. bro:enum:: BroxygenExample::ONE BroxygenExample::SimpleEnum - - Documentation for particular enum values is added like this. - And can also span multiple lines. - - .. bro:enum:: BroxygenExample::TWO BroxygenExample::SimpleEnum - - Or this style is valid to document the preceding enum value. - - .. bro:enum:: BroxygenExample::THREE BroxygenExample::SimpleEnum - - .. bro:enum:: BroxygenExample::FOUR BroxygenExample::SimpleEnum - - And some documentation for "FOUR". - - .. bro:enum:: BroxygenExample::FIVE BroxygenExample::SimpleEnum - - Also "FIVE". - - Documentation for the "SimpleEnum" type goes here. - It can span multiple lines. 
- -.. bro:type:: BroxygenExample::SimpleRecord - - :Type: :bro:type:`record` - - field1: :bro:type:`count` - Counts something. - - field2: :bro:type:`bool` - Toggles something. - - field_ext: :bro:type:`string` :bro:attr:`&optional` - Document the extending field like this. - Or here, like this. - - General documentation for a type "SimpleRecord" goes here. - The way fields can be documented is similar to what's already seen - for enums. - -Events -###### -.. bro:id:: BroxygenExample::an_event - - :Type: :bro:type:`event` (name: :bro:type:`string`) - - Summarize "an_event" here. - Give more details about "an_event" here. - - BroxygenExample::a_function should not be confused as a parameter - in the generated docs, but it also doesn't generate a cross-reference - link. Use the see role instead: :bro:see:`BroxygenExample::a_function`. - - - :name: Describe the argument here. - -Functions -######### -.. bro:id:: BroxygenExample::a_function - - :Type: :bro:type:`function` (tag: :bro:type:`string`, msg: :bro:type:`string`) : :bro:type:`string` - - Summarize purpose of "a_function" here. - Give more details about "a_function" here. - Separating the documentation of the params/return values with - empty comments is optional, but improves readability of script. - - - :tag: Function arguments can be described - like this. - - - :msg: Another param. - - - :returns: Describe the return type here. - - diff --git a/testing/btest/Baseline/doc.broxygen.func-params/autogen-reST-func-params.rst b/testing/btest/Baseline/doc.broxygen.func-params/autogen-reST-func-params.rst deleted file mode 100644 index 06f196b73c..0000000000 --- a/testing/btest/Baseline/doc.broxygen.func-params/autogen-reST-func-params.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. bro:id:: test_func_params_func - - :Type: :bro:type:`function` (i: :bro:type:`int`, j: :bro:type:`int`) : :bro:type:`string` - - This is a global function declaration. - - - :i: First param. - - :j: Second param. - - - :returns: A string. - -.. bro:type:: test_func_params_rec - - :Type: :bro:type:`record` - - field_func: :bro:type:`function` (i: :bro:type:`int`, j: :bro:type:`int`) : :bro:type:`string` - This is a record field function. - - - :i: First param. - - :j: Second param. - - - :returns: A string. - - diff --git a/testing/btest/Baseline/doc.broxygen.identifier/test.rst b/testing/btest/Baseline/doc.broxygen.identifier/test.rst deleted file mode 100644 index 0c7c44581d..0000000000 --- a/testing/btest/Baseline/doc.broxygen.identifier/test.rst +++ /dev/null @@ -1,230 +0,0 @@ -.. bro:id:: BroxygenExample::Broxygen_One - - :Type: :bro:type:`Notice::Type` - - Any number of this type of comment - will document "Broxygen_One". - -.. bro:id:: BroxygenExample::Broxygen_Two - - :Type: :bro:type:`Notice::Type` - - Any number of this type of comment - will document "BROXYGEN_TWO". - -.. bro:id:: BroxygenExample::Broxygen_Three - - :Type: :bro:type:`Notice::Type` - - -.. bro:id:: BroxygenExample::Broxygen_Four - - :Type: :bro:type:`Notice::Type` - - Omitting comments is fine, and so is mixing ``##`` and ``##<``, but - it's probably best to use only one style consistently. - -.. bro:id:: BroxygenExample::LOG - - :Type: :bro:type:`Log::ID` - - -.. bro:type:: BroxygenExample::SimpleEnum - - :Type: :bro:type:`enum` - - .. bro:enum:: BroxygenExample::ONE BroxygenExample::SimpleEnum - - Documentation for particular enum values is added like this. - And can also span multiple lines. - - .. 
bro:enum:: BroxygenExample::TWO BroxygenExample::SimpleEnum - - Or this style is valid to document the preceding enum value. - - .. bro:enum:: BroxygenExample::THREE BroxygenExample::SimpleEnum - - .. bro:enum:: BroxygenExample::FOUR BroxygenExample::SimpleEnum - - And some documentation for "FOUR". - - .. bro:enum:: BroxygenExample::FIVE BroxygenExample::SimpleEnum - - Also "FIVE". - - Documentation for the "SimpleEnum" type goes here. - It can span multiple lines. - -.. bro:id:: BroxygenExample::ONE - - :Type: :bro:type:`BroxygenExample::SimpleEnum` - - Documentation for particular enum values is added like this. - And can also span multiple lines. - -.. bro:id:: BroxygenExample::TWO - - :Type: :bro:type:`BroxygenExample::SimpleEnum` - - Or this style is valid to document the preceding enum value. - -.. bro:id:: BroxygenExample::THREE - - :Type: :bro:type:`BroxygenExample::SimpleEnum` - - -.. bro:id:: BroxygenExample::FOUR - - :Type: :bro:type:`BroxygenExample::SimpleEnum` - - And some documentation for "FOUR". - -.. bro:id:: BroxygenExample::FIVE - - :Type: :bro:type:`BroxygenExample::SimpleEnum` - - Also "FIVE". - -.. bro:type:: BroxygenExample::SimpleRecord - - :Type: :bro:type:`record` - - field1: :bro:type:`count` - Counts something. - - field2: :bro:type:`bool` - Toggles something. - - field_ext: :bro:type:`string` :bro:attr:`&optional` - Document the extending field like this. - Or here, like this. - - General documentation for a type "SimpleRecord" goes here. - The way fields can be documented is similar to what's already seen - for enums. - -.. bro:type:: BroxygenExample::ComplexRecord - - :Type: :bro:type:`record` - - field1: :bro:type:`count` - Counts something. - - field2: :bro:type:`bool` - Toggles something. - - field3: :bro:type:`BroxygenExample::SimpleRecord` - Broxygen automatically tracks types - and cross-references are automatically - inserted in to generated docs. - - msg: :bro:type:`string` :bro:attr:`&default` = ``"blah"`` :bro:attr:`&optional` - Attributes are self-documenting. - :Attributes: :bro:attr:`&redef` - - General documentation for a type "ComplexRecord" goes here. - -.. bro:type:: BroxygenExample::Info - - :Type: :bro:type:`record` - - ts: :bro:type:`time` :bro:attr:`&log` - - uid: :bro:type:`string` :bro:attr:`&log` - - status: :bro:type:`count` :bro:attr:`&log` :bro:attr:`&optional` - - An example record to be used with a logging stream. - Nothing special about it. If another script redefs this type - to add fields, the generated documentation will show all original - fields plus the extensions and the scripts which contributed to it - (provided they are also @load'ed). - -.. bro:id:: BroxygenExample::an_option - - :Type: :bro:type:`set` [:bro:type:`addr`, :bro:type:`addr`, :bro:type:`string`] - :Attributes: :bro:attr:`&redef` - :Default: ``{}`` - - Add documentation for "an_option" here. - The type/attribute information is all generated automatically. - -.. bro:id:: BroxygenExample::option_with_init - - :Type: :bro:type:`interval` - :Attributes: :bro:attr:`&redef` - :Default: ``10.0 msecs`` - - Default initialization will be generated automatically. - More docs can be added here. - -.. bro:id:: BroxygenExample::a_var - - :Type: :bro:type:`bool` - - Put some documentation for "a_var" here. Any global/non-const that - isn't a function/event/hook is classified as a "state variable" - in the generated docs. - -.. 
bro:id:: BroxygenExample::var_without_explicit_type - - :Type: :bro:type:`string` - :Default: ``"this works"`` - - Types are inferred, that information is self-documenting. - -.. bro:id:: BroxygenExample::summary_test - - :Type: :bro:type:`string` - - The first sentence for a particular identifier's summary text ends here. - And this second sentence doesn't show in the short description provided - by the table of all identifiers declared by this script. - -.. bro:id:: BroxygenExample::a_function - - :Type: :bro:type:`function` (tag: :bro:type:`string`, msg: :bro:type:`string`) : :bro:type:`string` - - Summarize purpose of "a_function" here. - Give more details about "a_function" here. - Separating the documentation of the params/return values with - empty comments is optional, but improves readability of script. - - - :tag: Function arguments can be described - like this. - - - :msg: Another param. - - - :returns: Describe the return type here. - -.. bro:id:: BroxygenExample::an_event - - :Type: :bro:type:`event` (name: :bro:type:`string`) - - Summarize "an_event" here. - Give more details about "an_event" here. - - BroxygenExample::a_function should not be confused as a parameter - in the generated docs, but it also doesn't generate a cross-reference - link. Use the see role instead: :bro:see:`BroxygenExample::a_function`. - - - :name: Describe the argument here. - -.. bro:id:: BroxygenExample::function_without_proto - - :Type: :bro:type:`function` (tag: :bro:type:`string`) : :bro:type:`string` - - -.. bro:type:: BroxygenExample::PrivateRecord - - :Type: :bro:type:`record` - - field1: :bro:type:`bool` - - field2: :bro:type:`count` - - diff --git a/testing/btest/Baseline/doc.broxygen.package/test.rst b/testing/btest/Baseline/doc.broxygen.package/test.rst deleted file mode 100644 index b96de2148b..0000000000 --- a/testing/btest/Baseline/doc.broxygen.package/test.rst +++ /dev/null @@ -1,37 +0,0 @@ -:orphan: - -Package: broxygen -================= - -This package is loaded during the process which automatically generates -reference documentation for all Bro scripts (i.e. "Broxygen"). Its only -purpose is to provide an easy way to load all known Bro scripts plus any -extra scripts needed or used by the documentation process. - -:doc:`/scripts/broxygen/__load__.bro` - - -:doc:`/scripts/broxygen/example.bro` - - This is an example script that demonstrates Broxygen-style - documentation. It generally will make most sense when viewing - the script's raw source code and comparing to the HTML-rendered - version. - - Comments in the from ``##!`` are meant to summarize the script's - purpose. They are transferred directly in to the generated - `reStructuredText `_ - (reST) document associated with the script. - - .. tip:: You can embed directives and roles within ``##``-stylized comments. - - There's also a custom role to reference any identifier node in - the Bro Sphinx domain that's good for "see alsos", e.g. - - See also: :bro:see:`BroxygenExample::a_var`, - :bro:see:`BroxygenExample::ONE`, :bro:see:`SSH::Info` - - And a custom directive does the equivalent references: - - .. 
bro:see:: BroxygenExample::a_var BroxygenExample::ONE SSH::Info - diff --git a/testing/btest/Baseline/doc.broxygen.package_index/test.rst b/testing/btest/Baseline/doc.broxygen.package_index/test.rst deleted file mode 100644 index f551ab1cd3..0000000000 --- a/testing/btest/Baseline/doc.broxygen.package_index/test.rst +++ /dev/null @@ -1,7 +0,0 @@ -:doc:`broxygen ` - - This package is loaded during the process which automatically generates - reference documentation for all Bro scripts (i.e. "Broxygen"). Its only - purpose is to provide an easy way to load all known Bro scripts plus any - extra scripts needed or used by the documentation process. - diff --git a/testing/btest/Baseline/doc.broxygen.records/autogen-reST-records.rst b/testing/btest/Baseline/doc.broxygen.records/autogen-reST-records.rst deleted file mode 100644 index 60d80f6b07..0000000000 --- a/testing/btest/Baseline/doc.broxygen.records/autogen-reST-records.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. bro:type:: TestRecord1 - - :Type: :bro:type:`record` - - field1: :bro:type:`bool` - - field2: :bro:type:`count` - - -.. bro:type:: TestRecord2 - - :Type: :bro:type:`record` - - A: :bro:type:`count` - document ``A`` - - B: :bro:type:`bool` - document ``B`` - - C: :bro:type:`TestRecord1` - and now ``C`` - is a declared type - - D: :bro:type:`set` [:bro:type:`count`, :bro:type:`bool`] - sets/tables should show the index types - - Here's the ways records and record fields can be documented. - diff --git a/testing/btest/Baseline/doc.broxygen.script_index/test.rst b/testing/btest/Baseline/doc.broxygen.script_index/test.rst deleted file mode 100644 index dda280facf..0000000000 --- a/testing/btest/Baseline/doc.broxygen.script_index/test.rst +++ /dev/null @@ -1,5 +0,0 @@ -.. toctree:: - :maxdepth: 1 - - broxygen/__load__.bro - broxygen/example.bro diff --git a/testing/btest/Baseline/doc.broxygen.script_summary/test.rst b/testing/btest/Baseline/doc.broxygen.script_summary/test.rst deleted file mode 100644 index 125a579c81..0000000000 --- a/testing/btest/Baseline/doc.broxygen.script_summary/test.rst +++ /dev/null @@ -1,23 +0,0 @@ -:doc:`/scripts/broxygen/example.bro` - This is an example script that demonstrates Broxygen-style - documentation. It generally will make most sense when viewing - the script's raw source code and comparing to the HTML-rendered - version. - - Comments in the from ``##!`` are meant to summarize the script's - purpose. They are transferred directly in to the generated - `reStructuredText `_ - (reST) document associated with the script. - - .. tip:: You can embed directives and roles within ``##``-stylized comments. - - There's also a custom role to reference any identifier node in - the Bro Sphinx domain that's good for "see alsos", e.g. - - See also: :bro:see:`BroxygenExample::a_var`, - :bro:see:`BroxygenExample::ONE`, :bro:see:`SSH::Info` - - And a custom directive does the equivalent references: - - .. bro:see:: BroxygenExample::a_var BroxygenExample::ONE SSH::Info - diff --git a/testing/btest/Baseline/doc.broxygen.type-aliases/autogen-reST-type-aliases.rst b/testing/btest/Baseline/doc.broxygen.type-aliases/autogen-reST-type-aliases.rst deleted file mode 100644 index 3a26b8adc6..0000000000 --- a/testing/btest/Baseline/doc.broxygen.type-aliases/autogen-reST-type-aliases.rst +++ /dev/null @@ -1,44 +0,0 @@ -.. bro:type:: BroxygenTest::TypeAlias - - :Type: :bro:type:`bool` - - This is just an alias for a builtin type ``bool``. - -.. 
bro:type:: BroxygenTest::NotTypeAlias - - :Type: :bro:type:`bool` - - This type should get its own comments, not associated w/ TypeAlias. - -.. bro:type:: BroxygenTest::OtherTypeAlias - - :Type: :bro:type:`bool` - - This cross references ``bool`` in the description of its type - instead of ``TypeAlias`` just because it seems more useful -- - one doesn't have to click through the full type alias chain to - find out what the actual type is... - -.. bro:id:: BroxygenTest::a - - :Type: :bro:type:`BroxygenTest::TypeAlias` - - But this should reference a type of ``TypeAlias``. - -.. bro:id:: BroxygenTest::b - - :Type: :bro:type:`BroxygenTest::OtherTypeAlias` - - And this should reference a type of ``OtherTypeAlias``. - -.. bro:type:: BroxygenTest::MyRecord - - :Type: :bro:type:`record` - - f1: :bro:type:`BroxygenTest::TypeAlias` - - f2: :bro:type:`BroxygenTest::OtherTypeAlias` - - f3: :bro:type:`bool` - - diff --git a/testing/btest/Baseline/doc.broxygen.vectors/autogen-reST-vectors.rst b/testing/btest/Baseline/doc.broxygen.vectors/autogen-reST-vectors.rst deleted file mode 100644 index 37eabb9419..0000000000 --- a/testing/btest/Baseline/doc.broxygen.vectors/autogen-reST-vectors.rst +++ /dev/null @@ -1,33 +0,0 @@ -.. bro:id:: test_vector0 - - :Type: :bro:type:`vector` of :bro:type:`string` - :Default: - - :: - - [] - - Yield type is documented/cross-referenced for primitize types. - -.. bro:id:: test_vector1 - - :Type: :bro:type:`vector` of :bro:type:`TestRecord` - :Default: - - :: - - [] - - Yield type is documented/cross-referenced for composite types. - -.. bro:id:: test_vector2 - - :Type: :bro:type:`vector` of :bro:type:`vector` of :bro:type:`TestRecord` - :Default: - - :: - - [] - - Just showing an even fancier yield type. - diff --git a/testing/btest/Baseline/doc.manual.connection_record_01/.stdout b/testing/btest/Baseline/doc.manual.connection_record_01/.stdout deleted file mode 100644 index 7f134460e3..0000000000 --- a/testing/btest/Baseline/doc.manual.connection_record_01/.stdout +++ /dev/null @@ -1,5 +0,0 @@ -[id=[orig_h=212.180.42.100, orig_p=25000/tcp, resp_h=131.243.64.3, resp_p=53/tcp], orig=[size=29, state=5, num_pkts=6, num_bytes_ip=273, flow_label=0], resp=[size=44, state=5, num_pkts=5, num_bytes_ip=248, flow_label=0], start_time=930613226.067666, duration=0.709643, service={ - -}, addl=, hot=0, history=ShADadFf, uid=UWkUyAuUGXf, tunnel=, conn=[ts=930613226.067666, uid=UWkUyAuUGXf, id=[orig_h=212.180.42.100, orig_p=25000/tcp, resp_h=131.243.64.3, resp_p=53/tcp], proto=tcp, service=, duration=0.709643, orig_bytes=29, resp_bytes=44, conn_state=SF, local_orig=, missed_bytes=0, history=ShADadFf, orig_pkts=6, orig_ip_bytes=273, resp_pkts=5, resp_ip_bytes=248, tunnel_parents={ - -}], extract_orig=F, extract_resp=F] diff --git a/testing/btest/Baseline/doc.manual.connection_record_02/.stdout b/testing/btest/Baseline/doc.manual.connection_record_02/.stdout deleted file mode 100644 index 824dd03097..0000000000 --- a/testing/btest/Baseline/doc.manual.connection_record_02/.stdout +++ /dev/null @@ -1,9 +0,0 @@ -[id=[orig_h=212.180.42.100, orig_p=25000/tcp, resp_h=131.243.64.3, resp_p=53/tcp], orig=[size=29, state=5, num_pkts=6, num_bytes_ip=273, flow_label=0], resp=[size=44, state=5, num_pkts=5, num_bytes_ip=248, flow_label=0], start_time=930613226.067666, duration=0.709643, service={ - -}, addl=, hot=0, history=ShADadFf, uid=UWkUyAuUGXf, tunnel=, conn=[ts=930613226.067666, uid=UWkUyAuUGXf, id=[orig_h=212.180.42.100, orig_p=25000/tcp, resp_h=131.243.64.3, resp_p=53/tcp], proto=tcp, service=, 
duration=0.709643, orig_bytes=29, resp_bytes=44, conn_state=SF, local_orig=, missed_bytes=0, history=ShADadFf, orig_pkts=6, orig_ip_bytes=273, resp_pkts=5, resp_ip_bytes=248, tunnel_parents={ - -}], extract_orig=F, extract_resp=F, dns=, dns_state=[pending={ - -}, finished_answers={ -34798 -}]] diff --git a/testing/btest/Baseline/doc.manual.data_struct_record_01/.stdout b/testing/btest/Baseline/doc.manual.data_struct_record_01/.stdout deleted file mode 100644 index 4e628b9ae7..0000000000 --- a/testing/btest/Baseline/doc.manual.data_struct_record_01/.stdout +++ /dev/null @@ -1,6 +0,0 @@ -Service: dns(RFC1035) - port: 53/tcp - port: 53/udp -Service: http(RFC2616) - port: 80/tcp - port: 8080/tcp diff --git a/testing/btest/Baseline/doc.manual.data_struct_record_02/.stdout b/testing/btest/Baseline/doc.manual.data_struct_record_02/.stdout deleted file mode 100644 index 0428764bea..0000000000 --- a/testing/btest/Baseline/doc.manual.data_struct_record_02/.stdout +++ /dev/null @@ -1,7 +0,0 @@ -System: morlock - Service: dns(RFC1035) - port: 53/tcp - port: 53/udp - Service: http(RFC2616) - port: 80/tcp - port: 8080/tcp diff --git a/testing/btest/Baseline/doc.manual.data_struct_set_declaration/.stdout b/testing/btest/Baseline/doc.manual.data_struct_set_declaration/.stdout deleted file mode 100644 index d1aa16c7d3..0000000000 --- a/testing/btest/Baseline/doc.manual.data_struct_set_declaration/.stdout +++ /dev/null @@ -1,8 +0,0 @@ -SSL Port: 993/tcp -SSL Port: 22/tcp -SSL Port: 587/tcp -SSL Port: 443/tcp -Non-SSL Port: 143/tcp -Non-SSL Port: 25/tcp -Non-SSL Port: 80/tcp -Non-SSL Port: 23/tcp diff --git a/testing/btest/Baseline/doc.manual.data_struct_table_complex/.stdout b/testing/btest/Baseline/doc.manual.data_struct_table_complex/.stdout deleted file mode 100644 index e22f36a244..0000000000 --- a/testing/btest/Baseline/doc.manual.data_struct_table_complex/.stdout +++ /dev/null @@ -1,4 +0,0 @@ -Kiru was released in 1968 by Toho studios, directed by Kihachi Okamoto and starring Tatsuya Nakadai -Goyokin was released in 1969 by Fuji studios, directed by Hideo Gosha and starring Tatsuya Nakadai -Harakiri was released in 1962 by Shochiku Eiga studios, directed by Masaki Kobayashi and starring Tatsuya Nakadai -Tasogare Seibei was released in 2002 by Eisei Gekijo studios, directed by Yoji Yamada and starring Hiroyuki Sanada diff --git a/testing/btest/Baseline/doc.manual.data_struct_table_declaration/.stdout b/testing/btest/Baseline/doc.manual.data_struct_table_declaration/.stdout deleted file mode 100644 index 19b1648904..0000000000 --- a/testing/btest/Baseline/doc.manual.data_struct_table_declaration/.stdout +++ /dev/null @@ -1,4 +0,0 @@ -Service Name: IMAPS - Common Port: 993/tcp -Service Name: HTTPS - Common Port: 443/tcp -Service Name: SSH - Common Port: 22/tcp -Service Name: SMTPS - Common Port: 587/tcp diff --git a/testing/btest/Baseline/doc.manual.data_struct_vector/.stdout b/testing/btest/Baseline/doc.manual.data_struct_vector/.stdout deleted file mode 100644 index 8348ce7198..0000000000 --- a/testing/btest/Baseline/doc.manual.data_struct_vector/.stdout +++ /dev/null @@ -1,2 +0,0 @@ -[1, 2, 3, 4] -[1, 2, 3, 4] diff --git a/testing/btest/Baseline/doc.manual.data_struct_vector_declaration/.stdout b/testing/btest/Baseline/doc.manual.data_struct_vector_declaration/.stdout deleted file mode 100644 index 48ce5d9c56..0000000000 --- a/testing/btest/Baseline/doc.manual.data_struct_vector_declaration/.stdout +++ /dev/null @@ -1,4 +0,0 @@ -contents of v1: [1, 2, 3, 4] -length of v1: 4 -contents of v1: [1, 2, 3, 
4] -length of v2: 4 diff --git a/testing/btest/Baseline/doc.manual.data_struct_vector_iter/.stdout b/testing/btest/Baseline/doc.manual.data_struct_vector_iter/.stdout deleted file mode 100644 index 0326e6580e..0000000000 --- a/testing/btest/Baseline/doc.manual.data_struct_vector_iter/.stdout +++ /dev/null @@ -1,3 +0,0 @@ -1.2.0.0/18 -2.3.0.0/18 -3.4.0.0/18 diff --git a/testing/btest/Baseline/doc.manual.data_type_const/.stdout b/testing/btest/Baseline/doc.manual.data_type_const/.stdout deleted file mode 100644 index 0e49670a83..0000000000 --- a/testing/btest/Baseline/doc.manual.data_type_const/.stdout +++ /dev/null @@ -1,4 +0,0 @@ -{ -[6666/tcp] = IRC, -[80/tcp] = WWW -} diff --git a/testing/btest/Baseline/doc.manual.data_type_declaration/.stdout b/testing/btest/Baseline/doc.manual.data_type_declaration/.stdout deleted file mode 100644 index a6f28b5e52..0000000000 --- a/testing/btest/Baseline/doc.manual.data_type_declaration/.stdout +++ /dev/null @@ -1 +0,0 @@ -A: 10, B: 10 diff --git a/testing/btest/Baseline/doc.manual.data_type_interval/.stdout b/testing/btest/Baseline/doc.manual.data_type_interval/.stdout deleted file mode 100644 index 1cd5999711..0000000000 --- a/testing/btest/Baseline/doc.manual.data_type_interval/.stdout +++ /dev/null @@ -1,15 +0,0 @@ -2011/06/18 19:03:08: New connection established from 141.142.220.118 to 208.80.152.118 -2011/06/18 19:03:08: New connection established from 141.142.220.118 to 208.80.152.3 - Time since last connection: 132.0 msecs 97.0 usecs -2011/06/18 19:03:08: New connection established from 141.142.220.118 to 208.80.152.3 - Time since last connection: 177.0 usecs -2011/06/18 19:03:08: New connection established from 141.142.220.118 to 208.80.152.3 - Time since last connection: 2.0 msecs 177.0 usecs -2011/06/18 19:03:08: New connection established from 141.142.220.118 to 208.80.152.3 - Time since last connection: 33.0 msecs 898.0 usecs -2011/06/18 19:03:08: New connection established from 141.142.220.118 to 208.80.152.3 - Time since last connection: 35.0 usecs -2011/06/18 19:03:08: New connection established from 141.142.220.118 to 208.80.152.3 - Time since last connection: 2.0 msecs 532.0 usecs -2011/06/18 19:03:08: New connection established from 141.142.220.118 to 208.80.152.2 - Time since last connection: 7.0 msecs 866.0 usecs diff --git a/testing/btest/Baseline/doc.manual.data_type_local/.stdout b/testing/btest/Baseline/doc.manual.data_type_local/.stdout deleted file mode 100644 index e150c0b19d..0000000000 --- a/testing/btest/Baseline/doc.manual.data_type_local/.stdout +++ /dev/null @@ -1 +0,0 @@ -i + 2 = 12 diff --git a/testing/btest/Baseline/doc.manual.data_type_pattern_01/.stdout b/testing/btest/Baseline/doc.manual.data_type_pattern_01/.stdout deleted file mode 100644 index 11358a776e..0000000000 --- a/testing/btest/Baseline/doc.manual.data_type_pattern_01/.stdout +++ /dev/null @@ -1,3 +0,0 @@ -The - brown fox jumped over the - dog. 
diff --git a/testing/btest/Baseline/doc.manual.data_type_pattern_02/.stdout b/testing/btest/Baseline/doc.manual.data_type_pattern_02/.stdout deleted file mode 100644 index 808dc3d572..0000000000 --- a/testing/btest/Baseline/doc.manual.data_type_pattern_02/.stdout +++ /dev/null @@ -1,2 +0,0 @@ -equality and /^?(equal)$?/ are not equal -equality and /^?(equality)$?/ are equal diff --git a/testing/btest/Baseline/doc.manual.data_type_subnets/.stdout b/testing/btest/Baseline/doc.manual.data_type_subnets/.stdout deleted file mode 100644 index facaaabe64..0000000000 --- a/testing/btest/Baseline/doc.manual.data_type_subnets/.stdout +++ /dev/null @@ -1,4 +0,0 @@ -172.16.4.56 belongs to subnet 172.16.0.0/20 -172.16.47.254 belongs to subnet 172.16.32.0/20 -172.16.22.45 belongs to subnet 172.16.16.0/20 -172.16.1.1 belongs to subnet 172.16.0.0/20 diff --git a/testing/btest/Baseline/doc.manual.data_type_time/.stdout b/testing/btest/Baseline/doc.manual.data_type_time/.stdout deleted file mode 100644 index 149cb40e2a..0000000000 --- a/testing/btest/Baseline/doc.manual.data_type_time/.stdout +++ /dev/null @@ -1,8 +0,0 @@ -2011/06/18 19:03:08: New connection established from 141.142.220.118 to 208.80.152.118^J -2011/06/18 19:03:08: New connection established from 141.142.220.118 to 208.80.152.3^J -2011/06/18 19:03:08: New connection established from 141.142.220.118 to 208.80.152.3^J -2011/06/18 19:03:08: New connection established from 141.142.220.118 to 208.80.152.3^J -2011/06/18 19:03:08: New connection established from 141.142.220.118 to 208.80.152.3^J -2011/06/18 19:03:08: New connection established from 141.142.220.118 to 208.80.152.3^J -2011/06/18 19:03:08: New connection established from 141.142.220.118 to 208.80.152.3^J -2011/06/18 19:03:08: New connection established from 141.142.220.118 to 208.80.152.2^J diff --git a/testing/btest/Baseline/doc.manual.framework_logging_factorial_01/.stdout b/testing/btest/Baseline/doc.manual.framework_logging_factorial_01/.stdout deleted file mode 100644 index db47b283d0..0000000000 --- a/testing/btest/Baseline/doc.manual.framework_logging_factorial_01/.stdout +++ /dev/null @@ -1,10 +0,0 @@ -1 -2 -6 -24 -120 -720 -5040 -40320 -362880 -3628800 diff --git a/testing/btest/Baseline/doc.manual.framework_logging_factorial_02/factor.log b/testing/btest/Baseline/doc.manual.framework_logging_factorial_02/factor.log deleted file mode 100644 index c643116265..0000000000 --- a/testing/btest/Baseline/doc.manual.framework_logging_factorial_02/factor.log +++ /dev/null @@ -1,19 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path factor -#open 2013-03-19-03-25-33 -#fields num factorial_num -#types count count -1 1 -2 2 -3 6 -4 24 -5 120 -6 720 -7 5040 -8 40320 -9 362880 -10 3628800 -#close 2013-03-19-03-25-33 diff --git a/testing/btest/Baseline/doc.manual.framework_logging_factorial_03/factor-mod5.log b/testing/btest/Baseline/doc.manual.framework_logging_factorial_03/factor-mod5.log deleted file mode 100644 index 2a466484d6..0000000000 --- a/testing/btest/Baseline/doc.manual.framework_logging_factorial_03/factor-mod5.log +++ /dev/null @@ -1,15 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path factor-mod5 -#open 2013-03-20-03-22-52 -#fields num factorial_num -#types count count -5 120 -6 720 -7 5040 -8 40320 -9 362880 -10 3628800 -#close 2013-03-20-03-22-52 diff --git a/testing/btest/Baseline/doc.manual.framework_logging_factorial_03/factor-non5.log 
b/testing/btest/Baseline/doc.manual.framework_logging_factorial_03/factor-non5.log deleted file mode 100644 index 4430dcc8a4..0000000000 --- a/testing/btest/Baseline/doc.manual.framework_logging_factorial_03/factor-non5.log +++ /dev/null @@ -1,13 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path factor-non5 -#open 2013-03-20-03-22-52 -#fields num factorial_num -#types count count -1 1 -2 2 -3 6 -4 24 -#close 2013-03-20-03-22-52 diff --git a/testing/btest/Baseline/doc.manual.framework_logging_factorial_04/factor-mod5.log b/testing/btest/Baseline/doc.manual.framework_logging_factorial_04/factor-mod5.log deleted file mode 100644 index 6b50ca55e7..0000000000 --- a/testing/btest/Baseline/doc.manual.framework_logging_factorial_04/factor-mod5.log +++ /dev/null @@ -1,15 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path factor-mod5 -#open 2013-03-25-02-00-12 -#fields num factorial_num -#types count count -5 120 -6 720 -7 5040 -8 40320 -9 362880 -10 3628800 -#close 2013-03-25-02-00-12 diff --git a/testing/btest/Baseline/doc.manual.framework_logging_factorial_04/factor-non5.log b/testing/btest/Baseline/doc.manual.framework_logging_factorial_04/factor-non5.log deleted file mode 100644 index d272ba48a9..0000000000 --- a/testing/btest/Baseline/doc.manual.framework_logging_factorial_04/factor-non5.log +++ /dev/null @@ -1,13 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path factor-non5 -#open 2013-03-25-02-00-12 -#fields num factorial_num -#types count count -1 1 -2 2 -3 6 -4 24 -#close 2013-03-25-02-00-12 diff --git a/testing/btest/Baseline/doc.manual.framework_notice_hook_suppression_01/.stdout b/testing/btest/Baseline/doc.manual.framework_notice_hook_suppression_01/.stdout deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/testing/btest/Baseline/doc.manual.framework_notice_shortcuts_01/.stdout b/testing/btest/Baseline/doc.manual.framework_notice_shortcuts_01/.stdout deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/testing/btest/Baseline/doc.manual.framework_notice_shortcuts_02/.stdout b/testing/btest/Baseline/doc.manual.framework_notice_shortcuts_02/.stdout deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/testing/btest/Baseline/doc.manual.using_bro_sandbox_01/.stdout b/testing/btest/Baseline/doc.manual.using_bro_sandbox_01/.stdout deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/testing/btest/Baseline/doc.manual.using_bro_sandbox_01/conn.log b/testing/btest/Baseline/doc.manual.using_bro_sandbox_01/conn.log deleted file mode 100644 index 6eb08725f5..0000000000 --- a/testing/btest/Baseline/doc.manual.using_bro_sandbox_01/conn.log +++ /dev/null @@ -1,43 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path conn -#open 2013-05-05-20-51-24 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents -#types time string addr port addr port enum string interval count count string bool count string count count count count table[string] -1300475167.096535 UWkUyAuUGXf 141.142.220.202 5353 224.0.0.251 5353 udp dns - - - S0 - 0 D 1 73 0 0 - -1300475167.097012 arKYeMETxOg fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp - - - - S0 - 0 D 1 199 0 0 - -1300475167.099816 k6kgXLOoSKl 141.142.220.50 5353 224.0.0.251 5353 udp - - - - S0 - 0 D 1 179 0 0 - 
-1300475168.853899 TEfuqmmG4bh 141.142.220.118 43927 141.142.2.2 53 udp dns 0.000435 38 89 SF - 0 Dd 1 66 1 117 - -1300475168.854378 FrJExwHcSal 141.142.220.118 37676 141.142.2.2 53 udp dns 0.000420 52 99 SF - 0 Dd 1 80 1 127 - -1300475168.854837 5OKnoww6xl4 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 38 183 SF - 0 Dd 1 66 1 211 - -1300475168.857956 fRFu0wcOle6 141.142.220.118 32902 141.142.2.2 53 udp dns 0.000317 38 89 SF - 0 Dd 1 66 1 117 - -1300475168.858306 qSsw6ESzHV4 141.142.220.118 59816 141.142.2.2 53 udp dns 0.000343 52 99 SF - 0 Dd 1 80 1 127 - -1300475168.858713 iE6yhOq3SF 141.142.220.118 59714 141.142.2.2 53 udp dns 0.000375 38 183 SF - 0 Dd 1 66 1 211 - -1300475168.891644 qCaWGmzFtM5 141.142.220.118 58206 141.142.2.2 53 udp dns 0.000339 38 89 SF - 0 Dd 1 66 1 117 - -1300475168.892037 70MGiRM1Qf4 141.142.220.118 38911 141.142.2.2 53 udp dns 0.000335 52 99 SF - 0 Dd 1 80 1 127 - -1300475168.892414 h5DsfNtYzi1 141.142.220.118 59746 141.142.2.2 53 udp dns 0.000421 38 183 SF - 0 Dd 1 66 1 211 - -1300475168.893988 c4Zw9TmAE05 141.142.220.118 45000 141.142.2.2 53 udp dns 0.000384 38 89 SF - 0 Dd 1 66 1 117 - -1300475168.894422 EAr0uf4mhq 141.142.220.118 48479 141.142.2.2 53 udp dns 0.000317 52 99 SF - 0 Dd 1 80 1 127 - -1300475168.894787 GvmoxJFXdTa 141.142.220.118 48128 141.142.2.2 53 udp dns 0.000423 38 183 SF - 0 Dd 1 66 1 211 - -1300475168.901749 slFea8xwSmb 141.142.220.118 56056 141.142.2.2 53 udp dns 0.000402 36 131 SF - 0 Dd 1 64 1 159 - -1300475168.902195 UfGkYA2HI2g 141.142.220.118 55092 141.142.2.2 53 udp dns 0.000374 36 198 SF - 0 Dd 1 64 1 226 - -1300475169.899438 BWaU4aSuwkc 141.142.220.44 5353 224.0.0.251 5353 udp dns - - - S0 - 0 D 1 85 0 0 - -1300475170.862384 10XodEwRycf 141.142.220.226 137 141.142.220.255 137 udp dns 2.613017 350 0 S0 - 0 D 7 546 0 0 - -1300475171.675372 zno26fFZkrh fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp dns 0.100096 66 0 S0 - 0 D 2 162 0 0 - -1300475171.677081 v5rgkJBig5l 141.142.220.226 55131 224.0.0.252 5355 udp dns 0.100021 66 0 S0 - 0 D 2 122 0 0 - -1300475173.116749 eWZCH7OONC1 fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp dns 0.099801 66 0 S0 - 0 D 2 162 0 0 - -1300475173.117362 0Pwk3ntf8O3 141.142.220.226 55671 224.0.0.252 5355 udp dns 0.099849 66 0 S0 - 0 D 2 122 0 0 - -1300475173.153679 0HKorjr8Zp7 141.142.220.238 56641 141.142.220.255 137 udp dns - - - S0 - 0 D 1 78 0 0 - -1300475168.859163 GSxOnSLghOa 141.142.220.118 49998 208.80.152.3 80 tcp http 0.215893 1130 734 S1 - 0 ShADad 6 1450 4 950 - -1300475168.652003 nQcgTWjvg4c 141.142.220.118 35634 208.80.152.2 80 tcp - 0.061329 463 350 OTH - 0 DdA 2 567 1 402 - -1300475168.895267 0Q4FH8sESw5 141.142.220.118 50001 208.80.152.3 80 tcp http 0.227284 1178 734 S1 - 0 ShADad 6 1498 4 950 - -1300475168.902635 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 tcp http 0.120041 534 412 S1 - 0 ShADad 4 750 3 576 - -1300475168.892936 Tw8jXtpTGu6 141.142.220.118 50000 208.80.152.3 80 tcp http 0.229603 1148 734 S1 - 0 ShADad 6 1468 4 950 - -1300475168.855305 3PKsZ2Uye21 141.142.220.118 49996 208.80.152.3 80 tcp http 0.218501 1171 733 S1 - 0 ShADad 6 1491 4 949 - -1300475168.892913 P654jzLoe3a 141.142.220.118 49999 208.80.152.3 80 tcp http 0.220961 1137 733 S1 - 0 ShADad 6 1457 4 949 - -1300475169.780331 2cx26uAvUPl 141.142.220.235 6705 173.192.163.128 80 tcp - - - - OTH - 0 h 0 0 1 48 - -1300475168.724007 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 tcp http 0.119905 525 232 S1 - 0 ShADad 4 741 3 396 - -1300475168.855330 VW0XPVINV8a 141.142.220.118 49997 208.80.152.3 
80 tcp http 0.219720 1125 734 S1 - 0 ShADad 6 1445 4 950 - -#close 2013-05-05-20-51-24 diff --git a/testing/btest/Baseline/doc.manual.using_bro_sandbox_01/http.log b/testing/btest/Baseline/doc.manual.using_bro_sandbox_01/http.log deleted file mode 100644 index 617c1f0e6e..0000000000 --- a/testing/btest/Baseline/doc.manual.using_bro_sandbox_01/http.log +++ /dev/null @@ -1,23 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path http -#open 2013-05-05-21-12-40 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file -#types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file -1300475168.784020 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 1 GET bits.wikimedia.org /skins-1.5/monobook/main.css http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - -1300475168.916018 VW0XPVINV8a 141.142.220.118 49997 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/6/63/Wikipedia-logo.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - -1300475168.916183 3PKsZ2Uye21 141.142.220.118 49996 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/thumb/b/bb/Wikipedia_wordmark.svg/174px-Wikipedia_wordmark.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - -1300475168.918358 GSxOnSLghOa 141.142.220.118 49998 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/b/bd/Bookshelf-40x201_6.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - -1300475168.952307 Tw8jXtpTGu6 141.142.220.118 50000 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/thumb/8/8a/Wikinews-logo.png/35px-Wikinews-logo.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - -1300475168.952296 P654jzLoe3a 141.142.220.118 49999 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/4/4a/Wiktionary-logo-en-35px.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - -1300475168.954820 0Q4FH8sESw5 141.142.220.118 50001 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/thumb/f/fa/Wikiquote-logo.svg/35px-Wikiquote-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - -1300475168.962687 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 1 GET meta.wikimedia.org /images/wikimedia-button.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - 
- - - - - -1300475168.975934 VW0XPVINV8a 141.142.220.118 49997 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/f/fa/Wikibooks-logo.svg/35px-Wikibooks-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - -1300475168.976436 3PKsZ2Uye21 141.142.220.118 49996 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/d/df/Wikispecies-logo.svg/35px-Wikispecies-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - -1300475168.979264 GSxOnSLghOa 141.142.220.118 49998 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/4/4c/Wikisource-logo.svg/35px-Wikisource-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - -1300475169.014619 Tw8jXtpTGu6 141.142.220.118 50000 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/4/4a/Commons-logo.svg/35px-Commons-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - -1300475169.014593 P654jzLoe3a 141.142.220.118 49999 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/9/91/Wikiversity-logo.svg/35px-Wikiversity-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - -1300475169.014927 0Q4FH8sESw5 141.142.220.118 50001 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/7/75/Wikimedia_Community_Logo.svg/35px-Wikimedia_Community_Logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified - - - (empty) - - - - - - -#close 2013-05-05-21-12-40 diff --git a/testing/btest/Baseline/doc.manual.using_bro_sandbox_02/conn.log b/testing/btest/Baseline/doc.manual.using_bro_sandbox_02/conn.log deleted file mode 100644 index cc68286986..0000000000 --- a/testing/btest/Baseline/doc.manual.using_bro_sandbox_02/conn.log +++ /dev/null @@ -1,15 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path conn -#open 2013-05-07-14-38-27 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents -#types time string addr port addr port enum string interval count count string bool count string count count count count table[string] -1320329757.771503 j4u32Pc5bif 10.0.2.15 49286 192.150.187.43 80 tcp http 15.161537 2899 1127 S2 - 0 ShADadF 20 3719 19 1891 - -1320329757.771262 nQcgTWjvg4c 10.0.2.15 49285 192.150.187.43 80 tcp http 15.161772 889 377 S2 - 0 ShADadF 8 1229 8 701 - -1320329757.761327 arKYeMETxOg 10.0.2.15 49283 192.150.187.43 80 tcp http 15.168898 459 189 S2 - 0 ShADadF 5 679 4 353 - -1320329757.458867 UWkUyAuUGXf 10.0.2.15 49282 192.150.187.43 80 tcp http 15.471378 1824 751 S2 - 0 ShADadF 12 2324 13 1275 - -1320329757.761638 k6kgXLOoSKl 10.0.2.15 49284 192.150.187.43 80 tcp http 15.168613 898 376 S2 - 0 ShADadF 8 1238 8 700 - 
-1320329757.771755 TEfuqmmG4bh 10.0.2.15 49287 192.150.187.43 80 tcp http 15.161267 900 376 S2 - 0 ShADadF 8 1240 8 700 - -#close 2013-05-07-14-38-27 diff --git a/testing/btest/Baseline/doc.manual.using_bro_sandbox_02/http.log b/testing/btest/Baseline/doc.manual.using_bro_sandbox_02/http.log deleted file mode 100644 index 031a9ce2ce..0000000000 --- a/testing/btest/Baseline/doc.manual.using_bro_sandbox_02/http.log +++ /dev/null @@ -1,26 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path http -#open 2013-05-07-14-38-27 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file -#types time string addr port addr port count string string string string string count count count string count string string table[enum] string string table[string] string string file -1320329757.460004 UWkUyAuUGXf 10.0.2.15 49282 192.150.187.43 80 1 GET bro-ids.org / - Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.106 Safari/535.2 0 0 304 Not Modified - - - (empty) - - - - - - -1320329757.772457 UWkUyAuUGXf 10.0.2.15 49282 192.150.187.43 80 2 GET bro-ids.org /css/pygments.css http://bro-ids.org/ Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.106 Safari/535.2 0 0 304 Not Modified - - - (empty) - - - - - - -1320329757.874406 UWkUyAuUGXf 10.0.2.15 49282 192.150.187.43 80 3 GET bro-ids.org /js/jquery.zrssfeed.js http://bro-ids.org/ Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.106 Safari/535.2 0 0 304 Not Modified - - - (empty) - - - - - - -1320329757.775110 k6kgXLOoSKl 10.0.2.15 49284 192.150.187.43 80 1 GET bro-ids.org /css/960.css http://bro-ids.org/ Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.106 Safari/535.2 0 0 304 Not Modified - - - (empty) - - - - - - -1320329757.776072 TEfuqmmG4bh 10.0.2.15 49287 192.150.187.43 80 1 GET bro-ids.org /js/jquery.cycle.all.min.js http://bro-ids.org/ Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.106 Safari/535.2 0 0 304 Not Modified - - - (empty) - - - - - - -1320329757.776421 nQcgTWjvg4c 10.0.2.15 49285 192.150.187.43 80 1 GET bro-ids.org /js/jquery.tweet.js http://bro-ids.org/ Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.106 Safari/535.2 0 0 304 Not Modified - - - (empty) - - - - - - -1320329757.776240 j4u32Pc5bif 10.0.2.15 49286 192.150.187.43 80 1 GET bro-ids.org /js/jquery.fancybox-1.3.4.pack.js http://bro-ids.org/ Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.106 Safari/535.2 0 0 304 Not Modified - - - (empty) - - - - - - -1320329757.775251 arKYeMETxOg 10.0.2.15 49283 192.150.187.43 80 1 GET bro-ids.org /css/bro-ids.css http://bro-ids.org/ Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.106 Safari/535.2 0 0 304 Not Modified - - - (empty) - - - - - - -1320329757.975651 UWkUyAuUGXf 10.0.2.15 49282 192.150.187.43 80 4 GET bro-ids.org /js/jquery.tableofcontents.js http://bro-ids.org/ Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.106 Safari/535.2 0 0 304 Not Modified - - - (empty) - - - - - - -1320329757.979943 k6kgXLOoSKl 10.0.2.15 49284 192.150.187.43 80 2 GET bro-ids.org /js/superfish.js http://bro-ids.org/ Mozilla/5.0 
(X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.106 Safari/535.2 0 0 304 Not Modified - - - (empty) - - - - - - -1320329757.985656 TEfuqmmG4bh 10.0.2.15 49287 192.150.187.43 80 2 GET bro-ids.org /js/hoverIntent.js http://bro-ids.org/ Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.106 Safari/535.2 0 0 304 Not Modified - - - (empty) - - - - - - -1320329757.989904 nQcgTWjvg4c 10.0.2.15 49285 192.150.187.43 80 2 GET bro-ids.org /js/general.js http://bro-ids.org/ Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.106 Safari/535.2 0 0 304 Not Modified - - - (empty) - - - - - - -1320329757.991315 j4u32Pc5bif 10.0.2.15 49286 192.150.187.43 80 2 GET bro-ids.org /js/jquery.collapse.js http://bro-ids.org/ Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.106 Safari/535.2 0 0 304 Not Modified - - - (empty) - - - - - - -1320329758.172397 j4u32Pc5bif 10.0.2.15 49286 192.150.187.43 80 3 GET bro-ids.org /css/print.css http://bro-ids.org/ Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.106 Safari/535.2 0 0 304 Not Modified - - - (empty) - - - - - - -1320329759.998388 j4u32Pc5bif 10.0.2.15 49286 192.150.187.43 80 4 GET bro-ids.org /documentation/index.html http://bro-ids.org/ Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.106 Safari/535.2 0 0 304 Not Modified - - - (empty) - - - - - - -1320329760.146412 j4u32Pc5bif 10.0.2.15 49286 192.150.187.43 80 5 GET bro-ids.org /js/breadcrumbs.js http://bro-ids.org/documentation/index.html Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.106 Safari/535.2 0 0 304 Not Modified - - - (empty) - - - - - - -1320329762.971726 j4u32Pc5bif 10.0.2.15 49286 192.150.187.43 80 6 GET bro-ids.org /documentation/reporting-problems.html http://bro-ids.org/documentation/index.html Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.106 Safari/535.2 0 0 304 Not Modified - - - (empty) - - - - - - -#close 2013-05-07-14-38-27 diff --git a/testing/btest/Baseline/doc.zeekygen.all_scripts/.stderr b/testing/btest/Baseline/doc.zeekygen.all_scripts/.stderr new file mode 100644 index 0000000000..177214239c --- /dev/null +++ b/testing/btest/Baseline/doc.zeekygen.all_scripts/.stderr @@ -0,0 +1,11 @@ +warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.zeek, line 245: deprecated (dhcp_discover) +warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.zeek, line 248: deprecated (dhcp_offer) +warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.zeek, line 251: deprecated (dhcp_request) +warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.zeek, line 254: deprecated (dhcp_decline) +warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.zeek, line 257: deprecated (dhcp_ack) +warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.zeek, line 260: deprecated (dhcp_nak) +warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.zeek, line 263: deprecated (dhcp_release) +warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/dhcp/deprecated_events.zeek, line 266: deprecated (dhcp_inform) +warning in /Users/jon/projects/bro/bro/scripts/policy/protocols/smb/__load__.zeek, line 1: deprecated script loaded from 
/Users/jon/projects/bro/bro/scripts/broxygen/__load__.zeek:10 "Use '@load base/protocols/smb' instead" +error in /Users/jon/projects/bro/bro/scripts/policy/frameworks/control/controller.zeek, line 22: The '' control command is unknown. +, line 1: received termination signal diff --git a/testing/btest/Baseline/doc.broxygen.all_scripts/.stdout b/testing/btest/Baseline/doc.zeekygen.all_scripts/.stdout similarity index 100% rename from testing/btest/Baseline/doc.broxygen.all_scripts/.stdout rename to testing/btest/Baseline/doc.zeekygen.all_scripts/.stdout diff --git a/testing/btest/Baseline/doc.broxygen.command_line/output b/testing/btest/Baseline/doc.zeekygen.command_line/output similarity index 100% rename from testing/btest/Baseline/doc.broxygen.command_line/output rename to testing/btest/Baseline/doc.zeekygen.command_line/output diff --git a/testing/btest/Baseline/doc.broxygen.comment_retrieval_bifs/out b/testing/btest/Baseline/doc.zeekygen.comment_retrieval_bifs/out similarity index 100% rename from testing/btest/Baseline/doc.broxygen.comment_retrieval_bifs/out rename to testing/btest/Baseline/doc.zeekygen.comment_retrieval_bifs/out diff --git a/testing/btest/Baseline/doc.zeekygen.enums/autogen-reST-enums.rst b/testing/btest/Baseline/doc.zeekygen.enums/autogen-reST-enums.rst new file mode 100644 index 0000000000..1cc82fbbe7 --- /dev/null +++ b/testing/btest/Baseline/doc.zeekygen.enums/autogen-reST-enums.rst @@ -0,0 +1,60 @@ +.. zeek:type:: TestEnum1 + + :Type: :zeek:type:`enum` + + .. zeek:enum:: ONE TestEnum1 + + like this + + .. zeek:enum:: TWO TestEnum1 + + or like this + + .. zeek:enum:: THREE TestEnum1 + + multiple + comments + and even + more comments + + .. zeek:enum:: FOUR TestEnum1 + + adding another + value + + .. zeek:enum:: FIVE TestEnum1 + + adding another + value + + There's tons of ways an enum can look... + +.. zeek:type:: TestEnum2 + + :Type: :zeek:type:`enum` + + .. zeek:enum:: A TestEnum2 + + like this + + .. zeek:enum:: B TestEnum2 + + or like this + + .. zeek:enum:: C TestEnum2 + + multiple + comments + and even + more comments + + The final comma is optional + +.. zeek:id:: TestEnumVal + + :Type: :zeek:type:`TestEnum1` + :Attributes: :zeek:attr:`&redef` + :Default: ``ONE`` + + this should reference the TestEnum1 type and not a generic "enum" type + diff --git a/testing/btest/Baseline/doc.zeekygen.example/example.rst b/testing/btest/Baseline/doc.zeekygen.example/example.rst new file mode 100644 index 0000000000..141a06cc2a --- /dev/null +++ b/testing/btest/Baseline/doc.zeekygen.example/example.rst @@ -0,0 +1,248 @@ +:tocdepth: 3 + +zeekygen/example.zeek +===================== +.. zeek:namespace:: ZeekygenExample + +This is an example script that demonstrates Zeekygen-style +documentation. It generally will make most sense when viewing +the script's raw source code and comparing to the HTML-rendered +version. + +Comments in the from ``##!`` are meant to summarize the script's +purpose. They are transferred directly in to the generated +`reStructuredText `_ +(reST) document associated with the script. + +.. tip:: You can embed directives and roles within ``##``-stylized comments. + +There's also a custom role to reference any identifier node in +the Zeek Sphinx domain that's good for "see alsos", e.g. + +See also: :zeek:see:`ZeekygenExample::a_var`, +:zeek:see:`ZeekygenExample::ONE`, :zeek:see:`SSH::Info` + +And a custom directive does the equivalent references: + +.. 
zeek:see:: ZeekygenExample::a_var ZeekygenExample::ONE SSH::Info + +:Namespace: ZeekygenExample +:Imports: :doc:`base/frameworks/notice `, :doc:`base/protocols/http `, :doc:`policy/frameworks/software/vulnerable.zeek ` + +Summary +~~~~~~~ +Redefinable Options +################### +======================================================================================= ======================================================= +:zeek:id:`ZeekygenExample::an_option`: :zeek:type:`set` :zeek:attr:`&redef` Add documentation for "an_option" here. +:zeek:id:`ZeekygenExample::option_with_init`: :zeek:type:`interval` :zeek:attr:`&redef` Default initialization will be generated automatically. +======================================================================================= ======================================================= + +State Variables +############### +========================================================================== ======================================================================== +:zeek:id:`ZeekygenExample::a_var`: :zeek:type:`bool` Put some documentation for "a_var" here. +:zeek:id:`ZeekygenExample::summary_test`: :zeek:type:`string` The first sentence for a particular identifier's summary text ends here. +:zeek:id:`ZeekygenExample::var_without_explicit_type`: :zeek:type:`string` Types are inferred, that information is self-documenting. +========================================================================== ======================================================================== + +Types +##### +==================================================================================== =========================================================== +:zeek:type:`ZeekygenExample::ComplexRecord`: :zeek:type:`record` :zeek:attr:`&redef` General documentation for a type "ComplexRecord" goes here. +:zeek:type:`ZeekygenExample::Info`: :zeek:type:`record` An example record to be used with a logging stream. +:zeek:type:`ZeekygenExample::SimpleEnum`: :zeek:type:`enum` Documentation for the "SimpleEnum" type goes here. +:zeek:type:`ZeekygenExample::SimpleRecord`: :zeek:type:`record` General documentation for a type "SimpleRecord" goes here. +==================================================================================== =========================================================== + +Redefinitions +############# +=============================================================== ==================================================================== +:zeek:type:`Log::ID`: :zeek:type:`enum` +:zeek:type:`Notice::Type`: :zeek:type:`enum` +:zeek:type:`ZeekygenExample::SimpleEnum`: :zeek:type:`enum` Document the "SimpleEnum" redef here with any special info regarding + the *redef* itself. +:zeek:type:`ZeekygenExample::SimpleRecord`: :zeek:type:`record` Document the record extension *redef* itself here. +=============================================================== ==================================================================== + +Events +###### +======================================================== ========================== +:zeek:id:`ZeekygenExample::an_event`: :zeek:type:`event` Summarize "an_event" here. +======================================================== ========================== + +Functions +######### +============================================================= ======================================= +:zeek:id:`ZeekygenExample::a_function`: :zeek:type:`function` Summarize purpose of "a_function" here. 
+============================================================= ======================================= + + +Detailed Interface +~~~~~~~~~~~~~~~~~~ +Redefinable Options +################### +.. zeek:id:: ZeekygenExample::an_option + + :Type: :zeek:type:`set` [:zeek:type:`addr`, :zeek:type:`addr`, :zeek:type:`string`] + :Attributes: :zeek:attr:`&redef` + :Default: ``{}`` + + Add documentation for "an_option" here. + The type/attribute information is all generated automatically. + +.. zeek:id:: ZeekygenExample::option_with_init + + :Type: :zeek:type:`interval` + :Attributes: :zeek:attr:`&redef` + :Default: ``10.0 msecs`` + + Default initialization will be generated automatically. + More docs can be added here. + +State Variables +############### +.. zeek:id:: ZeekygenExample::a_var + + :Type: :zeek:type:`bool` + + Put some documentation for "a_var" here. Any global/non-const that + isn't a function/event/hook is classified as a "state variable" + in the generated docs. + +.. zeek:id:: ZeekygenExample::summary_test + + :Type: :zeek:type:`string` + + The first sentence for a particular identifier's summary text ends here. + And this second sentence doesn't show in the short description provided + by the table of all identifiers declared by this script. + +.. zeek:id:: ZeekygenExample::var_without_explicit_type + + :Type: :zeek:type:`string` + :Default: ``"this works"`` + + Types are inferred, that information is self-documenting. + +Types +##### +.. zeek:type:: ZeekygenExample::ComplexRecord + + :Type: :zeek:type:`record` + + field1: :zeek:type:`count` + Counts something. + + field2: :zeek:type:`bool` + Toggles something. + + field3: :zeek:type:`ZeekygenExample::SimpleRecord` + Zeekygen automatically tracks types + and cross-references are automatically + inserted in to generated docs. + + msg: :zeek:type:`string` :zeek:attr:`&default` = ``"blah"`` :zeek:attr:`&optional` + Attributes are self-documenting. + :Attributes: :zeek:attr:`&redef` + + General documentation for a type "ComplexRecord" goes here. + +.. zeek:type:: ZeekygenExample::Info + + :Type: :zeek:type:`record` + + ts: :zeek:type:`time` :zeek:attr:`&log` + + uid: :zeek:type:`string` :zeek:attr:`&log` + + status: :zeek:type:`count` :zeek:attr:`&log` :zeek:attr:`&optional` + + An example record to be used with a logging stream. + Nothing special about it. If another script redefs this type + to add fields, the generated documentation will show all original + fields plus the extensions and the scripts which contributed to it + (provided they are also @load'ed). + +.. zeek:type:: ZeekygenExample::SimpleEnum + + :Type: :zeek:type:`enum` + + .. zeek:enum:: ZeekygenExample::ONE ZeekygenExample::SimpleEnum + + Documentation for particular enum values is added like this. + And can also span multiple lines. + + .. zeek:enum:: ZeekygenExample::TWO ZeekygenExample::SimpleEnum + + Or this style is valid to document the preceding enum value. + + .. zeek:enum:: ZeekygenExample::THREE ZeekygenExample::SimpleEnum + + .. zeek:enum:: ZeekygenExample::FOUR ZeekygenExample::SimpleEnum + + And some documentation for "FOUR". + + .. zeek:enum:: ZeekygenExample::FIVE ZeekygenExample::SimpleEnum + + Also "FIVE". + + Documentation for the "SimpleEnum" type goes here. + It can span multiple lines. + +.. zeek:type:: ZeekygenExample::SimpleRecord + + :Type: :zeek:type:`record` + + field1: :zeek:type:`count` + Counts something. + + field2: :zeek:type:`bool` + Toggles something. 
+ + field_ext: :zeek:type:`string` :zeek:attr:`&optional` + Document the extending field like this. + Or here, like this. + + General documentation for a type "SimpleRecord" goes here. + The way fields can be documented is similar to what's already seen + for enums. + +Events +###### +.. zeek:id:: ZeekygenExample::an_event + + :Type: :zeek:type:`event` (name: :zeek:type:`string`) + + Summarize "an_event" here. + Give more details about "an_event" here. + + ZeekygenExample::a_function should not be confused as a parameter + in the generated docs, but it also doesn't generate a cross-reference + link. Use the see role instead: :zeek:see:`ZeekygenExample::a_function`. + + + :name: Describe the argument here. + +Functions +######### +.. zeek:id:: ZeekygenExample::a_function + + :Type: :zeek:type:`function` (tag: :zeek:type:`string`, msg: :zeek:type:`string`) : :zeek:type:`string` + + Summarize purpose of "a_function" here. + Give more details about "a_function" here. + Separating the documentation of the params/return values with + empty comments is optional, but improves readability of script. + + + :tag: Function arguments can be described + like this. + + + :msg: Another param. + + + :returns: Describe the return type here. + + diff --git a/testing/btest/Baseline/doc.zeekygen.func-params/autogen-reST-func-params.rst b/testing/btest/Baseline/doc.zeekygen.func-params/autogen-reST-func-params.rst new file mode 100644 index 0000000000..cd0b7871d4 --- /dev/null +++ b/testing/btest/Baseline/doc.zeekygen.func-params/autogen-reST-func-params.rst @@ -0,0 +1,30 @@ +.. zeek:id:: test_func_params_func + + :Type: :zeek:type:`function` (i: :zeek:type:`int`, j: :zeek:type:`int`) : :zeek:type:`string` + + This is a global function declaration. + + + :i: First param. + + :j: Second param. + + + :returns: A string. + +.. zeek:type:: test_func_params_rec + + :Type: :zeek:type:`record` + + field_func: :zeek:type:`function` (i: :zeek:type:`int`, j: :zeek:type:`int`) : :zeek:type:`string` + This is a record field function. + + + :i: First param. + + :j: Second param. + + + :returns: A string. + + diff --git a/testing/btest/Baseline/doc.zeekygen.identifier/test.rst b/testing/btest/Baseline/doc.zeekygen.identifier/test.rst new file mode 100644 index 0000000000..34c4ae71a6 --- /dev/null +++ b/testing/btest/Baseline/doc.zeekygen.identifier/test.rst @@ -0,0 +1,230 @@ +.. zeek:id:: ZeekygenExample::Zeekygen_One + + :Type: :zeek:type:`Notice::Type` + + Any number of this type of comment + will document "Zeekygen_One". + +.. zeek:id:: ZeekygenExample::Zeekygen_Two + + :Type: :zeek:type:`Notice::Type` + + Any number of this type of comment + will document "ZEEKYGEN_TWO". + +.. zeek:id:: ZeekygenExample::Zeekygen_Three + + :Type: :zeek:type:`Notice::Type` + + +.. zeek:id:: ZeekygenExample::Zeekygen_Four + + :Type: :zeek:type:`Notice::Type` + + Omitting comments is fine, and so is mixing ``##`` and ``##<``, but + it's probably best to use only one style consistently. + +.. zeek:id:: ZeekygenExample::LOG + + :Type: :zeek:type:`Log::ID` + + +.. zeek:type:: ZeekygenExample::SimpleEnum + + :Type: :zeek:type:`enum` + + .. zeek:enum:: ZeekygenExample::ONE ZeekygenExample::SimpleEnum + + Documentation for particular enum values is added like this. + And can also span multiple lines. + + .. zeek:enum:: ZeekygenExample::TWO ZeekygenExample::SimpleEnum + + Or this style is valid to document the preceding enum value. + + .. zeek:enum:: ZeekygenExample::THREE ZeekygenExample::SimpleEnum + + .. 
zeek:enum:: ZeekygenExample::FOUR ZeekygenExample::SimpleEnum + + And some documentation for "FOUR". + + .. zeek:enum:: ZeekygenExample::FIVE ZeekygenExample::SimpleEnum + + Also "FIVE". + + Documentation for the "SimpleEnum" type goes here. + It can span multiple lines. + +.. zeek:id:: ZeekygenExample::ONE + + :Type: :zeek:type:`ZeekygenExample::SimpleEnum` + + Documentation for particular enum values is added like this. + And can also span multiple lines. + +.. zeek:id:: ZeekygenExample::TWO + + :Type: :zeek:type:`ZeekygenExample::SimpleEnum` + + Or this style is valid to document the preceding enum value. + +.. zeek:id:: ZeekygenExample::THREE + + :Type: :zeek:type:`ZeekygenExample::SimpleEnum` + + +.. zeek:id:: ZeekygenExample::FOUR + + :Type: :zeek:type:`ZeekygenExample::SimpleEnum` + + And some documentation for "FOUR". + +.. zeek:id:: ZeekygenExample::FIVE + + :Type: :zeek:type:`ZeekygenExample::SimpleEnum` + + Also "FIVE". + +.. zeek:type:: ZeekygenExample::SimpleRecord + + :Type: :zeek:type:`record` + + field1: :zeek:type:`count` + Counts something. + + field2: :zeek:type:`bool` + Toggles something. + + field_ext: :zeek:type:`string` :zeek:attr:`&optional` + Document the extending field like this. + Or here, like this. + + General documentation for a type "SimpleRecord" goes here. + The way fields can be documented is similar to what's already seen + for enums. + +.. zeek:type:: ZeekygenExample::ComplexRecord + + :Type: :zeek:type:`record` + + field1: :zeek:type:`count` + Counts something. + + field2: :zeek:type:`bool` + Toggles something. + + field3: :zeek:type:`ZeekygenExample::SimpleRecord` + Zeekygen automatically tracks types + and cross-references are automatically + inserted in to generated docs. + + msg: :zeek:type:`string` :zeek:attr:`&default` = ``"blah"`` :zeek:attr:`&optional` + Attributes are self-documenting. + :Attributes: :zeek:attr:`&redef` + + General documentation for a type "ComplexRecord" goes here. + +.. zeek:type:: ZeekygenExample::Info + + :Type: :zeek:type:`record` + + ts: :zeek:type:`time` :zeek:attr:`&log` + + uid: :zeek:type:`string` :zeek:attr:`&log` + + status: :zeek:type:`count` :zeek:attr:`&log` :zeek:attr:`&optional` + + An example record to be used with a logging stream. + Nothing special about it. If another script redefs this type + to add fields, the generated documentation will show all original + fields plus the extensions and the scripts which contributed to it + (provided they are also @load'ed). + +.. zeek:id:: ZeekygenExample::an_option + + :Type: :zeek:type:`set` [:zeek:type:`addr`, :zeek:type:`addr`, :zeek:type:`string`] + :Attributes: :zeek:attr:`&redef` + :Default: ``{}`` + + Add documentation for "an_option" here. + The type/attribute information is all generated automatically. + +.. zeek:id:: ZeekygenExample::option_with_init + + :Type: :zeek:type:`interval` + :Attributes: :zeek:attr:`&redef` + :Default: ``10.0 msecs`` + + Default initialization will be generated automatically. + More docs can be added here. + +.. zeek:id:: ZeekygenExample::a_var + + :Type: :zeek:type:`bool` + + Put some documentation for "a_var" here. Any global/non-const that + isn't a function/event/hook is classified as a "state variable" + in the generated docs. + +.. zeek:id:: ZeekygenExample::var_without_explicit_type + + :Type: :zeek:type:`string` + :Default: ``"this works"`` + + Types are inferred, that information is self-documenting. + +.. 
zeek:id:: ZeekygenExample::summary_test + + :Type: :zeek:type:`string` + + The first sentence for a particular identifier's summary text ends here. + And this second sentence doesn't show in the short description provided + by the table of all identifiers declared by this script. + +.. zeek:id:: ZeekygenExample::a_function + + :Type: :zeek:type:`function` (tag: :zeek:type:`string`, msg: :zeek:type:`string`) : :zeek:type:`string` + + Summarize purpose of "a_function" here. + Give more details about "a_function" here. + Separating the documentation of the params/return values with + empty comments is optional, but improves readability of script. + + + :tag: Function arguments can be described + like this. + + + :msg: Another param. + + + :returns: Describe the return type here. + +.. zeek:id:: ZeekygenExample::an_event + + :Type: :zeek:type:`event` (name: :zeek:type:`string`) + + Summarize "an_event" here. + Give more details about "an_event" here. + + ZeekygenExample::a_function should not be confused as a parameter + in the generated docs, but it also doesn't generate a cross-reference + link. Use the see role instead: :zeek:see:`ZeekygenExample::a_function`. + + + :name: Describe the argument here. + +.. zeek:id:: ZeekygenExample::function_without_proto + + :Type: :zeek:type:`function` (tag: :zeek:type:`string`) : :zeek:type:`string` + + +.. zeek:type:: ZeekygenExample::PrivateRecord + + :Type: :zeek:type:`record` + + field1: :zeek:type:`bool` + + field2: :zeek:type:`count` + + diff --git a/testing/btest/Baseline/doc.zeekygen.package/test.rst b/testing/btest/Baseline/doc.zeekygen.package/test.rst new file mode 100644 index 0000000000..6ced7b797e --- /dev/null +++ b/testing/btest/Baseline/doc.zeekygen.package/test.rst @@ -0,0 +1,37 @@ +:orphan: + +Package: zeekygen +================= + +This package is loaded during the process which automatically generates +reference documentation for all Zeek scripts (i.e. "Zeekygen"). Its only +purpose is to provide an easy way to load all known Zeek scripts plus any +extra scripts needed or used by the documentation process. + +:doc:`/scripts/zeekygen/__load__.zeek` + + +:doc:`/scripts/zeekygen/example.zeek` + + This is an example script that demonstrates Zeekygen-style + documentation. It generally will make most sense when viewing + the script's raw source code and comparing to the HTML-rendered + version. + + Comments in the from ``##!`` are meant to summarize the script's + purpose. They are transferred directly in to the generated + `reStructuredText `_ + (reST) document associated with the script. + + .. tip:: You can embed directives and roles within ``##``-stylized comments. + + There's also a custom role to reference any identifier node in + the Zeek Sphinx domain that's good for "see alsos", e.g. + + See also: :zeek:see:`ZeekygenExample::a_var`, + :zeek:see:`ZeekygenExample::ONE`, :zeek:see:`SSH::Info` + + And a custom directive does the equivalent references: + + .. zeek:see:: ZeekygenExample::a_var ZeekygenExample::ONE SSH::Info + diff --git a/testing/btest/Baseline/doc.zeekygen.package_index/test.rst b/testing/btest/Baseline/doc.zeekygen.package_index/test.rst new file mode 100644 index 0000000000..df9907bd1b --- /dev/null +++ b/testing/btest/Baseline/doc.zeekygen.package_index/test.rst @@ -0,0 +1,7 @@ +:doc:`zeekygen ` + + This package is loaded during the process which automatically generates + reference documentation for all Zeek scripts (i.e. "Zeekygen"). 
Its only + purpose is to provide an easy way to load all known Zeek scripts plus any + extra scripts needed or used by the documentation process. + diff --git a/testing/btest/Baseline/doc.zeekygen.records/autogen-reST-records.rst b/testing/btest/Baseline/doc.zeekygen.records/autogen-reST-records.rst new file mode 100644 index 0000000000..a9b671623a --- /dev/null +++ b/testing/btest/Baseline/doc.zeekygen.records/autogen-reST-records.rst @@ -0,0 +1,28 @@ +.. zeek:type:: TestRecord1 + + :Type: :zeek:type:`record` + + field1: :zeek:type:`bool` + + field2: :zeek:type:`count` + + +.. zeek:type:: TestRecord2 + + :Type: :zeek:type:`record` + + A: :zeek:type:`count` + document ``A`` + + B: :zeek:type:`bool` + document ``B`` + + C: :zeek:type:`TestRecord1` + and now ``C`` + is a declared type + + D: :zeek:type:`set` [:zeek:type:`count`, :zeek:type:`bool`] + sets/tables should show the index types + + Here's the ways records and record fields can be documented. + diff --git a/testing/btest/Baseline/doc.zeekygen.script_index/test.rst b/testing/btest/Baseline/doc.zeekygen.script_index/test.rst new file mode 100644 index 0000000000..1ca04759bb --- /dev/null +++ b/testing/btest/Baseline/doc.zeekygen.script_index/test.rst @@ -0,0 +1,5 @@ +.. toctree:: + :maxdepth: 1 + + zeekygen/__load__.zeek + zeekygen/example.zeek diff --git a/testing/btest/Baseline/doc.zeekygen.script_summary/test.rst b/testing/btest/Baseline/doc.zeekygen.script_summary/test.rst new file mode 100644 index 0000000000..7f3885b86e --- /dev/null +++ b/testing/btest/Baseline/doc.zeekygen.script_summary/test.rst @@ -0,0 +1,23 @@ +:doc:`/scripts/zeekygen/example.zeek` + This is an example script that demonstrates Zeekygen-style + documentation. It generally will make most sense when viewing + the script's raw source code and comparing to the HTML-rendered + version. + + Comments in the from ``##!`` are meant to summarize the script's + purpose. They are transferred directly in to the generated + `reStructuredText `_ + (reST) document associated with the script. + + .. tip:: You can embed directives and roles within ``##``-stylized comments. + + There's also a custom role to reference any identifier node in + the Zeek Sphinx domain that's good for "see alsos", e.g. + + See also: :zeek:see:`ZeekygenExample::a_var`, + :zeek:see:`ZeekygenExample::ONE`, :zeek:see:`SSH::Info` + + And a custom directive does the equivalent references: + + .. zeek:see:: ZeekygenExample::a_var ZeekygenExample::ONE SSH::Info + diff --git a/testing/btest/Baseline/doc.zeekygen.type-aliases/autogen-reST-type-aliases.rst b/testing/btest/Baseline/doc.zeekygen.type-aliases/autogen-reST-type-aliases.rst new file mode 100644 index 0000000000..4dfae471c4 --- /dev/null +++ b/testing/btest/Baseline/doc.zeekygen.type-aliases/autogen-reST-type-aliases.rst @@ -0,0 +1,44 @@ +.. zeek:type:: ZeekygenTest::TypeAlias + + :Type: :zeek:type:`bool` + + This is just an alias for a builtin type ``bool``. + +.. zeek:type:: ZeekygenTest::NotTypeAlias + + :Type: :zeek:type:`bool` + + This type should get its own comments, not associated w/ TypeAlias. + +.. zeek:type:: ZeekygenTest::OtherTypeAlias + + :Type: :zeek:type:`bool` + + This cross references ``bool`` in the description of its type + instead of ``TypeAlias`` just because it seems more useful -- + one doesn't have to click through the full type alias chain to + find out what the actual type is... + +.. zeek:id:: ZeekygenTest::a + + :Type: :zeek:type:`ZeekygenTest::TypeAlias` + + But this should reference a type of ``TypeAlias``. + +.. 
zeek:id:: ZeekygenTest::b + + :Type: :zeek:type:`ZeekygenTest::OtherTypeAlias` + + And this should reference a type of ``OtherTypeAlias``. + +.. zeek:type:: ZeekygenTest::MyRecord + + :Type: :zeek:type:`record` + + f1: :zeek:type:`ZeekygenTest::TypeAlias` + + f2: :zeek:type:`ZeekygenTest::OtherTypeAlias` + + f3: :zeek:type:`bool` + + diff --git a/testing/btest/Baseline/doc.zeekygen.vectors/autogen-reST-vectors.rst b/testing/btest/Baseline/doc.zeekygen.vectors/autogen-reST-vectors.rst new file mode 100644 index 0000000000..e0a8ea9727 --- /dev/null +++ b/testing/btest/Baseline/doc.zeekygen.vectors/autogen-reST-vectors.rst @@ -0,0 +1,36 @@ +.. zeek:id:: test_vector0 + + :Type: :zeek:type:`vector` of :zeek:type:`string` + :Default: + + :: + + [] + + + Yield type is documented/cross-referenced for primitize types. + +.. zeek:id:: test_vector1 + + :Type: :zeek:type:`vector` of :zeek:type:`TestRecord` + :Default: + + :: + + [] + + + Yield type is documented/cross-referenced for composite types. + +.. zeek:id:: test_vector2 + + :Type: :zeek:type:`vector` of :zeek:type:`vector` of :zeek:type:`TestRecord` + :Default: + + :: + + [] + + + Just showing an even fancier yield type. + diff --git a/testing/btest/Baseline/language.at-deprecated/.stderr b/testing/btest/Baseline/language.at-deprecated/.stderr index 4668f2d7bf..97dc7ea331 100644 --- a/testing/btest/Baseline/language.at-deprecated/.stderr +++ b/testing/btest/Baseline/language.at-deprecated/.stderr @@ -1,3 +1,3 @@ -warning in ./foo.bro, line 1: deprecated script loaded from command line arguments -warning in ./bar.bro, line 1: deprecated script loaded from ./foo.bro:2 "Use '@load qux.bro' instead" -warning in ./baz.bro, line 1: deprecated script loaded from ./foo.bro:3 +warning in ./foo.zeek, line 1: deprecated script loaded from command line arguments +warning in ./bar.zeek, line 1: deprecated script loaded from ./foo.zeek:2 "Use '@load qux' instead" +warning in ./baz.zeek, line 1: deprecated script loaded from ./foo.zeek:3 diff --git a/testing/btest/Baseline/language.at-filename/out b/testing/btest/Baseline/language.at-filename/out index 12cfb152d9..23b37ef249 100644 --- a/testing/btest/Baseline/language.at-filename/out +++ b/testing/btest/Baseline/language.at-filename/out @@ -1 +1 @@ -at-filename.bro +at-filename.zeek diff --git a/testing/btest/Baseline/language.at-if-invalid/out b/testing/btest/Baseline/language.at-if-invalid/out index 63b93a3cf8..0214a8d2f8 100644 --- a/testing/btest/Baseline/language.at-if-invalid/out +++ b/testing/btest/Baseline/language.at-if-invalid/out @@ -1,4 +1,4 @@ -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.at-if-invalid/at-if-invalid.bro, line 28: referencing a local name in @if (xyz) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.at-if-invalid/at-if-invalid.bro, line 28: invalid expression in @if (F && foo(xyz)) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.at-if-invalid/at-if-invalid.bro, line 36: referencing a local name in @if (local_true_condition) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.at-if-invalid/at-if-invalid.bro, line 36: invalid expression in @if (T && TRUE_CONDITION && local_true_condition) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.at-if-invalid/at-if-invalid.zeek, line 28: referencing a local name in @if (xyz) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.at-if-invalid/at-if-invalid.zeek, line 28: invalid expression in @if (F && foo(xyz)) +error in 
/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.at-if-invalid/at-if-invalid.zeek, line 36: referencing a local name in @if (local_true_condition) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.at-if-invalid/at-if-invalid.zeek, line 36: invalid expression in @if (T && TRUE_CONDITION && local_true_condition) diff --git a/testing/btest/Baseline/language.attr-default-global-set-error/out b/testing/btest/Baseline/language.attr-default-global-set-error/out index c784bb683b..6f3fd63d4f 100644 --- a/testing/btest/Baseline/language.attr-default-global-set-error/out +++ b/testing/btest/Baseline/language.attr-default-global-set-error/out @@ -1,2 +1,2 @@ -error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.attr-default-global-set-error/attr-default-global-set-error.bro, line 4: arithmetic mixed with non-arithmetic (set[string] and 0) -error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.attr-default-global-set-error/attr-default-global-set-error.bro, line 4: &default value has inconsistent type (0 and set[string]) +error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.attr-default-global-set-error/attr-default-global-set-error.zeek, line 4: arithmetic mixed with non-arithmetic (set[string] and 0) +error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.attr-default-global-set-error/attr-default-global-set-error.zeek, line 4: &default value has inconsistent type (0 and set[string]) diff --git a/testing/btest/Baseline/language.common-mistakes/1.out b/testing/btest/Baseline/language.common-mistakes/1.out index 8070f84644..5371fc266f 100644 --- a/testing/btest/Baseline/language.common-mistakes/1.out +++ b/testing/btest/Baseline/language.common-mistakes/1.out @@ -1,4 +1,4 @@ -expression error in ./1.bro, line 9: field value missing (mr$f) +expression error in ./1.zeek, line 9: field value missing (mr$f) bar start foo start -other bro_init +other zeek_init diff --git a/testing/btest/Baseline/language.common-mistakes/2.out b/testing/btest/Baseline/language.common-mistakes/2.out index dd62af107c..dbf4ed7ae6 100644 --- a/testing/btest/Baseline/language.common-mistakes/2.out +++ b/testing/btest/Baseline/language.common-mistakes/2.out @@ -1,2 +1,2 @@ -expression error in ./2.bro, line 7: no such index (t[nope]) +expression error in ./2.zeek, line 7: no such index (t[nope]) in foo diff --git a/testing/btest/Baseline/language.common-mistakes/3.out b/testing/btest/Baseline/language.common-mistakes/3.out index d914d399a7..62cb349e7d 100644 --- a/testing/btest/Baseline/language.common-mistakes/3.out +++ b/testing/btest/Baseline/language.common-mistakes/3.out @@ -1,2 +1,2 @@ -expression error in ./3.bro, line 5: type-checking failed in vector append (v += ok) +expression error in ./3.zeek, line 5: type-checking failed in vector append (v += ok) in foo diff --git a/testing/btest/Baseline/language.const/invalid.stderr b/testing/btest/Baseline/language.const/invalid.stderr index b08c472708..5b6e120f8e 100644 --- a/testing/btest/Baseline/language.const/invalid.stderr +++ b/testing/btest/Baseline/language.const/invalid.stderr @@ -1,13 +1,13 @@ -error in ./invalid.bro, line 15: const is not a modifiable lvalue (foo) -error in ./invalid.bro, line 16: const is not a modifiable lvalue (foo) -error in ./invalid.bro, line 17: const is not a modifiable lvalue (bar) -error in ./invalid.bro, line 17: const is not a modifiable lvalue (foo) -error in ./invalid.bro, line 18: const is not a modifiable lvalue (foo) -error in ./invalid.bro, line 19: const is not a modifiable 
lvalue (foo) -error in ./invalid.bro, line 20: const is not a modifiable lvalue (foo) -error in ./invalid.bro, line 22: const is not a modifiable lvalue (foo) -error in ./invalid.bro, line 25: const is not a modifiable lvalue (bar) -error in ./invalid.bro, line 26: const is not a modifiable lvalue (baz) -error in ./invalid.bro, line 27: const is not a modifiable lvalue (bar) -error in ./invalid.bro, line 28: const is not a modifiable lvalue (baz) -error in ./invalid.bro, line 33: const is not a modifiable lvalue (foo) +error in ./invalid.zeek, line 15: const is not a modifiable lvalue (foo) +error in ./invalid.zeek, line 16: const is not a modifiable lvalue (foo) +error in ./invalid.zeek, line 17: const is not a modifiable lvalue (bar) +error in ./invalid.zeek, line 17: const is not a modifiable lvalue (foo) +error in ./invalid.zeek, line 18: const is not a modifiable lvalue (foo) +error in ./invalid.zeek, line 19: const is not a modifiable lvalue (foo) +error in ./invalid.zeek, line 20: const is not a modifiable lvalue (foo) +error in ./invalid.zeek, line 22: const is not a modifiable lvalue (foo) +error in ./invalid.zeek, line 25: const is not a modifiable lvalue (bar) +error in ./invalid.zeek, line 26: const is not a modifiable lvalue (baz) +error in ./invalid.zeek, line 27: const is not a modifiable lvalue (bar) +error in ./invalid.zeek, line 28: const is not a modifiable lvalue (baz) +error in ./invalid.zeek, line 33: const is not a modifiable lvalue (foo) diff --git a/testing/btest/Baseline/language.copy-all-opaques/.stderr b/testing/btest/Baseline/language.copy-all-opaques/.stderr new file mode 100644 index 0000000000..bf07a71a21 --- /dev/null +++ b/testing/btest/Baseline/language.copy-all-opaques/.stderr @@ -0,0 +1 @@ +error: incompatible Bloom filter types diff --git a/testing/btest/Baseline/language.copy-all-opaques/out b/testing/btest/Baseline/language.copy-all-opaques/out new file mode 100644 index 0000000000..d4e347a67a --- /dev/null +++ b/testing/btest/Baseline/language.copy-all-opaques/out @@ -0,0 +1,28 @@ +============ Topk +[b, a, c] +[b, a, c] +============ HLL +3.000069 +3.000069 +3.000069 +============ Bloom +0 +1 +0 +1 +============ Hashes +5b9164ad6f496d9dee12ec7634ce253f +5b9164ad6f496d9dee12ec7634ce253f +30ae97492ce1da88d0e7117ace0a60a6f9e1e0bc +30ae97492ce1da88d0e7117ace0a60a6f9e1e0bc +25b6746d5172ed6352966a013d93ac846e1110d5a25e8f183b5931f4688842a1 +25b6746d5172ed6352966a013d93ac846e1110d5a25e8f183b5931f4688842a1 +============ X509 +[version=3, serial=040000000001154B5AC394, subject=CN=GlobalSign Root CA,OU=Root CA,O=GlobalSign nv-sa,C=BE, issuer=CN=GlobalSign Root CA,OU=Root CA,O=GlobalSign nv-sa,C=BE, cn=GlobalSign Root CA, not_valid_before=904651200.0, not_valid_after=1832673600.0, key_alg=rsaEncryption, sig_alg=sha1WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=] +[version=3, serial=040000000001154B5AC394, subject=CN=GlobalSign Root CA,OU=Root CA,O=GlobalSign nv-sa,C=BE, issuer=CN=GlobalSign Root CA,OU=Root CA,O=GlobalSign nv-sa,C=BE, cn=GlobalSign Root CA, not_valid_before=904651200.0, not_valid_after=1832673600.0, key_alg=rsaEncryption, sig_alg=sha1WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=] +============ Entropy +[entropy=4.715374, chi_square=591.981818, mean=75.472727, monte_carlo_pi=4.0, serial_correlation=-0.11027] +[entropy=4.715374, chi_square=591.981818, mean=75.472727, monte_carlo_pi=4.0, serial_correlation=-0.11027] +============ Paraglob +T +T diff --git 
a/testing/btest/Baseline/language.copy-all-types/out b/testing/btest/Baseline/language.copy-all-types/out new file mode 100644 index 0000000000..230550b90e --- /dev/null +++ b/testing/btest/Baseline/language.copy-all-types/out @@ -0,0 +1,12 @@ +orig=-42 (int) clone=-42 (int) equal=T same_object=T (ok) +orig=42 (count) clone=42 (count) equal=T same_object=T (ok) +orig=127.0.0.1 (addr) clone=127.0.0.1 (addr) equal=T same_object=T (ok) +orig=42/tcp (port) clone=42/tcp (port) equal=T same_object=T (ok) +orig=127.0.0.0/24 (subnet) clone=127.0.0.0/24 (subnet) equal=T same_object=T (ok) +orig=Foo (string) clone=Foo (string) equal=T same_object=F (ok) +orig=/^?(.*PATTERN.*)$?/ (pattern) clone=/^?(.*PATTERN.*)$?/ (pattern) same_object=F +orig=2,4,1,5,3 (set[count]) clone=2,4,1,5,3 (set[count]) equal=T same_object=F (ok) +orig=[1, 2, 3, 4, 5] (vector of count) clone=[1, 2, 3, 4, 5] (vector of count) equal=T same_object=F (ok) +orig=b=vb;a=va (table[string] of string) clone=b=vb;a=va (table[string] of string) equal=T same_object=F (ok) +orig=ENUMME (enum) clone=ENUMME (enum) equal=T same_object=T (ok) +orig=[s1=s1, s2=s2, i1=[a=a], i2=[a=a], donotset=, def=5] (record { s1:string; s2:string; i1:record { a:string; }; i2:record { a:string; }; donotset:record { a:string; }; def:count; }) clone=[s1=s1, s2=s2, i1=[a=a], i2=[a=a], donotset=, def=5] (record { s1:string; s2:string; i1:record { a:string; }; i2:record { a:string; }; donotset:record { a:string; }; def:count; }) equal=T same_object=F (ok) diff --git a/testing/btest/Baseline/language.copy-cycle/out b/testing/btest/Baseline/language.copy-cycle/out new file mode 100644 index 0000000000..57562e818e --- /dev/null +++ b/testing/btest/Baseline/language.copy-cycle/out @@ -0,0 +1,3 @@ +F (expected: F) +T (expected: T) +T (expected: T) diff --git a/testing/btest/Baseline/language.copy/out b/testing/btest/Baseline/language.copy/out index 675d38aa5d..fbc2c4b04d 100644 --- a/testing/btest/Baseline/language.copy/out +++ b/testing/btest/Baseline/language.copy/out @@ -1,2 +1,5 @@ direct assignment (PASS) using copy (PASS) +F, T +F, T +[a=42], [a=42], [a=42], [a=42] diff --git a/testing/btest/Baseline/language.deprecated/no-warnings.out b/testing/btest/Baseline/language.deprecated/no-warnings.out new file mode 100644 index 0000000000..42930b1690 --- /dev/null +++ b/testing/btest/Baseline/language.deprecated/no-warnings.out @@ -0,0 +1,28 @@ +warning in ./no-warnings.zeek, line 27: deprecated (ONE) +warning in ./no-warnings.zeek, line 28: deprecated (TWO) +warning in ./no-warnings.zeek, line 30: deprecated (GREEN) +warning in ./no-warnings.zeek, line 31: deprecated (BLUE) +warning in ./no-warnings.zeek, line 33: deprecated (blah) +warning in ./no-warnings.zeek, line 37: deprecated (my_event) +warning in ./no-warnings.zeek, line 38: deprecated (my_event) +warning in ./no-warnings.zeek, line 39: deprecated (my_hook) +warning in ./no-warnings.zeek, line 41: deprecated (my_record$b) +warning in ./no-warnings.zeek, line 42: deprecated (my_record$b) +warning in ./no-warnings.zeek, line 43: deprecated (my_record$b) +warning in ./no-warnings.zeek, line 45: deprecated (my_record?$b) +warning in ./no-warnings.zeek, line 46: deprecated (my_record$b) +warning in ./no-warnings.zeek, line 49: deprecated (my_record$b) +warning in ./no-warnings.zeek, line 52: deprecated (my_event) +warning in ./no-warnings.zeek, line 57: deprecated (my_hook) +warning in ./no-warnings.zeek, line 62: deprecated (blah) +warning in ./no-warnings.zeek, line 71: deprecated (dont_use_me) +warning in 
./no-warnings.zeek, line 76: deprecated (dont_use_me_either) +ZERO +ONE +TWO +RED +GREEN +BLUE +generate my_hook please +generate my_event please +schedule my_event please diff --git a/testing/btest/Baseline/language.deprecated/out b/testing/btest/Baseline/language.deprecated/out deleted file mode 100644 index 5bdf87a62b..0000000000 --- a/testing/btest/Baseline/language.deprecated/out +++ /dev/null @@ -1,28 +0,0 @@ -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.deprecated/deprecated.bro, line 30: deprecated (ONE) -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.deprecated/deprecated.bro, line 31: deprecated (TWO) -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.deprecated/deprecated.bro, line 33: deprecated (GREEN) -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.deprecated/deprecated.bro, line 34: deprecated (BLUE) -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.deprecated/deprecated.bro, line 36: deprecated (blah) -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.deprecated/deprecated.bro, line 40: deprecated (my_event) -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.deprecated/deprecated.bro, line 41: deprecated (my_event) -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.deprecated/deprecated.bro, line 42: deprecated (my_hook) -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.deprecated/deprecated.bro, line 44: deprecated (my_record$b) -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.deprecated/deprecated.bro, line 45: deprecated (my_record$b) -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.deprecated/deprecated.bro, line 46: deprecated (my_record$b) -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.deprecated/deprecated.bro, line 48: deprecated (my_record?$b) -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.deprecated/deprecated.bro, line 49: deprecated (my_record$b) -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.deprecated/deprecated.bro, line 52: deprecated (my_record$b) -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.deprecated/deprecated.bro, line 55: deprecated (my_event) -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.deprecated/deprecated.bro, line 60: deprecated (my_hook) -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.deprecated/deprecated.bro, line 65: deprecated (blah) -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.deprecated/deprecated.bro, line 74: deprecated (dont_use_me) -warning in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.deprecated/deprecated.bro, line 79: deprecated (dont_use_me_either) -ZERO -ONE -TWO -RED -GREEN -BLUE -generate my_hook please -generate my_event please -schedule my_event please diff --git a/testing/btest/Baseline/language.deprecated/warnings.out b/testing/btest/Baseline/language.deprecated/warnings.out new file mode 100644 index 0000000000..5f0ee7bdc8 --- /dev/null +++ b/testing/btest/Baseline/language.deprecated/warnings.out @@ -0,0 +1,28 @@ +warning in ./warnings.zeek, line 27: deprecated (ONE): one warning +warning in ./warnings.zeek, line 28: deprecated (TWO): two warning +warning in ./warnings.zeek, line 30: deprecated (GREEN): green warning +warning in ./warnings.zeek, line 31: deprecated (BLUE): red warning +warning in 
./warnings.zeek, line 33: deprecated (blah): type warning +warning in ./warnings.zeek, line 37: deprecated (my_event): event warning +warning in ./warnings.zeek, line 38: deprecated (my_event): event warning +warning in ./warnings.zeek, line 39: deprecated (my_hook): hook warning +warning in ./warnings.zeek, line 41: deprecated (my_record$b): record warning +warning in ./warnings.zeek, line 42: deprecated (my_record$b): record warning +warning in ./warnings.zeek, line 43: deprecated (my_record$b): record warning +warning in ./warnings.zeek, line 45: deprecated (my_record?$b): record warning +warning in ./warnings.zeek, line 46: deprecated (my_record$b): record warning +warning in ./warnings.zeek, line 49: deprecated (my_record$b): record warning +warning in ./warnings.zeek, line 52: deprecated (my_event): event warning +warning in ./warnings.zeek, line 57: deprecated (my_hook): hook warning +warning in ./warnings.zeek, line 62: deprecated (blah): type warning +warning in ./warnings.zeek, line 71: deprecated (dont_use_me): global function warning +warning in ./warnings.zeek, line 76: deprecated (dont_use_me_either): function warning +ZERO +ONE +TWO +RED +GREEN +BLUE +generate my_hook please +generate my_event please +schedule my_event please diff --git a/testing/btest/Baseline/language.eof-parse-errors/output1 b/testing/btest/Baseline/language.eof-parse-errors/output1 index 47a1c328e3..0fd8331175 100644 --- a/testing/btest/Baseline/language.eof-parse-errors/output1 +++ b/testing/btest/Baseline/language.eof-parse-errors/output1 @@ -1 +1 @@ -error: syntax error, at end of file ./a.bro +error: syntax error, at end of file ./a.zeek diff --git a/testing/btest/Baseline/language.eof-parse-errors/output2 b/testing/btest/Baseline/language.eof-parse-errors/output2 index 6f382c2a12..b7a433b9b0 100644 --- a/testing/btest/Baseline/language.eof-parse-errors/output2 +++ b/testing/btest/Baseline/language.eof-parse-errors/output2 @@ -1 +1 @@ -error in ./b.bro, line 1: syntax error, at or near "module" or end of file ./a.bro +error in ./b.zeek, line 1: syntax error, at or near "module" or end of file ./a.zeek diff --git a/testing/btest/Baseline/language.event-local-var/out b/testing/btest/Baseline/language.event-local-var/out index 2802c45d69..465a97d5cf 100644 --- a/testing/btest/Baseline/language.event-local-var/out +++ b/testing/btest/Baseline/language.event-local-var/out @@ -1 +1 @@ -error in /home/jgras/devel/bro/testing/btest/.tmp/language.event-local-var/event-local-var.bro, line 15: local identifier "v" cannot be used to reference an event, at or near ")" +error in /home/jgras/devel/bro/testing/btest/.tmp/language.event-local-var/event-local-var.zeek, line 15: local identifier "v" cannot be used to reference an event, at or near ")" diff --git a/testing/btest/Baseline/language.event/out b/testing/btest/Baseline/language.event/out index 14fa9c1e8a..66f0ada96f 100644 --- a/testing/btest/Baseline/language.event/out +++ b/testing/btest/Baseline/language.event/out @@ -2,6 +2,6 @@ event statement event part1 event part2 assign event variable (6) -schedule statement in bro_init +schedule statement in zeek_init schedule statement in global -schedule statement another in bro_init +schedule statement another in zeek_init diff --git a/testing/btest/Baseline/language.expire-expr-error/output b/testing/btest/Baseline/language.expire-expr-error/output index dfa0bf64c3..5bc22b8202 100644 --- a/testing/btest/Baseline/language.expire-expr-error/output +++ b/testing/btest/Baseline/language.expire-expr-error/output @@ 
-1,2 +1,2 @@ -expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-expr-error/expire-expr-error.bro, line 8: no such index (x[kaputt]) +expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-expr-error/expire-expr-error.zeek, line 8: no such index (x[kaputt]) received termination signal diff --git a/testing/btest/Baseline/language.expire-func-undef/output b/testing/btest/Baseline/language.expire-func-undef/output index cf869bbe6b..fb783261be 100644 --- a/testing/btest/Baseline/language.expire-func-undef/output +++ b/testing/btest/Baseline/language.expire-func-undef/output @@ -1,20 +1,20 @@ -1299470395.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.bro, line 12: value used but not set (segfault::scan_summary) -1299470405.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.bro, line 12: value used but not set (segfault::scan_summary) -1299473995.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.bro, line 12: value used but not set (segfault::scan_summary) -1299474005.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.bro, line 12: value used but not set (segfault::scan_summary) -1299477595.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.bro, line 12: value used but not set (segfault::scan_summary) -1299477605.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.bro, line 12: value used but not set (segfault::scan_summary) -1299481195.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.bro, line 12: value used but not set (segfault::scan_summary) -1299481205.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.bro, line 12: value used but not set (segfault::scan_summary) -1299484795.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.bro, line 12: value used but not set (segfault::scan_summary) -1299484805.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.bro, line 12: value used but not set (segfault::scan_summary) -1299488395.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.bro, line 12: value used but not set (segfault::scan_summary) -1299488405.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.bro, line 12: value used but not set (segfault::scan_summary) -1299491995.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.bro, line 12: value used but not set (segfault::scan_summary) -1299492005.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.bro, line 12: value used but not set (segfault::scan_summary) -1299495595.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.bro, line 12: value used but not set (segfault::scan_summary) -1299495605.000000 expression error in 
/home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.bro, line 12: value used but not set (segfault::scan_summary) -1299499195.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.bro, line 12: value used but not set (segfault::scan_summary) -1299499205.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.bro, line 12: value used but not set (segfault::scan_summary) -1299502795.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.bro, line 12: value used but not set (segfault::scan_summary) +1299470395.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.zeek, line 12: value used but not set (segfault::scan_summary) +1299470405.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.zeek, line 12: value used but not set (segfault::scan_summary) +1299473995.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.zeek, line 12: value used but not set (segfault::scan_summary) +1299474005.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.zeek, line 12: value used but not set (segfault::scan_summary) +1299477595.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.zeek, line 12: value used but not set (segfault::scan_summary) +1299477605.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.zeek, line 12: value used but not set (segfault::scan_summary) +1299481195.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.zeek, line 12: value used but not set (segfault::scan_summary) +1299481205.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.zeek, line 12: value used but not set (segfault::scan_summary) +1299484795.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.zeek, line 12: value used but not set (segfault::scan_summary) +1299484805.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.zeek, line 12: value used but not set (segfault::scan_summary) +1299488395.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.zeek, line 12: value used but not set (segfault::scan_summary) +1299488405.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.zeek, line 12: value used but not set (segfault::scan_summary) +1299491995.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.zeek, line 12: value used but not set (segfault::scan_summary) +1299492005.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.zeek, line 12: value used but not set (segfault::scan_summary) +1299495595.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.zeek, line 12: value used but not set (segfault::scan_summary) 
+1299495605.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.zeek, line 12: value used but not set (segfault::scan_summary) +1299499195.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.zeek, line 12: value used but not set (segfault::scan_summary) +1299499205.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.zeek, line 12: value used but not set (segfault::scan_summary) +1299502795.000000 expression error in /home/robin/bro/master/testing/btest/.tmp/language.expire-func-undef/expire-func-undef.zeek, line 12: value used but not set (segfault::scan_summary) orig: 10.0.0.2: peers: {\x0a\x0910.0.0.3\x0a} diff --git a/testing/btest/Baseline/language.expire-type-error/out b/testing/btest/Baseline/language.expire-type-error/out index c0987a6341..1050304b06 100644 --- a/testing/btest/Baseline/language.expire-type-error/out +++ b/testing/btest/Baseline/language.expire-type-error/out @@ -1 +1 @@ -error in /home/robin/bro/master/testing/btest/.tmp/language.expire-type-error/expire-type-error.bro, line 4: expiration interval has wrong type (kaputt) +error in /home/robin/bro/master/testing/btest/.tmp/language.expire-type-error/expire-type-error.zeek, line 4: expiration interval has wrong type (kaputt) diff --git a/testing/btest/Baseline/language.expire_subnet/output b/testing/btest/Baseline/language.expire_subnet/output index dee030eb0c..76fb3cd8d3 100644 --- a/testing/btest/Baseline/language.expire_subnet/output +++ b/testing/btest/Baseline/language.expire_subnet/output @@ -15,11 +15,11 @@ Accessed table nums: two; three Accessed table nets: two; zero, three Time: 7.0 secs 518.0 msecs 828.0 usecs -Expired Subnet: 192.168.4.0/24 --> four at 8.0 secs 835.0 msecs 30.0 usecs -Expired Subnet: 192.168.1.0/24 --> one at 8.0 secs 835.0 msecs 30.0 usecs Expired Num: 4 --> four at 8.0 secs 835.0 msecs 30.0 usecs Expired Num: 1 --> one at 8.0 secs 835.0 msecs 30.0 usecs Expired Num: 0 --> zero at 8.0 secs 835.0 msecs 30.0 usecs +Expired Subnet: 192.168.4.0/24 --> four at 8.0 secs 835.0 msecs 30.0 usecs +Expired Subnet: 192.168.1.0/24 --> one at 8.0 secs 835.0 msecs 30.0 usecs Expired Subnet: 192.168.0.0/16 --> zero at 15.0 secs 150.0 msecs 681.0 usecs Expired Subnet: 192.168.3.0/24 --> three at 15.0 secs 150.0 msecs 681.0 usecs Expired Subnet: 192.168.2.0/24 --> two at 15.0 secs 150.0 msecs 681.0 usecs diff --git a/testing/btest/Baseline/language.hook_calls/invalid.out b/testing/btest/Baseline/language.hook_calls/invalid.out index 3412c1900e..fdfd719cd8 100644 --- a/testing/btest/Baseline/language.hook_calls/invalid.out +++ b/testing/btest/Baseline/language.hook_calls/invalid.out @@ -1,10 +1,10 @@ -error in ./invalid.bro, line 9: hook cannot be called directly, use hook operator (myhook) -warning in ./invalid.bro, line 9: expression value ignored (myhook(3)) -error in ./invalid.bro, line 10: hook cannot be called directly, use hook operator (myhook) -error in ./invalid.bro, line 11: hook cannot be called directly, use hook operator (myhook) -error in ./invalid.bro, line 12: not a valid hook call expression (2 + 2) -warning in ./invalid.bro, line 12: expression value ignored (2 + 2) -error in ./invalid.bro, line 13: not a valid hook call expression (2 + 2) -error in ./invalid.bro, line 15: hook cannot be called directly, use hook operator (h) -warning in ./invalid.bro, line 15: expression value ignored (h(3)) -error in 
./invalid.bro, line 16: hook cannot be called directly, use hook operator (h) +error in ./invalid.zeek, line 9: hook cannot be called directly, use hook operator (myhook) +warning in ./invalid.zeek, line 9: expression value ignored (myhook(3)) +error in ./invalid.zeek, line 10: hook cannot be called directly, use hook operator (myhook) +error in ./invalid.zeek, line 11: hook cannot be called directly, use hook operator (myhook) +error in ./invalid.zeek, line 12: not a valid hook call expression (2 + 2) +warning in ./invalid.zeek, line 12: expression value ignored (2 + 2) +error in ./invalid.zeek, line 13: not a valid hook call expression (2 + 2) +error in ./invalid.zeek, line 15: hook cannot be called directly, use hook operator (h) +warning in ./invalid.zeek, line 15: expression value ignored (h(3)) +error in ./invalid.zeek, line 16: hook cannot be called directly, use hook operator (h) diff --git a/testing/btest/Baseline/language.index-assignment-invalid/out b/testing/btest/Baseline/language.index-assignment-invalid/out index 3972a9f10e..a30ecf891f 100644 --- a/testing/btest/Baseline/language.index-assignment-invalid/out +++ b/testing/btest/Baseline/language.index-assignment-invalid/out @@ -1,5 +1,5 @@ -runtime error in /home/jon/pro/zeek/zeek/scripts/base/utils/queue.bro, line 152: vector index assignment failed for invalid type 'myrec', value: [a=T, b=hi, c=], expression: Queue::ret[Queue::j], call stack: - #0 Queue::get_vector([initialized=T, vals={[2] = test,[6] = jkl;,[4] = asdf,[1] = goodbye,[5] = 3,[0] = hello,[3] = [a=T, b=hi, c=]}, settings=[max_len=], top=7, bottom=0, size=0], [hello, goodbye, test]) at /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.index-assignment-invalid/index-assignment-invalid.bro:19 - #1 bar(55) at /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.index-assignment-invalid/index-assignment-invalid.bro:27 - #2 foo(hi, 13) at /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.index-assignment-invalid/index-assignment-invalid.bro:39 - #3 bro_init() +runtime error in /home/jon/pro/zeek/zeek/scripts/base/utils/queue.zeek, line 152: vector index assignment failed for invalid type 'myrec', value: [a=T, b=hi, c=], expression: Queue::ret[Queue::j], call stack: + #0 Queue::get_vector([initialized=T, vals={[2] = test,[6] = jkl;,[4] = asdf,[1] = goodbye,[5] = 3,[0] = hello,[3] = [a=T, b=hi, c=]}, settings=[max_len=], top=7, bottom=0, size=0], [hello, goodbye, test]) at /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.index-assignment-invalid/index-assignment-invalid.zeek:19 + #1 bar(55) at /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.index-assignment-invalid/index-assignment-invalid.zeek:27 + #2 foo(hi, 13) at /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.index-assignment-invalid/index-assignment-invalid.zeek:39 + #3 zeek_init() diff --git a/testing/btest/Baseline/language.invalid_index/out b/testing/btest/Baseline/language.invalid_index/out index 4ba0373e91..aa3784aa3e 100644 --- a/testing/btest/Baseline/language.invalid_index/out +++ b/testing/btest/Baseline/language.invalid_index/out @@ -1,5 +1,5 @@ -expression error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.invalid_index/invalid_index.bro, line 10: no such index (foo[1]) -expression error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.invalid_index/invalid_index.bro, line 16: no such index (foo2[1]) +expression error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.invalid_index/invalid_index.zeek, line 10: no such index (foo[1]) +expression error in 
/home/jon/pro/zeek/zeek/testing/btest/.tmp/language.invalid_index/invalid_index.zeek, line 16: no such index (foo2[1]) foo[0], 42 foo2[0], 13 done diff --git a/testing/btest/Baseline/language.outer_param_binding/out b/testing/btest/Baseline/language.outer_param_binding/out index 28ad03c85a..afdc4191cd 100644 --- a/testing/btest/Baseline/language.outer_param_binding/out +++ b/testing/btest/Baseline/language.outer_param_binding/out @@ -1,3 +1,3 @@ -error in /home/robin/bro/master/testing/btest/.tmp/language.outer_param_binding/outer_param_binding.bro, line 16: referencing outer function IDs not supported (c) -error in /home/robin/bro/master/testing/btest/.tmp/language.outer_param_binding/outer_param_binding.bro, line 16: referencing outer function IDs not supported (d) -error in /home/robin/bro/master/testing/btest/.tmp/language.outer_param_binding/outer_param_binding.bro, line 17: referencing outer function IDs not supported (b) +error in /home/robin/bro/master/testing/btest/.tmp/language.outer_param_binding/outer_param_binding.zeek, line 16: referencing outer function IDs not supported (c) +error in /home/robin/bro/master/testing/btest/.tmp/language.outer_param_binding/outer_param_binding.zeek, line 16: referencing outer function IDs not supported (d) +error in /home/robin/bro/master/testing/btest/.tmp/language.outer_param_binding/outer_param_binding.zeek, line 17: referencing outer function IDs not supported (b) diff --git a/testing/btest/Baseline/language.paraglob-serialization/recv.recv.out b/testing/btest/Baseline/language.paraglob-serialization/recv.recv.out new file mode 100644 index 0000000000..bd6ae96cfa --- /dev/null +++ b/testing/btest/Baseline/language.paraglob-serialization/recv.recv.out @@ -0,0 +1,12 @@ +receiver added peer: endpoint=127.0.0.1 msg=handshake successful +is_remote should be T, and is, T +receiver got ping number: 1 +[*, *ello, hello] +is_remote should be T, and is, T +receiver got ping number: 2 +[*, *ello, hello] +is_remote should be T, and is, T +receiver got ping number: 3 +[*, *ello, hello] +is_remote should be T, and is, T +[num_peers=1, num_stores=0, num_pending_queries=0, num_events_incoming=4, num_events_outgoing=3, num_logs_incoming=0, num_logs_outgoing=1, num_ids_incoming=0, num_ids_outgoing=0] diff --git a/testing/btest/Baseline/language.paraglob-serialization/send.send.out b/testing/btest/Baseline/language.paraglob-serialization/send.send.out new file mode 100644 index 0000000000..e6f0a48779 --- /dev/null +++ b/testing/btest/Baseline/language.paraglob-serialization/send.send.out @@ -0,0 +1,11 @@ +Starting send. 
+[*, *ello, hello] +is_remote should be F, and is, F +sender added peer: endpoint=127.0.0.1 msg=received handshake from remote core +is_remote should be T, and is, T +sender got pong number: 1 +is_remote should be T, and is, T +sender got pong number: 2 +is_remote should be T, and is, T +sender got pong number: 3 +sender lost peer: endpoint=127.0.0.1 msg=lost remote peer diff --git a/testing/btest/Baseline/language.paraglob/out b/testing/btest/Baseline/language.paraglob/out new file mode 100644 index 0000000000..18e6da7096 --- /dev/null +++ b/testing/btest/Baseline/language.paraglob/out @@ -0,0 +1,9 @@ +[T, T, T, T, T] +T +F +[*, *og, d?g, d[!wl]g] +[once] +[] +[*.gov*, *malware*] +[z*ro] +[*.gov*, *malware*] diff --git a/testing/btest/Baseline/language.record-bad-ctor/out b/testing/btest/Baseline/language.record-bad-ctor/out index d30d0ab9d3..e6ff4a8fd5 100644 --- a/testing/btest/Baseline/language.record-bad-ctor/out +++ b/testing/btest/Baseline/language.record-bad-ctor/out @@ -1,2 +1,2 @@ -error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.record-bad-ctor/record-bad-ctor.bro, line 6: no type given (asdfasdf) -expression error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.record-bad-ctor/record-bad-ctor.bro, line 7: uninitialized list value ($ports=asdfasdf) +error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.record-bad-ctor/record-bad-ctor.zeek, line 6: no type given (asdfasdf) +expression error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.record-bad-ctor/record-bad-ctor.zeek, line 7: uninitialized list value ($ports=asdfasdf) diff --git a/testing/btest/Baseline/language.record-bad-ctor2/out b/testing/btest/Baseline/language.record-bad-ctor2/out index d5ce540dd8..12b0fe3959 100644 --- a/testing/btest/Baseline/language.record-bad-ctor2/out +++ b/testing/btest/Baseline/language.record-bad-ctor2/out @@ -1 +1 @@ -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-bad-ctor2/record-bad-ctor2.bro, line 14: bad type in record constructor ([[$cmd=echo hi]] and [$cmd=echo hi]) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-bad-ctor2/record-bad-ctor2.zeek, line 14: bad type in record constructor ([[$cmd=echo hi]] and [$cmd=echo hi]) diff --git a/testing/btest/Baseline/language.record-ceorce-orphan/out b/testing/btest/Baseline/language.record-ceorce-orphan/out index 59df204af2..f848945979 100644 --- a/testing/btest/Baseline/language.record-ceorce-orphan/out +++ b/testing/btest/Baseline/language.record-ceorce-orphan/out @@ -1,2 +1,2 @@ -error in /home/robin/bro/master/testing/btest/.tmp/language.record-ceorce-orphan/record-ceorce-orphan.bro, line 19: orphaned field "wtf" in record coercion ((coerce [$a=test, $b=42, $wtf=1.0 sec] to myrec)) -error in /home/robin/bro/master/testing/btest/.tmp/language.record-ceorce-orphan/record-ceorce-orphan.bro, line 21: orphaned field "wtf" in record coercion ((coerce [$a=test, $b=42, $wtf=1.0 sec] to myrec)) +error in /home/robin/bro/master/testing/btest/.tmp/language.record-ceorce-orphan/record-ceorce-orphan.zeek, line 19: orphaned field "wtf" in record coercion ((coerce [$a=test, $b=42, $wtf=1.0 sec] to myrec)) +error in /home/robin/bro/master/testing/btest/.tmp/language.record-ceorce-orphan/record-ceorce-orphan.zeek, line 21: orphaned field "wtf" in record coercion ((coerce [$a=test, $b=42, $wtf=1.0 sec] to myrec)) diff --git a/testing/btest/Baseline/language.record-coerce-clash/out b/testing/btest/Baseline/language.record-coerce-clash/out index 9ef4116c7e..cb45413c63 
100644 --- a/testing/btest/Baseline/language.record-coerce-clash/out +++ b/testing/btest/Baseline/language.record-coerce-clash/out @@ -1 +1 @@ -error in /Users/jon/Projects/bro/bro/testing/btest/.tmp/language.record-coerce-clash/record-coerce-clash.bro, line 13: type clash for field "cid" ((coerce [$cid=[$orig_h=1.2.3.4, $orig_p=0/tcp, $resp_h=0.0.0.0, $resp_p=wrong]] to myrec) and record { orig_h:addr; orig_p:port; resp_h:addr; resp_p:string; }) +error in /Users/jon/Projects/bro/bro/testing/btest/.tmp/language.record-coerce-clash/record-coerce-clash.zeek, line 13: type clash for field "cid" ((coerce [$cid=[$orig_h=1.2.3.4, $orig_p=0/tcp, $resp_h=0.0.0.0, $resp_p=wrong]] to myrec) and record { orig_h:addr; orig_p:port; resp_h:addr; resp_p:string; }) diff --git a/testing/btest/Baseline/language.record-default-set-mismatch/out b/testing/btest/Baseline/language.record-default-set-mismatch/out index c005138c0c..ba40f934f7 100644 --- a/testing/btest/Baseline/language.record-default-set-mismatch/out +++ b/testing/btest/Baseline/language.record-default-set-mismatch/out @@ -1 +1 @@ -error in /home/robin/bro/master/testing/btest/.tmp/language.record-default-set-mismatch/record-default-set-mismatch.bro, line 5: &default value has inconsistent type (&default=set(1, 2, 3)) +error in /home/robin/bro/master/testing/btest/.tmp/language.record-default-set-mismatch/record-default-set-mismatch.zeek, line 5: &default value has inconsistent type (&default=set(1, 2, 3)) diff --git a/testing/btest/Baseline/language.record-type-checking/out b/testing/btest/Baseline/language.record-type-checking/out index ecd5d7b8bb..50b0db5d8c 100644 --- a/testing/btest/Baseline/language.record-type-checking/out +++ b/testing/btest/Baseline/language.record-type-checking/out @@ -1,11 +1,11 @@ -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.bro, line 9 and count: type clash for field "a" ((coerce [$a=0] to MyRec) and count) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.bro, line 9: bad record initializer ((coerce [$a=0] to error)) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.bro, line 12 and count: type clash for field "a" ((coerce [$a=1] to MyRec) and count) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.bro, line 12: bad record initializer ((coerce (coerce [$a=1] to error) to error)) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.bro, line 18 and count: type clash for field "a" ((coerce [$a=2] to MyRec) and count) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.bro, line 22 and count: type clash for field "a" ((coerce [$a=3] to MyRec) and count) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.bro, line 22: bad record initializer ((coerce [$a=3] to error)) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.bro, line 27 and count: type clash for field "a" ((coerce [$a=1000] to MyRec) and count) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.bro, line 33 and count: type clash for field "a" ((coerce [$a=1001] to MyRec) and count) -error in 
/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.bro, line 40 and count: type clash for field "a" ((coerce [$a=1002] to MyRec) and count) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.bro, line 46 and count: type clash for field "a" ((coerce [$a=1003] to MyRec) and count) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.zeek, line 9 and count: type clash for field "a" ((coerce [$a=0] to MyRec) and count) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.zeek, line 9: bad record initializer ((coerce [$a=0] to error)) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.zeek, line 12 and count: type clash for field "a" ((coerce [$a=1] to MyRec) and count) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.zeek, line 12: bad record initializer ((coerce (coerce [$a=1] to error) to error)) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.zeek, line 18 and count: type clash for field "a" ((coerce [$a=2] to MyRec) and count) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.zeek, line 22 and count: type clash for field "a" ((coerce [$a=3] to MyRec) and count) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.zeek, line 22: bad record initializer ((coerce [$a=3] to error)) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.zeek, line 27 and count: type clash for field "a" ((coerce [$a=1000] to MyRec) and count) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.zeek, line 33 and count: type clash for field "a" ((coerce [$a=1001] to MyRec) and count) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.zeek, line 40 and count: type clash for field "a" ((coerce [$a=1002] to MyRec) and count) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.record-type-checking/record-type-checking.zeek, line 46 and count: type clash for field "a" ((coerce [$a=1003] to MyRec) and count) diff --git a/testing/btest/Baseline/language.returnwhen/bro..stdout b/testing/btest/Baseline/language.returnwhen/bro..stdout deleted file mode 100644 index d213d7bd02..0000000000 --- a/testing/btest/Baseline/language.returnwhen/bro..stdout +++ /dev/null @@ -1,12 +0,0 @@ -dummy from async_func() from bro_init() -async_func() return result in bro_init(), flag in my_set -dummy from bro_init() when block -hi! -dummy from async_func() from do_another() -async_func() return result in do_another(), flag in my_set -dummy from do_another() when block -hi! -dummy from async_func() from do_another() -async_func() return result in do_another(), timeout -dummy from do_another() when block -hi! 
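Note: the language.returnwhen baseline being renamed here (the deleted bro..stdout above, re-added as zeek..stdout below) exercises Zeek's asynchronous "return when" construct called from zeek_init. The following is only a minimal sketch of that pattern under assumed conditions (the illustrative my_set membership check and timeout value are not taken from the actual test script, which is not part of this diff):

	# Sketch of the "return when" pattern the baseline exercises.
	global my_set: set[string];

	function async_func(): string
		{
		# A function containing "return when" returns asynchronously:
		# the caller's when-block resumes once one of these branches runs.
		return when ( "flag" in my_set )    # illustrative condition
			{
			return "flag in my_set";
			}
		timeout 1 sec
			{
			return "timeout";
			}
		}

	event zeek_init()
		{
		# Satisfy the illustrative condition so the when-block can fire.
		add my_set["flag"];

		# Async functions are invoked from within a when-condition.
		when ( local r = async_func() )
			{
			print fmt("async_func() returned: %s", r);
			}
		}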
diff --git a/testing/btest/Baseline/language.returnwhen/zeek..stdout b/testing/btest/Baseline/language.returnwhen/zeek..stdout new file mode 100644 index 0000000000..969b6715af --- /dev/null +++ b/testing/btest/Baseline/language.returnwhen/zeek..stdout @@ -0,0 +1,12 @@ +dummy from async_func() from zeek_init() +async_func() return result in zeek_init(), flag in my_set +dummy from zeek_init() when block +hi! +dummy from async_func() from do_another() +async_func() return result in do_another(), flag in my_set +dummy from do_another() when block +hi! +dummy from async_func() from do_another() +async_func() return result in do_another(), timeout +dummy from do_another() when block +hi! diff --git a/testing/btest/Baseline/language.set-type-checking/out b/testing/btest/Baseline/language.set-type-checking/out index 0387146723..d27da6205a 100644 --- a/testing/btest/Baseline/language.set-type-checking/out +++ b/testing/btest/Baseline/language.set-type-checking/out @@ -1,24 +1,24 @@ -error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 7: arithmetic mixed with non-arithmetic (port and 0) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 7 and port: type mismatch (0 and port) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 7: inconsistent type in set constructor (set(0)) -error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 10: arithmetic mixed with non-arithmetic (port and 1) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 10 and port: type mismatch (1 and port) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 10: inconsistent type in set constructor (set(1)) -error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 16: arithmetic mixed with non-arithmetic (port and 2) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 16 and port: type mismatch (2 and port) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 16: inconsistent type in set constructor (set(2)) +error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 7: arithmetic mixed with non-arithmetic (port and 0) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 7 and port: type mismatch (0 and port) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 7: inconsistent type in set constructor (set(0)) +error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 10: arithmetic mixed with non-arithmetic (port and 1) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 10 and port: type mismatch (1 and port) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 10: inconsistent type in set constructor (set(1)) +error in port and 
/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 16: arithmetic mixed with non-arithmetic (port and 2) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 16 and port: type mismatch (2 and port) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 16: inconsistent type in set constructor (set(2)) error in port: arithmetic mixed with non-arithmetic (port and 3) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 20: initialization type mismatch in set (set(3) and 3) -error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 25: arithmetic mixed with non-arithmetic (port and 1000) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 25 and port: type mismatch (1000 and port) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 25: inconsistent type in set constructor (set(1000)) -error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 31: arithmetic mixed with non-arithmetic (port and 1001) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 31 and port: type mismatch (1001 and port) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 31: inconsistent type in set constructor (set(1001)) -error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 38: arithmetic mixed with non-arithmetic (port and 1002) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 38 and port: type mismatch (1002 and port) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 38: inconsistent type in set constructor (set(1002)) -error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 44: arithmetic mixed with non-arithmetic (port and 1003) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 44 and port: type mismatch (1003 and port) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 44: inconsistent type in set constructor (set(1003)) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.bro, line 44: type clash in assignment (lea = set(1003)) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 20: initialization type mismatch in set (set(3) and 3) +error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 25: arithmetic mixed with non-arithmetic (port and 1000) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 25 and port: type mismatch (1000 and port) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 25: inconsistent 
type in set constructor (set(1000)) +error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 31: arithmetic mixed with non-arithmetic (port and 1001) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 31 and port: type mismatch (1001 and port) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 31: inconsistent type in set constructor (set(1001)) +error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 38: arithmetic mixed with non-arithmetic (port and 1002) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 38 and port: type mismatch (1002 and port) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 38: inconsistent type in set constructor (set(1002)) +error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 44: arithmetic mixed with non-arithmetic (port and 1003) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 44 and port: type mismatch (1003 and port) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 44: inconsistent type in set constructor (set(1003)) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.set-type-checking/set-type-checking.zeek, line 44: type clash in assignment (lea = set(1003)) diff --git a/testing/btest/Baseline/language.subnet-errors/out b/testing/btest/Baseline/language.subnet-errors/out index 5d8e3d76da..97e999ef9b 100644 --- a/testing/btest/Baseline/language.subnet-errors/out +++ b/testing/btest/Baseline/language.subnet-errors/out @@ -1,5 +1,5 @@ -expression error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.subnet-errors/subnet-errors.bro, line 9: bad IPv4 subnet prefix length: 33 (1.2.3.4 / i) -expression error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.subnet-errors/subnet-errors.bro, line 18: bad IPv6 subnet prefix length: 129 (:: / i) +expression error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.subnet-errors/subnet-errors.zeek, line 9: bad IPv4 subnet prefix length: 33 (1.2.3.4 / i) +expression error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.subnet-errors/subnet-errors.zeek, line 18: bad IPv6 subnet prefix length: 129 (:: / i) 1.2.3.4/32 ::/128 init last diff --git a/testing/btest/Baseline/language.switch-error-mixed/out b/testing/btest/Baseline/language.switch-error-mixed/out index 75fa1d84c2..679b34f6ef 100644 --- a/testing/btest/Baseline/language.switch-error-mixed/out +++ b/testing/btest/Baseline/language.switch-error-mixed/out @@ -1 +1 @@ -error in /home/robin/bro/lang-ext/testing/btest/.tmp/language.switch-error-mixed/switch-error-mixed.bro, line 6: cannot mix cases with expressions and types (switch (v) {case 42:{ return (42!)}case type count:{ return (Count!)}}) +error in /home/robin/bro/lang-ext/testing/btest/.tmp/language.switch-error-mixed/switch-error-mixed.zeek, line 6: cannot mix cases with expressions and types (switch (v) {case 42:{ return (42!)}case type count:{ return (Count!)}}) diff --git a/testing/btest/Baseline/language.switch-incomplete/out 
b/testing/btest/Baseline/language.switch-incomplete/out index bfe4429956..4ce7d39a08 100644 --- a/testing/btest/Baseline/language.switch-incomplete/out +++ b/testing/btest/Baseline/language.switch-incomplete/out @@ -1 +1 @@ -error in /home/robin/bro/master/testing/btest/.tmp/language.switch-incomplete/switch-incomplete.bro, lines 7-8: case block must end in break/fallthrough/return statement (case 1:{ print 1}) +error in /home/robin/bro/master/testing/btest/.tmp/language.switch-incomplete/switch-incomplete.zeek, lines 7-8: case block must end in break/fallthrough/return statement (case 1:{ print 1}) diff --git a/testing/btest/Baseline/language.switch-types-error-duplicate/out b/testing/btest/Baseline/language.switch-types-error-duplicate/out index e523b14550..0ab618bc16 100644 --- a/testing/btest/Baseline/language.switch-types-error-duplicate/out +++ b/testing/btest/Baseline/language.switch-types-error-duplicate/out @@ -1 +1 @@ -error in /home/robin/bro/lang-ext/testing/btest/.tmp/language.switch-types-error-duplicate/switch-types-error-duplicate.bro, lines 11-12: duplicate case label (case type bool, type count:{ return (Bool or address!)}) +error in /home/robin/bro/lang-ext/testing/btest/.tmp/language.switch-types-error-duplicate/switch-types-error-duplicate.zeek, lines 11-12: duplicate case label (case type bool, type count:{ return (Bool or address!)}) diff --git a/testing/btest/Baseline/language.switch-types-error-unsupported/out b/testing/btest/Baseline/language.switch-types-error-unsupported/out index 133c8653f2..7932073710 100644 --- a/testing/btest/Baseline/language.switch-types-error-unsupported/out +++ b/testing/btest/Baseline/language.switch-types-error-unsupported/out @@ -1,3 +1,3 @@ -error in /home/robin/bro/lang-ext/testing/btest/.tmp/language.switch-types-error-unsupported/switch-types-error-unsupported.bro, lines 9-10: cannot cast switch expression to case type (case type count:{ return (Count!)}) -error in /home/robin/bro/lang-ext/testing/btest/.tmp/language.switch-types-error-unsupported/switch-types-error-unsupported.bro, lines 11-12: cannot cast switch expression to case type (case type bool, type addr:{ return (Bool or address!)}) -error in /home/robin/bro/lang-ext/testing/btest/.tmp/language.switch-types-error-unsupported/switch-types-error-unsupported.bro, lines 11-12: cannot cast switch expression to case type (case type bool, type addr:{ return (Bool or address!)}) +error in /home/robin/bro/lang-ext/testing/btest/.tmp/language.switch-types-error-unsupported/switch-types-error-unsupported.zeek, lines 9-10: cannot cast switch expression to case type (case type count:{ return (Count!)}) +error in /home/robin/bro/lang-ext/testing/btest/.tmp/language.switch-types-error-unsupported/switch-types-error-unsupported.zeek, lines 11-12: cannot cast switch expression to case type (case type bool, type addr:{ return (Bool or address!)}) +error in /home/robin/bro/lang-ext/testing/btest/.tmp/language.switch-types-error-unsupported/switch-types-error-unsupported.zeek, lines 11-12: cannot cast switch expression to case type (case type bool, type addr:{ return (Bool or address!)}) diff --git a/testing/btest/Baseline/language.table-list-assign-type-check/output b/testing/btest/Baseline/language.table-list-assign-type-check/output new file mode 100644 index 0000000000..2062d3f4f3 --- /dev/null +++ b/testing/btest/Baseline/language.table-list-assign-type-check/output @@ -0,0 +1,3 @@ +error in 
/home/jon/pro/zeek/zeek/testing/btest/.tmp/language.table-list-assign-type-check/table-list-assign-type-check.zeek, lines 15-20: table type mismatch in assignment (service_table_bad_yield = table(www, 80 = Internal Web Server, dns1, 53 = Internal DNS 1, dns2, 53 = Internal DNS 2, dhcp-for-wifi, 443 = DHCP Management interface for WiFi)) +error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.table-list-assign-type-check/table-list-assign-type-check.zeek, lines 23-28: table type mismatch in assignment (service_table_bad_index = table(www, 80 = Internal Web Server, dns1, 53 = Internal DNS 1, dns2, 53 = Internal DNS 2, dhcp-for-wifi, 443 = DHCP Management interface for WiFi)) +error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.table-list-assign-type-check/table-list-assign-type-check.zeek, line 31: set type mismatch in assignment (test_set_bad = set(1, 2, 3)) diff --git a/testing/btest/Baseline/language.table-type-checking/out b/testing/btest/Baseline/language.table-type-checking/out index 488cb83ab2..a6307a6155 100644 --- a/testing/btest/Baseline/language.table-type-checking/out +++ b/testing/btest/Baseline/language.table-type-checking/out @@ -1,14 +1,14 @@ -error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.bro, line 7: type clash (port and zero) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.bro, line 7: inconsistent types in table constructor (table(zero = 0)) -error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.bro, line 10: type clash (port and one) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.bro, line 10: inconsistent types in table constructor (table(one = 1)) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.bro, line 17: type clash in assignment (gda = gda2) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.bro, line 21 and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.bro, line 4: index type doesn't match table (three and list of port) -expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.bro, line 21: type clash in table assignment (three = 3) -error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.bro, line 26: type clash (port and thousand) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.bro, line 26: inconsistent types in table constructor (table(thousand = 1000)) -error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.bro, line 32: type clash (port and thousand-one) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.bro, line 32: inconsistent types in table constructor (table(thousand-one = 1001)) -error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.bro, line 39: type clash (port and thousand-two) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.bro, line 39: inconsistent types in table constructor 
(table(thousand-two = 1002)) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.bro, line 45: type clash in assignment (lea = table(thousand-three = 1003)) +error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.zeek, line 7: type clash (port and zero) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.zeek, line 7: inconsistent types in table constructor (table(zero = 0)) +error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.zeek, line 10: type clash (port and one) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.zeek, line 10: inconsistent types in table constructor (table(one = 1)) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.zeek, line 17: type clash in assignment (gda = gda2) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.zeek, line 21 and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.zeek, line 4: index type doesn't match table (three and list of port) +expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.zeek, line 21: type clash in table assignment (three = 3) +error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.zeek, line 26: type clash (port and thousand) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.zeek, line 26: inconsistent types in table constructor (table(thousand = 1000)) +error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.zeek, line 32: type clash (port and thousand-one) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.zeek, line 32: inconsistent types in table constructor (table(thousand-one = 1001)) +error in port and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.zeek, line 39: type clash (port and thousand-two) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.zeek, line 39: inconsistent types in table constructor (table(thousand-two = 1002)) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.table-type-checking/table-type-checking.zeek, line 45: type clash in assignment (lea = table(thousand-three = 1003)) diff --git a/testing/btest/Baseline/language.ternary-record-mismatch/out b/testing/btest/Baseline/language.ternary-record-mismatch/out index 0c1cefce0d..91a3aa2e02 100644 --- a/testing/btest/Baseline/language.ternary-record-mismatch/out +++ b/testing/btest/Baseline/language.ternary-record-mismatch/out @@ -1 +1 @@ -error in /Users/jon/pro/zeek/zeek/testing/btest/.tmp/language.ternary-record-mismatch/ternary-record-mismatch.bro, lines 13-14: operands must be of the same type ((F) ? (coerce [$a=a string, $b=6] to MyRecord) : [$a=a different string, $b=7]) +error in /Users/jon/pro/zeek/zeek/testing/btest/.tmp/language.ternary-record-mismatch/ternary-record-mismatch.zeek, lines 13-14: operands must be of the same type ((F) ? 
(coerce [$a=a string, $b=6] to MyRecord) : [$a=a different string, $b=7]) diff --git a/testing/btest/Baseline/language.type-cast-error-dynamic/output b/testing/btest/Baseline/language.type-cast-error-dynamic/output index 7c4ec0332f..dfac361f11 100644 --- a/testing/btest/Baseline/language.type-cast-error-dynamic/output +++ b/testing/btest/Baseline/language.type-cast-error-dynamic/output @@ -1,4 +1,4 @@ -expression error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.type-cast-error-dynamic/type-cast-error-dynamic.bro, line 11: invalid cast of value with type 'count' to type 'string' (a as string) -expression error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.type-cast-error-dynamic/type-cast-error-dynamic.bro, line 11: invalid cast of value with type 'record { a:addr; b:port; }' to type 'string' (a as string) -expression error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.type-cast-error-dynamic/type-cast-error-dynamic.bro, line 11: invalid cast of value with type 'record { data:opaque of Broker::Data; }' to type 'string' (nil $data field) (a as string) +expression error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.type-cast-error-dynamic/type-cast-error-dynamic.zeek, line 11: invalid cast of value with type 'count' to type 'string' (a as string) +expression error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.type-cast-error-dynamic/type-cast-error-dynamic.zeek, line 11: invalid cast of value with type 'record { a:addr; b:port; }' to type 'string' (a as string) +expression error in /home/jon/pro/zeek/zeek/testing/btest/.tmp/language.type-cast-error-dynamic/type-cast-error-dynamic.zeek, line 11: invalid cast of value with type 'record { data:opaque of Broker::Data; }' to type 'string' (nil $data field) (a as string) data is string, F diff --git a/testing/btest/Baseline/language.type-cast-error-static/output b/testing/btest/Baseline/language.type-cast-error-static/output index a93e262f21..bd00361939 100644 --- a/testing/btest/Baseline/language.type-cast-error-static/output +++ b/testing/btest/Baseline/language.type-cast-error-static/output @@ -1,2 +1,2 @@ -error in /home/robin/bro/lang-ext/testing/btest/.tmp/language.type-cast-error-static/type-cast-error-static.bro, line 14: cast not supported (string as count) -error in /home/robin/bro/lang-ext/testing/btest/.tmp/language.type-cast-error-static/type-cast-error-static.bro, line 15: cast not supported (string as X) +error in /home/robin/bro/lang-ext/testing/btest/.tmp/language.type-cast-error-static/type-cast-error-static.zeek, line 14: cast not supported (string as count) +error in /home/robin/bro/lang-ext/testing/btest/.tmp/language.type-cast-error-static/type-cast-error-static.zeek, line 15: cast not supported (string as X) diff --git a/testing/btest/Baseline/language.type-coerce-numerics/double_convert_failure1.out b/testing/btest/Baseline/language.type-coerce-numerics/double_convert_failure1.out new file mode 100644 index 0000000000..d139c43a0c --- /dev/null +++ b/testing/btest/Baseline/language.type-coerce-numerics/double_convert_failure1.out @@ -0,0 +1 @@ +error in ./double_convert_failure1.zeek, line 7 and double: type clash for field "cc" ((coerce [$cc=5.0] to myrecord) and double) diff --git a/testing/btest/Baseline/language.type-coerce-numerics/double_convert_failure2.out b/testing/btest/Baseline/language.type-coerce-numerics/double_convert_failure2.out new file mode 100644 index 0000000000..2cc83659de --- /dev/null +++ 
b/testing/btest/Baseline/language.type-coerce-numerics/double_convert_failure2.out @@ -0,0 +1 @@ +error in ./double_convert_failure2.zeek, line 7 and double: type clash for field "cc" ((coerce [$cc=-5.0] to myrecord) and double) diff --git a/testing/btest/Baseline/language.type-coerce-numerics/first_set.out b/testing/btest/Baseline/language.type-coerce-numerics/first_set.out new file mode 100644 index 0000000000..6ff2d7d4df --- /dev/null +++ b/testing/btest/Baseline/language.type-coerce-numerics/first_set.out @@ -0,0 +1,14 @@ +error in int and ./first_set.zeek, line 46: overflow promoting from unsigned/double to signed arithmetic value (int and 9223372036854775808) +expression error in ./first_set.zeek, line 46: Failed type conversion ((coerce [$ii=9223372036854775808] to record { ii:int; cc:count; dd:double; })) +3 +int +4 +int +5 +int +6 +int +7.0 +double +-5.0 +double diff --git a/testing/btest/Baseline/language.type-coerce-numerics/int_convert_failure.out b/testing/btest/Baseline/language.type-coerce-numerics/int_convert_failure.out new file mode 100644 index 0000000000..3c896e096f --- /dev/null +++ b/testing/btest/Baseline/language.type-coerce-numerics/int_convert_failure.out @@ -0,0 +1 @@ +error in ./int_convert_failure.zeek, line 7 and int: type clash for field "cc" ((coerce [$cc=-5] to myrecord) and int) diff --git a/testing/btest/Baseline/language.type-coerce-numerics/vectors.out b/testing/btest/Baseline/language.type-coerce-numerics/vectors.out new file mode 100644 index 0000000000..5baa5f67c7 --- /dev/null +++ b/testing/btest/Baseline/language.type-coerce-numerics/vectors.out @@ -0,0 +1,18 @@ +vector of count +vector of count +vector of count +[1, 2] +[3, 4] +[4, 6] +vector of int +vector of int +vector of int +[1, 2] +[3, 4] +[4, 6] +vector of double +vector of double +vector of double +[1.0, 2.0] +[3.0, 4.0] +[4.0, 6.0] diff --git a/testing/btest/Baseline/language.type-type-error/.stderr b/testing/btest/Baseline/language.type-type-error/.stderr index 95cb065ece..b0e0800c72 100644 --- a/testing/btest/Baseline/language.type-type-error/.stderr +++ b/testing/btest/Baseline/language.type-type-error/.stderr @@ -1 +1 @@ -error in /home/jsiwek/bro/testing/btest/.tmp/language.type-type-error/type-type-error.bro, line 13: not a record (r$a) +error in /home/jsiwek/bro/testing/btest/.tmp/language.type-type-error/type-type-error.zeek, line 13: not a record (r$a) diff --git a/testing/btest/Baseline/language.undefined-delete-field/output b/testing/btest/Baseline/language.undefined-delete-field/output index bd0fb99289..99a71b1087 100644 --- a/testing/btest/Baseline/language.undefined-delete-field/output +++ b/testing/btest/Baseline/language.undefined-delete-field/output @@ -1,2 +1,2 @@ -error in /Users/johanna/bro/master/testing/btest/.tmp/language.undefined-delete-field/undefined-delete-field.bro, line 14: no such field in record (x$c) +error in /Users/johanna/bro/master/testing/btest/.tmp/language.undefined-delete-field/undefined-delete-field.zeek, line 14: no such field in record (x$c) 1 diff --git a/testing/btest/Baseline/language.uninitialized-local/out b/testing/btest/Baseline/language.uninitialized-local/out index 24d45d3456..dd6867f524 100644 --- a/testing/btest/Baseline/language.uninitialized-local/out +++ b/testing/btest/Baseline/language.uninitialized-local/out @@ -1 +1 @@ -expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.uninitialized-local/uninitialized-local.bro, line 16: value used but not set (my_string) +expression error in 
/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.uninitialized-local/uninitialized-local.zeek, line 16: value used but not set (my_string) diff --git a/testing/btest/Baseline/language.uninitialized-local2/out b/testing/btest/Baseline/language.uninitialized-local2/out index bba567878e..ba668f08ff 100644 --- a/testing/btest/Baseline/language.uninitialized-local2/out +++ b/testing/btest/Baseline/language.uninitialized-local2/out @@ -1,2 +1,2 @@ -expression error in /home/jon/projects/bro/bro/testing/btest/.tmp/language.uninitialized-local2/uninitialized-local2.bro, line 19: value used but not set (var_b) +expression error in /home/jon/projects/bro/bro/testing/btest/.tmp/language.uninitialized-local2/uninitialized-local2.zeek, line 19: value used but not set (var_b) var_a is, baz diff --git a/testing/btest/Baseline/language.vector-type-checking/out b/testing/btest/Baseline/language.vector-type-checking/out index e96017082a..33be41836f 100644 --- a/testing/btest/Baseline/language.vector-type-checking/out +++ b/testing/btest/Baseline/language.vector-type-checking/out @@ -1,19 +1,19 @@ -error in count and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.bro, line 7: arithmetic mixed with non-arithmetic (count and zero) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.bro, line 7 and count: type mismatch (zero and count) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.bro, line 7: inconsistent types in vector constructor (vector(zero)) -error in count and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.bro, line 10: arithmetic mixed with non-arithmetic (count and one) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.bro, line 10 and count: type mismatch (one and count) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.bro, line 10: inconsistent types in vector constructor (vector(one)) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.bro, line 17: type clash in assignment (gda = gda2) -error in count and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.bro, line 21: arithmetic mixed with non-arithmetic (count and three) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.bro, line 21: initialization type mismatch at index 0 (vector(three) and three) -error in count and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.bro, line 26: arithmetic mixed with non-arithmetic (count and thousand) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.bro, line 26 and count: type mismatch (thousand and count) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.bro, line 26: inconsistent types in vector constructor (vector(thousand)) -error in count and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.bro, line 32: arithmetic mixed with non-arithmetic (count and thousand-one) -error in 
/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.bro, line 32 and count: type mismatch (thousand-one and count) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.bro, line 32: inconsistent types in vector constructor (vector(thousand-one)) -error in count and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.bro, line 39: arithmetic mixed with non-arithmetic (count and thousand-two) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.bro, line 39 and count: type mismatch (thousand-two and count) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.bro, line 39: inconsistent types in vector constructor (vector(thousand-two)) -error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.bro, line 45: type clash in assignment (lea = vector(thousand-three)) +error in count and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.zeek, line 7: arithmetic mixed with non-arithmetic (count and zero) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.zeek, line 7 and count: type mismatch (zero and count) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.zeek, line 7: inconsistent types in vector constructor (vector(zero)) +error in count and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.zeek, line 10: arithmetic mixed with non-arithmetic (count and one) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.zeek, line 10 and count: type mismatch (one and count) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.zeek, line 10: inconsistent types in vector constructor (vector(one)) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.zeek, line 17: type clash in assignment (gda = gda2) +error in count and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.zeek, line 21: arithmetic mixed with non-arithmetic (count and three) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.zeek, line 21: initialization type mismatch at index 0 (vector(three) and three) +error in count and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.zeek, line 26: arithmetic mixed with non-arithmetic (count and thousand) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.zeek, line 26 and count: type mismatch (thousand and count) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.zeek, line 26: inconsistent types in vector constructor (vector(thousand)) +error in count and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.zeek, line 32: arithmetic mixed with non-arithmetic (count and thousand-one) +error in 
/Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.zeek, line 32 and count: type mismatch (thousand-one and count) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.zeek, line 32: inconsistent types in vector constructor (vector(thousand-one)) +error in count and /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.zeek, line 39: arithmetic mixed with non-arithmetic (count and thousand-two) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.zeek, line 39 and count: type mismatch (thousand-two and count) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.zeek, line 39: inconsistent types in vector constructor (vector(thousand-two)) +error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.vector-type-checking/vector-type-checking.zeek, line 45: type clash in assignment (lea = vector(thousand-three)) diff --git a/testing/btest/Baseline/language.vector/out b/testing/btest/Baseline/language.vector/out index 0fdcc1fa24..2955eda26c 100644 --- a/testing/btest/Baseline/language.vector/out +++ b/testing/btest/Baseline/language.vector/out @@ -58,3 +58,12 @@ access element (PASS) && operator (PASS) || operator (PASS) += operator (PASS) +slicing (PASS) +slicing (PASS) +slicing (PASS) +slicing (PASS) +slicing (PASS) +slicing assignment (PASS) +slicing assignment (PASS) +slicing assignment grow (PASS) +slicing assignment shrink (PASS) diff --git a/testing/btest/Baseline/language.when-on-globals/out b/testing/btest/Baseline/language.when-on-globals/out new file mode 100644 index 0000000000..44dae2c89e --- /dev/null +++ b/testing/btest/Baseline/language.when-on-globals/out @@ -0,0 +1,4 @@ +"j" in x3[20]$x, expected timeout +15 in x2, T +x1 != 42, T +x2[10], T diff --git a/testing/btest/Baseline/language.when-unitialized-rhs/out b/testing/btest/Baseline/language.when-unitialized-rhs/out index 6698887be0..bad1bdbb78 100644 --- a/testing/btest/Baseline/language.when-unitialized-rhs/out +++ b/testing/btest/Baseline/language.when-unitialized-rhs/out @@ -1,5 +1,5 @@ -expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.when-unitialized-rhs/when-unitialized-rhs.bro, line 9: value used but not set (crashMe) -expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.when-unitialized-rhs/when-unitialized-rhs.bro, line 14: value used but not set (x) +expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.when-unitialized-rhs/when-unitialized-rhs.zeek, line 9: value used but not set (crashMe) +expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/language.when-unitialized-rhs/when-unitialized-rhs.zeek, line 14: value used but not set (x) 1 2 3 diff --git a/testing/btest/Baseline/language.wrong-delete-field/output b/testing/btest/Baseline/language.wrong-delete-field/output index 1eefa1d2fe..1250f03c3d 100644 --- a/testing/btest/Baseline/language.wrong-delete-field/output +++ b/testing/btest/Baseline/language.wrong-delete-field/output @@ -1 +1 @@ -error in /da/home/robin/bro/master/testing/btest/.tmp/language.wrong-delete-field/wrong-delete-field.bro, line 10: illegal delete statement (delete x$a) +error in /da/home/robin/bro/master/testing/btest/.tmp/language.wrong-delete-field/wrong-delete-field.zeek, line 10: illegal delete statement (delete x$a) 
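Note: the new "slicing (PASS)" and "slicing assignment" entries added to the language.vector baseline above cover Zeek's vector slicing operator. The following is only a minimal sketch of that behavior, assuming Python-style half-open index ranges; the variable names are illustrative and the actual test script is not part of this diff:

	event zeek_init()
		{
		local v = vector(1, 2, 3, 4, 5);

		# Slicing yields a new vector over the half-open index range.
		print v[1:3];            # expected: [2, 3]

		# Assigning to a slice replaces that range in place ...
		v[1:3] = vector(20, 30);
		print v;                 # expected: [1, 20, 30, 4, 5]

		# ... and a replacement of a different length grows or shrinks
		# the vector, matching the "grow"/"shrink" baseline entries.
		v[1:3] = vector(0);
		print v;                 # expected: [1, 0, 4, 5]
		}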
diff --git a/testing/btest/Baseline/language.zeek_init/out b/testing/btest/Baseline/language.zeek_init/out new file mode 100644 index 0000000000..aa17ec8aa8 --- /dev/null +++ b/testing/btest/Baseline/language.zeek_init/out @@ -0,0 +1,8 @@ +zeek_init at priority 10! +bro_init at priority 5! +zeek_init at priority 0! +bro_init at priority -10! +zeek_done at priority 10! +bro_done at priority 5! +zeek_done at priority 0! +bro_done at priority -10! diff --git a/testing/btest/Baseline/language.zeek_script_loaded/out b/testing/btest/Baseline/language.zeek_script_loaded/out new file mode 100644 index 0000000000..cddf509308 --- /dev/null +++ b/testing/btest/Baseline/language.zeek_script_loaded/out @@ -0,0 +1,4 @@ +zeek_script_loaded priority 10 +bro_script_loaded priority 5 +zeek_script_loaded priority 0 +bro_script_loaded priority -10 diff --git a/testing/btest/Baseline/plugins.hooks/output b/testing/btest/Baseline/plugins.hooks/output index d4a84a5223..37c2f126e3 100644 --- a/testing/btest/Baseline/plugins.hooks/output +++ b/testing/btest/Baseline/plugins.hooks/output @@ -37,6 +37,7 @@ 0.000000 MetaHookPost CallFunction(Analyzer::__register_for_port, , (Analyzer::ANALYZER_MODBUS, 502/tcp)) -> 0.000000 MetaHookPost CallFunction(Analyzer::__register_for_port, , (Analyzer::ANALYZER_MYSQL, 1434/tcp)) -> 0.000000 MetaHookPost CallFunction(Analyzer::__register_for_port, , (Analyzer::ANALYZER_MYSQL, 3306/tcp)) -> +0.000000 MetaHookPost CallFunction(Analyzer::__register_for_port, , (Analyzer::ANALYZER_NTP, 123/udp)) -> 0.000000 MetaHookPost CallFunction(Analyzer::__register_for_port, , (Analyzer::ANALYZER_RADIUS, 1812/udp)) -> 0.000000 MetaHookPost CallFunction(Analyzer::__register_for_port, , (Analyzer::ANALYZER_RDP, 3389/tcp)) -> 0.000000 MetaHookPost CallFunction(Analyzer::__register_for_port, , (Analyzer::ANALYZER_SIP, 5060/udp)) -> @@ -103,6 +104,7 @@ 0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, , (Analyzer::ANALYZER_MODBUS, 502/tcp)) -> 0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, , (Analyzer::ANALYZER_MYSQL, 1434/tcp)) -> 0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, , (Analyzer::ANALYZER_MYSQL, 3306/tcp)) -> +0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, , (Analyzer::ANALYZER_NTP, 123/udp)) -> 0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, , (Analyzer::ANALYZER_RADIUS, 1812/udp)) -> 0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, , (Analyzer::ANALYZER_RDP, 3389/tcp)) -> 0.000000 MetaHookPost CallFunction(Analyzer::register_for_port, , (Analyzer::ANALYZER_SIP, 5060/udp)) -> @@ -145,6 +147,7 @@ 0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, , (Analyzer::ANALYZER_KRB_TCP, {88/tcp})) -> 0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, , (Analyzer::ANALYZER_MODBUS, {502/tcp})) -> 0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, , (Analyzer::ANALYZER_MYSQL, {1434<...>/tcp})) -> +0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, , (Analyzer::ANALYZER_NTP, {123/udp})) -> 0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, , (Analyzer::ANALYZER_RADIUS, {1812/udp})) -> 0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, , (Analyzer::ANALYZER_RDP, {3389/tcp})) -> 0.000000 MetaHookPost CallFunction(Analyzer::register_for_ports, , (Analyzer::ANALYZER_SIP, {5060/udp})) -> @@ -161,9 +164,9 @@ 0.000000 MetaHookPost CallFunction(Cluster::is_enabled, , ()) -> 0.000000 MetaHookPost CallFunction(Cluster::is_enabled, , 
()) -> 0.000000 MetaHookPost CallFunction(Cluster::local_node_type, , ()) -> -0.000000 MetaHookPost CallFunction(Cluster::register_pool, , ([topic=bro<...>/logger, node_type=Cluster::LOGGER, max_nodes=, exclusive=F])) -> -0.000000 MetaHookPost CallFunction(Cluster::register_pool, , ([topic=bro<...>/proxy, node_type=Cluster::PROXY, max_nodes=, exclusive=F])) -> -0.000000 MetaHookPost CallFunction(Cluster::register_pool, , ([topic=bro<...>/worker, node_type=Cluster::WORKER, max_nodes=, exclusive=F])) -> +0.000000 MetaHookPost CallFunction(Cluster::register_pool, , ([topic=zeek<...>/logger, node_type=Cluster::LOGGER, max_nodes=, exclusive=F])) -> +0.000000 MetaHookPost CallFunction(Cluster::register_pool, , ([topic=zeek<...>/proxy, node_type=Cluster::PROXY, max_nodes=, exclusive=F])) -> +0.000000 MetaHookPost CallFunction(Cluster::register_pool, , ([topic=zeek<...>/worker, node_type=Cluster::WORKER, max_nodes=, exclusive=F])) -> 0.000000 MetaHookPost CallFunction(Files::register_analyzer_add_callback, , (Files::ANALYZER_EXTRACT, FileExtract::on_add{ if (!FileExtract::args?$extract_filename) FileExtract::args$extract_filename = cat(extract-, FileExtract::f$last_active, -, FileExtract::f$source, -, FileExtract::f$id)FileExtract::f$info$extracted = FileExtract::args$extract_filenameFileExtract::args$extract_filename = build_path_compressed(FileExtract::prefix, FileExtract::args$extract_filename)FileExtract::f$info$extracted_cutoff = Fmkdir(FileExtract::prefix)})) -> 0.000000 MetaHookPost CallFunction(Files::register_for_mime_type, , (Files::ANALYZER_MD5, application/pkix-cert)) -> 0.000000 MetaHookPost CallFunction(Files::register_for_mime_type, , (Files::ANALYZER_MD5, application/x-x509-ca-cert)) -> @@ -202,7 +205,7 @@ 0.000000 MetaHookPost CallFunction(Log::__add_filter, , (KRB::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=kerberos, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> 0.000000 MetaHookPost CallFunction(Log::__add_filter, , (Modbus::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=modbus, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> 0.000000 MetaHookPost CallFunction(Log::__add_filter, , (NTLM::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=ntlm, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::__add_filter, , (NetControl::CATCH_RELEASE, [name=default, writer=Log::WRITER_ASCII, pred=, path=netcontrol_catch_release, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> +0.000000 MetaHookPost CallFunction(Log::__add_filter, , (NTP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=Log::default_path_func{ if ( != Log::path) return (Log::path)Log::id_str = fmt(%s, Log::id)Log::parts = split_string1(Log::id_str, <...>/, )return (cat(to_lower(Log::parts[0]), _, to_lower(Log::parts[1])))}elsereturn (to_lower(Log::id_str))}, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, 
config={}])) -> 0.000000 MetaHookPost CallFunction(Log::__add_filter, , (NetControl::DROP, [name=default, writer=Log::WRITER_ASCII, pred=, path=netcontrol_drop, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> 0.000000 MetaHookPost CallFunction(Log::__add_filter, , (NetControl::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=netcontrol, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> 0.000000 MetaHookPost CallFunction(Log::__add_filter, , (NetControl::SHUNT, [name=default, writer=Log::WRITER_ASCII, pred=, path=netcontrol_shunt, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> @@ -227,7 +230,6 @@ 0.000000 MetaHookPost CallFunction(Log::__add_filter, , (Software::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=software, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> 0.000000 MetaHookPost CallFunction(Log::__add_filter, , (Syslog::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=syslog, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> 0.000000 MetaHookPost CallFunction(Log::__add_filter, , (Tunnel::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=tunnel, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::__add_filter, , (Unified2::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=unified2, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> 0.000000 MetaHookPost CallFunction(Log::__add_filter, , (Weird::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=weird, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> 0.000000 MetaHookPost CallFunction(Log::__add_filter, , (X509::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=x509, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> 0.000000 MetaHookPost CallFunction(Log::__add_filter, , (mysql::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=mysql, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> @@ -248,7 +250,7 @@ 0.000000 MetaHookPost CallFunction(Log::__create_stream, , (KRB::LOG, [columns=KRB::Info, ev=KRB::log_krb, path=kerberos])) -> 0.000000 MetaHookPost CallFunction(Log::__create_stream, , (Modbus::LOG, [columns=Modbus::Info, ev=Modbus::log_modbus, path=modbus])) -> 0.000000 MetaHookPost CallFunction(Log::__create_stream, , (NTLM::LOG, 
[columns=NTLM::Info, ev=, path=ntlm])) -> -0.000000 MetaHookPost CallFunction(Log::__create_stream, , (NetControl::CATCH_RELEASE, [columns=NetControl::CatchReleaseInfo, ev=NetControl::log_netcontrol_catch_release, path=netcontrol_catch_release])) -> +0.000000 MetaHookPost CallFunction(Log::__create_stream, , (NTP::LOG, [columns=NTP::Info, ev=NTP::log_ntp, path=])) -> 0.000000 MetaHookPost CallFunction(Log::__create_stream, , (NetControl::DROP, [columns=NetControl::DropInfo, ev=NetControl::log_netcontrol_drop, path=netcontrol_drop])) -> 0.000000 MetaHookPost CallFunction(Log::__create_stream, , (NetControl::LOG, [columns=NetControl::Info, ev=NetControl::log_netcontrol, path=netcontrol])) -> 0.000000 MetaHookPost CallFunction(Log::__create_stream, , (NetControl::SHUNT, [columns=NetControl::ShuntInfo, ev=NetControl::log_netcontrol_shunt, path=netcontrol_shunt])) -> @@ -273,11 +275,10 @@ 0.000000 MetaHookPost CallFunction(Log::__create_stream, , (Software::LOG, [columns=Software::Info, ev=Software::log_software, path=software])) -> 0.000000 MetaHookPost CallFunction(Log::__create_stream, , (Syslog::LOG, [columns=Syslog::Info, ev=, path=syslog])) -> 0.000000 MetaHookPost CallFunction(Log::__create_stream, , (Tunnel::LOG, [columns=Tunnel::Info, ev=, path=tunnel])) -> -0.000000 MetaHookPost CallFunction(Log::__create_stream, , (Unified2::LOG, [columns=Unified2::Info, ev=Unified2::log_unified2, path=unified2])) -> 0.000000 MetaHookPost CallFunction(Log::__create_stream, , (Weird::LOG, [columns=Weird::Info, ev=Weird::log_weird, path=weird])) -> 0.000000 MetaHookPost CallFunction(Log::__create_stream, , (X509::LOG, [columns=X509::Info, ev=X509::log_x509, path=x509])) -> 0.000000 MetaHookPost CallFunction(Log::__create_stream, , (mysql::LOG, [columns=MySQL::Info, ev=MySQL::log_mysql, path=mysql])) -> -0.000000 MetaHookPost CallFunction(Log::__write, , (PacketFilter::LOG, [ts=1552701731.192609, node=bro, filter=ip or not ip, init=T, success=T])) -> +0.000000 MetaHookPost CallFunction(Log::__write, , (PacketFilter::LOG, [ts=1560631035.263667, node=zeek, filter=ip or not ip, init=T, success=T])) -> 0.000000 MetaHookPost CallFunction(Log::add_default_filter, , (Broker::LOG)) -> 0.000000 MetaHookPost CallFunction(Log::add_default_filter, , (Cluster::LOG)) -> 0.000000 MetaHookPost CallFunction(Log::add_default_filter, , (Config::LOG)) -> @@ -295,7 +296,7 @@ 0.000000 MetaHookPost CallFunction(Log::add_default_filter, , (KRB::LOG)) -> 0.000000 MetaHookPost CallFunction(Log::add_default_filter, , (Modbus::LOG)) -> 0.000000 MetaHookPost CallFunction(Log::add_default_filter, , (NTLM::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, , (NetControl::CATCH_RELEASE)) -> +0.000000 MetaHookPost CallFunction(Log::add_default_filter, , (NTP::LOG)) -> 0.000000 MetaHookPost CallFunction(Log::add_default_filter, , (NetControl::DROP)) -> 0.000000 MetaHookPost CallFunction(Log::add_default_filter, , (NetControl::LOG)) -> 0.000000 MetaHookPost CallFunction(Log::add_default_filter, , (NetControl::SHUNT)) -> @@ -320,7 +321,6 @@ 0.000000 MetaHookPost CallFunction(Log::add_default_filter, , (Software::LOG)) -> 0.000000 MetaHookPost CallFunction(Log::add_default_filter, , (Syslog::LOG)) -> 0.000000 MetaHookPost CallFunction(Log::add_default_filter, , (Tunnel::LOG)) -> -0.000000 MetaHookPost CallFunction(Log::add_default_filter, , (Unified2::LOG)) -> 0.000000 MetaHookPost CallFunction(Log::add_default_filter, , (Weird::LOG)) -> 0.000000 MetaHookPost CallFunction(Log::add_default_filter, , (X509::LOG)) -> 
0.000000 MetaHookPost CallFunction(Log::add_default_filter, , (mysql::LOG)) -> @@ -341,7 +341,7 @@ 0.000000 MetaHookPost CallFunction(Log::add_filter, , (KRB::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> 0.000000 MetaHookPost CallFunction(Log::add_filter, , (Modbus::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> 0.000000 MetaHookPost CallFunction(Log::add_filter, , (NTLM::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, , (NetControl::CATCH_RELEASE, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> +0.000000 MetaHookPost CallFunction(Log::add_filter, , (NTP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> 0.000000 MetaHookPost CallFunction(Log::add_filter, , (NetControl::DROP, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> 0.000000 MetaHookPost CallFunction(Log::add_filter, , (NetControl::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> 0.000000 MetaHookPost CallFunction(Log::add_filter, , (NetControl::SHUNT, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> @@ -366,7 +366,6 @@ 0.000000 MetaHookPost CallFunction(Log::add_filter, , (Software::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> 0.000000 MetaHookPost CallFunction(Log::add_filter, , (Syslog::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> 0.000000 MetaHookPost CallFunction(Log::add_filter, , (Tunnel::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> -0.000000 MetaHookPost CallFunction(Log::add_filter, , 
(Unified2::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> 0.000000 MetaHookPost CallFunction(Log::add_filter, , (Weird::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> 0.000000 MetaHookPost CallFunction(Log::add_filter, , (X509::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> 0.000000 MetaHookPost CallFunction(Log::add_filter, , (mysql::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -> @@ -387,7 +386,7 @@ 0.000000 MetaHookPost CallFunction(Log::add_stream_filters, , (KRB::LOG, default)) -> 0.000000 MetaHookPost CallFunction(Log::add_stream_filters, , (Modbus::LOG, default)) -> 0.000000 MetaHookPost CallFunction(Log::add_stream_filters, , (NTLM::LOG, default)) -> -0.000000 MetaHookPost CallFunction(Log::add_stream_filters, , (NetControl::CATCH_RELEASE, default)) -> +0.000000 MetaHookPost CallFunction(Log::add_stream_filters, , (NTP::LOG, default)) -> 0.000000 MetaHookPost CallFunction(Log::add_stream_filters, , (NetControl::DROP, default)) -> 0.000000 MetaHookPost CallFunction(Log::add_stream_filters, , (NetControl::LOG, default)) -> 0.000000 MetaHookPost CallFunction(Log::add_stream_filters, , (NetControl::SHUNT, default)) -> @@ -412,7 +411,6 @@ 0.000000 MetaHookPost CallFunction(Log::add_stream_filters, , (Software::LOG, default)) -> 0.000000 MetaHookPost CallFunction(Log::add_stream_filters, , (Syslog::LOG, default)) -> 0.000000 MetaHookPost CallFunction(Log::add_stream_filters, , (Tunnel::LOG, default)) -> -0.000000 MetaHookPost CallFunction(Log::add_stream_filters, , (Unified2::LOG, default)) -> 0.000000 MetaHookPost CallFunction(Log::add_stream_filters, , (Weird::LOG, default)) -> 0.000000 MetaHookPost CallFunction(Log::add_stream_filters, , (X509::LOG, default)) -> 0.000000 MetaHookPost CallFunction(Log::add_stream_filters, , (mysql::LOG, default)) -> @@ -433,7 +431,7 @@ 0.000000 MetaHookPost CallFunction(Log::create_stream, , (KRB::LOG, [columns=KRB::Info, ev=KRB::log_krb, path=kerberos])) -> 0.000000 MetaHookPost CallFunction(Log::create_stream, , (Modbus::LOG, [columns=Modbus::Info, ev=Modbus::log_modbus, path=modbus])) -> 0.000000 MetaHookPost CallFunction(Log::create_stream, , (NTLM::LOG, [columns=NTLM::Info, ev=, path=ntlm])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, , (NetControl::CATCH_RELEASE, [columns=NetControl::CatchReleaseInfo, ev=NetControl::log_netcontrol_catch_release, path=netcontrol_catch_release])) -> +0.000000 MetaHookPost CallFunction(Log::create_stream, , (NTP::LOG, [columns=NTP::Info, ev=NTP::log_ntp, path=])) -> 0.000000 MetaHookPost CallFunction(Log::create_stream, , (NetControl::DROP, [columns=NetControl::DropInfo, ev=NetControl::log_netcontrol_drop, path=netcontrol_drop])) -> 0.000000 MetaHookPost CallFunction(Log::create_stream, , (NetControl::LOG, [columns=NetControl::Info, 
ev=NetControl::log_netcontrol, path=netcontrol])) -> 0.000000 MetaHookPost CallFunction(Log::create_stream, , (NetControl::SHUNT, [columns=NetControl::ShuntInfo, ev=NetControl::log_netcontrol_shunt, path=netcontrol_shunt])) -> @@ -458,11 +456,10 @@ 0.000000 MetaHookPost CallFunction(Log::create_stream, , (Software::LOG, [columns=Software::Info, ev=Software::log_software, path=software])) -> 0.000000 MetaHookPost CallFunction(Log::create_stream, , (Syslog::LOG, [columns=Syslog::Info, ev=, path=syslog])) -> 0.000000 MetaHookPost CallFunction(Log::create_stream, , (Tunnel::LOG, [columns=Tunnel::Info, ev=, path=tunnel])) -> -0.000000 MetaHookPost CallFunction(Log::create_stream, , (Unified2::LOG, [columns=Unified2::Info, ev=Unified2::log_unified2, path=unified2])) -> 0.000000 MetaHookPost CallFunction(Log::create_stream, , (Weird::LOG, [columns=Weird::Info, ev=Weird::log_weird, path=weird])) -> 0.000000 MetaHookPost CallFunction(Log::create_stream, , (X509::LOG, [columns=X509::Info, ev=X509::log_x509, path=x509])) -> 0.000000 MetaHookPost CallFunction(Log::create_stream, , (mysql::LOG, [columns=MySQL::Info, ev=MySQL::log_mysql, path=mysql])) -> -0.000000 MetaHookPost CallFunction(Log::write, , (PacketFilter::LOG, [ts=1552701731.192609, node=bro, filter=ip or not ip, init=T, success=T])) -> +0.000000 MetaHookPost CallFunction(Log::write, , (PacketFilter::LOG, [ts=1560631035.263667, node=zeek, filter=ip or not ip, init=T, success=T])) -> 0.000000 MetaHookPost CallFunction(NetControl::check_plugins, , ()) -> 0.000000 MetaHookPost CallFunction(NetControl::init, , ()) -> 0.000000 MetaHookPost CallFunction(Notice::want_pp, , ()) -> @@ -497,7 +494,6 @@ 0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Input::default_mode, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100)) -> 0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Input::default_reader, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100)) -> 0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (KRB::ignored_errors, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100)) -> -0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (NetControl::catch_release_warn_blocked_ip_encountered, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100)) -> 0.000000 MetaHookPost CallFunction(Option::set_change_handler, , 
(NetControl::default_priority, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100)) -> 0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Notice::alarmed_types, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100)) -> 0.000000 MetaHookPost CallFunction(Option::set_change_handler, , (Notice::default_suppression_interval, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100)) -> @@ -560,13 +556,10 @@ 0.000000 MetaHookPost CallFunction(SumStats::register_observe_plugin, , (SumStats::UNIQUE, anonymous-function{ if (!SumStats::rv?$unique_vals) SumStats::rv$unique_vals = (coerce set() to set[SumStats::Observation])if (SumStats::r?$unique_max) SumStats::rv$unique_max = SumStats::r$unique_maxif (!SumStats::r?$unique_max || flattenSumStats::rv$unique_vals <= SumStats::r$unique_max) add SumStats::rv$unique_vals[SumStats::obs]SumStats::rv$unique = flattenSumStats::rv$unique_vals})) -> 0.000000 MetaHookPost CallFunction(SumStats::register_observe_plugin, , (SumStats::VARIANCE, anonymous-function{ if (1 < SumStats::rv$num) SumStats::rv$var_s += ((SumStats::val - SumStats::rv$prev_avg) * (SumStats::val - SumStats::rv$average))SumStats::calc_variance(SumStats::rv)SumStats::rv$prev_avg = SumStats::rv$average})) -> 0.000000 MetaHookPost CallFunction(SumStats::register_observe_plugins, , ()) -> -0.000000 MetaHookPost CallFunction(Unified2::mappings_initialized, , ()) -> -0.000000 MetaHookPost CallFunction(Unified2::start_watching, , ()) -> -0.000000 MetaHookPost CallFunction(bro_init, , ()) -> 0.000000 MetaHookPost CallFunction(current_time, , ()) -> 0.000000 MetaHookPost CallFunction(filter_change_tracking, , ()) -> -0.000000 MetaHookPost CallFunction(getenv, , (BRO_DEFAULT_LISTEN_ADDRESS)) -> 0.000000 MetaHookPost CallFunction(getenv, , (CLUSTER_NODE)) -> +0.000000 MetaHookPost CallFunction(getenv, , (ZEEK_DEFAULT_LISTEN_ADDRESS)) -> 0.000000 MetaHookPost CallFunction(global_ids, , ()) -> 0.000000 MetaHookPost CallFunction(network_time, , ()) -> 0.000000 MetaHookPost CallFunction(reading_live_traffic, , ()) -> @@ -574,294 +567,296 @@ 0.000000 MetaHookPost CallFunction(set_to_regex, , ({}, (^\.?|\.)(~~)$)) -> 0.000000 MetaHookPost CallFunction(string_to_pattern, , ((^\.?|\.)()$, F)) -> 0.000000 MetaHookPost CallFunction(sub, , ((^\.?|\.)(~~)$, <...>/, )) -> +0.000000 MetaHookPost CallFunction(zeek_init, , ()) -> 0.000000 MetaHookPost DrainEvents() -> -0.000000 MetaHookPost LoadFile(0, ..<...>/main.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, ..<...>/plugin.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_ARP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_AsciiReader.ascii.bif.bro) -> -1 
-0.000000 MetaHookPost LoadFile(0, .<...>/Bro_AsciiWriter.ascii.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_BackDoor.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_BenchmarkReader.benchmark.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_BinaryReader.binary.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_BitTorrent.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_ConfigReader.config.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_ConnSize.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_ConnSize.functions.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_DCE_RPC.consts.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_DCE_RPC.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_DCE_RPC.types.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_DHCP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_DHCP.types.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_DNP3.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_DNS.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_FTP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_FTP.functions.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_File.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_FileEntropy.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_FileExtract.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_FileExtract.functions.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_FileHash.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_Finger.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_GSSAPI.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_GTPv1.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_Gnutella.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_HTTP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_HTTP.functions.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_ICMP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_IMAP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_IRC.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_Ident.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_InterConn.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_KRB.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_KRB.types.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_Login.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_Login.functions.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_MIME.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_Modbus.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_MySQL.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_NCP.consts.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_NCP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_NTLM.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_NTLM.types.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_NTP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_NetBIOS.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_NetBIOS.functions.bif.bro) -> -1 
-0.000000 MetaHookPost LoadFile(0, .<...>/Bro_NoneWriter.none.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_PE.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_POP3.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_RADIUS.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_RDP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_RDP.types.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_RFB.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_RPC.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_RawReader.raw.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SIP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.consts.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb1_com_check_directory.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb1_com_close.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb1_com_create_directory.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb1_com_echo.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb1_com_logoff_andx.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb1_com_negotiate.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb1_com_nt_cancel.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb1_com_nt_create_andx.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb1_com_query_information.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb1_com_read_andx.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb1_com_session_setup_andx.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb1_com_transaction.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb1_com_transaction2.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb1_com_transaction2_secondary.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb1_com_transaction_secondary.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb1_com_tree_connect_andx.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb1_com_tree_disconnect.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb1_com_write_andx.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb1_events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb2_com_close.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb2_com_create.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb2_com_negotiate.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb2_com_read.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb2_com_session_setup.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb2_com_set_info.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb2_com_transform_header.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb2_com_tree_connect.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb2_com_tree_disconnect.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb2_com_write.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.smb2_events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMB.types.bif.bro) -> -1 -0.000000 MetaHookPost 
LoadFile(0, .<...>/Bro_SMTP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SMTP.functions.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SNMP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SNMP.types.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SOCKS.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SQLiteReader.sqlite.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SQLiteWriter.sqlite.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SSH.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SSH.types.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SSL.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SSL.functions.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SSL.types.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_SteppingStone.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_Syslog.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_TCP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_TCP.functions.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_Teredo.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_UDP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_Unified2.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_Unified2.types.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_VXLAN.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_X509.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_X509.functions.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_X509.ocsp_events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_X509.types.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/Bro_XMPP.events.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/acld.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/add-geodata.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/addrs.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/analyzer.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/ascii.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/average.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/benchmark.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/binary.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/bloom-filter.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/bro.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/broker.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/broxygen.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/cardinality-counter.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/catch-and-release.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/comm.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/config.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/const-dos-error.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/const-nt-status.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/const.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/consts.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/contents.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/ct-list.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/data.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/dcc-send.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/debug.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/drop.bro) 
-> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/email_admin.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/entities.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/event.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/exec.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/file_analysis.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/files.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/gridftp.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/hll_unique.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/hooks.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/inactivity.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/info.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/input.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/input.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/last.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/log.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/logging.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, ..<...>/main.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, ..<...>/plugin.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_ARP.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_AsciiReader.ascii.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_AsciiWriter.ascii.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_BackDoor.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_BenchmarkReader.benchmark.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_BinaryReader.binary.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_BitTorrent.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_ConfigReader.config.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_ConnSize.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_ConnSize.functions.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_DCE_RPC.consts.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_DCE_RPC.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_DCE_RPC.types.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_DHCP.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_DHCP.types.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_DNP3.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_DNS.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_FTP.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_FTP.functions.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_File.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_FileEntropy.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_FileExtract.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_FileExtract.functions.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_FileHash.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_Finger.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_GSSAPI.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_GTPv1.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_Gnutella.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_HTTP.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_HTTP.functions.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_ICMP.events.bif.zeek) -> -1 
+0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_IMAP.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_IRC.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_Ident.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_InterConn.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_KRB.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_KRB.types.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_Login.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_Login.functions.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_MIME.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_Modbus.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_MySQL.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_NCP.consts.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_NCP.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_NTLM.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_NTLM.types.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_NTP.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_NTP.types.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_NetBIOS.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_NetBIOS.functions.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_NoneWriter.none.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_PE.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_POP3.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_RADIUS.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_RDP.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_RDP.types.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_RFB.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_RPC.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_RawReader.raw.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SIP.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.consts.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb1_com_check_directory.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb1_com_close.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb1_com_create_directory.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb1_com_echo.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb1_com_logoff_andx.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb1_com_negotiate.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb1_com_nt_cancel.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb1_com_nt_create_andx.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb1_com_query_information.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb1_com_read_andx.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb1_com_session_setup_andx.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb1_com_transaction.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb1_com_transaction2.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, 
.<...>/Zeek_SMB.smb1_com_transaction2_secondary.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb1_com_transaction_secondary.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb1_com_tree_connect_andx.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb1_com_tree_disconnect.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb1_com_write_andx.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb1_events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb2_com_close.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb2_com_create.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb2_com_negotiate.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb2_com_read.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb2_com_session_setup.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb2_com_set_info.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb2_com_transform_header.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb2_com_tree_connect.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb2_com_tree_disconnect.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb2_com_write.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.smb2_events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMB.types.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMTP.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SMTP.functions.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SNMP.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SNMP.types.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SOCKS.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SQLiteReader.sqlite.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SQLiteWriter.sqlite.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SSH.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SSH.types.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SSL.consts.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SSL.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SSL.functions.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SSL.types.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_SteppingStone.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_Syslog.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_TCP.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_TCP.functions.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_Teredo.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_UDP.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_Unified2.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_Unified2.types.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_VXLAN.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_X509.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_X509.functions.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_X509.ocsp_events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/Zeek_X509.types.bif.zeek) -> -1 +0.000000 MetaHookPost 
LoadFile(0, .<...>/Zeek_XMPP.events.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/acld.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/add-geodata.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/addrs.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/analyzer.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/ascii.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/average.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/benchmark.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/binary.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/bloom-filter.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/broker.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/cardinality-counter.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/comm.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/config.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/const-dos-error.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/const-nt-status.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/const.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/consts.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/contents.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/ct-list.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/data.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/dcc-send.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/debug.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/drop.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/email_admin.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/entities.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/event.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/exec.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/file_analysis.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/files.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/gridftp.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/hll_unique.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/hooks.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/inactivity.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/info.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/input.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/input.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/last.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/log.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/logging.bif.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, .<...>/magic) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/main.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/max.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/messaging.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/min.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/mozilla-ca-list.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/netstats.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/non-cluster.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/none.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/openflow.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/option.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/packetfilter.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/page.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/patterns.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/pcap.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/plugin.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, 
.<...>/main.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/max.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/messaging.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/min.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/mozilla-ca-list.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/netstats.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/non-cluster.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/none.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/openflow.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/option.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/packetfilter.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/page.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/patterns.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/pcap.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/plugin.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, .<...>/plugins) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/polling.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/pools.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/polling.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/pools.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, .<...>/postprocessors) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/pp-alarms.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/raw.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/reporter.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/ryu.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/sample.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/scp.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/sftp.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/shunt.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/site.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/smb1-main.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/smb2-main.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/sqlite.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/stats.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/std-dev.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/store.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/store.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/strings.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/sum.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/thresholds.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/top-k.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/topk.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/types.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/types.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/unique.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/utils-commands.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/utils.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/variance.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, .<...>/weird.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, <...>/__load__.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, <...>/__preload__.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, <...>/hooks.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/Bro_KRB.types.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/Bro_SNMP.types.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/active-http.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/addrs.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/pp-alarms.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/raw.zeek) -> 
-1 +0.000000 MetaHookPost LoadFile(0, .<...>/reporter.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/ryu.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/sample.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/scp.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/sftp.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/shunt.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/site.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/smb1-main.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/smb2-main.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/sqlite.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/stats.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/std-dev.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/store.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/store.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/strings.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/sum.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/thresholds.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/top-k.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/topk.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/types.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/types.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/unique.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/utils-commands.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/utils.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/variance.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/weird.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/zeek.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, .<...>/zeekygen.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, <...>/__load__.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, <...>/__preload__.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, <...>/hooks.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/Zeek_KRB.types.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/Zeek_SNMP.types.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/active-http.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/addrs.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/analyzer) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/analyzer.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/analyzer.bif.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/bif) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/bro.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/broker) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/cluster) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/comm.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/comm.bif.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/config) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/conn) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/conn-ids.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/const.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/conn-ids.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/const.bif.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/control) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/data.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/data.bif.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/dce-rpc) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/dhcp) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/dir.bro) -> -1 -0.000000 MetaHookPost 
LoadFile(0, base<...>/directions-and-hosts.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/dir.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/directions-and-hosts.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/dnp3) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/dns) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/dpd) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/email.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/event.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/exec.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/email.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/event.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/exec.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/extract) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/file_analysis.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/file_analysis.bif.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/files) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/files.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/find-checksum-offloading.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/find-filtered-trace.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/files.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/find-checksum-offloading.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/find-filtered-trace.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/ftp) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/geoip-distance.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/geoip-distance.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/hash) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/hash_hrw.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/hash_hrw.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/http) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/imap) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/init-default.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/init-frameworks-and-bifs.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/init-default.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/init-frameworks-and-bifs.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/input) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/input.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/input.bif.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/intel) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/irc) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/json.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/json.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/krb) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/logging) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/logging.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/main.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/messaging.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/logging.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/main.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/messaging.bif.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/modbus) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/mysql) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/netcontrol) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/notice) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/ntlm) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/numbers.bro) -> -1 +0.000000 
MetaHookPost LoadFile(0, base<...>/ntp) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/numbers.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/openflow) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/option.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/option.bif.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/packet-filter) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/paths.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/patterns.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/paths.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/patterns.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/pe) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/plugins) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/pop3) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/queue.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/queue.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/radius) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/rdp) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/reporter) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/reporter.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/reporter.bif.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/rfb) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/signatures) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/sip) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/site.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/site.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/smb) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/smtp) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/snmp) -> -1 @@ -869,23 +864,23 @@ 0.000000 MetaHookPost LoadFile(0, base<...>/software) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/ssh) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/ssl) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/stats.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/store.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/strings.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/strings.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/stats.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/store.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/strings.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/strings.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/sumstats) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/syslog) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/thresholds.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/time.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/thresholds.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/time.zeek) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/tunnels) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/types.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/unified2) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/urls.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/utils.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/version.bro) -> -1 -0.000000 MetaHookPost LoadFile(0, base<...>/weird.bro) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/types.bif.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/urls.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/utils.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/version.zeek) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/weird.zeek) -> -1 
0.000000 MetaHookPost LoadFile(0, base<...>/x509) -> -1 0.000000 MetaHookPost LoadFile(0, base<...>/xmpp) -> -1 +0.000000 MetaHookPost LoadFile(0, base<...>/zeek.bif.zeek) -> -1 0.000000 MetaHookPost LoadFile(1, .<...>/archive.sig) -> -1 0.000000 MetaHookPost LoadFile(1, .<...>/audio.sig) -> -1 0.000000 MetaHookPost LoadFile(1, .<...>/dpd.sig) -> -1 @@ -898,8 +893,8 @@ 0.000000 MetaHookPost LogInit(Log::WRITER_ASCII, default, true, true, packet_filter(0.0,0.0,0.0), 5, {ts (time), node (string), filter (string), init (bool), success (bool)}) -> 0.000000 MetaHookPost LogWrite(Log::WRITER_ASCII, default, packet_filter(0.0,0.0,0.0), 5, {ts (time), node (string), filter (string), init (bool), success (bool)}, ) -> true 0.000000 MetaHookPost QueueEvent(NetControl::init()) -> false -0.000000 MetaHookPost QueueEvent(bro_init()) -> false 0.000000 MetaHookPost QueueEvent(filter_change_tracking()) -> false +0.000000 MetaHookPost QueueEvent(zeek_init()) -> false 0.000000 MetaHookPre CallFunction(Analyzer::__disable_analyzer, , (Analyzer::ANALYZER_BACKDOOR)) 0.000000 MetaHookPre CallFunction(Analyzer::__disable_analyzer, , (Analyzer::ANALYZER_INTERCONN)) 0.000000 MetaHookPre CallFunction(Analyzer::__disable_analyzer, , (Analyzer::ANALYZER_STEPPINGSTONE)) @@ -939,6 +934,7 @@ 0.000000 MetaHookPre CallFunction(Analyzer::__register_for_port, , (Analyzer::ANALYZER_MODBUS, 502/tcp)) 0.000000 MetaHookPre CallFunction(Analyzer::__register_for_port, , (Analyzer::ANALYZER_MYSQL, 1434/tcp)) 0.000000 MetaHookPre CallFunction(Analyzer::__register_for_port, , (Analyzer::ANALYZER_MYSQL, 3306/tcp)) +0.000000 MetaHookPre CallFunction(Analyzer::__register_for_port, , (Analyzer::ANALYZER_NTP, 123/udp)) 0.000000 MetaHookPre CallFunction(Analyzer::__register_for_port, , (Analyzer::ANALYZER_RADIUS, 1812/udp)) 0.000000 MetaHookPre CallFunction(Analyzer::__register_for_port, , (Analyzer::ANALYZER_RDP, 3389/tcp)) 0.000000 MetaHookPre CallFunction(Analyzer::__register_for_port, , (Analyzer::ANALYZER_SIP, 5060/udp)) @@ -1005,6 +1001,7 @@ 0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, , (Analyzer::ANALYZER_MODBUS, 502/tcp)) 0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, , (Analyzer::ANALYZER_MYSQL, 1434/tcp)) 0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, , (Analyzer::ANALYZER_MYSQL, 3306/tcp)) +0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, , (Analyzer::ANALYZER_NTP, 123/udp)) 0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, , (Analyzer::ANALYZER_RADIUS, 1812/udp)) 0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, , (Analyzer::ANALYZER_RDP, 3389/tcp)) 0.000000 MetaHookPre CallFunction(Analyzer::register_for_port, , (Analyzer::ANALYZER_SIP, 5060/udp)) @@ -1047,6 +1044,7 @@ 0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, , (Analyzer::ANALYZER_KRB_TCP, {88/tcp})) 0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, , (Analyzer::ANALYZER_MODBUS, {502/tcp})) 0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, , (Analyzer::ANALYZER_MYSQL, {1434<...>/tcp})) +0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, , (Analyzer::ANALYZER_NTP, {123/udp})) 0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, , (Analyzer::ANALYZER_RADIUS, {1812/udp})) 0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, , (Analyzer::ANALYZER_RDP, {3389/tcp})) 0.000000 MetaHookPre CallFunction(Analyzer::register_for_ports, , (Analyzer::ANALYZER_SIP, {5060/udp})) @@ -1063,9 +1061,9 @@ 0.000000 
MetaHookPre CallFunction(Cluster::is_enabled, , ()) 0.000000 MetaHookPre CallFunction(Cluster::is_enabled, , ()) 0.000000 MetaHookPre CallFunction(Cluster::local_node_type, , ()) -0.000000 MetaHookPre CallFunction(Cluster::register_pool, , ([topic=bro<...>/logger, node_type=Cluster::LOGGER, max_nodes=, exclusive=F])) -0.000000 MetaHookPre CallFunction(Cluster::register_pool, , ([topic=bro<...>/proxy, node_type=Cluster::PROXY, max_nodes=, exclusive=F])) -0.000000 MetaHookPre CallFunction(Cluster::register_pool, , ([topic=bro<...>/worker, node_type=Cluster::WORKER, max_nodes=, exclusive=F])) +0.000000 MetaHookPre CallFunction(Cluster::register_pool, , ([topic=zeek<...>/logger, node_type=Cluster::LOGGER, max_nodes=, exclusive=F])) +0.000000 MetaHookPre CallFunction(Cluster::register_pool, , ([topic=zeek<...>/proxy, node_type=Cluster::PROXY, max_nodes=, exclusive=F])) +0.000000 MetaHookPre CallFunction(Cluster::register_pool, , ([topic=zeek<...>/worker, node_type=Cluster::WORKER, max_nodes=, exclusive=F])) 0.000000 MetaHookPre CallFunction(Files::register_analyzer_add_callback, , (Files::ANALYZER_EXTRACT, FileExtract::on_add{ if (!FileExtract::args?$extract_filename) FileExtract::args$extract_filename = cat(extract-, FileExtract::f$last_active, -, FileExtract::f$source, -, FileExtract::f$id)FileExtract::f$info$extracted = FileExtract::args$extract_filenameFileExtract::args$extract_filename = build_path_compressed(FileExtract::prefix, FileExtract::args$extract_filename)FileExtract::f$info$extracted_cutoff = Fmkdir(FileExtract::prefix)})) 0.000000 MetaHookPre CallFunction(Files::register_for_mime_type, , (Files::ANALYZER_MD5, application/pkix-cert)) 0.000000 MetaHookPre CallFunction(Files::register_for_mime_type, , (Files::ANALYZER_MD5, application/x-x509-ca-cert)) @@ -1104,7 +1102,7 @@ 0.000000 MetaHookPre CallFunction(Log::__add_filter, , (KRB::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=kerberos, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::__add_filter, , (Modbus::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=modbus, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::__add_filter, , (NTLM::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=ntlm, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::__add_filter, , (NetControl::CATCH_RELEASE, [name=default, writer=Log::WRITER_ASCII, pred=, path=netcontrol_catch_release, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) +0.000000 MetaHookPre CallFunction(Log::__add_filter, , (NTP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=Log::default_path_func{ if ( != Log::path) return (Log::path)Log::id_str = fmt(%s, Log::id)Log::parts = split_string1(Log::id_str, <...>/, )return (cat(to_lower(Log::parts[0]), _, to_lower(Log::parts[1])))}elsereturn (to_lower(Log::id_str))}, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, 
ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::__add_filter, , (NetControl::DROP, [name=default, writer=Log::WRITER_ASCII, pred=, path=netcontrol_drop, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::__add_filter, , (NetControl::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=netcontrol, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::__add_filter, , (NetControl::SHUNT, [name=default, writer=Log::WRITER_ASCII, pred=, path=netcontrol_shunt, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) @@ -1129,7 +1127,6 @@ 0.000000 MetaHookPre CallFunction(Log::__add_filter, , (Software::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=software, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::__add_filter, , (Syslog::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=syslog, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::__add_filter, , (Tunnel::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=tunnel, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::__add_filter, , (Unified2::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=unified2, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::__add_filter, , (Weird::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=weird, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::__add_filter, , (X509::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=x509, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::__add_filter, , (mysql::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=mysql, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) @@ -1150,7 +1147,7 @@ 0.000000 MetaHookPre CallFunction(Log::__create_stream, , (KRB::LOG, [columns=KRB::Info, ev=KRB::log_krb, path=kerberos])) 0.000000 MetaHookPre CallFunction(Log::__create_stream, , (Modbus::LOG, [columns=Modbus::Info, ev=Modbus::log_modbus, path=modbus])) 0.000000 MetaHookPre CallFunction(Log::__create_stream, , 
(NTLM::LOG, [columns=NTLM::Info, ev=, path=ntlm])) -0.000000 MetaHookPre CallFunction(Log::__create_stream, , (NetControl::CATCH_RELEASE, [columns=NetControl::CatchReleaseInfo, ev=NetControl::log_netcontrol_catch_release, path=netcontrol_catch_release])) +0.000000 MetaHookPre CallFunction(Log::__create_stream, , (NTP::LOG, [columns=NTP::Info, ev=NTP::log_ntp, path=])) 0.000000 MetaHookPre CallFunction(Log::__create_stream, , (NetControl::DROP, [columns=NetControl::DropInfo, ev=NetControl::log_netcontrol_drop, path=netcontrol_drop])) 0.000000 MetaHookPre CallFunction(Log::__create_stream, , (NetControl::LOG, [columns=NetControl::Info, ev=NetControl::log_netcontrol, path=netcontrol])) 0.000000 MetaHookPre CallFunction(Log::__create_stream, , (NetControl::SHUNT, [columns=NetControl::ShuntInfo, ev=NetControl::log_netcontrol_shunt, path=netcontrol_shunt])) @@ -1175,11 +1172,10 @@ 0.000000 MetaHookPre CallFunction(Log::__create_stream, , (Software::LOG, [columns=Software::Info, ev=Software::log_software, path=software])) 0.000000 MetaHookPre CallFunction(Log::__create_stream, , (Syslog::LOG, [columns=Syslog::Info, ev=, path=syslog])) 0.000000 MetaHookPre CallFunction(Log::__create_stream, , (Tunnel::LOG, [columns=Tunnel::Info, ev=, path=tunnel])) -0.000000 MetaHookPre CallFunction(Log::__create_stream, , (Unified2::LOG, [columns=Unified2::Info, ev=Unified2::log_unified2, path=unified2])) 0.000000 MetaHookPre CallFunction(Log::__create_stream, , (Weird::LOG, [columns=Weird::Info, ev=Weird::log_weird, path=weird])) 0.000000 MetaHookPre CallFunction(Log::__create_stream, , (X509::LOG, [columns=X509::Info, ev=X509::log_x509, path=x509])) 0.000000 MetaHookPre CallFunction(Log::__create_stream, , (mysql::LOG, [columns=MySQL::Info, ev=MySQL::log_mysql, path=mysql])) -0.000000 MetaHookPre CallFunction(Log::__write, , (PacketFilter::LOG, [ts=1552701731.192609, node=bro, filter=ip or not ip, init=T, success=T])) +0.000000 MetaHookPre CallFunction(Log::__write, , (PacketFilter::LOG, [ts=1560631035.263667, node=zeek, filter=ip or not ip, init=T, success=T])) 0.000000 MetaHookPre CallFunction(Log::add_default_filter, , (Broker::LOG)) 0.000000 MetaHookPre CallFunction(Log::add_default_filter, , (Cluster::LOG)) 0.000000 MetaHookPre CallFunction(Log::add_default_filter, , (Config::LOG)) @@ -1197,7 +1193,7 @@ 0.000000 MetaHookPre CallFunction(Log::add_default_filter, , (KRB::LOG)) 0.000000 MetaHookPre CallFunction(Log::add_default_filter, , (Modbus::LOG)) 0.000000 MetaHookPre CallFunction(Log::add_default_filter, , (NTLM::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, , (NetControl::CATCH_RELEASE)) +0.000000 MetaHookPre CallFunction(Log::add_default_filter, , (NTP::LOG)) 0.000000 MetaHookPre CallFunction(Log::add_default_filter, , (NetControl::DROP)) 0.000000 MetaHookPre CallFunction(Log::add_default_filter, , (NetControl::LOG)) 0.000000 MetaHookPre CallFunction(Log::add_default_filter, , (NetControl::SHUNT)) @@ -1222,7 +1218,6 @@ 0.000000 MetaHookPre CallFunction(Log::add_default_filter, , (Software::LOG)) 0.000000 MetaHookPre CallFunction(Log::add_default_filter, , (Syslog::LOG)) 0.000000 MetaHookPre CallFunction(Log::add_default_filter, , (Tunnel::LOG)) -0.000000 MetaHookPre CallFunction(Log::add_default_filter, , (Unified2::LOG)) 0.000000 MetaHookPre CallFunction(Log::add_default_filter, , (Weird::LOG)) 0.000000 MetaHookPre CallFunction(Log::add_default_filter, , (X509::LOG)) 0.000000 MetaHookPre CallFunction(Log::add_default_filter, , (mysql::LOG)) @@ -1243,7 +1238,7 @@ 0.000000 
MetaHookPre CallFunction(Log::add_filter, , (KRB::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::add_filter, , (Modbus::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::add_filter, , (NTLM::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, , (NetControl::CATCH_RELEASE, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) +0.000000 MetaHookPre CallFunction(Log::add_filter, , (NTP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::add_filter, , (NetControl::DROP, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::add_filter, , (NetControl::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::add_filter, , (NetControl::SHUNT, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) @@ -1268,7 +1263,6 @@ 0.000000 MetaHookPre CallFunction(Log::add_filter, , (Software::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::add_filter, , (Syslog::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::add_filter, , (Tunnel::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) -0.000000 MetaHookPre CallFunction(Log::add_filter, , (Unified2::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., 
ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::add_filter, , (Weird::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::add_filter, , (X509::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) 0.000000 MetaHookPre CallFunction(Log::add_filter, , (mysql::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}])) @@ -1289,7 +1283,7 @@ 0.000000 MetaHookPre CallFunction(Log::add_stream_filters, , (KRB::LOG, default)) 0.000000 MetaHookPre CallFunction(Log::add_stream_filters, , (Modbus::LOG, default)) 0.000000 MetaHookPre CallFunction(Log::add_stream_filters, , (NTLM::LOG, default)) -0.000000 MetaHookPre CallFunction(Log::add_stream_filters, , (NetControl::CATCH_RELEASE, default)) +0.000000 MetaHookPre CallFunction(Log::add_stream_filters, , (NTP::LOG, default)) 0.000000 MetaHookPre CallFunction(Log::add_stream_filters, , (NetControl::DROP, default)) 0.000000 MetaHookPre CallFunction(Log::add_stream_filters, , (NetControl::LOG, default)) 0.000000 MetaHookPre CallFunction(Log::add_stream_filters, , (NetControl::SHUNT, default)) @@ -1314,7 +1308,6 @@ 0.000000 MetaHookPre CallFunction(Log::add_stream_filters, , (Software::LOG, default)) 0.000000 MetaHookPre CallFunction(Log::add_stream_filters, , (Syslog::LOG, default)) 0.000000 MetaHookPre CallFunction(Log::add_stream_filters, , (Tunnel::LOG, default)) -0.000000 MetaHookPre CallFunction(Log::add_stream_filters, , (Unified2::LOG, default)) 0.000000 MetaHookPre CallFunction(Log::add_stream_filters, , (Weird::LOG, default)) 0.000000 MetaHookPre CallFunction(Log::add_stream_filters, , (X509::LOG, default)) 0.000000 MetaHookPre CallFunction(Log::add_stream_filters, , (mysql::LOG, default)) @@ -1335,7 +1328,7 @@ 0.000000 MetaHookPre CallFunction(Log::create_stream, , (KRB::LOG, [columns=KRB::Info, ev=KRB::log_krb, path=kerberos])) 0.000000 MetaHookPre CallFunction(Log::create_stream, , (Modbus::LOG, [columns=Modbus::Info, ev=Modbus::log_modbus, path=modbus])) 0.000000 MetaHookPre CallFunction(Log::create_stream, , (NTLM::LOG, [columns=NTLM::Info, ev=, path=ntlm])) -0.000000 MetaHookPre CallFunction(Log::create_stream, , (NetControl::CATCH_RELEASE, [columns=NetControl::CatchReleaseInfo, ev=NetControl::log_netcontrol_catch_release, path=netcontrol_catch_release])) +0.000000 MetaHookPre CallFunction(Log::create_stream, , (NTP::LOG, [columns=NTP::Info, ev=NTP::log_ntp, path=])) 0.000000 MetaHookPre CallFunction(Log::create_stream, , (NetControl::DROP, [columns=NetControl::DropInfo, ev=NetControl::log_netcontrol_drop, path=netcontrol_drop])) 0.000000 MetaHookPre CallFunction(Log::create_stream, , (NetControl::LOG, [columns=NetControl::Info, ev=NetControl::log_netcontrol, path=netcontrol])) 0.000000 MetaHookPre CallFunction(Log::create_stream, , (NetControl::SHUNT, [columns=NetControl::ShuntInfo, ev=NetControl::log_netcontrol_shunt, path=netcontrol_shunt])) @@ -1360,11 +1353,10 @@ 0.000000 MetaHookPre 
CallFunction(Log::create_stream, , (Software::LOG, [columns=Software::Info, ev=Software::log_software, path=software])) 0.000000 MetaHookPre CallFunction(Log::create_stream, , (Syslog::LOG, [columns=Syslog::Info, ev=, path=syslog])) 0.000000 MetaHookPre CallFunction(Log::create_stream, , (Tunnel::LOG, [columns=Tunnel::Info, ev=, path=tunnel])) -0.000000 MetaHookPre CallFunction(Log::create_stream, , (Unified2::LOG, [columns=Unified2::Info, ev=Unified2::log_unified2, path=unified2])) 0.000000 MetaHookPre CallFunction(Log::create_stream, , (Weird::LOG, [columns=Weird::Info, ev=Weird::log_weird, path=weird])) 0.000000 MetaHookPre CallFunction(Log::create_stream, , (X509::LOG, [columns=X509::Info, ev=X509::log_x509, path=x509])) 0.000000 MetaHookPre CallFunction(Log::create_stream, , (mysql::LOG, [columns=MySQL::Info, ev=MySQL::log_mysql, path=mysql])) -0.000000 MetaHookPre CallFunction(Log::write, , (PacketFilter::LOG, [ts=1552701731.192609, node=bro, filter=ip or not ip, init=T, success=T])) +0.000000 MetaHookPre CallFunction(Log::write, , (PacketFilter::LOG, [ts=1560631035.263667, node=zeek, filter=ip or not ip, init=T, success=T])) 0.000000 MetaHookPre CallFunction(NetControl::check_plugins, , ()) 0.000000 MetaHookPre CallFunction(NetControl::init, , ()) 0.000000 MetaHookPre CallFunction(Notice::want_pp, , ()) @@ -1399,7 +1391,6 @@ 0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Input::default_mode, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100)) 0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Input::default_reader, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100)) 0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (KRB::ignored_errors, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100)) -0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (NetControl::catch_release_warn_blocked_ip_encountered, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100)) 0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (NetControl::default_priority, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, 
Config::log)return (Config::new_value)}, -100)) 0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Notice::alarmed_types, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100)) 0.000000 MetaHookPre CallFunction(Option::set_change_handler, , (Notice::default_suppression_interval, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100)) @@ -1462,13 +1453,10 @@ 0.000000 MetaHookPre CallFunction(SumStats::register_observe_plugin, , (SumStats::UNIQUE, anonymous-function{ if (!SumStats::rv?$unique_vals) SumStats::rv$unique_vals = (coerce set() to set[SumStats::Observation])if (SumStats::r?$unique_max) SumStats::rv$unique_max = SumStats::r$unique_maxif (!SumStats::r?$unique_max || flattenSumStats::rv$unique_vals <= SumStats::r$unique_max) add SumStats::rv$unique_vals[SumStats::obs]SumStats::rv$unique = flattenSumStats::rv$unique_vals})) 0.000000 MetaHookPre CallFunction(SumStats::register_observe_plugin, , (SumStats::VARIANCE, anonymous-function{ if (1 < SumStats::rv$num) SumStats::rv$var_s += ((SumStats::val - SumStats::rv$prev_avg) * (SumStats::val - SumStats::rv$average))SumStats::calc_variance(SumStats::rv)SumStats::rv$prev_avg = SumStats::rv$average})) 0.000000 MetaHookPre CallFunction(SumStats::register_observe_plugins, , ()) -0.000000 MetaHookPre CallFunction(Unified2::mappings_initialized, , ()) -0.000000 MetaHookPre CallFunction(Unified2::start_watching, , ()) -0.000000 MetaHookPre CallFunction(bro_init, , ()) 0.000000 MetaHookPre CallFunction(current_time, , ()) 0.000000 MetaHookPre CallFunction(filter_change_tracking, , ()) -0.000000 MetaHookPre CallFunction(getenv, , (BRO_DEFAULT_LISTEN_ADDRESS)) 0.000000 MetaHookPre CallFunction(getenv, , (CLUSTER_NODE)) +0.000000 MetaHookPre CallFunction(getenv, , (ZEEK_DEFAULT_LISTEN_ADDRESS)) 0.000000 MetaHookPre CallFunction(global_ids, , ()) 0.000000 MetaHookPre CallFunction(network_time, , ()) 0.000000 MetaHookPre CallFunction(reading_live_traffic, , ()) @@ -1476,294 +1464,296 @@ 0.000000 MetaHookPre CallFunction(set_to_regex, , ({}, (^\.?|\.)(~~)$)) 0.000000 MetaHookPre CallFunction(string_to_pattern, , ((^\.?|\.)()$, F)) 0.000000 MetaHookPre CallFunction(sub, , ((^\.?|\.)(~~)$, <...>/, )) +0.000000 MetaHookPre CallFunction(zeek_init, , ()) 0.000000 MetaHookPre DrainEvents() -0.000000 MetaHookPre LoadFile(0, ..<...>/main.bro) -0.000000 MetaHookPre LoadFile(0, ..<...>/plugin.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_ARP.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_AsciiReader.ascii.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_AsciiWriter.ascii.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_BackDoor.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_BenchmarkReader.benchmark.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_BinaryReader.binary.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_BitTorrent.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_ConfigReader.config.bif.bro) 
-0.000000 MetaHookPre LoadFile(0, .<...>/Bro_ConnSize.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_ConnSize.functions.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_DCE_RPC.consts.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_DCE_RPC.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_DCE_RPC.types.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_DHCP.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_DHCP.types.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_DNP3.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_DNS.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_FTP.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_FTP.functions.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_File.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_FileEntropy.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_FileExtract.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_FileExtract.functions.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_FileHash.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_Finger.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_GSSAPI.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_GTPv1.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_Gnutella.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_HTTP.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_HTTP.functions.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_ICMP.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_IMAP.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_IRC.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_Ident.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_InterConn.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_KRB.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_KRB.types.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_Login.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_Login.functions.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_MIME.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_Modbus.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_MySQL.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_NCP.consts.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_NCP.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_NTLM.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_NTLM.types.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_NTP.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_NetBIOS.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_NetBIOS.functions.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_NoneWriter.none.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_PE.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_POP3.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_RADIUS.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_RDP.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_RDP.types.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_RFB.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_RPC.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_RawReader.raw.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SIP.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.consts.bif.bro) -0.000000 MetaHookPre LoadFile(0, 
.<...>/Bro_SMB.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb1_com_check_directory.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb1_com_close.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb1_com_create_directory.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb1_com_echo.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb1_com_logoff_andx.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb1_com_negotiate.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb1_com_nt_cancel.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb1_com_nt_create_andx.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb1_com_query_information.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb1_com_read_andx.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb1_com_session_setup_andx.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb1_com_transaction.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb1_com_transaction2.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb1_com_transaction2_secondary.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb1_com_transaction_secondary.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb1_com_tree_connect_andx.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb1_com_tree_disconnect.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb1_com_write_andx.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb1_events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb2_com_close.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb2_com_create.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb2_com_negotiate.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb2_com_read.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb2_com_session_setup.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb2_com_set_info.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb2_com_transform_header.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb2_com_tree_connect.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb2_com_tree_disconnect.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb2_com_write.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.smb2_events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMB.types.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMTP.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SMTP.functions.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SNMP.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SNMP.types.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SOCKS.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SQLiteReader.sqlite.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SQLiteWriter.sqlite.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SSH.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SSH.types.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SSL.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SSL.functions.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SSL.types.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_SteppingStone.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_Syslog.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_TCP.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_TCP.functions.bif.bro) 
-0.000000 MetaHookPre LoadFile(0, .<...>/Bro_Teredo.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_UDP.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_Unified2.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_Unified2.types.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_VXLAN.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_X509.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_X509.functions.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_X509.ocsp_events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_X509.types.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/Bro_XMPP.events.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/acld.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/add-geodata.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/addrs.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/analyzer.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/ascii.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/average.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/benchmark.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/binary.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/bloom-filter.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/bro.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/broker.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/broxygen.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/cardinality-counter.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/catch-and-release.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/comm.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/config.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/const-dos-error.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/const-nt-status.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/const.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/consts.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/contents.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/ct-list.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/data.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/dcc-send.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/debug.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/drop.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/email_admin.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/entities.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/event.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/exec.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/file_analysis.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/files.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/gridftp.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/hll_unique.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/hooks.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/inactivity.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/info.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/input.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/input.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/last.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/log.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/logging.bif.bro) +0.000000 MetaHookPre LoadFile(0, ..<...>/main.zeek) +0.000000 MetaHookPre LoadFile(0, ..<...>/plugin.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_ARP.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_AsciiReader.ascii.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_AsciiWriter.ascii.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_BackDoor.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_BenchmarkReader.benchmark.bif.zeek) +0.000000 MetaHookPre LoadFile(0, 
.<...>/Zeek_BinaryReader.binary.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_BitTorrent.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_ConfigReader.config.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_ConnSize.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_ConnSize.functions.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_DCE_RPC.consts.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_DCE_RPC.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_DCE_RPC.types.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_DHCP.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_DHCP.types.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_DNP3.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_DNS.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_FTP.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_FTP.functions.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_File.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_FileEntropy.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_FileExtract.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_FileExtract.functions.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_FileHash.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_Finger.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_GSSAPI.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_GTPv1.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_Gnutella.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_HTTP.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_HTTP.functions.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_ICMP.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_IMAP.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_IRC.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_Ident.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_InterConn.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_KRB.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_KRB.types.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_Login.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_Login.functions.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_MIME.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_Modbus.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_MySQL.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_NCP.consts.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_NCP.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_NTLM.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_NTLM.types.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_NTP.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_NTP.types.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_NetBIOS.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_NetBIOS.functions.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_NoneWriter.none.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_PE.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_POP3.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_RADIUS.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_RDP.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_RDP.types.bif.zeek) +0.000000 MetaHookPre 
LoadFile(0, .<...>/Zeek_RFB.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_RPC.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_RawReader.raw.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SIP.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.consts.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb1_com_check_directory.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb1_com_close.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb1_com_create_directory.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb1_com_echo.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb1_com_logoff_andx.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb1_com_negotiate.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb1_com_nt_cancel.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb1_com_nt_create_andx.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb1_com_query_information.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb1_com_read_andx.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb1_com_session_setup_andx.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb1_com_transaction.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb1_com_transaction2.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb1_com_transaction2_secondary.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb1_com_transaction_secondary.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb1_com_tree_connect_andx.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb1_com_tree_disconnect.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb1_com_write_andx.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb1_events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb2_com_close.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb2_com_create.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb2_com_negotiate.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb2_com_read.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb2_com_session_setup.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb2_com_set_info.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb2_com_transform_header.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb2_com_tree_connect.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb2_com_tree_disconnect.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb2_com_write.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.smb2_events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMB.types.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMTP.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SMTP.functions.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SNMP.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SNMP.types.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SOCKS.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SQLiteReader.sqlite.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SQLiteWriter.sqlite.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SSH.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SSH.types.bif.zeek) +0.000000 MetaHookPre LoadFile(0, 
.<...>/Zeek_SSL.consts.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SSL.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SSL.functions.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SSL.types.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_SteppingStone.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_Syslog.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_TCP.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_TCP.functions.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_Teredo.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_UDP.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_Unified2.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_Unified2.types.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_VXLAN.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_X509.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_X509.functions.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_X509.ocsp_events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_X509.types.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/Zeek_XMPP.events.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/acld.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/add-geodata.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/addrs.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/analyzer.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/ascii.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/average.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/benchmark.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/binary.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/bloom-filter.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/broker.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/cardinality-counter.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/comm.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/config.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/const-dos-error.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/const-nt-status.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/const.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/consts.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/contents.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/ct-list.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/data.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/dcc-send.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/debug.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/drop.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/email_admin.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/entities.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/event.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/exec.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/file_analysis.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/files.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/gridftp.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/hll_unique.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/hooks.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/inactivity.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/info.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/input.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/input.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/last.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/log.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/logging.bif.zeek) 0.000000 MetaHookPre LoadFile(0, .<...>/magic) -0.000000 MetaHookPre LoadFile(0, .<...>/main.bro) -0.000000 MetaHookPre LoadFile(0, 
.<...>/max.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/messaging.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/min.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/mozilla-ca-list.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/netstats.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/non-cluster.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/none.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/openflow.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/option.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/packetfilter.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/page.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/patterns.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/pcap.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/plugin.bro) +0.000000 MetaHookPre LoadFile(0, .<...>/main.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/max.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/messaging.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/min.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/mozilla-ca-list.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/netstats.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/non-cluster.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/none.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/openflow.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/option.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/packetfilter.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/page.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/patterns.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/pcap.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/plugin.zeek) 0.000000 MetaHookPre LoadFile(0, .<...>/plugins) -0.000000 MetaHookPre LoadFile(0, .<...>/polling.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/pools.bro) +0.000000 MetaHookPre LoadFile(0, .<...>/polling.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/pools.zeek) 0.000000 MetaHookPre LoadFile(0, .<...>/postprocessors) -0.000000 MetaHookPre LoadFile(0, .<...>/pp-alarms.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/raw.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/reporter.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/ryu.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/sample.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/scp.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/sftp.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/shunt.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/site.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/smb1-main.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/smb2-main.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/sqlite.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/stats.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/std-dev.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/store.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/store.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/strings.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/sum.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/thresholds.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/top-k.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/topk.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/types.bif.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/types.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/unique.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/utils-commands.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/utils.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/variance.bro) -0.000000 MetaHookPre LoadFile(0, .<...>/weird.bro) -0.000000 MetaHookPre LoadFile(0, <...>/__load__.bro) -0.000000 MetaHookPre LoadFile(0, <...>/__preload__.bro) -0.000000 MetaHookPre LoadFile(0, 
<...>/hooks.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/Bro_KRB.types.bif.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/Bro_SNMP.types.bif.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/active-http.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/addrs.bro) +0.000000 MetaHookPre LoadFile(0, .<...>/pp-alarms.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/raw.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/reporter.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/ryu.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/sample.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/scp.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/sftp.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/shunt.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/site.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/smb1-main.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/smb2-main.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/sqlite.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/stats.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/std-dev.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/store.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/store.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/strings.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/sum.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/thresholds.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/top-k.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/topk.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/types.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/types.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/unique.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/utils-commands.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/utils.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/variance.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/weird.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/zeek.bif.zeek) +0.000000 MetaHookPre LoadFile(0, .<...>/zeekygen.bif.zeek) +0.000000 MetaHookPre LoadFile(0, <...>/__load__.zeek) +0.000000 MetaHookPre LoadFile(0, <...>/__preload__.zeek) +0.000000 MetaHookPre LoadFile(0, <...>/hooks.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/Zeek_KRB.types.bif.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/Zeek_SNMP.types.bif.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/active-http.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/addrs.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/analyzer) -0.000000 MetaHookPre LoadFile(0, base<...>/analyzer.bif.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/analyzer.bif.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/bif) -0.000000 MetaHookPre LoadFile(0, base<...>/bro.bif.bro) 0.000000 MetaHookPre LoadFile(0, base<...>/broker) 0.000000 MetaHookPre LoadFile(0, base<...>/cluster) -0.000000 MetaHookPre LoadFile(0, base<...>/comm.bif.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/comm.bif.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/config) 0.000000 MetaHookPre LoadFile(0, base<...>/conn) -0.000000 MetaHookPre LoadFile(0, base<...>/conn-ids.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/const.bif.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/conn-ids.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/const.bif.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/control) -0.000000 MetaHookPre LoadFile(0, base<...>/data.bif.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/data.bif.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/dce-rpc) 0.000000 MetaHookPre LoadFile(0, base<...>/dhcp) -0.000000 MetaHookPre LoadFile(0, base<...>/dir.bro) -0.000000 MetaHookPre LoadFile(0, 
base<...>/directions-and-hosts.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/dir.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/directions-and-hosts.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/dnp3) 0.000000 MetaHookPre LoadFile(0, base<...>/dns) 0.000000 MetaHookPre LoadFile(0, base<...>/dpd) -0.000000 MetaHookPre LoadFile(0, base<...>/email.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/event.bif.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/exec.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/email.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/event.bif.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/exec.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/extract) -0.000000 MetaHookPre LoadFile(0, base<...>/file_analysis.bif.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/file_analysis.bif.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/files) -0.000000 MetaHookPre LoadFile(0, base<...>/files.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/find-checksum-offloading.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/find-filtered-trace.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/files.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/find-checksum-offloading.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/find-filtered-trace.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/ftp) -0.000000 MetaHookPre LoadFile(0, base<...>/geoip-distance.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/geoip-distance.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/hash) -0.000000 MetaHookPre LoadFile(0, base<...>/hash_hrw.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/hash_hrw.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/http) 0.000000 MetaHookPre LoadFile(0, base<...>/imap) -0.000000 MetaHookPre LoadFile(0, base<...>/init-default.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/init-frameworks-and-bifs.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/init-default.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/init-frameworks-and-bifs.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/input) -0.000000 MetaHookPre LoadFile(0, base<...>/input.bif.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/input.bif.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/intel) 0.000000 MetaHookPre LoadFile(0, base<...>/irc) -0.000000 MetaHookPre LoadFile(0, base<...>/json.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/json.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/krb) 0.000000 MetaHookPre LoadFile(0, base<...>/logging) -0.000000 MetaHookPre LoadFile(0, base<...>/logging.bif.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/main.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/messaging.bif.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/logging.bif.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/main.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/messaging.bif.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/modbus) 0.000000 MetaHookPre LoadFile(0, base<...>/mysql) 0.000000 MetaHookPre LoadFile(0, base<...>/netcontrol) 0.000000 MetaHookPre LoadFile(0, base<...>/notice) 0.000000 MetaHookPre LoadFile(0, base<...>/ntlm) -0.000000 MetaHookPre LoadFile(0, base<...>/numbers.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/ntp) +0.000000 MetaHookPre LoadFile(0, base<...>/numbers.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/openflow) -0.000000 MetaHookPre LoadFile(0, base<...>/option.bif.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/option.bif.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/packet-filter) -0.000000 MetaHookPre LoadFile(0, base<...>/paths.bro) -0.000000 
MetaHookPre LoadFile(0, base<...>/patterns.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/paths.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/patterns.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/pe) 0.000000 MetaHookPre LoadFile(0, base<...>/plugins) 0.000000 MetaHookPre LoadFile(0, base<...>/pop3) -0.000000 MetaHookPre LoadFile(0, base<...>/queue.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/queue.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/radius) 0.000000 MetaHookPre LoadFile(0, base<...>/rdp) 0.000000 MetaHookPre LoadFile(0, base<...>/reporter) -0.000000 MetaHookPre LoadFile(0, base<...>/reporter.bif.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/reporter.bif.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/rfb) 0.000000 MetaHookPre LoadFile(0, base<...>/signatures) 0.000000 MetaHookPre LoadFile(0, base<...>/sip) -0.000000 MetaHookPre LoadFile(0, base<...>/site.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/site.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/smb) 0.000000 MetaHookPre LoadFile(0, base<...>/smtp) 0.000000 MetaHookPre LoadFile(0, base<...>/snmp) @@ -1771,23 +1761,23 @@ 0.000000 MetaHookPre LoadFile(0, base<...>/software) 0.000000 MetaHookPre LoadFile(0, base<...>/ssh) 0.000000 MetaHookPre LoadFile(0, base<...>/ssl) -0.000000 MetaHookPre LoadFile(0, base<...>/stats.bif.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/store.bif.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/strings.bif.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/strings.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/stats.bif.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/store.bif.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/strings.bif.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/strings.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/sumstats) 0.000000 MetaHookPre LoadFile(0, base<...>/syslog) -0.000000 MetaHookPre LoadFile(0, base<...>/thresholds.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/time.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/thresholds.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/time.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/tunnels) -0.000000 MetaHookPre LoadFile(0, base<...>/types.bif.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/unified2) -0.000000 MetaHookPre LoadFile(0, base<...>/urls.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/utils.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/version.bro) -0.000000 MetaHookPre LoadFile(0, base<...>/weird.bro) +0.000000 MetaHookPre LoadFile(0, base<...>/types.bif.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/urls.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/utils.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/version.zeek) +0.000000 MetaHookPre LoadFile(0, base<...>/weird.zeek) 0.000000 MetaHookPre LoadFile(0, base<...>/x509) 0.000000 MetaHookPre LoadFile(0, base<...>/xmpp) +0.000000 MetaHookPre LoadFile(0, base<...>/zeek.bif.zeek) 0.000000 MetaHookPre LoadFile(1, .<...>/archive.sig) 0.000000 MetaHookPre LoadFile(1, .<...>/audio.sig) 0.000000 MetaHookPre LoadFile(1, .<...>/dpd.sig) @@ -1800,8 +1790,8 @@ 0.000000 MetaHookPre LogInit(Log::WRITER_ASCII, default, true, true, packet_filter(0.0,0.0,0.0), 5, {ts (time), node (string), filter (string), init (bool), success (bool)}) 0.000000 MetaHookPre LogWrite(Log::WRITER_ASCII, default, packet_filter(0.0,0.0,0.0), 5, {ts (time), node (string), filter (string), init (bool), success (bool)}, ) 0.000000 MetaHookPre QueueEvent(NetControl::init()) -0.000000 MetaHookPre QueueEvent(bro_init()) 0.000000 MetaHookPre 
QueueEvent(filter_change_tracking()) +0.000000 MetaHookPre QueueEvent(zeek_init()) 0.000000 | HookCallFunction Analyzer::__disable_analyzer(Analyzer::ANALYZER_BACKDOOR) 0.000000 | HookCallFunction Analyzer::__disable_analyzer(Analyzer::ANALYZER_INTERCONN) 0.000000 | HookCallFunction Analyzer::__disable_analyzer(Analyzer::ANALYZER_STEPPINGSTONE) @@ -1841,6 +1831,7 @@ 0.000000 | HookCallFunction Analyzer::__register_for_port(Analyzer::ANALYZER_MODBUS, 502/tcp) 0.000000 | HookCallFunction Analyzer::__register_for_port(Analyzer::ANALYZER_MYSQL, 1434/tcp) 0.000000 | HookCallFunction Analyzer::__register_for_port(Analyzer::ANALYZER_MYSQL, 3306/tcp) +0.000000 | HookCallFunction Analyzer::__register_for_port(Analyzer::ANALYZER_NTP, 123/udp) 0.000000 | HookCallFunction Analyzer::__register_for_port(Analyzer::ANALYZER_RADIUS, 1812/udp) 0.000000 | HookCallFunction Analyzer::__register_for_port(Analyzer::ANALYZER_RDP, 3389/tcp) 0.000000 | HookCallFunction Analyzer::__register_for_port(Analyzer::ANALYZER_SIP, 5060/udp) @@ -1907,6 +1898,7 @@ 0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_MODBUS, 502/tcp) 0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_MYSQL, 1434/tcp) 0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_MYSQL, 3306/tcp) +0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_NTP, 123/udp) 0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_RADIUS, 1812/udp) 0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_RDP, 3389/tcp) 0.000000 | HookCallFunction Analyzer::register_for_port(Analyzer::ANALYZER_SIP, 5060/udp) @@ -1949,6 +1941,7 @@ 0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_KRB_TCP, {88/tcp}) 0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_MODBUS, {502/tcp}) 0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_MYSQL, {1434<...>/tcp}) +0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_NTP, {123/udp}) 0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_RADIUS, {1812/udp}) 0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_RDP, {3389/tcp}) 0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_SIP, {5060/udp}) @@ -1964,9 +1957,9 @@ 0.000000 | HookCallFunction Analyzer::register_for_ports(Analyzer::ANALYZER_XMPP, {5222<...>/tcp}) 0.000000 | HookCallFunction Cluster::is_enabled() 0.000000 | HookCallFunction Cluster::local_node_type() -0.000000 | HookCallFunction Cluster::register_pool([topic=bro<...>/logger, node_type=Cluster::LOGGER, max_nodes=, exclusive=F]) -0.000000 | HookCallFunction Cluster::register_pool([topic=bro<...>/proxy, node_type=Cluster::PROXY, max_nodes=, exclusive=F]) -0.000000 | HookCallFunction Cluster::register_pool([topic=bro<...>/worker, node_type=Cluster::WORKER, max_nodes=, exclusive=F]) +0.000000 | HookCallFunction Cluster::register_pool([topic=zeek<...>/logger, node_type=Cluster::LOGGER, max_nodes=, exclusive=F]) +0.000000 | HookCallFunction Cluster::register_pool([topic=zeek<...>/proxy, node_type=Cluster::PROXY, max_nodes=, exclusive=F]) +0.000000 | HookCallFunction Cluster::register_pool([topic=zeek<...>/worker, node_type=Cluster::WORKER, max_nodes=, exclusive=F]) 0.000000 | HookCallFunction Files::register_analyzer_add_callback(Files::ANALYZER_EXTRACT, FileExtract::on_add{ if (!FileExtract::args?$extract_filename) 
FileExtract::args$extract_filename = cat(extract-, FileExtract::f$last_active, -, FileExtract::f$source, -, FileExtract::f$id)FileExtract::f$info$extracted = FileExtract::args$extract_filenameFileExtract::args$extract_filename = build_path_compressed(FileExtract::prefix, FileExtract::args$extract_filename)FileExtract::f$info$extracted_cutoff = Fmkdir(FileExtract::prefix)}) 0.000000 | HookCallFunction Files::register_for_mime_type(Files::ANALYZER_MD5, application/pkix-cert) 0.000000 | HookCallFunction Files::register_for_mime_type(Files::ANALYZER_MD5, application/x-x509-ca-cert) @@ -2005,7 +1998,7 @@ 0.000000 | HookCallFunction Log::__add_filter(KRB::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=kerberos, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | HookCallFunction Log::__add_filter(Modbus::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=modbus, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | HookCallFunction Log::__add_filter(NTLM::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=ntlm, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::__add_filter(NetControl::CATCH_RELEASE, [name=default, writer=Log::WRITER_ASCII, pred=, path=netcontrol_catch_release, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) +0.000000 | HookCallFunction Log::__add_filter(NTP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=Log::default_path_func{ if ( != Log::path) return (Log::path)Log::id_str = fmt(%s, Log::id)Log::parts = split_string1(Log::id_str, <...>/, )return (cat(to_lower(Log::parts[0]), _, to_lower(Log::parts[1])))}elsereturn (to_lower(Log::id_str))}, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | HookCallFunction Log::__add_filter(NetControl::DROP, [name=default, writer=Log::WRITER_ASCII, pred=, path=netcontrol_drop, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | HookCallFunction Log::__add_filter(NetControl::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=netcontrol, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | HookCallFunction Log::__add_filter(NetControl::SHUNT, [name=default, writer=Log::WRITER_ASCII, pred=, path=netcontrol_shunt, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) @@ -2030,7 +2023,6 @@ 0.000000 | HookCallFunction Log::__add_filter(Software::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=software, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, 
ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | HookCallFunction Log::__add_filter(Syslog::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=syslog, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | HookCallFunction Log::__add_filter(Tunnel::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=tunnel, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::__add_filter(Unified2::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=unified2, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | HookCallFunction Log::__add_filter(Weird::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=weird, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | HookCallFunction Log::__add_filter(X509::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=x509, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | HookCallFunction Log::__add_filter(mysql::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=mysql, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) @@ -2051,7 +2043,7 @@ 0.000000 | HookCallFunction Log::__create_stream(KRB::LOG, [columns=KRB::Info, ev=KRB::log_krb, path=kerberos]) 0.000000 | HookCallFunction Log::__create_stream(Modbus::LOG, [columns=Modbus::Info, ev=Modbus::log_modbus, path=modbus]) 0.000000 | HookCallFunction Log::__create_stream(NTLM::LOG, [columns=NTLM::Info, ev=, path=ntlm]) -0.000000 | HookCallFunction Log::__create_stream(NetControl::CATCH_RELEASE, [columns=NetControl::CatchReleaseInfo, ev=NetControl::log_netcontrol_catch_release, path=netcontrol_catch_release]) +0.000000 | HookCallFunction Log::__create_stream(NTP::LOG, [columns=NTP::Info, ev=NTP::log_ntp, path=]) 0.000000 | HookCallFunction Log::__create_stream(NetControl::DROP, [columns=NetControl::DropInfo, ev=NetControl::log_netcontrol_drop, path=netcontrol_drop]) 0.000000 | HookCallFunction Log::__create_stream(NetControl::LOG, [columns=NetControl::Info, ev=NetControl::log_netcontrol, path=netcontrol]) 0.000000 | HookCallFunction Log::__create_stream(NetControl::SHUNT, [columns=NetControl::ShuntInfo, ev=NetControl::log_netcontrol_shunt, path=netcontrol_shunt]) @@ -2076,11 +2068,10 @@ 0.000000 | HookCallFunction Log::__create_stream(Software::LOG, [columns=Software::Info, ev=Software::log_software, path=software]) 0.000000 | HookCallFunction Log::__create_stream(Syslog::LOG, [columns=Syslog::Info, ev=, path=syslog]) 0.000000 | HookCallFunction Log::__create_stream(Tunnel::LOG, [columns=Tunnel::Info, ev=, path=tunnel]) -0.000000 | HookCallFunction Log::__create_stream(Unified2::LOG, [columns=Unified2::Info, ev=Unified2::log_unified2, path=unified2]) 0.000000 | HookCallFunction Log::__create_stream(Weird::LOG, [columns=Weird::Info, 
ev=Weird::log_weird, path=weird]) 0.000000 | HookCallFunction Log::__create_stream(X509::LOG, [columns=X509::Info, ev=X509::log_x509, path=x509]) 0.000000 | HookCallFunction Log::__create_stream(mysql::LOG, [columns=MySQL::Info, ev=MySQL::log_mysql, path=mysql]) -0.000000 | HookCallFunction Log::__write(PacketFilter::LOG, [ts=1552701731.192609, node=bro, filter=ip or not ip, init=T, success=T]) +0.000000 | HookCallFunction Log::__write(PacketFilter::LOG, [ts=1560631035.263667, node=zeek, filter=ip or not ip, init=T, success=T]) 0.000000 | HookCallFunction Log::add_default_filter(Broker::LOG) 0.000000 | HookCallFunction Log::add_default_filter(Cluster::LOG) 0.000000 | HookCallFunction Log::add_default_filter(Config::LOG) @@ -2098,7 +2089,7 @@ 0.000000 | HookCallFunction Log::add_default_filter(KRB::LOG) 0.000000 | HookCallFunction Log::add_default_filter(Modbus::LOG) 0.000000 | HookCallFunction Log::add_default_filter(NTLM::LOG) -0.000000 | HookCallFunction Log::add_default_filter(NetControl::CATCH_RELEASE) +0.000000 | HookCallFunction Log::add_default_filter(NTP::LOG) 0.000000 | HookCallFunction Log::add_default_filter(NetControl::DROP) 0.000000 | HookCallFunction Log::add_default_filter(NetControl::LOG) 0.000000 | HookCallFunction Log::add_default_filter(NetControl::SHUNT) @@ -2123,7 +2114,6 @@ 0.000000 | HookCallFunction Log::add_default_filter(Software::LOG) 0.000000 | HookCallFunction Log::add_default_filter(Syslog::LOG) 0.000000 | HookCallFunction Log::add_default_filter(Tunnel::LOG) -0.000000 | HookCallFunction Log::add_default_filter(Unified2::LOG) 0.000000 | HookCallFunction Log::add_default_filter(Weird::LOG) 0.000000 | HookCallFunction Log::add_default_filter(X509::LOG) 0.000000 | HookCallFunction Log::add_default_filter(mysql::LOG) @@ -2144,7 +2134,7 @@ 0.000000 | HookCallFunction Log::add_filter(KRB::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | HookCallFunction Log::add_filter(Modbus::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | HookCallFunction Log::add_filter(NTLM::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(NetControl::CATCH_RELEASE, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) +0.000000 | HookCallFunction Log::add_filter(NTP::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | HookCallFunction Log::add_filter(NetControl::DROP, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | 
HookCallFunction Log::add_filter(NetControl::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | HookCallFunction Log::add_filter(NetControl::SHUNT, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) @@ -2169,7 +2159,6 @@ 0.000000 | HookCallFunction Log::add_filter(Software::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | HookCallFunction Log::add_filter(Syslog::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | HookCallFunction Log::add_filter(Tunnel::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) -0.000000 | HookCallFunction Log::add_filter(Unified2::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | HookCallFunction Log::add_filter(Weird::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | HookCallFunction Log::add_filter(X509::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) 0.000000 | HookCallFunction Log::add_filter(mysql::LOG, [name=default, writer=Log::WRITER_ASCII, pred=, path=, path_func=, include=, exclude=, log_local=T, log_remote=T, field_name_map={}, scope_sep=., ext_prefix=_, ext_func=anonymous-function, interv=0 secs, postprocessor=, config={}]) @@ -2190,7 +2179,7 @@ 0.000000 | HookCallFunction Log::add_stream_filters(KRB::LOG, default) 0.000000 | HookCallFunction Log::add_stream_filters(Modbus::LOG, default) 0.000000 | HookCallFunction Log::add_stream_filters(NTLM::LOG, default) -0.000000 | HookCallFunction Log::add_stream_filters(NetControl::CATCH_RELEASE, default) +0.000000 | HookCallFunction Log::add_stream_filters(NTP::LOG, default) 0.000000 | HookCallFunction Log::add_stream_filters(NetControl::DROP, default) 0.000000 | HookCallFunction Log::add_stream_filters(NetControl::LOG, default) 0.000000 | HookCallFunction Log::add_stream_filters(NetControl::SHUNT, default) @@ -2215,7 +2204,6 @@ 0.000000 | HookCallFunction Log::add_stream_filters(Software::LOG, default) 0.000000 | HookCallFunction Log::add_stream_filters(Syslog::LOG, default) 0.000000 | HookCallFunction Log::add_stream_filters(Tunnel::LOG, default) -0.000000 | HookCallFunction 
Log::add_stream_filters(Unified2::LOG, default) 0.000000 | HookCallFunction Log::add_stream_filters(Weird::LOG, default) 0.000000 | HookCallFunction Log::add_stream_filters(X509::LOG, default) 0.000000 | HookCallFunction Log::add_stream_filters(mysql::LOG, default) @@ -2236,7 +2224,7 @@ 0.000000 | HookCallFunction Log::create_stream(KRB::LOG, [columns=KRB::Info, ev=KRB::log_krb, path=kerberos]) 0.000000 | HookCallFunction Log::create_stream(Modbus::LOG, [columns=Modbus::Info, ev=Modbus::log_modbus, path=modbus]) 0.000000 | HookCallFunction Log::create_stream(NTLM::LOG, [columns=NTLM::Info, ev=, path=ntlm]) -0.000000 | HookCallFunction Log::create_stream(NetControl::CATCH_RELEASE, [columns=NetControl::CatchReleaseInfo, ev=NetControl::log_netcontrol_catch_release, path=netcontrol_catch_release]) +0.000000 | HookCallFunction Log::create_stream(NTP::LOG, [columns=NTP::Info, ev=NTP::log_ntp, path=]) 0.000000 | HookCallFunction Log::create_stream(NetControl::DROP, [columns=NetControl::DropInfo, ev=NetControl::log_netcontrol_drop, path=netcontrol_drop]) 0.000000 | HookCallFunction Log::create_stream(NetControl::LOG, [columns=NetControl::Info, ev=NetControl::log_netcontrol, path=netcontrol]) 0.000000 | HookCallFunction Log::create_stream(NetControl::SHUNT, [columns=NetControl::ShuntInfo, ev=NetControl::log_netcontrol_shunt, path=netcontrol_shunt]) @@ -2261,11 +2249,10 @@ 0.000000 | HookCallFunction Log::create_stream(Software::LOG, [columns=Software::Info, ev=Software::log_software, path=software]) 0.000000 | HookCallFunction Log::create_stream(Syslog::LOG, [columns=Syslog::Info, ev=, path=syslog]) 0.000000 | HookCallFunction Log::create_stream(Tunnel::LOG, [columns=Tunnel::Info, ev=, path=tunnel]) -0.000000 | HookCallFunction Log::create_stream(Unified2::LOG, [columns=Unified2::Info, ev=Unified2::log_unified2, path=unified2]) 0.000000 | HookCallFunction Log::create_stream(Weird::LOG, [columns=Weird::Info, ev=Weird::log_weird, path=weird]) 0.000000 | HookCallFunction Log::create_stream(X509::LOG, [columns=X509::Info, ev=X509::log_x509, path=x509]) 0.000000 | HookCallFunction Log::create_stream(mysql::LOG, [columns=MySQL::Info, ev=MySQL::log_mysql, path=mysql]) -0.000000 | HookCallFunction Log::write(PacketFilter::LOG, [ts=1552701731.192609, node=bro, filter=ip or not ip, init=T, success=T]) +0.000000 | HookCallFunction Log::write(PacketFilter::LOG, [ts=1560631035.263667, node=zeek, filter=ip or not ip, init=T, success=T]) 0.000000 | HookCallFunction NetControl::check_plugins() 0.000000 | HookCallFunction NetControl::init() 0.000000 | HookCallFunction Notice::want_pp() @@ -2300,7 +2287,6 @@ 0.000000 | HookCallFunction Option::set_change_handler(Input::default_mode, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100) 0.000000 | HookCallFunction Option::set_change_handler(Input::default_reader, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100) 0.000000 | HookCallFunction 
Option::set_change_handler(KRB::ignored_errors, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100) -0.000000 | HookCallFunction Option::set_change_handler(NetControl::catch_release_warn_blocked_ip_encountered, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100) 0.000000 | HookCallFunction Option::set_change_handler(NetControl::default_priority, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100) 0.000000 | HookCallFunction Option::set_change_handler(Notice::alarmed_types, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100) 0.000000 | HookCallFunction Option::set_change_handler(Notice::default_suppression_interval, Config::config_option_changed{ Config::log = (coerce [$ts=network_time(), $id=Config::ID, $old_value=Config::format_value(lookup_ID(Config::ID)), $new_value=Config::format_value(Config::new_value)] to Config::Info)if ( != Config::location) Config::log$location = Config::locationLog::write(Config::LOG, Config::log)return (Config::new_value)}, -100) @@ -2363,13 +2349,10 @@ 0.000000 | HookCallFunction SumStats::register_observe_plugin(SumStats::UNIQUE, anonymous-function{ if (!SumStats::rv?$unique_vals) SumStats::rv$unique_vals = (coerce set() to set[SumStats::Observation])if (SumStats::r?$unique_max) SumStats::rv$unique_max = SumStats::r$unique_maxif (!SumStats::r?$unique_max || flattenSumStats::rv$unique_vals <= SumStats::r$unique_max) add SumStats::rv$unique_vals[SumStats::obs]SumStats::rv$unique = flattenSumStats::rv$unique_vals}) 0.000000 | HookCallFunction SumStats::register_observe_plugin(SumStats::VARIANCE, anonymous-function{ if (1 < SumStats::rv$num) SumStats::rv$var_s += ((SumStats::val - SumStats::rv$prev_avg) * (SumStats::val - SumStats::rv$average))SumStats::calc_variance(SumStats::rv)SumStats::rv$prev_avg = SumStats::rv$average}) 0.000000 | HookCallFunction SumStats::register_observe_plugins() -0.000000 | HookCallFunction Unified2::mappings_initialized() -0.000000 | HookCallFunction Unified2::start_watching() -0.000000 | HookCallFunction bro_init() 0.000000 | HookCallFunction current_time() 0.000000 | HookCallFunction filter_change_tracking() -0.000000 | HookCallFunction getenv(BRO_DEFAULT_LISTEN_ADDRESS) 0.000000 | HookCallFunction getenv(CLUSTER_NODE) +0.000000 | HookCallFunction getenv(ZEEK_DEFAULT_LISTEN_ADDRESS) 0.000000 | HookCallFunction global_ids() 0.000000 | HookCallFunction 
network_time() 0.000000 | HookCallFunction reading_live_traffic() @@ -2377,303 +2360,305 @@ 0.000000 | HookCallFunction set_to_regex({}, (^\.?|\.)(~~)$) 0.000000 | HookCallFunction string_to_pattern((^\.?|\.)()$, F) 0.000000 | HookCallFunction sub((^\.?|\.)(~~)$, <...>/, ) +0.000000 | HookCallFunction zeek_init() 0.000000 | HookDrainEvents -0.000000 | HookLoadFile ..<...>/main.bro -0.000000 | HookLoadFile ..<...>/plugin.bro -0.000000 | HookLoadFile .<...>/Bro_ARP.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_AsciiReader.ascii.bif.bro -0.000000 | HookLoadFile .<...>/Bro_AsciiWriter.ascii.bif.bro -0.000000 | HookLoadFile .<...>/Bro_BackDoor.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_BenchmarkReader.benchmark.bif.bro -0.000000 | HookLoadFile .<...>/Bro_BinaryReader.binary.bif.bro -0.000000 | HookLoadFile .<...>/Bro_BitTorrent.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_ConfigReader.config.bif.bro -0.000000 | HookLoadFile .<...>/Bro_ConnSize.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_ConnSize.functions.bif.bro -0.000000 | HookLoadFile .<...>/Bro_DCE_RPC.consts.bif.bro -0.000000 | HookLoadFile .<...>/Bro_DCE_RPC.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_DCE_RPC.types.bif.bro -0.000000 | HookLoadFile .<...>/Bro_DHCP.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_DHCP.types.bif.bro -0.000000 | HookLoadFile .<...>/Bro_DNP3.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_DNS.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_FTP.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_FTP.functions.bif.bro -0.000000 | HookLoadFile .<...>/Bro_File.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_FileEntropy.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_FileExtract.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_FileExtract.functions.bif.bro -0.000000 | HookLoadFile .<...>/Bro_FileHash.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_Finger.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_GSSAPI.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_GTPv1.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_Gnutella.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_HTTP.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_HTTP.functions.bif.bro -0.000000 | HookLoadFile .<...>/Bro_ICMP.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_IMAP.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_IRC.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_Ident.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_InterConn.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_KRB.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_KRB.types.bif.bro -0.000000 | HookLoadFile .<...>/Bro_Login.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_Login.functions.bif.bro -0.000000 | HookLoadFile .<...>/Bro_MIME.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_Modbus.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_MySQL.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_NCP.consts.bif.bro -0.000000 | HookLoadFile .<...>/Bro_NCP.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_NTLM.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_NTLM.types.bif.bro -0.000000 | HookLoadFile .<...>/Bro_NTP.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_NetBIOS.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_NetBIOS.functions.bif.bro -0.000000 | HookLoadFile .<...>/Bro_NoneWriter.none.bif.bro -0.000000 | HookLoadFile .<...>/Bro_PE.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_POP3.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_RADIUS.events.bif.bro -0.000000 | HookLoadFile 
.<...>/Bro_RDP.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_RDP.types.bif.bro -0.000000 | HookLoadFile .<...>/Bro_RFB.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_RPC.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_RawReader.raw.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SIP.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.consts.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb1_com_check_directory.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb1_com_close.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb1_com_create_directory.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb1_com_echo.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb1_com_logoff_andx.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb1_com_negotiate.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb1_com_nt_cancel.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb1_com_nt_create_andx.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb1_com_query_information.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb1_com_read_andx.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb1_com_session_setup_andx.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb1_com_transaction.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb1_com_transaction2.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb1_com_transaction2_secondary.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb1_com_transaction_secondary.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb1_com_tree_connect_andx.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb1_com_tree_disconnect.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb1_com_write_andx.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb1_events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb2_com_close.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb2_com_create.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb2_com_negotiate.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb2_com_read.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb2_com_session_setup.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb2_com_set_info.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb2_com_transform_header.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb2_com_tree_connect.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb2_com_tree_disconnect.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb2_com_write.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.smb2_events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMB.types.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMTP.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SMTP.functions.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SNMP.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SNMP.types.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SOCKS.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SQLiteReader.sqlite.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SQLiteWriter.sqlite.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SSH.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SSH.types.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SSL.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SSL.functions.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SSL.types.bif.bro -0.000000 | HookLoadFile .<...>/Bro_SteppingStone.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_Syslog.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_TCP.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_TCP.functions.bif.bro -0.000000 | HookLoadFile .<...>/Bro_Teredo.events.bif.bro -0.000000 | HookLoadFile 
.<...>/Bro_UDP.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_Unified2.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_Unified2.types.bif.bro -0.000000 | HookLoadFile .<...>/Bro_VXLAN.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_X509.events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_X509.functions.bif.bro -0.000000 | HookLoadFile .<...>/Bro_X509.ocsp_events.bif.bro -0.000000 | HookLoadFile .<...>/Bro_X509.types.bif.bro -0.000000 | HookLoadFile .<...>/Bro_XMPP.events.bif.bro -0.000000 | HookLoadFile .<...>/acld.bro -0.000000 | HookLoadFile .<...>/add-geodata.bro -0.000000 | HookLoadFile .<...>/addrs.bro -0.000000 | HookLoadFile .<...>/analyzer.bif.bro +0.000000 | HookLoadFile ..<...>/main.zeek +0.000000 | HookLoadFile ..<...>/plugin.zeek +0.000000 | HookLoadFile .<...>/Zeek_ARP.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_AsciiReader.ascii.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_AsciiWriter.ascii.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_BackDoor.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_BenchmarkReader.benchmark.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_BinaryReader.binary.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_BitTorrent.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_ConfigReader.config.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_ConnSize.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_ConnSize.functions.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_DCE_RPC.consts.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_DCE_RPC.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_DCE_RPC.types.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_DHCP.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_DHCP.types.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_DNP3.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_DNS.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_FTP.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_FTP.functions.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_File.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_FileEntropy.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_FileExtract.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_FileExtract.functions.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_FileHash.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_Finger.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_GSSAPI.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_GTPv1.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_Gnutella.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_HTTP.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_HTTP.functions.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_ICMP.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_IMAP.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_IRC.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_Ident.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_InterConn.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_KRB.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_KRB.types.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_Login.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_Login.functions.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_MIME.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_Modbus.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_MySQL.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_NCP.consts.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_NCP.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_NTLM.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_NTLM.types.bif.zeek +0.000000 
| HookLoadFile .<...>/Zeek_NTP.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_NTP.types.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_NetBIOS.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_NetBIOS.functions.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_NoneWriter.none.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_PE.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_POP3.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_RADIUS.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_RDP.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_RDP.types.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_RFB.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_RPC.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_RawReader.raw.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SIP.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.consts.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb1_com_check_directory.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb1_com_close.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb1_com_create_directory.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb1_com_echo.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb1_com_logoff_andx.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb1_com_negotiate.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb1_com_nt_cancel.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb1_com_nt_create_andx.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb1_com_query_information.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb1_com_read_andx.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb1_com_session_setup_andx.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb1_com_transaction.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb1_com_transaction2.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb1_com_transaction2_secondary.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb1_com_transaction_secondary.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb1_com_tree_connect_andx.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb1_com_tree_disconnect.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb1_com_write_andx.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb1_events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb2_com_close.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb2_com_create.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb2_com_negotiate.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb2_com_read.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb2_com_session_setup.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb2_com_set_info.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb2_com_transform_header.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb2_com_tree_connect.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb2_com_tree_disconnect.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb2_com_write.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.smb2_events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMB.types.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMTP.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SMTP.functions.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SNMP.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SNMP.types.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SOCKS.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SQLiteReader.sqlite.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SQLiteWriter.sqlite.bif.zeek +0.000000 | 
HookLoadFile .<...>/Zeek_SSH.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SSH.types.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SSL.consts.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SSL.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SSL.functions.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SSL.types.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_SteppingStone.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_Syslog.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_TCP.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_TCP.functions.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_Teredo.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_UDP.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_Unified2.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_Unified2.types.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_VXLAN.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_X509.events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_X509.functions.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_X509.ocsp_events.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_X509.types.bif.zeek +0.000000 | HookLoadFile .<...>/Zeek_XMPP.events.bif.zeek +0.000000 | HookLoadFile .<...>/acld.zeek +0.000000 | HookLoadFile .<...>/add-geodata.zeek +0.000000 | HookLoadFile .<...>/addrs.zeek +0.000000 | HookLoadFile .<...>/analyzer.bif.zeek 0.000000 | HookLoadFile .<...>/archive.sig -0.000000 | HookLoadFile .<...>/ascii.bro +0.000000 | HookLoadFile .<...>/ascii.zeek 0.000000 | HookLoadFile .<...>/audio.sig -0.000000 | HookLoadFile .<...>/average.bro -0.000000 | HookLoadFile .<...>/benchmark.bro -0.000000 | HookLoadFile .<...>/binary.bro -0.000000 | HookLoadFile .<...>/bloom-filter.bif.bro -0.000000 | HookLoadFile .<...>/bro.bif.bro -0.000000 | HookLoadFile .<...>/broker.bro -0.000000 | HookLoadFile .<...>/broxygen.bif.bro -0.000000 | HookLoadFile .<...>/cardinality-counter.bif.bro -0.000000 | HookLoadFile .<...>/catch-and-release.bro -0.000000 | HookLoadFile .<...>/comm.bif.bro -0.000000 | HookLoadFile .<...>/config.bro -0.000000 | HookLoadFile .<...>/const-dos-error.bro -0.000000 | HookLoadFile .<...>/const-nt-status.bro -0.000000 | HookLoadFile .<...>/const.bif.bro -0.000000 | HookLoadFile .<...>/consts.bro -0.000000 | HookLoadFile .<...>/contents.bro -0.000000 | HookLoadFile .<...>/ct-list.bro -0.000000 | HookLoadFile .<...>/data.bif.bro -0.000000 | HookLoadFile .<...>/dcc-send.bro -0.000000 | HookLoadFile .<...>/debug.bro +0.000000 | HookLoadFile .<...>/average.zeek +0.000000 | HookLoadFile .<...>/benchmark.zeek +0.000000 | HookLoadFile .<...>/binary.zeek +0.000000 | HookLoadFile .<...>/bloom-filter.bif.zeek +0.000000 | HookLoadFile .<...>/broker.zeek +0.000000 | HookLoadFile .<...>/cardinality-counter.bif.zeek +0.000000 | HookLoadFile .<...>/comm.bif.zeek +0.000000 | HookLoadFile .<...>/config.zeek +0.000000 | HookLoadFile .<...>/const-dos-error.zeek +0.000000 | HookLoadFile .<...>/const-nt-status.zeek +0.000000 | HookLoadFile .<...>/const.bif.zeek +0.000000 | HookLoadFile .<...>/consts.zeek +0.000000 | HookLoadFile .<...>/contents.zeek +0.000000 | HookLoadFile .<...>/ct-list.zeek +0.000000 | HookLoadFile .<...>/data.bif.zeek +0.000000 | HookLoadFile .<...>/dcc-send.zeek +0.000000 | HookLoadFile .<...>/debug.zeek 0.000000 | HookLoadFile .<...>/dpd.sig -0.000000 | HookLoadFile .<...>/drop.bro -0.000000 | HookLoadFile .<...>/email_admin.bro -0.000000 | HookLoadFile .<...>/entities.bro -0.000000 | HookLoadFile .<...>/event.bif.bro -0.000000 | HookLoadFile .<...>/exec.bro -0.000000 | HookLoadFile 
.<...>/file_analysis.bif.bro -0.000000 | HookLoadFile .<...>/files.bro +0.000000 | HookLoadFile .<...>/drop.zeek +0.000000 | HookLoadFile .<...>/email_admin.zeek +0.000000 | HookLoadFile .<...>/entities.zeek +0.000000 | HookLoadFile .<...>/event.bif.zeek +0.000000 | HookLoadFile .<...>/exec.zeek +0.000000 | HookLoadFile .<...>/file_analysis.bif.zeek +0.000000 | HookLoadFile .<...>/files.zeek 0.000000 | HookLoadFile .<...>/font.sig 0.000000 | HookLoadFile .<...>/general.sig -0.000000 | HookLoadFile .<...>/gridftp.bro -0.000000 | HookLoadFile .<...>/hll_unique.bro -0.000000 | HookLoadFile .<...>/hooks.bif.bro +0.000000 | HookLoadFile .<...>/gridftp.zeek +0.000000 | HookLoadFile .<...>/hll_unique.zeek +0.000000 | HookLoadFile .<...>/hooks.bif.zeek 0.000000 | HookLoadFile .<...>/image.sig -0.000000 | HookLoadFile .<...>/inactivity.bro -0.000000 | HookLoadFile .<...>/info.bro -0.000000 | HookLoadFile .<...>/input.bif.bro -0.000000 | HookLoadFile .<...>/input.bro -0.000000 | HookLoadFile .<...>/last.bro +0.000000 | HookLoadFile .<...>/inactivity.zeek +0.000000 | HookLoadFile .<...>/info.zeek +0.000000 | HookLoadFile .<...>/input.bif.zeek +0.000000 | HookLoadFile .<...>/input.zeek +0.000000 | HookLoadFile .<...>/last.zeek 0.000000 | HookLoadFile .<...>/libmagic.sig -0.000000 | HookLoadFile .<...>/log.bro -0.000000 | HookLoadFile .<...>/logging.bif.bro +0.000000 | HookLoadFile .<...>/log.zeek +0.000000 | HookLoadFile .<...>/logging.bif.zeek 0.000000 | HookLoadFile .<...>/magic -0.000000 | HookLoadFile .<...>/main.bro -0.000000 | HookLoadFile .<...>/max.bro -0.000000 | HookLoadFile .<...>/messaging.bif.bro -0.000000 | HookLoadFile .<...>/min.bro -0.000000 | HookLoadFile .<...>/mozilla-ca-list.bro +0.000000 | HookLoadFile .<...>/main.zeek +0.000000 | HookLoadFile .<...>/max.zeek +0.000000 | HookLoadFile .<...>/messaging.bif.zeek +0.000000 | HookLoadFile .<...>/min.zeek +0.000000 | HookLoadFile .<...>/mozilla-ca-list.zeek 0.000000 | HookLoadFile .<...>/msoffice.sig -0.000000 | HookLoadFile .<...>/netstats.bro -0.000000 | HookLoadFile .<...>/non-cluster.bro -0.000000 | HookLoadFile .<...>/none.bro -0.000000 | HookLoadFile .<...>/openflow.bro -0.000000 | HookLoadFile .<...>/option.bif.bro -0.000000 | HookLoadFile .<...>/packetfilter.bro -0.000000 | HookLoadFile .<...>/page.bro -0.000000 | HookLoadFile .<...>/patterns.bro -0.000000 | HookLoadFile .<...>/pcap.bif.bro -0.000000 | HookLoadFile .<...>/plugin.bro +0.000000 | HookLoadFile .<...>/netstats.zeek +0.000000 | HookLoadFile .<...>/non-cluster.zeek +0.000000 | HookLoadFile .<...>/none.zeek +0.000000 | HookLoadFile .<...>/openflow.zeek +0.000000 | HookLoadFile .<...>/option.bif.zeek +0.000000 | HookLoadFile .<...>/packetfilter.zeek +0.000000 | HookLoadFile .<...>/page.zeek +0.000000 | HookLoadFile .<...>/patterns.zeek +0.000000 | HookLoadFile .<...>/pcap.bif.zeek +0.000000 | HookLoadFile .<...>/plugin.zeek 0.000000 | HookLoadFile .<...>/plugins -0.000000 | HookLoadFile .<...>/polling.bro -0.000000 | HookLoadFile .<...>/pools.bro +0.000000 | HookLoadFile .<...>/polling.zeek +0.000000 | HookLoadFile .<...>/pools.zeek 0.000000 | HookLoadFile .<...>/postprocessors -0.000000 | HookLoadFile .<...>/pp-alarms.bro -0.000000 | HookLoadFile .<...>/raw.bro -0.000000 | HookLoadFile .<...>/reporter.bif.bro -0.000000 | HookLoadFile .<...>/ryu.bro -0.000000 | HookLoadFile .<...>/sample.bro -0.000000 | HookLoadFile .<...>/scp.bro -0.000000 | HookLoadFile .<...>/sftp.bro -0.000000 | HookLoadFile .<...>/shunt.bro -0.000000 | HookLoadFile .<...>/site.bro -0.000000 | 
HookLoadFile .<...>/smb1-main.bro -0.000000 | HookLoadFile .<...>/smb2-main.bro -0.000000 | HookLoadFile .<...>/sqlite.bro -0.000000 | HookLoadFile .<...>/stats.bif.bro -0.000000 | HookLoadFile .<...>/std-dev.bro -0.000000 | HookLoadFile .<...>/store.bif.bro -0.000000 | HookLoadFile .<...>/store.bro -0.000000 | HookLoadFile .<...>/strings.bif.bro -0.000000 | HookLoadFile .<...>/sum.bro -0.000000 | HookLoadFile .<...>/thresholds.bro -0.000000 | HookLoadFile .<...>/top-k.bif.bro -0.000000 | HookLoadFile .<...>/topk.bro -0.000000 | HookLoadFile .<...>/types.bif.bro -0.000000 | HookLoadFile .<...>/types.bro -0.000000 | HookLoadFile .<...>/unique.bro -0.000000 | HookLoadFile .<...>/utils-commands.bro -0.000000 | HookLoadFile .<...>/utils.bro -0.000000 | HookLoadFile .<...>/variance.bro +0.000000 | HookLoadFile .<...>/pp-alarms.zeek +0.000000 | HookLoadFile .<...>/raw.zeek +0.000000 | HookLoadFile .<...>/reporter.bif.zeek +0.000000 | HookLoadFile .<...>/ryu.zeek +0.000000 | HookLoadFile .<...>/sample.zeek +0.000000 | HookLoadFile .<...>/scp.zeek +0.000000 | HookLoadFile .<...>/sftp.zeek +0.000000 | HookLoadFile .<...>/shunt.zeek +0.000000 | HookLoadFile .<...>/site.zeek +0.000000 | HookLoadFile .<...>/smb1-main.zeek +0.000000 | HookLoadFile .<...>/smb2-main.zeek +0.000000 | HookLoadFile .<...>/sqlite.zeek +0.000000 | HookLoadFile .<...>/stats.bif.zeek +0.000000 | HookLoadFile .<...>/std-dev.zeek +0.000000 | HookLoadFile .<...>/store.bif.zeek +0.000000 | HookLoadFile .<...>/store.zeek +0.000000 | HookLoadFile .<...>/strings.bif.zeek +0.000000 | HookLoadFile .<...>/sum.zeek +0.000000 | HookLoadFile .<...>/thresholds.zeek +0.000000 | HookLoadFile .<...>/top-k.bif.zeek +0.000000 | HookLoadFile .<...>/topk.zeek +0.000000 | HookLoadFile .<...>/types.bif.zeek +0.000000 | HookLoadFile .<...>/types.zeek +0.000000 | HookLoadFile .<...>/unique.zeek +0.000000 | HookLoadFile .<...>/utils-commands.zeek +0.000000 | HookLoadFile .<...>/utils.zeek +0.000000 | HookLoadFile .<...>/variance.zeek 0.000000 | HookLoadFile .<...>/video.sig -0.000000 | HookLoadFile .<...>/weird.bro -0.000000 | HookLoadFile <...>/__load__.bro -0.000000 | HookLoadFile <...>/__preload__.bro -0.000000 | HookLoadFile <...>/hooks.bro -0.000000 | HookLoadFile base<...>/Bro_KRB.types.bif.bro -0.000000 | HookLoadFile base<...>/Bro_SNMP.types.bif.bro -0.000000 | HookLoadFile base<...>/active-http.bro -0.000000 | HookLoadFile base<...>/addrs.bro +0.000000 | HookLoadFile .<...>/weird.zeek +0.000000 | HookLoadFile .<...>/zeek.bif.zeek +0.000000 | HookLoadFile .<...>/zeekygen.bif.zeek +0.000000 | HookLoadFile <...>/__load__.zeek +0.000000 | HookLoadFile <...>/__preload__.zeek +0.000000 | HookLoadFile <...>/hooks.zeek +0.000000 | HookLoadFile base<...>/Zeek_KRB.types.bif.zeek +0.000000 | HookLoadFile base<...>/Zeek_SNMP.types.bif.zeek +0.000000 | HookLoadFile base<...>/active-http.zeek +0.000000 | HookLoadFile base<...>/addrs.zeek 0.000000 | HookLoadFile base<...>/analyzer -0.000000 | HookLoadFile base<...>/analyzer.bif.bro +0.000000 | HookLoadFile base<...>/analyzer.bif.zeek 0.000000 | HookLoadFile base<...>/bif -0.000000 | HookLoadFile base<...>/bro.bif.bro 0.000000 | HookLoadFile base<...>/broker 0.000000 | HookLoadFile base<...>/cluster -0.000000 | HookLoadFile base<...>/comm.bif.bro +0.000000 | HookLoadFile base<...>/comm.bif.zeek 0.000000 | HookLoadFile base<...>/config 0.000000 | HookLoadFile base<...>/conn -0.000000 | HookLoadFile base<...>/conn-ids.bro -0.000000 | HookLoadFile base<...>/const.bif.bro +0.000000 | HookLoadFile 
base<...>/conn-ids.zeek +0.000000 | HookLoadFile base<...>/const.bif.zeek 0.000000 | HookLoadFile base<...>/control -0.000000 | HookLoadFile base<...>/data.bif.bro +0.000000 | HookLoadFile base<...>/data.bif.zeek 0.000000 | HookLoadFile base<...>/dce-rpc 0.000000 | HookLoadFile base<...>/dhcp -0.000000 | HookLoadFile base<...>/dir.bro -0.000000 | HookLoadFile base<...>/directions-and-hosts.bro +0.000000 | HookLoadFile base<...>/dir.zeek +0.000000 | HookLoadFile base<...>/directions-and-hosts.zeek 0.000000 | HookLoadFile base<...>/dnp3 0.000000 | HookLoadFile base<...>/dns 0.000000 | HookLoadFile base<...>/dpd -0.000000 | HookLoadFile base<...>/email.bro -0.000000 | HookLoadFile base<...>/event.bif.bro -0.000000 | HookLoadFile base<...>/exec.bro +0.000000 | HookLoadFile base<...>/email.zeek +0.000000 | HookLoadFile base<...>/event.bif.zeek +0.000000 | HookLoadFile base<...>/exec.zeek 0.000000 | HookLoadFile base<...>/extract -0.000000 | HookLoadFile base<...>/file_analysis.bif.bro +0.000000 | HookLoadFile base<...>/file_analysis.bif.zeek 0.000000 | HookLoadFile base<...>/files -0.000000 | HookLoadFile base<...>/files.bro -0.000000 | HookLoadFile base<...>/find-checksum-offloading.bro -0.000000 | HookLoadFile base<...>/find-filtered-trace.bro +0.000000 | HookLoadFile base<...>/files.zeek +0.000000 | HookLoadFile base<...>/find-checksum-offloading.zeek +0.000000 | HookLoadFile base<...>/find-filtered-trace.zeek 0.000000 | HookLoadFile base<...>/ftp -0.000000 | HookLoadFile base<...>/geoip-distance.bro +0.000000 | HookLoadFile base<...>/geoip-distance.zeek 0.000000 | HookLoadFile base<...>/hash -0.000000 | HookLoadFile base<...>/hash_hrw.bro +0.000000 | HookLoadFile base<...>/hash_hrw.zeek 0.000000 | HookLoadFile base<...>/http 0.000000 | HookLoadFile base<...>/imap -0.000000 | HookLoadFile base<...>/init-default.bro -0.000000 | HookLoadFile base<...>/init-frameworks-and-bifs.bro +0.000000 | HookLoadFile base<...>/init-default.zeek +0.000000 | HookLoadFile base<...>/init-frameworks-and-bifs.zeek 0.000000 | HookLoadFile base<...>/input -0.000000 | HookLoadFile base<...>/input.bif.bro +0.000000 | HookLoadFile base<...>/input.bif.zeek 0.000000 | HookLoadFile base<...>/intel 0.000000 | HookLoadFile base<...>/irc -0.000000 | HookLoadFile base<...>/json.bro +0.000000 | HookLoadFile base<...>/json.zeek 0.000000 | HookLoadFile base<...>/krb 0.000000 | HookLoadFile base<...>/logging -0.000000 | HookLoadFile base<...>/logging.bif.bro -0.000000 | HookLoadFile base<...>/main.bro -0.000000 | HookLoadFile base<...>/messaging.bif.bro +0.000000 | HookLoadFile base<...>/logging.bif.zeek +0.000000 | HookLoadFile base<...>/main.zeek +0.000000 | HookLoadFile base<...>/messaging.bif.zeek 0.000000 | HookLoadFile base<...>/modbus 0.000000 | HookLoadFile base<...>/mysql 0.000000 | HookLoadFile base<...>/netcontrol 0.000000 | HookLoadFile base<...>/notice 0.000000 | HookLoadFile base<...>/ntlm -0.000000 | HookLoadFile base<...>/numbers.bro +0.000000 | HookLoadFile base<...>/ntp +0.000000 | HookLoadFile base<...>/numbers.zeek 0.000000 | HookLoadFile base<...>/openflow -0.000000 | HookLoadFile base<...>/option.bif.bro +0.000000 | HookLoadFile base<...>/option.bif.zeek 0.000000 | HookLoadFile base<...>/packet-filter -0.000000 | HookLoadFile base<...>/paths.bro -0.000000 | HookLoadFile base<...>/patterns.bro +0.000000 | HookLoadFile base<...>/paths.zeek +0.000000 | HookLoadFile base<...>/patterns.zeek 0.000000 | HookLoadFile base<...>/pe 0.000000 | HookLoadFile base<...>/plugins 0.000000 | HookLoadFile base<...>/pop3 
-0.000000 | HookLoadFile base<...>/queue.bro +0.000000 | HookLoadFile base<...>/queue.zeek 0.000000 | HookLoadFile base<...>/radius 0.000000 | HookLoadFile base<...>/rdp 0.000000 | HookLoadFile base<...>/reporter -0.000000 | HookLoadFile base<...>/reporter.bif.bro +0.000000 | HookLoadFile base<...>/reporter.bif.zeek 0.000000 | HookLoadFile base<...>/rfb 0.000000 | HookLoadFile base<...>/signatures 0.000000 | HookLoadFile base<...>/sip -0.000000 | HookLoadFile base<...>/site.bro +0.000000 | HookLoadFile base<...>/site.zeek 0.000000 | HookLoadFile base<...>/smb 0.000000 | HookLoadFile base<...>/smtp 0.000000 | HookLoadFile base<...>/snmp @@ -2681,80 +2666,74 @@ 0.000000 | HookLoadFile base<...>/software 0.000000 | HookLoadFile base<...>/ssh 0.000000 | HookLoadFile base<...>/ssl -0.000000 | HookLoadFile base<...>/stats.bif.bro -0.000000 | HookLoadFile base<...>/store.bif.bro -0.000000 | HookLoadFile base<...>/strings.bif.bro -0.000000 | HookLoadFile base<...>/strings.bro +0.000000 | HookLoadFile base<...>/stats.bif.zeek +0.000000 | HookLoadFile base<...>/store.bif.zeek +0.000000 | HookLoadFile base<...>/strings.bif.zeek +0.000000 | HookLoadFile base<...>/strings.zeek 0.000000 | HookLoadFile base<...>/sumstats 0.000000 | HookLoadFile base<...>/syslog -0.000000 | HookLoadFile base<...>/thresholds.bro -0.000000 | HookLoadFile base<...>/time.bro +0.000000 | HookLoadFile base<...>/thresholds.zeek +0.000000 | HookLoadFile base<...>/time.zeek 0.000000 | HookLoadFile base<...>/tunnels -0.000000 | HookLoadFile base<...>/types.bif.bro -0.000000 | HookLoadFile base<...>/unified2 -0.000000 | HookLoadFile base<...>/urls.bro -0.000000 | HookLoadFile base<...>/utils.bro -0.000000 | HookLoadFile base<...>/version.bro -0.000000 | HookLoadFile base<...>/weird.bro +0.000000 | HookLoadFile base<...>/types.bif.zeek +0.000000 | HookLoadFile base<...>/urls.zeek +0.000000 | HookLoadFile base<...>/utils.zeek +0.000000 | HookLoadFile base<...>/version.zeek +0.000000 | HookLoadFile base<...>/weird.zeek 0.000000 | HookLoadFile base<...>/x509 0.000000 | HookLoadFile base<...>/xmpp +0.000000 | HookLoadFile base<...>/zeek.bif.zeek 0.000000 | HookLogInit packet_filter 1/1 {ts (time), node (string), filter (string), init (bool), success (bool)} -0.000000 | HookLogWrite packet_filter [ts=1552701731.192609, node=bro, filter=ip or not ip, init=T, success=T] +0.000000 | HookLogWrite packet_filter [ts=1560631035.263667, node=zeek, filter=ip or not ip, init=T, success=T] 0.000000 | HookQueueEvent NetControl::init() -0.000000 | HookQueueEvent bro_init() 0.000000 | HookQueueEvent filter_change_tracking() +0.000000 | HookQueueEvent zeek_init() 1362692526.869344 MetaHookPost BroObjDtor() -> 1362692526.869344 MetaHookPost CallFunction(ChecksumOffloading::check, , ()) -> -1362692526.869344 MetaHookPost CallFunction(NetControl::catch_release_seen, , (141.142.228.5)) -> 1362692526.869344 MetaHookPost CallFunction(filter_change_tracking, , ()) -> 1362692526.869344 MetaHookPost CallFunction(get_net_stats, , ()) -> -1362692526.869344 MetaHookPost CallFunction(new_connection, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.0, service={}, history=, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, 
dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> +1362692526.869344 MetaHookPost CallFunction(new_connection, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.0, service={}, history=, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> 1362692526.869344 MetaHookPost DrainEvents() -> 1362692526.869344 MetaHookPost QueueEvent(ChecksumOffloading::check()) -> false 1362692526.869344 MetaHookPost QueueEvent(filter_change_tracking()) -> false -1362692526.869344 MetaHookPost QueueEvent(new_connection([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.0, service={}, history=, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> false +1362692526.869344 MetaHookPost QueueEvent(new_connection([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.0, service={}, history=, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> false 1362692526.869344 MetaHookPost SetupAnalyzerTree(1362692526.869344(1362692526.869344) TCP 141.142.228.5:59856 -> 192.150.187.43:80) -> 1362692526.869344 MetaHookPost UpdateNetworkTime(1362692526.869344) -> 1362692526.869344 MetaHookPre BroObjDtor() 1362692526.869344 MetaHookPre CallFunction(ChecksumOffloading::check, , ()) -1362692526.869344 MetaHookPre CallFunction(NetControl::catch_release_seen, , (141.142.228.5)) 1362692526.869344 MetaHookPre CallFunction(filter_change_tracking, , ()) 1362692526.869344 MetaHookPre CallFunction(get_net_stats, , ()) -1362692526.869344 MetaHookPre CallFunction(new_connection, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, 
duration=0.0, service={}, history=, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) +1362692526.869344 MetaHookPre CallFunction(new_connection, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.0, service={}, history=, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) 1362692526.869344 MetaHookPre DrainEvents() 1362692526.869344 MetaHookPre QueueEvent(ChecksumOffloading::check()) 1362692526.869344 MetaHookPre QueueEvent(filter_change_tracking()) -1362692526.869344 MetaHookPre QueueEvent(new_connection([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.0, service={}, history=, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) +1362692526.869344 MetaHookPre QueueEvent(new_connection([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.0, service={}, history=, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) 1362692526.869344 MetaHookPre SetupAnalyzerTree(1362692526.869344(1362692526.869344) TCP 141.142.228.5:59856 -> 192.150.187.43:80) 1362692526.869344 MetaHookPre UpdateNetworkTime(1362692526.869344) 1362692526.869344 | HookBroObjDtor 1362692526.869344 | HookUpdateNetworkTime 1362692526.869344 1362692526.869344 | HookCallFunction ChecksumOffloading::check() -1362692526.869344 | HookCallFunction NetControl::catch_release_seen(141.142.228.5) 1362692526.869344 | HookCallFunction filter_change_tracking() 1362692526.869344 | HookCallFunction get_net_stats() -1362692526.869344 | HookCallFunction new_connection([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, 
l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.0, service={}, history=, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) +1362692526.869344 | HookCallFunction new_connection([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.0, service={}, history=, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) 1362692526.869344 | HookDrainEvents 1362692526.869344 | HookQueueEvent ChecksumOffloading::check() 1362692526.869344 | HookQueueEvent filter_change_tracking() -1362692526.869344 | HookQueueEvent new_connection([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.0, service={}, history=, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) +1362692526.869344 | HookQueueEvent new_connection([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.0, service={}, history=, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) 1362692526.869344 | HookSetupAnalyzerTree 1362692526.869344(1362692526.869344) TCP 141.142.228.5:59856 -> 192.150.187.43:80 1362692526.869344 | RequestObjDtor ChecksumOffloading::check() -1362692526.939084 MetaHookPost CallFunction(NetControl::catch_release_seen, , (141.142.228.5)) -> -1362692526.939084 MetaHookPost CallFunction(connection_established, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, 
duration=0.06974, service={}, history=Sh, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> +1362692526.939084 MetaHookPost CallFunction(connection_established, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.06974, service={}, history=Sh, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> 1362692526.939084 MetaHookPost DrainEvents() -> -1362692526.939084 MetaHookPost QueueEvent(connection_established([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.06974, service={}, history=Sh, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> false +1362692526.939084 MetaHookPost QueueEvent(connection_established([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.06974, service={}, history=Sh, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> false 1362692526.939084 MetaHookPost UpdateNetworkTime(1362692526.939084) -> -1362692526.939084 MetaHookPre CallFunction(NetControl::catch_release_seen, , (141.142.228.5)) -1362692526.939084 MetaHookPre CallFunction(connection_established, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.06974, service={}, history=Sh, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, 
modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) +1362692526.939084 MetaHookPre CallFunction(connection_established, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.06974, service={}, history=Sh, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) 1362692526.939084 MetaHookPre DrainEvents() -1362692526.939084 MetaHookPre QueueEvent(connection_established([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.06974, service={}, history=Sh, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) +1362692526.939084 MetaHookPre QueueEvent(connection_established([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.06974, service={}, history=Sh, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) 1362692526.939084 MetaHookPre UpdateNetworkTime(1362692526.939084) 1362692526.939084 | HookUpdateNetworkTime 1362692526.939084 -1362692526.939084 | HookCallFunction NetControl::catch_release_seen(141.142.228.5) -1362692526.939084 | HookCallFunction connection_established([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.06974, service={}, history=Sh, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) +1362692526.939084 | HookCallFunction connection_established([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=4, num_pkts=1, 
num_bytes_ip=64, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.06974, service={}, history=Sh, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) 1362692526.939084 | HookDrainEvents -1362692526.939084 | HookQueueEvent connection_established([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.06974, service={}, history=Sh, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) +1362692526.939084 | HookQueueEvent connection_established([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.06974, service={}, history=Sh, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) 1362692526.939378 MetaHookPost DrainEvents() -> 1362692526.939378 MetaHookPost UpdateNetworkTime(1362692526.939378) -> 1362692526.939378 MetaHookPre DrainEvents() @@ -2763,118 +2742,118 @@ 1362692526.939378 | HookDrainEvents 1362692526.939527 MetaHookPost CallFunction(Analyzer::__name, , (Analyzer::ANALYZER_HTTP)) -> 1362692526.939527 MetaHookPost CallFunction(Analyzer::name, , (Analyzer::ANALYZER_HTTP)) -> -1362692526.939527 MetaHookPost CallFunction(HTTP::get_file_handle, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> -1362692526.939527 MetaHookPost CallFunction(HTTP::new_http_session, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, 
duration=0.070183, service={HTTP}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=[pending={}, current_request=1, current_response=0, trans_depth=0], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> -1362692526.939527 MetaHookPost CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> -1362692526.939527 MetaHookPost CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> -1362692526.939527 MetaHookPost CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, referrer=, version=, user_agent=, origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> -1362692526.939527 MetaHookPost CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, referrer=, version=, user_agent=, origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> -1362692526.939527 MetaHookPost CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], 
start_time=1362692526.869344, duration=0.070183, service={HTTP}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=[pending={}, current_request=1, current_response=0, trans_depth=0], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> +1362692526.939527 MetaHookPost CallFunction(HTTP::get_file_handle, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> +1362692526.939527 MetaHookPost CallFunction(HTTP::new_http_session, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={HTTP}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=[pending={}, current_request=1, current_response=0, trans_depth=0], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> +1362692526.939527 MetaHookPost CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> +1362692526.939527 MetaHookPost CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> +1362692526.939527 MetaHookPost CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, referrer=, version=, user_agent=, 
origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> +1362692526.939527 MetaHookPost CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, referrer=, version=, user_agent=, origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> +1362692526.939527 MetaHookPost CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={HTTP}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=[pending={}, current_request=1, current_response=0, trans_depth=0], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> 1362692526.939527 MetaHookPost CallFunction(cat, , (Analyzer::ANALYZER_HTTP, 1362692526.869344, T, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80)) -> 1362692526.939527 MetaHookPost CallFunction(fmt, , (%s:%d > %s:%d, 141.142.228.5, 59856<...>/tcp)) -> 1362692526.939527 MetaHookPost CallFunction(fmt, , (-%s, HTTP)) -> -1362692526.939527 MetaHookPost CallFunction(get_file_handle, , (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> -1362692526.939527 MetaHookPost CallFunction(http_begin_entity, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, referrer=, version=, user_agent=, origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, 
orig_mime_depth=0, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> -1362692526.939527 MetaHookPost CallFunction(http_end_entity, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> +1362692526.939527 MetaHookPost CallFunction(get_file_handle, , (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> +1362692526.939527 MetaHookPost CallFunction(http_begin_entity, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, referrer=, version=, user_agent=, origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> +1362692526.939527 MetaHookPost CallFunction(http_end_entity, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> 1362692526.939527 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/*)) -> 1362692526.939527 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0))) -> -1362692526.939527 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, 
orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, CONNECTION, Keep-Alive)) -> -1362692526.939527 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, HOST, bro.org)) -> -1362692526.939527 MetaHookPost CallFunction(http_message_done, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, [start=1362692526.939527, interrupted=F, finish_msg=message ends normally, body_length=0, content_gap_length=0, header_length=124])) -> +1362692526.939527 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, CONNECTION, Keep-Alive)) -> +1362692526.939527 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, HOST, bro.org)) -> +1362692526.939527 MetaHookPost CallFunction(http_message_done, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, 
orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, [start=1362692526.939527, interrupted=F, finish_msg=message ends normally, body_length=0, content_gap_length=0, header_length=124])) -> 1362692526.939527 MetaHookPost CallFunction(http_request, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, 1.1)) -> 1362692526.939527 MetaHookPost CallFunction(id_string, , ([orig_h=141.142.228.5, orig_p=59856<...>/tcp])) -> 1362692526.939527 MetaHookPost CallFunction(network_time, , ()) -> -1362692526.939527 MetaHookPost CallFunction(protocol_confirmation, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], Analyzer::ANALYZER_HTTP, 3)) -> +1362692526.939527 MetaHookPost CallFunction(protocol_confirmation, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], Analyzer::ANALYZER_HTTP, 3)) -> 1362692526.939527 MetaHookPost CallFunction(set_file_handle, , (Analyzer::ANALYZER_HTTP1362692526.869344T11141.142.228.5:59856 > 192.150.187.43:80)) -> 1362692526.939527 MetaHookPost CallFunction(split_string1, , (bro.org, <...>/)) -> 1362692526.939527 MetaHookPost DrainEvents() -> -1362692526.939527 MetaHookPost QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> false -1362692526.939527 MetaHookPost 
QueueEvent(http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> false -1362692526.939527 MetaHookPost QueueEvent(http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> false +1362692526.939527 MetaHookPost QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> false +1362692526.939527 MetaHookPost QueueEvent(http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> false +1362692526.939527 MetaHookPost QueueEvent(http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, 
dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> false 1362692526.939527 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/*)) -> false 1362692526.939527 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0))) -> false -1362692526.939527 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, CONNECTION, Keep-Alive)) -> false -1362692526.939527 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, HOST, bro.org)) -> false -1362692526.939527 MetaHookPost QueueEvent(http_message_done([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, [start=1362692526.939527, interrupted=F, finish_msg=message ends normally, body_length=0, content_gap_length=0, header_length=124])) -> false +1362692526.939527 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, 
sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, CONNECTION, Keep-Alive)) -> false +1362692526.939527 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, HOST, bro.org)) -> false +1362692526.939527 MetaHookPost QueueEvent(http_message_done([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, [start=1362692526.939527, interrupted=F, finish_msg=message ends normally, body_length=0, content_gap_length=0, header_length=124])) -> false 1362692526.939527 MetaHookPost QueueEvent(http_request([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, 1.1)) -> false -1362692526.939527 MetaHookPost QueueEvent(protocol_confirmation([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], Analyzer::ANALYZER_HTTP, 3)) -> false +1362692526.939527 MetaHookPost QueueEvent(protocol_confirmation([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], Analyzer::ANALYZER_HTTP, 3)) -> false 1362692526.939527 MetaHookPost UpdateNetworkTime(1362692526.939527) -> 1362692526.939527 MetaHookPre 
CallFunction(Analyzer::__name, , (Analyzer::ANALYZER_HTTP)) 1362692526.939527 MetaHookPre CallFunction(Analyzer::name, , (Analyzer::ANALYZER_HTTP)) -1362692526.939527 MetaHookPre CallFunction(HTTP::get_file_handle, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692526.939527 MetaHookPre CallFunction(HTTP::new_http_session, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={HTTP}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=[pending={}, current_request=1, current_response=0, trans_depth=0], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -1362692526.939527 MetaHookPre CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692526.939527 MetaHookPre CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692526.939527 MetaHookPre CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, referrer=, version=, user_agent=, origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, 
ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692526.939527 MetaHookPre CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, referrer=, version=, user_agent=, origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692526.939527 MetaHookPre CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={HTTP}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=[pending={}, current_request=1, current_response=0, trans_depth=0], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) +1362692526.939527 MetaHookPre CallFunction(HTTP::get_file_handle, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) +1362692526.939527 MetaHookPre CallFunction(HTTP::new_http_session, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={HTTP}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=[pending={}, current_request=1, current_response=0, trans_depth=0], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) +1362692526.939527 MetaHookPre CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, 
resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) +1362692526.939527 MetaHookPre CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) +1362692526.939527 MetaHookPre CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, referrer=, version=, user_agent=, origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) +1362692526.939527 MetaHookPre CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, referrer=, version=, user_agent=, origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) +1362692526.939527 MetaHookPre CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={HTTP}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=[pending={}, current_request=1, current_response=0, trans_depth=0], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) 1362692526.939527 MetaHookPre CallFunction(cat, , (Analyzer::ANALYZER_HTTP, 1362692526.869344, T, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80)) 1362692526.939527 MetaHookPre CallFunction(fmt, , (%s:%d > %s:%d, 141.142.228.5, 59856<...>/tcp)) 1362692526.939527 MetaHookPre CallFunction(fmt, , (-%s, HTTP)) -1362692526.939527 MetaHookPre 
CallFunction(get_file_handle, , (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692526.939527 MetaHookPre CallFunction(http_begin_entity, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, referrer=, version=, user_agent=, origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692526.939527 MetaHookPre CallFunction(http_end_entity, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) +1362692526.939527 MetaHookPre CallFunction(get_file_handle, , (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) +1362692526.939527 MetaHookPre CallFunction(http_begin_entity, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, referrer=, version=, user_agent=, origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) +1362692526.939527 MetaHookPre CallFunction(http_end_entity, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, 
request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) 1362692526.939527 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/*)) 1362692526.939527 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0))) -1362692526.939527 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, CONNECTION, Keep-Alive)) -1362692526.939527 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, HOST, bro.org)) -1362692526.939527 MetaHookPre CallFunction(http_message_done, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, [start=1362692526.939527, interrupted=F, finish_msg=message ends normally, body_length=0, content_gap_length=0, header_length=124])) +1362692526.939527 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, 
smtp=, smtp_state=, socks=, ssh=, syslog=], T, CONNECTION, Keep-Alive)) +1362692526.939527 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, HOST, bro.org)) +1362692526.939527 MetaHookPre CallFunction(http_message_done, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, [start=1362692526.939527, interrupted=F, finish_msg=message ends normally, body_length=0, content_gap_length=0, header_length=124])) 1362692526.939527 MetaHookPre CallFunction(http_request, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, 1.1)) 1362692526.939527 MetaHookPre CallFunction(id_string, , ([orig_h=141.142.228.5, orig_p=59856<...>/tcp])) 1362692526.939527 MetaHookPre CallFunction(network_time, , ()) -1362692526.939527 MetaHookPre CallFunction(protocol_confirmation, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], Analyzer::ANALYZER_HTTP, 3)) +1362692526.939527 MetaHookPre CallFunction(protocol_confirmation, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], Analyzer::ANALYZER_HTTP, 3)) 1362692526.939527 MetaHookPre CallFunction(set_file_handle, , 
(Analyzer::ANALYZER_HTTP1362692526.869344T11141.142.228.5:59856 > 192.150.187.43:80)) 1362692526.939527 MetaHookPre CallFunction(split_string1, , (bro.org, <...>/)) 1362692526.939527 MetaHookPre DrainEvents() -1362692526.939527 MetaHookPre QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692526.939527 MetaHookPre QueueEvent(http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692526.939527 MetaHookPre QueueEvent(http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) +1362692526.939527 MetaHookPre QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) +1362692526.939527 MetaHookPre QueueEvent(http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, 
l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) +1362692526.939527 MetaHookPre QueueEvent(http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) 1362692526.939527 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/*)) 1362692526.939527 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0))) -1362692526.939527 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, CONNECTION, Keep-Alive)) -1362692526.939527 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, HOST, bro.org)) -1362692526.939527 MetaHookPre QueueEvent(http_message_done([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, 
radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, [start=1362692526.939527, interrupted=F, finish_msg=message ends normally, body_length=0, content_gap_length=0, header_length=124])) +1362692526.939527 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, CONNECTION, Keep-Alive)) +1362692526.939527 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, HOST, bro.org)) +1362692526.939527 MetaHookPre QueueEvent(http_message_done([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, [start=1362692526.939527, interrupted=F, finish_msg=message ends normally, body_length=0, content_gap_length=0, header_length=124])) 1362692526.939527 MetaHookPre QueueEvent(http_request([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, 1.1)) -1362692526.939527 MetaHookPre QueueEvent(protocol_confirmation([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], Analyzer::ANALYZER_HTTP, 3)) +1362692526.939527 MetaHookPre 
QueueEvent(protocol_confirmation([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], Analyzer::ANALYZER_HTTP, 3)) 1362692526.939527 MetaHookPre UpdateNetworkTime(1362692526.939527) 1362692526.939527 | HookUpdateNetworkTime 1362692526.939527 1362692526.939527 | HookCallFunction Analyzer::__name(Analyzer::ANALYZER_HTTP) 1362692526.939527 | HookCallFunction Analyzer::name(Analyzer::ANALYZER_HTTP) -1362692526.939527 | HookCallFunction HTTP::get_file_handle([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692526.939527 | HookCallFunction HTTP::new_http_session([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={HTTP}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=[pending={}, current_request=1, current_response=0, trans_depth=0], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) -1362692526.939527 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692526.939527 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, 
resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692526.939527 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, referrer=, version=, user_agent=, origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692526.939527 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, referrer=, version=, user_agent=, origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692526.939527 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={HTTP}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=[pending={}, current_request=1, current_response=0, trans_depth=0], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) +1362692526.939527 | HookCallFunction HTTP::get_file_handle([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) +1362692526.939527 | HookCallFunction HTTP::new_http_session([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={HTTP}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, 
inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=[pending={}, current_request=1, current_response=0, trans_depth=0], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) +1362692526.939527 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) +1362692526.939527 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) +1362692526.939527 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, referrer=, version=, user_agent=, origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) +1362692526.939527 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, referrer=, version=, user_agent=, origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) +1362692526.939527 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={HTTP}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, 
extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=[pending={}, current_request=1, current_response=0, trans_depth=0], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) 1362692526.939527 | HookCallFunction cat(Analyzer::ANALYZER_HTTP, 1362692526.869344, T, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80) 1362692526.939527 | HookCallFunction fmt(%s:%d > %s:%d, 141.142.228.5, 59856<...>/tcp) 1362692526.939527 | HookCallFunction fmt(-%s, HTTP) -1362692526.939527 | HookCallFunction get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692526.939527 | HookCallFunction http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, referrer=, version=, user_agent=, origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692526.939527 | HookCallFunction http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) +1362692526.939527 | HookCallFunction get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) +1362692526.939527 | HookCallFunction http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, referrer=, version=, user_agent=, origin=, request_body_len=0, response_body_len=0, 
status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=0, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) +1362692526.939527 | HookCallFunction http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) 1362692526.939527 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/*) 1362692526.939527 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0)) -1362692526.939527 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, CONNECTION, Keep-Alive) -1362692526.939527 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, HOST, bro.org) -1362692526.939527 | HookCallFunction http_message_done([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, [start=1362692526.939527, interrupted=F, finish_msg=message ends normally, body_length=0, content_gap_length=0, header_length=124]) +1362692526.939527 | HookCallFunction 
http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, CONNECTION, Keep-Alive) +1362692526.939527 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, HOST, bro.org) +1362692526.939527 | HookCallFunction http_message_done([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, [start=1362692526.939527, interrupted=F, finish_msg=message ends normally, body_length=0, content_gap_length=0, header_length=124]) 1362692526.939527 | HookCallFunction http_request([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, 1.1) 1362692526.939527 | HookCallFunction id_string([orig_h=141.142.228.5, orig_p=59856<...>/tcp]) 1362692526.939527 | HookCallFunction network_time() -1362692526.939527 | HookCallFunction protocol_confirmation([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], Analyzer::ANALYZER_HTTP, 3) +1362692526.939527 | HookCallFunction protocol_confirmation([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, 
tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], Analyzer::ANALYZER_HTTP, 3) 1362692526.939527 | HookCallFunction set_file_handle(Analyzer::ANALYZER_HTTP1362692526.869344T11141.142.228.5:59856 > 192.150.187.43:80) 1362692526.939527 | HookCallFunction split_string1(bro.org, <...>/) 1362692526.939527 | HookDrainEvents -1362692526.939527 | HookQueueEvent get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692526.939527 | HookQueueEvent http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692526.939527 | HookQueueEvent http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) +1362692526.939527 | HookQueueEvent get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, 
ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) +1362692526.939527 | HookQueueEvent http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) +1362692526.939527 | HookQueueEvent http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) 1362692526.939527 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/*) 1362692526.939527 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0)) -1362692526.939527 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, CONNECTION, Keep-Alive) -1362692526.939527 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, HOST, bro.org) -1362692526.939527 | HookQueueEvent http_message_done([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, 
info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, [start=1362692526.939527, interrupted=F, finish_msg=message ends normally, body_length=0, content_gap_length=0, header_length=124]) +1362692526.939527 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, CONNECTION, Keep-Alive) +1362692526.939527 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, HOST, bro.org) +1362692526.939527 | HookQueueEvent http_message_done([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T, [start=1362692526.939527, interrupted=F, finish_msg=message ends normally, body_length=0, content_gap_length=0, header_length=124]) 1362692526.939527 | HookQueueEvent http_request([id=[orig_h=141.142.228.5, orig_p=59856<...>/CHANGES.bro-aux.txt, 1.1) -1362692526.939527 | HookQueueEvent protocol_confirmation([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, 
dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], Analyzer::ANALYZER_HTTP, 3) +1362692526.939527 | HookQueueEvent protocol_confirmation([id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], orig=[size=136, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=c8:bc:c8:96:d2:a0], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:10:db:88:d2:ef], start_time=1362692526.869344, duration=0.070183, service={}, history=ShAD, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], Analyzer::ANALYZER_HTTP, 3) 1362692527.008509 MetaHookPost DrainEvents() -> 1362692527.008509 MetaHookPost UpdateNetworkTime(1362692527.008509) -> 1362692527.008509 MetaHookPre DrainEvents() @@ -2883,142 +2862,139 @@ 1362692527.008509 | HookDrainEvents 1362692527.009512 MetaHookPost CallFunction(Files::__enable_reassembly, , (FakNcS1Jfe01uljb3)) -> 1362692527.009512 MetaHookPost CallFunction(Files::__set_reassembly_buffer, , (FakNcS1Jfe01uljb3, 524288)) -> -1362692527.009512 MetaHookPost CallFunction(Files::enable_reassembly, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=])) -> -1362692527.009512 MetaHookPost CallFunction(Files::set_info, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, 
total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=, u2_events=])) -> -1362692527.009512 MetaHookPost CallFunction(Files::set_info, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=])) -> -1362692527.009512 MetaHookPost CallFunction(Files::set_reassembly_buffer_size, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=], 524288)) -> +1362692527.009512 MetaHookPost CallFunction(Files::enable_reassembly, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, 
seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=])) -> +1362692527.009512 MetaHookPost CallFunction(Files::set_info, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=])) -> +1362692527.009512 MetaHookPost CallFunction(Files::set_info, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=])) -> +1362692527.009512 MetaHookPost CallFunction(Files::set_reassembly_buffer_size, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, 
missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=], 524288)) -> 1362692527.009512 MetaHookPost CallFunction(HTTP::code_in_range, , (200, 100, 199)) -> -1362692527.009512 MetaHookPost CallFunction(HTTP::get_file_handle, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> -1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> -1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> -1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> +1362692527.009512 MetaHookPost CallFunction(HTTP::get_file_handle, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, 
capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> +1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> +1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> +1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> 1362692527.009512 MetaHookPost CallFunction(cat, , (Analyzer::ANALYZER_HTTP, 1362692526.869344, F, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80)) -> -1362692527.009512 MetaHookPost CallFunction(file_new, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=, u2_events=])) -> -1362692527.009512 
MetaHookPost CallFunction(file_over_new_connection, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> +1362692527.009512 MetaHookPost CallFunction(file_new, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=])) -> +1362692527.009512 MetaHookPost CallFunction(file_over_new_connection, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> 1362692527.009512 MetaHookPost CallFunction(fmt, , (%s:%d > %s:%d, 141.142.228.5, 59856<...>/tcp)) -> -1362692527.009512 MetaHookPost CallFunction(get_file_handle, , (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> -1362692527.009512 MetaHookPost CallFunction(http_begin_entity, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, 
orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> -1362692527.009512 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes)) -> -1362692527.009512 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive)) -> -1362692527.009512 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-LENGTH, 4705)) -> -1362692527.009512 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, DATE, Thu, 07 Mar 2013 21:43:07 GMT)) -> -1362692527.009512 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, 
current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ETAG, "1261-4c870358a6fc0")) -> -1362692527.009512 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, KEEP-ALIVE, timeout=5, max=100)) -> -1362692527.009512 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, LAST-MODIFIED, Wed, 29 Aug 2012 23:49:27 GMT)) -> +1362692527.009512 MetaHookPost CallFunction(get_file_handle, , (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> +1362692527.009512 MetaHookPost CallFunction(http_begin_entity, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> +1362692527.009512 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], 
orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes)) -> +1362692527.009512 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive)) -> +1362692527.009512 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-LENGTH, 4705)) -> +1362692527.009512 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, DATE, Thu, 07 Mar 2013 21:43:07 GMT)) -> +1362692527.009512 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ETAG, "1261-4c870358a6fc0")) -> +1362692527.009512 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, 
resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, KEEP-ALIVE, timeout=5, max=100)) -> +1362692527.009512 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, LAST-MODIFIED, Wed, 29 Aug 2012 23:49:27 GMT)) -> 1362692527.009512 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/2.4.3 (Fedora))) -> 1362692527.009512 MetaHookPost CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain; charset=UTF-8)) -> -1362692527.009512 MetaHookPost CallFunction(http_reply, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK)) -> +1362692527.009512 MetaHookPost CallFunction(http_reply, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK)) -> 1362692527.009512 MetaHookPost CallFunction(id_string, , ([orig_h=141.142.228.5, orig_p=59856<...>/tcp])) -> 1362692527.009512 MetaHookPost CallFunction(set_file_handle, , (Analyzer::ANALYZER_HTTP1362692526.869344F11141.142.228.5:59856 > 192.150.187.43:80)) -> -1362692527.009512 MetaHookPost CallFunction(split_string_all, , (HTTP, <...>/)) -> 1362692527.009512 MetaHookPost DrainEvents() -> -1362692527.009512 MetaHookPost QueueEvent(file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, 
trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=, u2_events=])) -> false -1362692527.009512 MetaHookPost QueueEvent(file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false -1362692527.009512 MetaHookPost QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false -1362692527.009512 MetaHookPost QueueEvent(http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false -1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes)) -> false -1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, 
current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive)) -> false -1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-LENGTH, 4705)) -> false -1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, DATE, Thu, 07 Mar 2013 21:43:07 GMT)) -> false -1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ETAG, "1261-4c870358a6fc0")) -> false -1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, KEEP-ALIVE, timeout=5, max=100)) -> false -1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, 
krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, LAST-MODIFIED, Wed, 29 Aug 2012 23:49:27 GMT)) -> false +1362692527.009512 MetaHookPost QueueEvent(file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=])) -> false +1362692527.009512 MetaHookPost QueueEvent(file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false +1362692527.009512 MetaHookPost QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false +1362692527.009512 MetaHookPost QueueEvent(http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false +1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, 
orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes)) -> false +1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive)) -> false +1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-LENGTH, 4705)) -> false +1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, DATE, Thu, 07 Mar 2013 21:43:07 GMT)) -> false +1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ETAG, "1261-4c870358a6fc0")) -> false +1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, 
orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, KEEP-ALIVE, timeout=5, max=100)) -> false +1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, LAST-MODIFIED, Wed, 29 Aug 2012 23:49:27 GMT)) -> false 1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/2.4.3 (Fedora))) -> false 1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain; charset=UTF-8)) -> false -1362692527.009512 MetaHookPost QueueEvent(http_reply([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK)) -> false +1362692527.009512 MetaHookPost QueueEvent(http_reply([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK)) -> false 1362692527.009512 MetaHookPost UpdateNetworkTime(1362692527.009512) -> 1362692527.009512 MetaHookPre CallFunction(Files::__enable_reassembly, , (FakNcS1Jfe01uljb3)) 1362692527.009512 MetaHookPre CallFunction(Files::__set_reassembly_buffer, , (FakNcS1Jfe01uljb3, 524288)) -1362692527.009512 MetaHookPre CallFunction(Files::enable_reassembly, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, 
smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=])) -1362692527.009512 MetaHookPre CallFunction(Files::set_info, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=, u2_events=])) -1362692527.009512 MetaHookPre CallFunction(Files::set_info, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=])) -1362692527.009512 MetaHookPre CallFunction(Files::set_reassembly_buffer_size, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, 
smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=], 524288)) +1362692527.009512 MetaHookPre CallFunction(Files::enable_reassembly, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=])) +1362692527.009512 MetaHookPre CallFunction(Files::set_info, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=])) +1362692527.009512 MetaHookPre CallFunction(Files::set_info, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, 
syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=])) +1362692527.009512 MetaHookPre CallFunction(Files::set_reassembly_buffer_size, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=], 524288)) 1362692527.009512 MetaHookPre CallFunction(HTTP::code_in_range, , (200, 100, 199)) -1362692527.009512 MetaHookPre CallFunction(HTTP::get_file_handle, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, 
capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) +1362692527.009512 MetaHookPre CallFunction(HTTP::get_file_handle, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) +1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) +1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) +1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, 
resp_mime_depth=0]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) 1362692527.009512 MetaHookPre CallFunction(cat, , (Analyzer::ANALYZER_HTTP, 1362692526.869344, F, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80)) -1362692527.009512 MetaHookPre CallFunction(file_new, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=, u2_events=])) -1362692527.009512 MetaHookPre CallFunction(file_over_new_connection, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) +1362692527.009512 MetaHookPre CallFunction(file_new, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=])) +1362692527.009512 MetaHookPre CallFunction(file_over_new_connection, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, 
trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) 1362692527.009512 MetaHookPre CallFunction(fmt, , (%s:%d > %s:%d, 141.142.228.5, 59856<...>/tcp)) -1362692527.009512 MetaHookPre CallFunction(get_file_handle, , (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009512 MetaHookPre CallFunction(http_begin_entity, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009512 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes)) -1362692527.009512 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive)) -1362692527.009512 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, 
modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-LENGTH, 4705)) -1362692527.009512 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, DATE, Thu, 07 Mar 2013 21:43:07 GMT)) -1362692527.009512 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ETAG, "1261-4c870358a6fc0")) -1362692527.009512 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, KEEP-ALIVE, timeout=5, max=100)) -1362692527.009512 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, LAST-MODIFIED, Wed, 29 Aug 2012 23:49:27 GMT)) +1362692527.009512 MetaHookPre CallFunction(get_file_handle, , (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, 
mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) +1362692527.009512 MetaHookPre CallFunction(http_begin_entity, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) +1362692527.009512 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes)) +1362692527.009512 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive)) +1362692527.009512 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-LENGTH, 4705)) +1362692527.009512 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, 
ssh=, syslog=], F, DATE, Thu, 07 Mar 2013 21:43:07 GMT)) +1362692527.009512 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ETAG, "1261-4c870358a6fc0")) +1362692527.009512 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, KEEP-ALIVE, timeout=5, max=100)) +1362692527.009512 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, LAST-MODIFIED, Wed, 29 Aug 2012 23:49:27 GMT)) 1362692527.009512 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/2.4.3 (Fedora))) 1362692527.009512 MetaHookPre CallFunction(http_header, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain; charset=UTF-8)) -1362692527.009512 MetaHookPre CallFunction(http_reply, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK)) +1362692527.009512 MetaHookPre CallFunction(http_reply, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, 
resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK)) 1362692527.009512 MetaHookPre CallFunction(id_string, , ([orig_h=141.142.228.5, orig_p=59856<...>/tcp])) 1362692527.009512 MetaHookPre CallFunction(set_file_handle, , (Analyzer::ANALYZER_HTTP1362692526.869344F11141.142.228.5:59856 > 192.150.187.43:80)) -1362692527.009512 MetaHookPre CallFunction(split_string_all, , (HTTP, <...>/)) 1362692527.009512 MetaHookPre DrainEvents() -1362692527.009512 MetaHookPre QueueEvent(file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=, u2_events=])) -1362692527.009512 MetaHookPre QueueEvent(file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009512 MetaHookPre QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009512 MetaHookPre QueueEvent(http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, 
smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes)) -1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive)) -1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-LENGTH, 4705)) -1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, DATE, Thu, 07 Mar 2013 21:43:07 GMT)) -1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ETAG, "1261-4c870358a6fc0")) -1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), 
origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, KEEP-ALIVE, timeout=5, max=100)) -1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, LAST-MODIFIED, Wed, 29 Aug 2012 23:49:27 GMT)) +1362692527.009512 MetaHookPre QueueEvent(file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=])) +1362692527.009512 MetaHookPre QueueEvent(file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) +1362692527.009512 MetaHookPre QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, 
smtp=, smtp_state=, socks=, ssh=, syslog=], F)) +1362692527.009512 MetaHookPre QueueEvent(http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) +1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes)) +1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive)) +1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-LENGTH, 4705)) +1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, DATE, Thu, 07 Mar 2013 21:43:07 GMT)) +1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), 
origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ETAG, "1261-4c870358a6fc0")) +1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, KEEP-ALIVE, timeout=5, max=100)) +1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, LAST-MODIFIED, Wed, 29 Aug 2012 23:49:27 GMT)) 1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/2.4.3 (Fedora))) 1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain; charset=UTF-8)) -1362692527.009512 MetaHookPre QueueEvent(http_reply([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK)) +1362692527.009512 MetaHookPre QueueEvent(http_reply([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK)) 1362692527.009512 MetaHookPre 
UpdateNetworkTime(1362692527.009512) 1362692527.009512 | HookUpdateNetworkTime 1362692527.009512 1362692527.009512 | HookCallFunction Files::__enable_reassembly(FakNcS1Jfe01uljb3) 1362692527.009512 | HookCallFunction Files::__set_reassembly_buffer(FakNcS1Jfe01uljb3, 524288) -1362692527.009512 | HookCallFunction Files::enable_reassembly([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=]) -1362692527.009512 | HookCallFunction Files::set_info([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=, u2_events=]) -1362692527.009512 | HookCallFunction Files::set_info([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=, filename=, duration=0 secs, local_orig=, 
is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=]) -1362692527.009512 | HookCallFunction Files::set_reassembly_buffer_size([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=], 524288) +1362692527.009512 | HookCallFunction Files::enable_reassembly([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=]) +1362692527.009512 | HookCallFunction Files::set_info([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, 
last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=]) +1362692527.009512 | HookCallFunction Files::set_info([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=]) +1362692527.009512 | HookCallFunction Files::set_reassembly_buffer_size([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={}, rx_hosts={}, conn_uids={}, source=HTTP, depth=0, analyzers={}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=], 524288) 1362692527.009512 | HookCallFunction HTTP::code_in_range(200, 100, 199) -1362692527.009512 | HookCallFunction HTTP::get_file_handle([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009512 | HookCallFunction 
HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009512 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009512 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) +1362692527.009512 | HookCallFunction HTTP::get_file_handle([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) +1362692527.009512 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) +1362692527.009512 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, 
range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) +1362692527.009512 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) 1362692527.009512 | HookCallFunction cat(Analyzer::ANALYZER_HTTP, 1362692526.869344, F, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80) -1362692527.009512 | HookCallFunction file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=, u2_events=]) -1362692527.009512 | HookCallFunction file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) +1362692527.009512 | HookCallFunction file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, 
last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=]) +1362692527.009512 | HookCallFunction file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) 1362692527.009512 | HookCallFunction fmt(%s:%d > %s:%d, 141.142.228.5, 59856<...>/tcp) -1362692527.009512 | HookCallFunction get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009512 | HookCallFunction http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes) -1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, 
trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive) -1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-LENGTH, 4705) -1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, DATE, Thu, 07 Mar 2013 21:43:07 GMT) -1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ETAG, "1261-4c870358a6fc0") -1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, KEEP-ALIVE, timeout=5, max=100) -1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, 
smtp_state=, socks=, ssh=, syslog=], F, LAST-MODIFIED, Wed, 29 Aug 2012 23:49:27 GMT) +1362692527.009512 | HookCallFunction get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) +1362692527.009512 | HookCallFunction http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) +1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes) +1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive) +1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-LENGTH, 4705) +1362692527.009512 | HookCallFunction 
http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, DATE, Thu, 07 Mar 2013 21:43:07 GMT) +1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ETAG, "1261-4c870358a6fc0") +1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, KEEP-ALIVE, timeout=5, max=100) +1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, LAST-MODIFIED, Wed, 29 Aug 2012 23:49:27 GMT) 1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/2.4.3 (Fedora)) 1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain; charset=UTF-8) -1362692527.009512 | HookCallFunction http_reply([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, 
sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK) +1362692527.009512 | HookCallFunction http_reply([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK) 1362692527.009512 | HookCallFunction id_string([orig_h=141.142.228.5, orig_p=59856<...>/tcp]) 1362692527.009512 | HookCallFunction set_file_handle(Analyzer::ANALYZER_HTTP1362692526.869344F11141.142.228.5:59856 > 192.150.187.43:80) -1362692527.009512 | HookCallFunction split_string_all(HTTP, <...>/) 1362692527.009512 | HookDrainEvents -1362692527.009512 | HookQueueEvent file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=, u2_events=]) -1362692527.009512 | HookQueueEvent file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009512 | HookQueueEvent get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009512 | HookQueueEvent http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, 
response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes) -1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive) -1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-LENGTH, 4705) -1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, DATE, Thu, 07 Mar 2013 21:43:07 GMT) -1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, 
current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ETAG, "1261-4c870358a6fc0") -1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, KEEP-ALIVE, timeout=5, max=100) -1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, LAST-MODIFIED, Wed, 29 Aug 2012 23:49:27 GMT) +1362692527.009512 | HookQueueEvent file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]}, last_active=1362692527.009512, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=]) +1362692527.009512 | HookQueueEvent file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) +1362692527.009512 | HookQueueEvent get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, 
password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) +1362692527.009512 | HookQueueEvent http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) +1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes) +1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive) +1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONTENT-LENGTH, 4705) +1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, 
current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, DATE, Thu, 07 Mar 2013 21:43:07 GMT) +1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ETAG, "1261-4c870358a6fc0") +1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, KEEP-ALIVE, timeout=5, max=100) +1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, LAST-MODIFIED, Wed, 29 Aug 2012 23:49:27 GMT) 1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/2.4.3 (Fedora)) 1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain; charset=UTF-8) -1362692527.009512 | HookQueueEvent http_reply([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK) +1362692527.009512 | HookQueueEvent http_reply([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=, resp_filenames=, resp_mime_types=, current_entity=, 
orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK) 1362692527.009721 MetaHookPost DrainEvents() -> 1362692527.009721 MetaHookPost UpdateNetworkTime(1362692527.009721) -> 1362692527.009721 MetaHookPre DrainEvents() @@ -3031,22 +3007,22 @@ 1362692527.009765 MetaHookPre UpdateNetworkTime(1362692527.009765) 1362692527.009765 | HookUpdateNetworkTime 1362692527.009765 1362692527.009765 | HookDrainEvents -1362692527.009775 MetaHookPost CallFunction(Files::set_info, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=, u2_events=])) -> -1362692527.009775 MetaHookPost CallFunction(Files::set_info, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=, u2_events=])) -> +1362692527.009775 MetaHookPost CallFunction(Files::set_info, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=])) -> +1362692527.009775 MetaHookPost CallFunction(Files::set_info, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=])) -> 1362692527.009775 MetaHookPost CallFunction(HTTP::code_in_range, , (200, 100, 199)) -> -1362692527.009775 MetaHookPost CallFunction(HTTP::get_file_handle, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> -1362692527.009775 MetaHookPost CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> +1362692527.009775 MetaHookPost CallFunction(HTTP::get_file_handle, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> +1362692527.009775 
MetaHookPost CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> 1362692527.009775 MetaHookPost CallFunction(Log::__write, , (Files::LOG, [ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CHhAvVGS1DHFjwGM9}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=262.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=])) -> 1362692527.009775 MetaHookPost CallFunction(Log::__write, , (HTTP::LOG, [ts=1362692526.939527, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1])) -> 1362692527.009775 MetaHookPost CallFunction(Log::write, , (Files::LOG, [ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CHhAvVGS1DHFjwGM9}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=262.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=])) -> 1362692527.009775 MetaHookPost CallFunction(Log::write, , (HTTP::LOG, [ts=1362692526.939527, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1])) -> 1362692527.009775 MetaHookPost CallFunction(cat, , (Analyzer::ANALYZER_HTTP, 1362692526.869344, F, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80)) -> 1362692527.009775 MetaHookPost CallFunction(file_sniff, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], inferred=T])) -> -1362692527.009775 MetaHookPost CallFunction(file_state_remove, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=, u2_events=])) -> +1362692527.009775 MetaHookPost CallFunction(file_state_remove, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=])) -> 1362692527.009775 MetaHookPost CallFunction(fmt, , (%s:%d > %s:%d, 141.142.228.5, 59856<...>/tcp)) -> -1362692527.009775 MetaHookPost CallFunction(get_file_handle, , (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> -1362692527.009775 MetaHookPost CallFunction(http_end_entity, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, 
smtp_state=, socks=, ssh=, syslog=], F)) -> -1362692527.009775 MetaHookPost CallFunction(http_message_done, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, [start=1362692527.009512, interrupted=F, finish_msg=message ends normally, body_length=4705, content_gap_length=0, header_length=280])) -> +1362692527.009775 MetaHookPost CallFunction(get_file_handle, , (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> +1362692527.009775 MetaHookPost CallFunction(http_end_entity, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> +1362692527.009775 MetaHookPost CallFunction(http_message_done, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, [start=1362692527.009512, interrupted=F, finish_msg=message ends normally, body_length=4705, content_gap_length=0, header_length=280])) -> 1362692527.009775 MetaHookPost CallFunction(id_string, , ([orig_h=141.142.228.5, orig_p=59856<...>/tcp])) -> 1362692527.009775 MetaHookPost CallFunction(set_file_handle, , (Analyzer::ANALYZER_HTTP1362692526.869344F11141.142.228.5:59856 > 192.150.187.43:80)) -> 1362692527.009775 MetaHookPost DrainEvents() -> @@ -3055,27 +3031,27 @@ 1362692527.009775 MetaHookPost LogWrite(Log::WRITER_ASCII, default, files(1362692527.009775,0.0,0.0), 25, {ts (time), fuid (string), tx_hosts (set[addr]), rx_hosts (set[addr]), conn_uids (set[string]), source (string), depth (count), analyzers (set[string]), mime_type (string), filename (string), duration (interval), local_orig (bool), is_orig (bool), seen_bytes (count), total_bytes (count), missing_bytes (count), overflow_bytes (count), timedout (bool), parent_fuid (string), md5 (string), sha1 (string), sha256 (string), extracted (string), extracted_cutoff (bool), extracted_size (count)}, ) -> true 1362692527.009775 MetaHookPost LogWrite(Log::WRITER_ASCII, default, http(1362692527.009775,0.0,0.0), 30, {ts (time), uid (string), id.orig_h (addr), id.orig_p (port), id.resp_h (addr), id.resp_p (port), trans_depth (count), method (string), host (string), uri (string), referrer (string), version (string), user_agent (string), origin (string), request_body_len (count), response_body_len (count), status_code (count), status_msg (string), info_code (count), info_msg (string), tags (set[enum]), username (string), password (string), proxied (set[string]), orig_fuids (vector[string]), orig_filenames (vector[string]), orig_mime_types (vector[string]), resp_fuids (vector[string]), resp_filenames (vector[string]), resp_mime_types 
(vector[string])}, ) -> true 1362692527.009775 MetaHookPost QueueEvent(file_sniff([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], inferred=T])) -> false -1362692527.009775 MetaHookPost QueueEvent(file_state_remove([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=, u2_events=])) -> false -1362692527.009775 MetaHookPost QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false -1362692527.009775 MetaHookPost QueueEvent(http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false -1362692527.009775 MetaHookPost QueueEvent(http_message_done([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, [start=1362692527.009512, interrupted=F, finish_msg=message ends normally, body_length=4705, content_gap_length=0, header_length=280])) -> false +1362692527.009775 MetaHookPost QueueEvent(file_state_remove([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=])) -> false +1362692527.009775 MetaHookPost QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false +1362692527.009775 MetaHookPost QueueEvent(http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false +1362692527.009775 MetaHookPost QueueEvent(http_message_done([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, [start=1362692527.009512, interrupted=F, finish_msg=message ends normally, body_length=4705, content_gap_length=0, header_length=280])) -> false 1362692527.009775 MetaHookPost UpdateNetworkTime(1362692527.009775) -> -1362692527.009775 MetaHookPre CallFunction(Files::set_info, , 
([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=, u2_events=])) -1362692527.009775 MetaHookPre CallFunction(Files::set_info, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=, u2_events=])) +1362692527.009775 MetaHookPre CallFunction(Files::set_info, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=])) +1362692527.009775 MetaHookPre CallFunction(Files::set_info, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=])) 1362692527.009775 MetaHookPre CallFunction(HTTP::code_in_range, , (200, 100, 199)) -1362692527.009775 MetaHookPre CallFunction(HTTP::get_file_handle, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009775 MetaHookPre CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) +1362692527.009775 MetaHookPre CallFunction(HTTP::get_file_handle, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) +1362692527.009775 MetaHookPre CallFunction(HTTP::set_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) 1362692527.009775 MetaHookPre CallFunction(Log::__write, , (Files::LOG, [ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CHhAvVGS1DHFjwGM9}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=262.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, 
total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=])) 1362692527.009775 MetaHookPre CallFunction(Log::__write, , (HTTP::LOG, [ts=1362692526.939527, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1])) 1362692527.009775 MetaHookPre CallFunction(Log::write, , (Files::LOG, [ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CHhAvVGS1DHFjwGM9}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=262.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=])) 1362692527.009775 MetaHookPre CallFunction(Log::write, , (HTTP::LOG, [ts=1362692526.939527, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1])) 1362692527.009775 MetaHookPre CallFunction(cat, , (Analyzer::ANALYZER_HTTP, 1362692526.869344, F, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80)) 1362692527.009775 MetaHookPre CallFunction(file_sniff, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], inferred=T])) -1362692527.009775 MetaHookPre CallFunction(file_state_remove, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=, u2_events=])) +1362692527.009775 MetaHookPre CallFunction(file_state_remove, , ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=])) 1362692527.009775 MetaHookPre CallFunction(fmt, , (%s:%d > %s:%d, 141.142.228.5, 59856<...>/tcp)) -1362692527.009775 MetaHookPre CallFunction(get_file_handle, , (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009775 MetaHookPre CallFunction(http_end_entity, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009775 MetaHookPre CallFunction(http_message_done, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, [start=1362692527.009512, interrupted=F, finish_msg=message ends normally, body_length=4705, content_gap_length=0, header_length=280])) +1362692527.009775 MetaHookPre CallFunction(get_file_handle, , (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, 
current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) +1362692527.009775 MetaHookPre CallFunction(http_end_entity, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) +1362692527.009775 MetaHookPre CallFunction(http_message_done, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, [start=1362692527.009512, interrupted=F, finish_msg=message ends normally, body_length=4705, content_gap_length=0, header_length=280])) 1362692527.009775 MetaHookPre CallFunction(id_string, , ([orig_h=141.142.228.5, orig_p=59856<...>/tcp])) 1362692527.009775 MetaHookPre CallFunction(set_file_handle, , (Analyzer::ANALYZER_HTTP1362692526.869344F11141.142.228.5:59856 > 192.150.187.43:80)) 1362692527.009775 MetaHookPre DrainEvents() @@ -3084,28 +3060,28 @@ 1362692527.009775 MetaHookPre LogWrite(Log::WRITER_ASCII, default, files(1362692527.009775,0.0,0.0), 25, {ts (time), fuid (string), tx_hosts (set[addr]), rx_hosts (set[addr]), conn_uids (set[string]), source (string), depth (count), analyzers (set[string]), mime_type (string), filename (string), duration (interval), local_orig (bool), is_orig (bool), seen_bytes (count), total_bytes (count), missing_bytes (count), overflow_bytes (count), timedout (bool), parent_fuid (string), md5 (string), sha1 (string), sha256 (string), extracted (string), extracted_cutoff (bool), extracted_size (count)}, ) 1362692527.009775 MetaHookPre LogWrite(Log::WRITER_ASCII, default, http(1362692527.009775,0.0,0.0), 30, {ts (time), uid (string), id.orig_h (addr), id.orig_p (port), id.resp_h (addr), id.resp_p (port), trans_depth (count), method (string), host (string), uri (string), referrer (string), version (string), user_agent (string), origin (string), request_body_len (count), response_body_len (count), status_code (count), status_msg (string), info_code (count), info_msg (string), tags (set[enum]), username (string), password (string), proxied (set[string]), orig_fuids (vector[string]), orig_filenames (vector[string]), orig_mime_types (vector[string]), resp_fuids (vector[string]), resp_filenames (vector[string]), resp_mime_types (vector[string])}, ) 1362692527.009775 MetaHookPre QueueEvent(file_sniff([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], inferred=T])) -1362692527.009775 MetaHookPre QueueEvent(file_state_remove([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=, u2_events=])) -1362692527.009775 MetaHookPre QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, 
sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009775 MetaHookPre QueueEvent(http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009775 MetaHookPre QueueEvent(http_message_done([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, [start=1362692527.009512, interrupted=F, finish_msg=message ends normally, body_length=4705, content_gap_length=0, header_length=280])) +1362692527.009775 MetaHookPre QueueEvent(file_state_remove([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=])) +1362692527.009775 MetaHookPre QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) +1362692527.009775 MetaHookPre QueueEvent(http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) +1362692527.009775 MetaHookPre QueueEvent(http_message_done([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, [start=1362692527.009512, interrupted=F, finish_msg=message ends normally, body_length=4705, content_gap_length=0, header_length=280])) 1362692527.009775 MetaHookPre UpdateNetworkTime(1362692527.009775) 1362692527.009775 | HookUpdateNetworkTime 1362692527.009775 -1362692527.009775 | HookCallFunction Files::set_info([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=, u2_events=]) -1362692527.009775 | HookCallFunction Files::set_info([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=, u2_events=]) +1362692527.009775 | HookCallFunction Files::set_info([id=FakNcS1Jfe01uljb3, 
parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), origin=, request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_filenames=, orig_mime_types=, resp_fuids=[FakNcS1Jfe01uljb3], resp_filenames=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=]) +1362692527.009775 | HookCallFunction Files::set_info([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=]) 1362692527.009775 | HookCallFunction HTTP::code_in_range(200, 100, 199) -1362692527.009775 | HookCallFunction HTTP::get_file_handle([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009775 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) +1362692527.009775 | HookCallFunction HTTP::get_file_handle([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) +1362692527.009775 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) 1362692527.009775 | HookCallFunction Log::__write(Files::LOG, [ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CHhAvVGS1DHFjwGM9}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=262.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=]) 1362692527.009775 | HookCallFunction Log::__write(HTTP::LOG, [ts=1362692526.939527, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]) 1362692527.009775 | HookCallFunction Log::write(Files::LOG, [ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CHhAvVGS1DHFjwGM9}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=262.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=]) 1362692527.009775 | HookCallFunction Log::write(HTTP::LOG, [ts=1362692526.939527, uid=CHhAvVGS1DHFjwGM9, 
id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]) 1362692527.009775 | HookCallFunction cat(Analyzer::ANALYZER_HTTP, 1362692526.869344, F, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80) 1362692527.009775 | HookCallFunction file_sniff([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], inferred=T]) -1362692527.009775 | HookCallFunction file_state_remove([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=, u2_events=]) +1362692527.009775 | HookCallFunction file_state_remove([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=]) 1362692527.009775 | HookCallFunction fmt(%s:%d > %s:%d, 141.142.228.5, 59856<...>/tcp) -1362692527.009775 | HookCallFunction get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009775 | HookCallFunction http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009775 | HookCallFunction http_message_done([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, [start=1362692527.009512, interrupted=F, finish_msg=message ends normally, body_length=4705, content_gap_length=0, header_length=280]) +1362692527.009775 | HookCallFunction get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) +1362692527.009775 | HookCallFunction http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) +1362692527.009775 | HookCallFunction http_message_done([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, [start=1362692527.009512, interrupted=F, finish_msg=message ends normally, body_length=4705, content_gap_length=0, header_length=280]) 
1362692527.009775 | HookCallFunction id_string([orig_h=141.142.228.5, orig_p=59856<...>/tcp]) 1362692527.009775 | HookCallFunction set_file_handle(Analyzer::ANALYZER_HTTP1362692526.869344F11141.142.228.5:59856 > 192.150.187.43:80) 1362692527.009775 | HookDrainEvents @@ -3114,10 +3090,10 @@ 1362692527.009775 | HookLogWrite files [ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts=192.150.187.43, rx_hosts=141.142.228.5, conn_uids=CHhAvVGS1DHFjwGM9, source=HTTP, depth=0, analyzers=, mime_type=text/plain, filename=, duration=0.000263, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, extracted=, extracted_cutoff=, extracted_size=] 1362692527.009775 | HookLogWrite http [ts=1362692526.939527, uid=CHhAvVGS1DHFjwGM9, id.orig_h=141.142.228.5, id.orig_p=59856, id.resp_h=192.150.187.43, id.resp_p=80, trans_depth=1, method=GET, host=bro.org, uri=<...>/plain] 1362692527.009775 | HookQueueEvent file_sniff([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], inferred=T]) -1362692527.009775 | HookQueueEvent file_state_remove([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=, u2_events=]) -1362692527.009775 | HookQueueEvent get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009775 | HookQueueEvent http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009775 | HookQueueEvent http_message_done([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, [start=1362692527.009512, interrupted=F, finish_msg=message ends normally, body_length=4705, content_gap_length=0, header_length=280]) +1362692527.009775 | HookQueueEvent file_state_remove([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1], irc=, pe=]) +1362692527.009775 | HookQueueEvent get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) +1362692527.009775 | HookQueueEvent http_end_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, 
mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) +1362692527.009775 | HookQueueEvent http_message_done([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, [start=1362692527.009512, interrupted=F, finish_msg=message ends normally, body_length=4705, content_gap_length=0, header_length=280]) 1362692527.009855 MetaHookPost DrainEvents() -> 1362692527.009855 MetaHookPost UpdateNetworkTime(1362692527.009855) -> 1362692527.009855 MetaHookPre DrainEvents() @@ -3143,20 +3119,19 @@ 1362692527.080828 | HookUpdateNetworkTime 1362692527.080828 1362692527.080828 | HookDrainEvents 1362692527.080972 MetaHookPost CallFunction(ChecksumOffloading::check, , ()) -> -1362692527.080972 MetaHookPost CallFunction(Conn::conn_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], tcp)) -> -1362692527.080972 MetaHookPost CallFunction(Conn::determine_service, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> -1362692527.080972 MetaHookPost CallFunction(Conn::set_conn, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> -1362692527.080972 MetaHookPost CallFunction(HTTP::get_file_handle, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> -1362692527.080972 MetaHookPost CallFunction(KRB::do_log, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> -1362692527.080972 MetaHookPost CallFunction(KRB::fill_in_subjects, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> +1362692527.080972 MetaHookPost CallFunction(Conn::conn_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, 
resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], tcp)) -> +1362692527.080972 MetaHookPost CallFunction(Conn::determine_service, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> +1362692527.080972 MetaHookPost CallFunction(Conn::set_conn, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> +1362692527.080972 MetaHookPost CallFunction(HTTP::get_file_handle, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> +1362692527.080972 MetaHookPost CallFunction(KRB::do_log, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> +1362692527.080972 MetaHookPost CallFunction(KRB::fill_in_subjects, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> 1362692527.080972 MetaHookPost CallFunction(Log::__write, , (Conn::LOG, [ts=1362692526.869344, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], proto=tcp, service=http, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=SF, local_orig=, local_resp=, missed_bytes=0, history=ShADadFf, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, tunnel_parents=])) -> 1362692527.080972 MetaHookPost CallFunction(Log::write, , (Conn::LOG, [ts=1362692526.869344, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], proto=tcp, service=http, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=SF, local_orig=, local_resp=, missed_bytes=0, history=ShADadFf, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, tunnel_parents=])) -> -1362692527.080972 MetaHookPost CallFunction(bro_done, , ()) -> 1362692527.080972 MetaHookPost CallFunction(cat, , (Analyzer::ANALYZER_HTTP, 1362692526.869344, T, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80)) -> -1362692527.080972 MetaHookPost CallFunction(connection_state_remove, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, 
trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> +1362692527.080972 MetaHookPost CallFunction(connection_state_remove, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> 1362692527.080972 MetaHookPost CallFunction(filter_change_tracking, , ()) -> 1362692527.080972 MetaHookPost CallFunction(fmt, , (%s:%d > %s:%d, 141.142.228.5, 59856<...>/tcp)) -> -1362692527.080972 MetaHookPost CallFunction(get_file_handle, , (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> +1362692527.080972 MetaHookPost CallFunction(get_file_handle, , (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> 1362692527.080972 MetaHookPost CallFunction(get_net_stats, , ()) -> 1362692527.080972 MetaHookPost CallFunction(get_port_transport_proto, , (80/tcp)) -> 1362692527.080972 MetaHookPost CallFunction(id_string, , ([orig_h=141.142.228.5, orig_p=59856<...>/tcp])) -> @@ -3166,30 +3141,30 @@ 1362692527.080972 MetaHookPost CallFunction(set_file_handle, , (Analyzer::ANALYZER_HTTP1362692526.869344T11141.142.228.5:59856 > 192.150.187.43:80)) -> 1362692527.080972 MetaHookPost CallFunction(sub_bytes, , (HTTP, 0, 1)) -> 1362692527.080972 MetaHookPost CallFunction(to_lower, , (HTTP)) -> +1362692527.080972 MetaHookPost CallFunction(zeek_done, , ()) -> 1362692527.080972 MetaHookPost DrainEvents() -> 1362692527.080972 MetaHookPost LogInit(Log::WRITER_ASCII, default, true, true, conn(1362692527.080972,0.0,0.0), 21, {ts (time), uid (string), id.orig_h (addr), id.orig_p (port), id.resp_h (addr), id.resp_p (port), proto (enum), service (string), duration (interval), orig_bytes (count), resp_bytes (count), conn_state (string), local_orig (bool), local_resp (bool), missed_bytes (count), history (string), orig_pkts (count), orig_ip_bytes (count), resp_pkts (count), resp_ip_bytes (count), tunnel_parents (set[string])}) -> 1362692527.080972 MetaHookPost LogWrite(Log::WRITER_ASCII, default, conn(1362692527.080972,0.0,0.0), 21, {ts (time), uid (string), id.orig_h (addr), id.orig_p (port), id.resp_h (addr), id.resp_p (port), proto (enum), service (string), duration (interval), orig_bytes (count), resp_bytes (count), conn_state (string), local_orig (bool), local_resp (bool), missed_bytes (count), history (string), orig_pkts (count), orig_ip_bytes (count), resp_pkts (count), resp_ip_bytes (count), tunnel_parents (set[string])}, ) -> true 1362692527.080972 MetaHookPost QueueEvent(ChecksumOffloading::check()) -> false -1362692527.080972 MetaHookPost QueueEvent(bro_done()) -> false -1362692527.080972 MetaHookPost 
QueueEvent(connection_state_remove([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> false +1362692527.080972 MetaHookPost QueueEvent(connection_state_remove([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -> false 1362692527.080972 MetaHookPost QueueEvent(filter_change_tracking()) -> false -1362692527.080972 MetaHookPost QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> false +1362692527.080972 MetaHookPost QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -> false +1362692527.080972 MetaHookPost QueueEvent(zeek_done()) -> false 1362692527.080972 MetaHookPost UpdateNetworkTime(1362692527.080972) -> 1362692527.080972 MetaHookPre CallFunction(ChecksumOffloading::check, , ()) -1362692527.080972 MetaHookPre CallFunction(Conn::conn_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], tcp)) -1362692527.080972 MetaHookPre CallFunction(Conn::determine_service, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -1362692527.080972 MetaHookPre CallFunction(Conn::set_conn, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692527.080972 MetaHookPre CallFunction(HTTP::get_file_handle, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) -1362692527.080972 MetaHookPre 
CallFunction(KRB::do_log, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) -1362692527.080972 MetaHookPre CallFunction(KRB::fill_in_subjects, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) +1362692527.080972 MetaHookPre CallFunction(Conn::conn_state, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], tcp)) +1362692527.080972 MetaHookPre CallFunction(Conn::determine_service, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) +1362692527.080972 MetaHookPre CallFunction(Conn::set_conn, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) +1362692527.080972 MetaHookPre CallFunction(HTTP::get_file_handle, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) +1362692527.080972 MetaHookPre CallFunction(KRB::do_log, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) +1362692527.080972 MetaHookPre CallFunction(KRB::fill_in_subjects, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) 1362692527.080972 MetaHookPre CallFunction(Log::__write, , (Conn::LOG, [ts=1362692526.869344, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], proto=tcp, service=http, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=SF, local_orig=, local_resp=, missed_bytes=0, history=ShADadFf, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, 
tunnel_parents=])) 1362692527.080972 MetaHookPre CallFunction(Log::write, , (Conn::LOG, [ts=1362692526.869344, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], proto=tcp, service=http, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=SF, local_orig=, local_resp=, missed_bytes=0, history=ShADadFf, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, tunnel_parents=])) -1362692527.080972 MetaHookPre CallFunction(bro_done, , ()) 1362692527.080972 MetaHookPre CallFunction(cat, , (Analyzer::ANALYZER_HTTP, 1362692526.869344, T, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80)) -1362692527.080972 MetaHookPre CallFunction(connection_state_remove, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) +1362692527.080972 MetaHookPre CallFunction(connection_state_remove, , ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) 1362692527.080972 MetaHookPre CallFunction(filter_change_tracking, , ()) 1362692527.080972 MetaHookPre CallFunction(fmt, , (%s:%d > %s:%d, 141.142.228.5, 59856<...>/tcp)) -1362692527.080972 MetaHookPre CallFunction(get_file_handle, , (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) +1362692527.080972 MetaHookPre CallFunction(get_file_handle, , (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) 1362692527.080972 MetaHookPre CallFunction(get_net_stats, , ()) 1362692527.080972 MetaHookPre CallFunction(get_port_transport_proto, , (80/tcp)) 1362692527.080972 MetaHookPre CallFunction(id_string, , ([orig_h=141.142.228.5, orig_p=59856<...>/tcp])) @@ -3199,31 +3174,31 @@ 1362692527.080972 MetaHookPre CallFunction(set_file_handle, , (Analyzer::ANALYZER_HTTP1362692526.869344T11141.142.228.5:59856 > 192.150.187.43:80)) 1362692527.080972 MetaHookPre CallFunction(sub_bytes, , (HTTP, 0, 1)) 1362692527.080972 MetaHookPre CallFunction(to_lower, , (HTTP)) +1362692527.080972 MetaHookPre CallFunction(zeek_done, , ()) 1362692527.080972 MetaHookPre DrainEvents() 1362692527.080972 MetaHookPre LogInit(Log::WRITER_ASCII, default, true, true, conn(1362692527.080972,0.0,0.0), 21, {ts (time), uid (string), id.orig_h (addr), id.orig_p (port), id.resp_h (addr), id.resp_p (port), proto (enum), service (string), duration (interval), orig_bytes (count), resp_bytes (count), conn_state (string), local_orig (bool), local_resp (bool), missed_bytes (count), history (string), orig_pkts (count), orig_ip_bytes (count), 
resp_pkts (count), resp_ip_bytes (count), tunnel_parents (set[string])}) 1362692527.080972 MetaHookPre LogWrite(Log::WRITER_ASCII, default, conn(1362692527.080972,0.0,0.0), 21, {ts (time), uid (string), id.orig_h (addr), id.orig_p (port), id.resp_h (addr), id.resp_p (port), proto (enum), service (string), duration (interval), orig_bytes (count), resp_bytes (count), conn_state (string), local_orig (bool), local_resp (bool), missed_bytes (count), history (string), orig_pkts (count), orig_ip_bytes (count), resp_pkts (count), resp_ip_bytes (count), tunnel_parents (set[string])}, ) 1362692527.080972 MetaHookPre QueueEvent(ChecksumOffloading::check()) -1362692527.080972 MetaHookPre QueueEvent(bro_done()) -1362692527.080972 MetaHookPre QueueEvent(connection_state_remove([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) +1362692527.080972 MetaHookPre QueueEvent(connection_state_remove([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=])) 1362692527.080972 MetaHookPre QueueEvent(filter_change_tracking()) -1362692527.080972 MetaHookPre QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) +1362692527.080972 MetaHookPre QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T)) +1362692527.080972 MetaHookPre QueueEvent(zeek_done()) 1362692527.080972 MetaHookPre UpdateNetworkTime(1362692527.080972) 1362692527.080972 | HookUpdateNetworkTime 1362692527.080972 1362692527.080972 | HookCallFunction ChecksumOffloading::check() -1362692527.080972 | HookCallFunction Conn::conn_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], tcp) -1362692527.080972 | HookCallFunction Conn::determine_service([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) -1362692527.080972 | HookCallFunction Conn::set_conn([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, 
orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692527.080972 | HookCallFunction HTTP::get_file_handle([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) -1362692527.080972 | HookCallFunction KRB::do_log([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) -1362692527.080972 | HookCallFunction KRB::fill_in_subjects([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) +1362692527.080972 | HookCallFunction Conn::conn_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], tcp) +1362692527.080972 | HookCallFunction Conn::determine_service([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) +1362692527.080972 | HookCallFunction Conn::set_conn([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) +1362692527.080972 | HookCallFunction HTTP::get_file_handle([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) +1362692527.080972 | HookCallFunction KRB::do_log([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) +1362692527.080972 | HookCallFunction KRB::fill_in_subjects([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, 
current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) 1362692527.080972 | HookCallFunction Log::__write(Conn::LOG, [ts=1362692526.869344, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], proto=tcp, service=http, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=SF, local_orig=, local_resp=, missed_bytes=0, history=ShADadFf, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, tunnel_parents=]) 1362692527.080972 | HookCallFunction Log::write(Conn::LOG, [ts=1362692526.869344, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=141.142.228.5, orig_p=59856<...>/tcp], proto=tcp, service=http, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=SF, local_orig=, local_resp=, missed_bytes=0, history=ShADadFf, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, tunnel_parents=]) -1362692527.080972 | HookCallFunction bro_done() 1362692527.080972 | HookCallFunction cat(Analyzer::ANALYZER_HTTP, 1362692526.869344, T, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80) -1362692527.080972 | HookCallFunction connection_state_remove([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) +1362692527.080972 | HookCallFunction connection_state_remove([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) 1362692527.080972 | HookCallFunction filter_change_tracking() 1362692527.080972 | HookCallFunction fmt(%s:%d > %s:%d, 141.142.228.5, 59856<...>/tcp) -1362692527.080972 | HookCallFunction get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) +1362692527.080972 | HookCallFunction get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) 1362692527.080972 | HookCallFunction get_net_stats() 1362692527.080972 | HookCallFunction get_port_transport_proto(80/tcp) 1362692527.080972 | HookCallFunction id_string([orig_h=141.142.228.5, orig_p=59856<...>/tcp]) @@ -3233,11 +3208,12 @@ 1362692527.080972 | HookCallFunction set_file_handle(Analyzer::ANALYZER_HTTP1362692526.869344T11141.142.228.5:59856 > 192.150.187.43:80) 1362692527.080972 | HookCallFunction sub_bytes(HTTP, 0, 1) 1362692527.080972 | HookCallFunction to_lower(HTTP) +1362692527.080972 | HookCallFunction zeek_done() 1362692527.080972 | HookDrainEvents 1362692527.080972 | HookLogInit conn 1/1 {ts (time), uid 
(string), id.orig_h (addr), id.orig_p (port), id.resp_h (addr), id.resp_p (port), proto (enum), service (string), duration (interval), orig_bytes (count), resp_bytes (count), conn_state (string), local_orig (bool), local_resp (bool), missed_bytes (count), history (string), orig_pkts (count), orig_ip_bytes (count), resp_pkts (count), resp_ip_bytes (count), tunnel_parents (set[string])} 1362692527.080972 | HookLogWrite conn [ts=1362692526.869344, uid=CHhAvVGS1DHFjwGM9, id.orig_h=141.142.228.5, id.orig_p=59856, id.resp_h=192.150.187.43, id.resp_p=80, proto=tcp, service=http, duration=0.211484, orig_bytes=136, resp_bytes=5007, conn_state=SF, local_orig=, local_resp=, missed_bytes=0, history=ShADadFf, orig_pkts=7, orig_ip_bytes=512, resp_pkts=7, resp_ip_bytes=5379, tunnel_parents=] 1362692527.080972 | HookQueueEvent ChecksumOffloading::check() -1362692527.080972 | HookQueueEvent bro_done() -1362692527.080972 | HookQueueEvent connection_state_remove([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) +1362692527.080972 | HookQueueEvent connection_state_remove([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]) 1362692527.080972 | HookQueueEvent filter_change_tracking() -1362692527.080972 | HookQueueEvent get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) +1362692527.080972 | HookQueueEvent get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1], http_state=[pending={}, current_request=1, current_response=1, trans_depth=1], irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=], T) +1362692527.080972 | HookQueueEvent zeek_done() diff --git a/testing/btest/Baseline/plugins.legacy/output b/testing/btest/Baseline/plugins.legacy/output new file mode 100644 index 0000000000..675a884b16 --- /dev/null +++ b/testing/btest/Baseline/plugins.legacy/output @@ -0,0 +1,6 @@ +Demo::Foo - A Foo test analyzer (dynamic, version 1.0.0) + [Analyzer] Foo (ANALYZER_FOO, enabled) + [Event] foo_message + +=== +foo_message, [orig_h=::1, orig_p=37927/tcp, resp_h=::1, resp_p=4242/tcp], Hello, Foo!\x0a diff --git a/testing/btest/Baseline/plugins.reporter-hook/output b/testing/btest/Baseline/plugins.reporter-hook/output index 8f706ec644..36418d2405 100644 --- a/testing/btest/Baseline/plugins.reporter-hook/output +++ b/testing/btest/Baseline/plugins.reporter-hook/output @@ -1,10 +1,10 @@ - | Hook Some Info <...>/reporter-hook.bro, line 16 - | Hook error An Error <...>/reporter-hook.bro, line 18 - | Hook error An Error that does not show up in the log <...>/reporter-hook.bro, line 19 - | Hook 
expression error field value missing (b$a) <...>/reporter-hook.bro, line 23 - | Hook warning A warning <...>/reporter-hook.bro, line 17 -<...>/reporter-hook.bro, line 16: Some Info -error in <...>/reporter-hook.bro, line 18: An Error -error in <...>/reporter-hook.bro, line 19: An Error that does not show up in the log -expression error in <...>/reporter-hook.bro, line 23: field value missing (b$a) -warning in <...>/reporter-hook.bro, line 17: A warning + | Hook Some Info <...>/reporter-hook.zeek, line 16 + | Hook error An Error <...>/reporter-hook.zeek, line 18 + | Hook error An Error that does not show up in the log <...>/reporter-hook.zeek, line 19 + | Hook expression error field value missing (b$a) <...>/reporter-hook.zeek, line 23 + | Hook warning A warning <...>/reporter-hook.zeek, line 17 +<...>/reporter-hook.zeek, line 16: Some Info +error in <...>/reporter-hook.zeek, line 18: An Error +error in <...>/reporter-hook.zeek, line 19: An Error that does not show up in the log +expression error in <...>/reporter-hook.zeek, line 23: field value missing (b$a) +warning in <...>/reporter-hook.zeek, line 17: A warning diff --git a/testing/btest/Baseline/plugins.reporter-hook/reporter.log b/testing/btest/Baseline/plugins.reporter-hook/reporter.log index bce2fb909f..fc5a79bc86 100644 --- a/testing/btest/Baseline/plugins.reporter-hook/reporter.log +++ b/testing/btest/Baseline/plugins.reporter-hook/reporter.log @@ -6,8 +6,8 @@ #open 2017-07-26-17-58-52 #fields ts level message location #types time enum string string -0.000000 Reporter::INFO Some Info /Users/johanna/corelight/bro/testing/btest/.tmp/plugins.reporter-hook/reporter-hook.bro, line 16 -0.000000 Reporter::WARNING A warning /Users/johanna/corelight/bro/testing/btest/.tmp/plugins.reporter-hook/reporter-hook.bro, line 17 -0.000000 Reporter::ERROR An Error /Users/johanna/corelight/bro/testing/btest/.tmp/plugins.reporter-hook/reporter-hook.bro, line 18 -0.000000 Reporter::ERROR field value missing (b$a) /Users/johanna/corelight/bro/testing/btest/.tmp/plugins.reporter-hook/reporter-hook.bro, line 23 +0.000000 Reporter::INFO Some Info /Users/johanna/corelight/bro/testing/btest/.tmp/plugins.reporter-hook/reporter-hook.zeek, line 16 +0.000000 Reporter::WARNING A warning /Users/johanna/corelight/bro/testing/btest/.tmp/plugins.reporter-hook/reporter-hook.zeek, line 17 +0.000000 Reporter::ERROR An Error /Users/johanna/corelight/bro/testing/btest/.tmp/plugins.reporter-hook/reporter-hook.zeek, line 18 +0.000000 Reporter::ERROR field value missing (b$a) /Users/johanna/corelight/bro/testing/btest/.tmp/plugins.reporter-hook/reporter-hook.zeek, line 23 #close 2017-07-26-17-58-52 diff --git a/testing/btest/Baseline/plugins.writer/output b/testing/btest/Baseline/plugins.writer/output index 729887b44d..cafb0429af 100644 --- a/testing/btest/Baseline/plugins.writer/output +++ b/testing/btest/Baseline/plugins.writer/output @@ -17,6 +17,6 @@ Demo::Foo - A Foo test logging writer (dynamic, version 1.0.0) [http] 1340213020.732963|ClEkJM2Vm5giqnMf4h|10.0.0.55|53994|60.190.189.214|8124|5|GET|www.osnews.com|/images/icons/17.gif|http://www.osnews.com/|1.1|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|-|0|0|304|Not Modified|-|-||-|-|-|-|-|-|-|-|- [http] 1340213021.300269|ClEkJM2Vm5giqnMf4h|10.0.0.55|53994|60.190.189.214|8124|6|GET|www.osnews.com|/images/left.gif|http://www.osnews.com/|1.1|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|-|0|0|304|Not Modified|-|-||-|-|-|-|-|-|-|-|- [http] 
1340213021.861584|ClEkJM2Vm5giqnMf4h|10.0.0.55|53994|60.190.189.214|8124|7|GET|www.osnews.com|/images/icons/32.gif|http://www.osnews.com/|1.1|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|-|0|0|304|Not Modified|-|-||-|-|-|-|-|-|-|-|- -[packet_filter] 1552509148.042714|bro|ip or not ip|T|T +[packet_filter] 1559874008.158433|zeek|ip or not ip|T|T [socks] 1340213015.276495|ClEkJM2Vm5giqnMf4h|10.0.0.55|53994|60.190.189.214|8124|5|-|-|succeeded|-|www.osnews.com|80|192.168.0.31|-|2688 [tunnel] 1340213015.276495|-|10.0.0.55|0|60.190.189.214|8124|Tunnel::SOCKS|Tunnel::DISCOVER diff --git a/testing/btest/Baseline/scripts.base.frameworks.cluster.custom_pool_exclusivity/manager-1..stdout b/testing/btest/Baseline/scripts.base.frameworks.cluster.custom_pool_exclusivity/manager-1..stdout index f5b2222839..788b3699dd 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.cluster.custom_pool_exclusivity/manager-1..stdout +++ b/testing/btest/Baseline/scripts.base.frameworks.cluster.custom_pool_exclusivity/manager-1..stdout @@ -1,101 +1,101 @@ 1st stuff -hrw, 0, bro/cluster/node/proxy-1 -hrw (custom pool), 0, bro/cluster/node/proxy-2 -hrw, 1, bro/cluster/node/proxy-1 -hrw (custom pool), 1, bro/cluster/node/proxy-2 -hrw, 2, bro/cluster/node/proxy-1 -hrw (custom pool), 2, bro/cluster/node/proxy-2 -hrw, 3, bro/cluster/node/proxy-1 -hrw (custom pool), 3, bro/cluster/node/proxy-2 -hrw, 13, bro/cluster/node/proxy-1 -hrw (custom pool), 13, bro/cluster/node/proxy-2 -hrw, 37, bro/cluster/node/proxy-1 -hrw (custom pool), 37, bro/cluster/node/proxy-2 -hrw, 42, bro/cluster/node/proxy-1 -hrw (custom pool), 42, bro/cluster/node/proxy-2 -hrw, 101, bro/cluster/node/proxy-1 -hrw (custom pool), 101, bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-1 -rr (custom pool), bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-1 -rr (custom pool), bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-1 -rr (custom pool), bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-1 -rr (custom pool), bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-1 -rr (custom pool), bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-1 -rr (custom pool), bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-1 -rr (custom pool), bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-1 -rr (custom pool), bro/cluster/node/proxy-2 -hrw, 0, bro/cluster/node/proxy-1 -hrw (custom pool), 0, bro/cluster/node/proxy-2 -hrw, 1, bro/cluster/node/proxy-1 -hrw (custom pool), 1, bro/cluster/node/proxy-2 -hrw, 2, bro/cluster/node/proxy-1 -hrw (custom pool), 2, bro/cluster/node/proxy-2 -hrw, 3, bro/cluster/node/proxy-1 -hrw (custom pool), 3, bro/cluster/node/proxy-2 -hrw, 13, bro/cluster/node/proxy-1 -hrw (custom pool), 13, bro/cluster/node/proxy-2 -hrw, 37, bro/cluster/node/proxy-1 -hrw (custom pool), 37, bro/cluster/node/proxy-2 -hrw, 42, bro/cluster/node/proxy-1 -hrw (custom pool), 42, bro/cluster/node/proxy-2 -hrw, 101, bro/cluster/node/proxy-1 -hrw (custom pool), 101, bro/cluster/node/proxy-2 +hrw, 0, zeek/cluster/node/proxy-1 +hrw (custom pool), 0, zeek/cluster/node/proxy-2 +hrw, 1, zeek/cluster/node/proxy-1 +hrw (custom pool), 1, zeek/cluster/node/proxy-2 +hrw, 2, zeek/cluster/node/proxy-1 +hrw (custom pool), 2, zeek/cluster/node/proxy-2 +hrw, 3, zeek/cluster/node/proxy-1 +hrw (custom pool), 3, zeek/cluster/node/proxy-2 +hrw, 13, zeek/cluster/node/proxy-1 +hrw (custom pool), 13, zeek/cluster/node/proxy-2 +hrw, 37, zeek/cluster/node/proxy-1 +hrw (custom pool), 37, zeek/cluster/node/proxy-2 +hrw, 42, 
zeek/cluster/node/proxy-1 +hrw (custom pool), 42, zeek/cluster/node/proxy-2 +hrw, 101, zeek/cluster/node/proxy-1 +hrw (custom pool), 101, zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-1 +rr (custom pool), zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-1 +rr (custom pool), zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-1 +rr (custom pool), zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-1 +rr (custom pool), zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-1 +rr (custom pool), zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-1 +rr (custom pool), zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-1 +rr (custom pool), zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-1 +rr (custom pool), zeek/cluster/node/proxy-2 +hrw, 0, zeek/cluster/node/proxy-1 +hrw (custom pool), 0, zeek/cluster/node/proxy-2 +hrw, 1, zeek/cluster/node/proxy-1 +hrw (custom pool), 1, zeek/cluster/node/proxy-2 +hrw, 2, zeek/cluster/node/proxy-1 +hrw (custom pool), 2, zeek/cluster/node/proxy-2 +hrw, 3, zeek/cluster/node/proxy-1 +hrw (custom pool), 3, zeek/cluster/node/proxy-2 +hrw, 13, zeek/cluster/node/proxy-1 +hrw (custom pool), 13, zeek/cluster/node/proxy-2 +hrw, 37, zeek/cluster/node/proxy-1 +hrw (custom pool), 37, zeek/cluster/node/proxy-2 +hrw, 42, zeek/cluster/node/proxy-1 +hrw (custom pool), 42, zeek/cluster/node/proxy-2 +hrw, 101, zeek/cluster/node/proxy-1 +hrw (custom pool), 101, zeek/cluster/node/proxy-2 2nd stuff hrw, 0, -hrw (custom pool), 0, bro/cluster/node/proxy-2 +hrw (custom pool), 0, zeek/cluster/node/proxy-2 hrw, 1, -hrw (custom pool), 1, bro/cluster/node/proxy-2 +hrw (custom pool), 1, zeek/cluster/node/proxy-2 hrw, 2, -hrw (custom pool), 2, bro/cluster/node/proxy-2 +hrw (custom pool), 2, zeek/cluster/node/proxy-2 hrw, 3, -hrw (custom pool), 3, bro/cluster/node/proxy-2 +hrw (custom pool), 3, zeek/cluster/node/proxy-2 hrw, 13, -hrw (custom pool), 13, bro/cluster/node/proxy-2 +hrw (custom pool), 13, zeek/cluster/node/proxy-2 hrw, 37, -hrw (custom pool), 37, bro/cluster/node/proxy-2 +hrw (custom pool), 37, zeek/cluster/node/proxy-2 hrw, 42, -hrw (custom pool), 42, bro/cluster/node/proxy-2 +hrw (custom pool), 42, zeek/cluster/node/proxy-2 hrw, 101, -hrw (custom pool), 101, bro/cluster/node/proxy-2 +hrw (custom pool), 101, zeek/cluster/node/proxy-2 rr, -rr (custom pool), bro/cluster/node/proxy-2 +rr (custom pool), zeek/cluster/node/proxy-2 rr, -rr (custom pool), bro/cluster/node/proxy-2 +rr (custom pool), zeek/cluster/node/proxy-2 rr, -rr (custom pool), bro/cluster/node/proxy-2 +rr (custom pool), zeek/cluster/node/proxy-2 rr, -rr (custom pool), bro/cluster/node/proxy-2 +rr (custom pool), zeek/cluster/node/proxy-2 rr, -rr (custom pool), bro/cluster/node/proxy-2 +rr (custom pool), zeek/cluster/node/proxy-2 rr, -rr (custom pool), bro/cluster/node/proxy-2 +rr (custom pool), zeek/cluster/node/proxy-2 rr, -rr (custom pool), bro/cluster/node/proxy-2 +rr (custom pool), zeek/cluster/node/proxy-2 rr, -rr (custom pool), bro/cluster/node/proxy-2 +rr (custom pool), zeek/cluster/node/proxy-2 hrw, 0, -hrw (custom pool), 0, bro/cluster/node/proxy-2 +hrw (custom pool), 0, zeek/cluster/node/proxy-2 hrw, 1, -hrw (custom pool), 1, bro/cluster/node/proxy-2 +hrw (custom pool), 1, zeek/cluster/node/proxy-2 hrw, 2, -hrw (custom pool), 2, bro/cluster/node/proxy-2 +hrw (custom pool), 2, zeek/cluster/node/proxy-2 hrw, 3, -hrw (custom pool), 3, bro/cluster/node/proxy-2 +hrw (custom pool), 3, zeek/cluster/node/proxy-2 hrw, 13, -hrw (custom pool), 13, bro/cluster/node/proxy-2 +hrw (custom 
pool), 13, zeek/cluster/node/proxy-2 hrw, 37, -hrw (custom pool), 37, bro/cluster/node/proxy-2 +hrw (custom pool), 37, zeek/cluster/node/proxy-2 hrw, 42, -hrw (custom pool), 42, bro/cluster/node/proxy-2 +hrw (custom pool), 42, zeek/cluster/node/proxy-2 hrw, 101, -hrw (custom pool), 101, bro/cluster/node/proxy-2 +hrw (custom pool), 101, zeek/cluster/node/proxy-2 no stuff hrw, 0, hrw (custom pool), 0, diff --git a/testing/btest/Baseline/scripts.base.frameworks.cluster.custom_pool_limits/manager-1..stdout b/testing/btest/Baseline/scripts.base.frameworks.cluster.custom_pool_limits/manager-1..stdout index 977abbf9e9..310df794f0 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.cluster.custom_pool_limits/manager-1..stdout +++ b/testing/btest/Baseline/scripts.base.frameworks.cluster.custom_pool_limits/manager-1..stdout @@ -1,101 +1,101 @@ 1st stuff -hrw, 0, bro/cluster/node/proxy-1 -hrw (custom pool), 0, bro/cluster/node/proxy-1 -hrw, 1, bro/cluster/node/proxy-1 -hrw (custom pool), 1, bro/cluster/node/proxy-1 -hrw, 2, bro/cluster/node/proxy-1 -hrw (custom pool), 2, bro/cluster/node/proxy-1 -hrw, 3, bro/cluster/node/proxy-1 -hrw (custom pool), 3, bro/cluster/node/proxy-1 -hrw, 13, bro/cluster/node/proxy-1 -hrw (custom pool), 13, bro/cluster/node/proxy-2 -hrw, 37, bro/cluster/node/proxy-1 -hrw (custom pool), 37, bro/cluster/node/proxy-2 -hrw, 42, bro/cluster/node/proxy-1 -hrw (custom pool), 42, bro/cluster/node/proxy-2 -hrw, 101, bro/cluster/node/proxy-1 -hrw (custom pool), 101, bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-1 -rr (custom pool), bro/cluster/node/proxy-1 -rr, bro/cluster/node/proxy-1 -rr (custom pool), bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-1 -rr (custom pool), bro/cluster/node/proxy-1 -rr, bro/cluster/node/proxy-1 -rr (custom pool), bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-1 -rr (custom pool), bro/cluster/node/proxy-1 -rr, bro/cluster/node/proxy-1 -rr (custom pool), bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-1 -rr (custom pool), bro/cluster/node/proxy-1 -rr, bro/cluster/node/proxy-1 -rr (custom pool), bro/cluster/node/proxy-2 -hrw, 0, bro/cluster/node/proxy-1 -hrw (custom pool), 0, bro/cluster/node/proxy-1 -hrw, 1, bro/cluster/node/proxy-1 -hrw (custom pool), 1, bro/cluster/node/proxy-1 -hrw, 2, bro/cluster/node/proxy-1 -hrw (custom pool), 2, bro/cluster/node/proxy-1 -hrw, 3, bro/cluster/node/proxy-1 -hrw (custom pool), 3, bro/cluster/node/proxy-1 -hrw, 13, bro/cluster/node/proxy-1 -hrw (custom pool), 13, bro/cluster/node/proxy-2 -hrw, 37, bro/cluster/node/proxy-1 -hrw (custom pool), 37, bro/cluster/node/proxy-2 -hrw, 42, bro/cluster/node/proxy-1 -hrw (custom pool), 42, bro/cluster/node/proxy-2 -hrw, 101, bro/cluster/node/proxy-1 -hrw (custom pool), 101, bro/cluster/node/proxy-2 +hrw, 0, zeek/cluster/node/proxy-1 +hrw (custom pool), 0, zeek/cluster/node/proxy-1 +hrw, 1, zeek/cluster/node/proxy-1 +hrw (custom pool), 1, zeek/cluster/node/proxy-1 +hrw, 2, zeek/cluster/node/proxy-1 +hrw (custom pool), 2, zeek/cluster/node/proxy-1 +hrw, 3, zeek/cluster/node/proxy-1 +hrw (custom pool), 3, zeek/cluster/node/proxy-1 +hrw, 13, zeek/cluster/node/proxy-1 +hrw (custom pool), 13, zeek/cluster/node/proxy-2 +hrw, 37, zeek/cluster/node/proxy-1 +hrw (custom pool), 37, zeek/cluster/node/proxy-2 +hrw, 42, zeek/cluster/node/proxy-1 +hrw (custom pool), 42, zeek/cluster/node/proxy-2 +hrw, 101, zeek/cluster/node/proxy-1 +hrw (custom pool), 101, zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-1 +rr (custom pool), zeek/cluster/node/proxy-1 +rr, 
zeek/cluster/node/proxy-1 +rr (custom pool), zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-1 +rr (custom pool), zeek/cluster/node/proxy-1 +rr, zeek/cluster/node/proxy-1 +rr (custom pool), zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-1 +rr (custom pool), zeek/cluster/node/proxy-1 +rr, zeek/cluster/node/proxy-1 +rr (custom pool), zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-1 +rr (custom pool), zeek/cluster/node/proxy-1 +rr, zeek/cluster/node/proxy-1 +rr (custom pool), zeek/cluster/node/proxy-2 +hrw, 0, zeek/cluster/node/proxy-1 +hrw (custom pool), 0, zeek/cluster/node/proxy-1 +hrw, 1, zeek/cluster/node/proxy-1 +hrw (custom pool), 1, zeek/cluster/node/proxy-1 +hrw, 2, zeek/cluster/node/proxy-1 +hrw (custom pool), 2, zeek/cluster/node/proxy-1 +hrw, 3, zeek/cluster/node/proxy-1 +hrw (custom pool), 3, zeek/cluster/node/proxy-1 +hrw, 13, zeek/cluster/node/proxy-1 +hrw (custom pool), 13, zeek/cluster/node/proxy-2 +hrw, 37, zeek/cluster/node/proxy-1 +hrw (custom pool), 37, zeek/cluster/node/proxy-2 +hrw, 42, zeek/cluster/node/proxy-1 +hrw (custom pool), 42, zeek/cluster/node/proxy-2 +hrw, 101, zeek/cluster/node/proxy-1 +hrw (custom pool), 101, zeek/cluster/node/proxy-2 2nd stuff hrw, 0, -hrw (custom pool), 0, bro/cluster/node/proxy-2 +hrw (custom pool), 0, zeek/cluster/node/proxy-2 hrw, 1, -hrw (custom pool), 1, bro/cluster/node/proxy-2 +hrw (custom pool), 1, zeek/cluster/node/proxy-2 hrw, 2, -hrw (custom pool), 2, bro/cluster/node/proxy-2 +hrw (custom pool), 2, zeek/cluster/node/proxy-2 hrw, 3, -hrw (custom pool), 3, bro/cluster/node/proxy-2 +hrw (custom pool), 3, zeek/cluster/node/proxy-2 hrw, 13, -hrw (custom pool), 13, bro/cluster/node/proxy-2 +hrw (custom pool), 13, zeek/cluster/node/proxy-2 hrw, 37, -hrw (custom pool), 37, bro/cluster/node/proxy-2 +hrw (custom pool), 37, zeek/cluster/node/proxy-2 hrw, 42, -hrw (custom pool), 42, bro/cluster/node/proxy-2 +hrw (custom pool), 42, zeek/cluster/node/proxy-2 hrw, 101, -hrw (custom pool), 101, bro/cluster/node/proxy-2 +hrw (custom pool), 101, zeek/cluster/node/proxy-2 rr, -rr (custom pool), bro/cluster/node/proxy-2 +rr (custom pool), zeek/cluster/node/proxy-2 rr, -rr (custom pool), bro/cluster/node/proxy-2 +rr (custom pool), zeek/cluster/node/proxy-2 rr, -rr (custom pool), bro/cluster/node/proxy-2 +rr (custom pool), zeek/cluster/node/proxy-2 rr, -rr (custom pool), bro/cluster/node/proxy-2 +rr (custom pool), zeek/cluster/node/proxy-2 rr, -rr (custom pool), bro/cluster/node/proxy-2 +rr (custom pool), zeek/cluster/node/proxy-2 rr, -rr (custom pool), bro/cluster/node/proxy-2 +rr (custom pool), zeek/cluster/node/proxy-2 rr, -rr (custom pool), bro/cluster/node/proxy-2 +rr (custom pool), zeek/cluster/node/proxy-2 rr, -rr (custom pool), bro/cluster/node/proxy-2 +rr (custom pool), zeek/cluster/node/proxy-2 hrw, 0, -hrw (custom pool), 0, bro/cluster/node/proxy-2 +hrw (custom pool), 0, zeek/cluster/node/proxy-2 hrw, 1, -hrw (custom pool), 1, bro/cluster/node/proxy-2 +hrw (custom pool), 1, zeek/cluster/node/proxy-2 hrw, 2, -hrw (custom pool), 2, bro/cluster/node/proxy-2 +hrw (custom pool), 2, zeek/cluster/node/proxy-2 hrw, 3, -hrw (custom pool), 3, bro/cluster/node/proxy-2 +hrw (custom pool), 3, zeek/cluster/node/proxy-2 hrw, 13, -hrw (custom pool), 13, bro/cluster/node/proxy-2 +hrw (custom pool), 13, zeek/cluster/node/proxy-2 hrw, 37, -hrw (custom pool), 37, bro/cluster/node/proxy-2 +hrw (custom pool), 37, zeek/cluster/node/proxy-2 hrw, 42, -hrw (custom pool), 42, bro/cluster/node/proxy-2 +hrw (custom pool), 42, 
zeek/cluster/node/proxy-2 hrw, 101, -hrw (custom pool), 101, bro/cluster/node/proxy-2 +hrw (custom pool), 101, zeek/cluster/node/proxy-2 no stuff hrw, 0, hrw (custom pool), 0, diff --git a/testing/btest/Baseline/scripts.base.frameworks.cluster.topic_distribution/manager-1..stdout b/testing/btest/Baseline/scripts.base.frameworks.cluster.topic_distribution/manager-1..stdout index 2c99f08ef2..3b5dd7bad4 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.cluster.topic_distribution/manager-1..stdout +++ b/testing/btest/Baseline/scripts.base.frameworks.cluster.topic_distribution/manager-1..stdout @@ -1,53 +1,53 @@ 1st stuff -hrw, 0, bro/cluster/node/proxy-1 -hrw, 1, bro/cluster/node/proxy-1 -hrw, 2, bro/cluster/node/proxy-1 -hrw, 3, bro/cluster/node/proxy-1 -hrw, 13, bro/cluster/node/proxy-2 -hrw, 37, bro/cluster/node/proxy-2 -hrw, 42, bro/cluster/node/proxy-2 -hrw, 101, bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-1 -rr, bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-1 -rr, bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-1 -rr, bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-1 -rr, bro/cluster/node/proxy-2 -hrw, 0, bro/cluster/node/proxy-1 -hrw, 1, bro/cluster/node/proxy-1 -hrw, 2, bro/cluster/node/proxy-1 -hrw, 3, bro/cluster/node/proxy-1 -hrw, 13, bro/cluster/node/proxy-2 -hrw, 37, bro/cluster/node/proxy-2 -hrw, 42, bro/cluster/node/proxy-2 -hrw, 101, bro/cluster/node/proxy-2 +hrw, 0, zeek/cluster/node/proxy-1 +hrw, 1, zeek/cluster/node/proxy-1 +hrw, 2, zeek/cluster/node/proxy-1 +hrw, 3, zeek/cluster/node/proxy-1 +hrw, 13, zeek/cluster/node/proxy-2 +hrw, 37, zeek/cluster/node/proxy-2 +hrw, 42, zeek/cluster/node/proxy-2 +hrw, 101, zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-1 +rr, zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-1 +rr, zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-1 +rr, zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-1 +rr, zeek/cluster/node/proxy-2 +hrw, 0, zeek/cluster/node/proxy-1 +hrw, 1, zeek/cluster/node/proxy-1 +hrw, 2, zeek/cluster/node/proxy-1 +hrw, 3, zeek/cluster/node/proxy-1 +hrw, 13, zeek/cluster/node/proxy-2 +hrw, 37, zeek/cluster/node/proxy-2 +hrw, 42, zeek/cluster/node/proxy-2 +hrw, 101, zeek/cluster/node/proxy-2 2nd stuff -hrw, 0, bro/cluster/node/proxy-2 -hrw, 1, bro/cluster/node/proxy-2 -hrw, 2, bro/cluster/node/proxy-2 -hrw, 3, bro/cluster/node/proxy-2 -hrw, 13, bro/cluster/node/proxy-2 -hrw, 37, bro/cluster/node/proxy-2 -hrw, 42, bro/cluster/node/proxy-2 -hrw, 101, bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-2 -rr, bro/cluster/node/proxy-2 -hrw, 0, bro/cluster/node/proxy-2 -hrw, 1, bro/cluster/node/proxy-2 -hrw, 2, bro/cluster/node/proxy-2 -hrw, 3, bro/cluster/node/proxy-2 -hrw, 13, bro/cluster/node/proxy-2 -hrw, 37, bro/cluster/node/proxy-2 -hrw, 42, bro/cluster/node/proxy-2 -hrw, 101, bro/cluster/node/proxy-2 +hrw, 0, zeek/cluster/node/proxy-2 +hrw, 1, zeek/cluster/node/proxy-2 +hrw, 2, zeek/cluster/node/proxy-2 +hrw, 3, zeek/cluster/node/proxy-2 +hrw, 13, zeek/cluster/node/proxy-2 +hrw, 37, zeek/cluster/node/proxy-2 +hrw, 42, zeek/cluster/node/proxy-2 +hrw, 101, zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-2 +rr, zeek/cluster/node/proxy-2 
+rr, zeek/cluster/node/proxy-2 +hrw, 0, zeek/cluster/node/proxy-2 +hrw, 1, zeek/cluster/node/proxy-2 +hrw, 2, zeek/cluster/node/proxy-2 +hrw, 3, zeek/cluster/node/proxy-2 +hrw, 13, zeek/cluster/node/proxy-2 +hrw, 37, zeek/cluster/node/proxy-2 +hrw, 42, zeek/cluster/node/proxy-2 +hrw, 101, zeek/cluster/node/proxy-2 no stuff hrw, 0, hrw, 1, diff --git a/testing/btest/Baseline/scripts.base.frameworks.config.basic/bro..stderr b/testing/btest/Baseline/scripts.base.frameworks.config.basic/zeek..stderr similarity index 100% rename from testing/btest/Baseline/scripts.base.frameworks.config.basic/bro..stderr rename to testing/btest/Baseline/scripts.base.frameworks.config.basic/zeek..stderr diff --git a/testing/btest/Baseline/scripts.base.frameworks.config.basic/bro.config.log b/testing/btest/Baseline/scripts.base.frameworks.config.basic/zeek.config.log similarity index 100% rename from testing/btest/Baseline/scripts.base.frameworks.config.basic/bro.config.log rename to testing/btest/Baseline/scripts.base.frameworks.config.basic/zeek.config.log diff --git a/testing/btest/Baseline/scripts.base.frameworks.config.read_config/bro.config.log b/testing/btest/Baseline/scripts.base.frameworks.config.read_config/zeek.config.log similarity index 100% rename from testing/btest/Baseline/scripts.base.frameworks.config.read_config/bro.config.log rename to testing/btest/Baseline/scripts.base.frameworks.config.read_config/zeek.config.log diff --git a/testing/btest/Baseline/scripts.base.frameworks.config.read_config_cluster/worker-1..stdout b/testing/btest/Baseline/scripts.base.frameworks.config.read_config_cluster/worker-1..stdout index 08b60346e3..9484b3d205 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.config.read_config_cluster/worker-1..stdout +++ b/testing/btest/Baseline/scripts.base.frameworks.config.read_config_cluster/worker-1..stdout @@ -6,7 +6,7 @@ cluster_set_option, testinterval, [data=broker::data{60000000000ns}], ../configf cluster_set_option, test_set, [data=broker::data{{-}}], ../configfile cluster_set_option, testaddr, [data=broker::data{2607:f8b0:4005:801::200e}], ../configfile cluster_set_option, testenum, [data=broker::data{Conn::LOG}], ../configfile -cluster_set_option, test_vector, [data=broker::data{[1, 2, 3, 4, 5, 6]}], ../configfile +cluster_set_option, test_vector, [data=broker::data{(1, 2, 3, 4, 5, 6)}], ../configfile cluster_set_option, testbool, [data=broker::data{F}], ../configfile cluster_set_option, testcount, [data=broker::data{2}], ../configfile cluster_set_option, test_set_full, [data=broker::data{{1, 3, 4, 5, 6, 7}}], ../configfile diff --git a/testing/btest/Baseline/scripts.base.frameworks.config.read_config_cluster/worker-2..stdout b/testing/btest/Baseline/scripts.base.frameworks.config.read_config_cluster/worker-2..stdout index 08b60346e3..9484b3d205 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.config.read_config_cluster/worker-2..stdout +++ b/testing/btest/Baseline/scripts.base.frameworks.config.read_config_cluster/worker-2..stdout @@ -6,7 +6,7 @@ cluster_set_option, testinterval, [data=broker::data{60000000000ns}], ../configf cluster_set_option, test_set, [data=broker::data{{-}}], ../configfile cluster_set_option, testaddr, [data=broker::data{2607:f8b0:4005:801::200e}], ../configfile cluster_set_option, testenum, [data=broker::data{Conn::LOG}], ../configfile -cluster_set_option, test_vector, [data=broker::data{[1, 2, 3, 4, 5, 6]}], ../configfile +cluster_set_option, test_vector, [data=broker::data{(1, 2, 3, 4, 5, 6)}], ../configfile 
cluster_set_option, testbool, [data=broker::data{F}], ../configfile cluster_set_option, testcount, [data=broker::data{2}], ../configfile cluster_set_option, test_set_full, [data=broker::data{{1, 3, 4, 5, 6, 7}}], ../configfile diff --git a/testing/btest/Baseline/scripts.base.frameworks.config.several-files/bro.config.log b/testing/btest/Baseline/scripts.base.frameworks.config.several-files/zeek.config.log similarity index 100% rename from testing/btest/Baseline/scripts.base.frameworks.config.several-files/bro.config.log rename to testing/btest/Baseline/scripts.base.frameworks.config.several-files/zeek.config.log diff --git a/testing/btest/Baseline/scripts.base.frameworks.config.updates/bro.config.log b/testing/btest/Baseline/scripts.base.frameworks.config.updates/zeek.config.log similarity index 100% rename from testing/btest/Baseline/scripts.base.frameworks.config.updates/bro.config.log rename to testing/btest/Baseline/scripts.base.frameworks.config.updates/zeek.config.log diff --git a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.bifs.set_timeout_interval/bro..stdout b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.bifs.set_timeout_interval/zeek..stdout similarity index 100% rename from testing/btest/Baseline/scripts.base.frameworks.file-analysis.bifs.set_timeout_interval/bro..stdout rename to testing/btest/Baseline/scripts.base.frameworks.file-analysis.bifs.set_timeout_interval/zeek..stdout diff --git a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.input.basic/bro..stdout b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.input.basic/zeek..stdout similarity index 100% rename from testing/btest/Baseline/scripts.base.frameworks.file-analysis.input.basic/bro..stdout rename to testing/btest/Baseline/scripts.base.frameworks.file-analysis.input.basic/zeek..stdout diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.bad_patterns/.stderr b/testing/btest/Baseline/scripts.base.frameworks.input.bad_patterns/.stderr new file mode 100644 index 0000000000..e0a7be2cc3 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.bad_patterns/.stderr @@ -0,0 +1,9 @@ +error: input.log/Input::READER_ASCII: String '/cat/sss' contained no parseable pattern. +warning: input.log/Input::READER_ASCII: Could not convert line '2 /cat/sss' of input.log to Val. Ignoring line. +error: input.log/Input::READER_ASCII: String '/foo|bar' contained no parseable pattern. +warning: input.log/Input::READER_ASCII: Could not convert line '3 /foo|bar' of input.log to Val. Ignoring line. +error: input.log/Input::READER_ASCII: String 'this is not a pattern' contained no parseable pattern. +warning: input.log/Input::READER_ASCII: Could not convert line '4 this is not a pattern' of input.log to Val. Ignoring line. +error: input.log/Input::READER_ASCII: String '/5' contained no parseable pattern. +warning: input.log/Input::READER_ASCII: Could not convert line '5 /5' of input.log to Val. Ignoring line. 
+received termination signal diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.errors/.stderr b/testing/btest/Baseline/scripts.base.frameworks.input.errors/.stderr index 238c9a1495..ec83be310b 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.errors/.stderr +++ b/testing/btest/Baseline/scripts.base.frameworks.input.errors/.stderr @@ -38,4 +38,5 @@ error: Input stream error2: Error event's first attribute must be of type Input: error: Input stream error3: Error event's first attribute must be of type Input::EventDescription error: Input stream error4: Error event's second attribute must be of type string error: Input stream error5: Error event's third attribute must be of type Reporter::Level +error: Input stream error6: 'destination' field is a table, but 'val' field is not provided (did you mean to use a set instead of a table?) received termination signal diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/.stderrwithoutfirstline b/testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/.stderrwithoutfirstline index d388e3f406..32e928a177 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/.stderrwithoutfirstline +++ b/testing/btest/Baseline/scripts.base.frameworks.input.invalidnumbers/.stderrwithoutfirstline @@ -1,8 +1,8 @@ warning: ../input.log/Input::READER_ASCII: Number '12129223372036854775800' out of supported range. -warning: ../input.log/Input::READER_ASCII: Could not convert line '12129223372036854775800 121218446744073709551612' to Val. Ignoring line. +warning: ../input.log/Input::READER_ASCII: Could not convert line '12129223372036854775800 121218446744073709551612' of ../input.log to Val. Ignoring line. warning: ../input.log/Input::READER_ASCII: Number '9223372036854775801TEXTHERE' contained non-numeric trailing characters. Ignored trailing characters 'TEXTHERE' warning: ../input.log/Input::READER_ASCII: Number '1Justtext' contained non-numeric trailing characters. Ignored trailing characters 'Justtext' warning: ../input.log/Input::READER_ASCII: String 'Justtext' contained no parseable number -warning: ../input.log/Input::READER_ASCII: Could not convert line 'Justtext 1' to Val. Ignoring line. +warning: ../input.log/Input::READER_ASCII: Could not convert line 'Justtext 1' of ../input.log to Val. Ignoring line. received termination signal >>> diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.invalidset/.stderrwithoutfirstline b/testing/btest/Baseline/scripts.base.frameworks.input.invalidset/.stderrwithoutfirstline index 69855535cf..de0fcb462f 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.invalidset/.stderrwithoutfirstline +++ b/testing/btest/Baseline/scripts.base.frameworks.input.invalidset/.stderrwithoutfirstline @@ -1,8 +1,8 @@ warning: ../input.log/Input::READER_ASCII: Invalid value for subnet: 127.0.0.1 warning: ../input.log/Input::READER_ASCII: Error while reading set or vector -warning: ../input.log/Input::READER_ASCII: Could not convert line 'name 127.0.0.1' to Val. Ignoring line. +warning: ../input.log/Input::READER_ASCII: Could not convert line 'name 127.0.0.1' of ../input.log to Val. Ignoring line. warning: ../input.log/Input::READER_ASCII: Invalid value for subnet: 127.0.0.1 warning: ../input.log/Input::READER_ASCII: Error while reading set or vector -warning: ../input.log/Input::READER_ASCII: Could not convert line 'name 127.0.0.1' to Val. Ignoring line. 
+warning: ../input.log/Input::READER_ASCII: Could not convert line 'name 127.0.0.1' of ../input.log to Val. Ignoring line. received termination signal >>> diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.invalidset/out b/testing/btest/Baseline/scripts.base.frameworks.input.invalidset/out index 80359cc005..1930cacc9c 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.invalidset/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.invalidset/out @@ -1,12 +1,12 @@ TableErrorEvent, Invalid value for subnet: 127.0.0.1, Reporter::WARNING TableErrorEvent, Error while reading set or vector, Reporter::WARNING -TableErrorEvent, Could not convert line 'name\x09127.0.0.1' to Val. Ignoring line., Reporter::WARNING +TableErrorEvent, Could not convert line 'name\x09127.0.0.1' of ../input.log to Val. Ignoring line., Reporter::WARNING Event, [s={ }] EventErrorEvent, Invalid value for subnet: 127.0.0.1, Reporter::WARNING EventErrorEvent, Error while reading set or vector, Reporter::WARNING -EventErrorEvent, Could not convert line 'name\x09127.0.0.1' to Val. Ignoring line., Reporter::WARNING +EventErrorEvent, Could not convert line 'name\x09127.0.0.1' of ../input.log to Val. Ignoring line., Reporter::WARNING { [name] = [s={ diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.invalidtext/.stderrwithoutfirstline b/testing/btest/Baseline/scripts.base.frameworks.input.invalidtext/.stderrwithoutfirstline index 04f43b38bb..56cc5181b7 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.invalidtext/.stderrwithoutfirstline +++ b/testing/btest/Baseline/scripts.base.frameworks.input.invalidtext/.stderrwithoutfirstline @@ -1,6 +1,6 @@ warning: ../input.log/Input::READER_ASCII: String 'l' contained no parseable number -warning: ../input.log/Input::READER_ASCII: Could not convert line ' l' to Val. Ignoring line. +warning: ../input.log/Input::READER_ASCII: Could not convert line ' l' of ../input.log to Val. Ignoring line. warning: ../input.log/Input::READER_ASCII: String 'l' contained no parseable number -warning: ../input.log/Input::READER_ASCII: Could not convert line ' l' to Val. Ignoring line. +warning: ../input.log/Input::READER_ASCII: Could not convert line ' l' of ../input.log to Val. Ignoring line. received termination signal >>> diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.invalidtext/out b/testing/btest/Baseline/scripts.base.frameworks.input.invalidtext/out index 6bcb395d16..7dd04f71b3 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.input.invalidtext/out +++ b/testing/btest/Baseline/scripts.base.frameworks.input.invalidtext/out @@ -1,8 +1,8 @@ TableErrorEvent, String 'l' contained no parseable number, Reporter::WARNING -TableErrorEvent, Could not convert line '\x09l' to Val. Ignoring line., Reporter::WARNING +TableErrorEvent, Could not convert line '\x09l' of ../input.log to Val. Ignoring line., Reporter::WARNING +EventErrorEvent, String 'l' contained no parseable number, Reporter::WARNING +EventErrorEvent, Could not convert line '\x09l' of ../input.log to Val. Ignoring line., Reporter::WARNING +Event, [c=5] { [] = [c=5] } -EventErrorEvent, String 'l' contained no parseable number, Reporter::WARNING -EventErrorEvent, Could not convert line '\x09l' to Val. 
Ignoring line., Reporter::WARNING -Event, [c=5] diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.missing-enum/bro..stderr b/testing/btest/Baseline/scripts.base.frameworks.input.missing-enum/zeek..stderr similarity index 100% rename from testing/btest/Baseline/scripts.base.frameworks.input.missing-enum/bro..stderr rename to testing/btest/Baseline/scripts.base.frameworks.input.missing-enum/zeek..stderr diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.missing-enum/bro..stdout b/testing/btest/Baseline/scripts.base.frameworks.input.missing-enum/zeek..stdout similarity index 100% rename from testing/btest/Baseline/scripts.base.frameworks.input.missing-enum/bro..stdout rename to testing/btest/Baseline/scripts.base.frameworks.input.missing-enum/zeek..stdout diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.missing-file-initially/bro..stderr b/testing/btest/Baseline/scripts.base.frameworks.input.missing-file-initially/zeek..stderr similarity index 100% rename from testing/btest/Baseline/scripts.base.frameworks.input.missing-file-initially/bro..stderr rename to testing/btest/Baseline/scripts.base.frameworks.input.missing-file-initially/zeek..stderr diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.missing-file-initially/bro..stdout b/testing/btest/Baseline/scripts.base.frameworks.input.missing-file-initially/zeek..stdout similarity index 100% rename from testing/btest/Baseline/scripts.base.frameworks.input.missing-file-initially/bro..stdout rename to testing/btest/Baseline/scripts.base.frameworks.input.missing-file-initially/zeek..stdout diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.missing-file/bro..stderr b/testing/btest/Baseline/scripts.base.frameworks.input.missing-file/zeek..stderr similarity index 100% rename from testing/btest/Baseline/scripts.base.frameworks.input.missing-file/bro..stderr rename to testing/btest/Baseline/scripts.base.frameworks.input.missing-file/zeek..stderr diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.patterns/out b/testing/btest/Baseline/scripts.base.frameworks.input.patterns/out new file mode 100644 index 0000000000..9852d0d5d5 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.input.patterns/out @@ -0,0 +1,9 @@ +T +F +T +{ +[2] = [p=/^?(cat)$?/], +[4] = [p=/^?(^oob)$?/], +[1] = [p=/^?(dog)$?/], +[3] = [p=/^?(foo|bar)$?/] +} diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.port-embedded/bro..stderr b/testing/btest/Baseline/scripts.base.frameworks.input.port-embedded/zeek..stderr similarity index 100% rename from testing/btest/Baseline/scripts.base.frameworks.input.port-embedded/bro..stderr rename to testing/btest/Baseline/scripts.base.frameworks.input.port-embedded/zeek..stderr diff --git a/testing/btest/Baseline/scripts.base.frameworks.input.port-embedded/bro..stdout b/testing/btest/Baseline/scripts.base.frameworks.input.port-embedded/zeek..stdout similarity index 100% rename from testing/btest/Baseline/scripts.base.frameworks.input.port-embedded/bro..stdout rename to testing/btest/Baseline/scripts.base.frameworks.input.port-embedded/zeek..stdout diff --git a/testing/btest/Baseline/scripts.base.frameworks.intel.expire-item/output b/testing/btest/Baseline/scripts.base.frameworks.intel.expire-item/output index 78422499cf..a4878b7cc4 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.intel.expire-item/output +++ b/testing/btest/Baseline/scripts.base.frameworks.intel.expire-item/output @@ -3,15 +3,15 @@ #empty_field (empty) 
#unset_field - #path intel -#open 2018-04-27-23-53-04 +#open 2019-06-07-02-20-05 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p seen.indicator seen.indicator_type seen.where seen.node matched sources fuid file_mime_type file_desc #types time string addr port addr port string enum enum string set[enum] set[string] string string string -1524873184.861542 - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE bro Intel::ADDR source1 - - - -1524873187.913197 - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE bro Intel::ADDR source1 - - - -1524873190.976201 - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE bro Intel::ADDR source1,source2 - - - -1524873194.052686 - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE bro Intel::ADDR source1,source2 - - - -1524873197.128942 - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE bro Intel::ADDR source1,source2 - - - -#close 2018-04-27-23-53-20 +1559874005.130930 - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE zeek Intel::ADDR source1 - - - +1559874008.152069 - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE zeek Intel::ADDR source1 - - - +1559874011.172813 - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE zeek Intel::ADDR source1,source2 - - - +1559874014.190374 - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE zeek Intel::ADDR source1,source2 - - - +1559874017.207215 - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE zeek Intel::ADDR source1,source2 - - - +#close 2019-06-07-02-20-20 -- Run 1 -- Trigger: 1.2.3.4 Seen: 1.2.3.4 diff --git a/testing/btest/Baseline/scripts.base.frameworks.intel.filter-item/broproc.intel.log b/testing/btest/Baseline/scripts.base.frameworks.intel.filter-item/broproc.intel.log deleted file mode 100644 index dfe45974c1..0000000000 --- a/testing/btest/Baseline/scripts.base.frameworks.intel.filter-item/broproc.intel.log +++ /dev/null @@ -1,10 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path intel -#open 2019-03-24-20-29-18 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p seen.indicator seen.indicator_type seen.where seen.node matched sources fuid file_mime_type file_desc -#types time string addr port addr port string enum enum string set[enum] set[string] string string string -1553459358.205227 - - - - - 1.2.3.42 Intel::ADDR SOMEWHERE bro Intel::ADDR source1 - - - -#close 2019-03-24-20-29-18 diff --git a/testing/btest/Baseline/scripts.base.frameworks.intel.filter-item/zeekproc.intel.log b/testing/btest/Baseline/scripts.base.frameworks.intel.filter-item/zeekproc.intel.log new file mode 100644 index 0000000000..ba8d9f449e --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.intel.filter-item/zeekproc.intel.log @@ -0,0 +1,10 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path intel +#open 2019-06-07-02-27-51 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p seen.indicator seen.indicator_type seen.where seen.node matched sources fuid file_mime_type file_desc +#types time string addr port addr port string enum enum string set[enum] set[string] string string string +1559874471.197669 - - - - - 1.2.3.42 Intel::ADDR SOMEWHERE zeek Intel::ADDR source1 - - - +#close 2019-06-07-02-27-51 diff --git a/testing/btest/Baseline/scripts.base.frameworks.intel.input-and-match/broproc.intel.log b/testing/btest/Baseline/scripts.base.frameworks.intel.input-and-match/broproc.intel.log deleted file mode 100644 index 7c29bb659e..0000000000 --- a/testing/btest/Baseline/scripts.base.frameworks.intel.input-and-match/broproc.intel.log +++ /dev/null @@ -1,11 +0,0 @@ -#separator \x09 -#set_separator , -#empty_field (empty) -#unset_field - -#path intel -#open 
2016-06-15-19-12-26 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p seen.indicator seen.indicator_type seen.where seen.node matched sources fuid file_mime_type file_desc -#types time string addr port addr port string enum enum string set[enum] set[string] string string string -1466017946.413077 - - - - - e@mail.com Intel::EMAIL SOMEWHERE bro Intel::EMAIL source1 - - - -1466017946.413077 - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE bro Intel::ADDR source1 - - - -#close 2016-06-15-19-12-26 diff --git a/testing/btest/Baseline/scripts.base.frameworks.intel.input-and-match/zeekproc.intel.log b/testing/btest/Baseline/scripts.base.frameworks.intel.input-and-match/zeekproc.intel.log new file mode 100644 index 0000000000..50c041efda --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.intel.input-and-match/zeekproc.intel.log @@ -0,0 +1,11 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path intel +#open 2019-06-07-02-27-51 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p seen.indicator seen.indicator_type seen.where seen.node matched sources fuid file_mime_type file_desc +#types time string addr port addr port string enum enum string set[enum] set[string] string string string +1559874471.206651 - - - - - e@mail.com Intel::EMAIL SOMEWHERE zeek Intel::EMAIL source1 - - - +1559874471.206651 - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE zeek Intel::ADDR source1 - - - +#close 2019-06-07-02-27-51 diff --git a/testing/btest/Baseline/scripts.base.frameworks.intel.match-subnet/output b/testing/btest/Baseline/scripts.base.frameworks.intel.match-subnet/output index d8c2755fe4..c36418c477 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.intel.match-subnet/output +++ b/testing/btest/Baseline/scripts.base.frameworks.intel.match-subnet/output @@ -3,21 +3,21 @@ #empty_field (empty) #unset_field - #path intel -#open 2016-08-05-13-13-14 +#open 2019-06-07-02-20-05 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p seen.indicator seen.indicator_type seen.where seen.node matched sources fuid file_mime_type file_desc #types time string addr port addr port string enum enum string set[enum] set[string] string string string -1470402794.307931 - - - - - 192.168.1.1 Intel::ADDR SOMEWHERE bro Intel::ADDR source1 - - - -1470402794.307931 - - - - - 192.168.2.1 Intel::ADDR SOMEWHERE bro Intel::SUBNET source1 - - - -1470402794.307931 - - - - - 192.168.142.1 Intel::ADDR SOMEWHERE bro Intel::SUBNET,Intel::ADDR source1 - - - -#close 2016-08-05-13-13-14 +1559874004.952411 - - - - - 192.168.1.1 Intel::ADDR SOMEWHERE zeek Intel::ADDR source1 - - - +1559874004.952411 - - - - - 192.168.2.1 Intel::ADDR SOMEWHERE zeek Intel::SUBNET source1 - - - +1559874004.952411 - - - - - 192.168.142.1 Intel::ADDR SOMEWHERE zeek Intel::SUBNET,Intel::ADDR source1 - - - +#close 2019-06-07-02-20-05 -Seen: [indicator=192.168.1.1, indicator_type=Intel::ADDR, host=192.168.1.1, where=SOMEWHERE, node=bro, conn=, uid=, f=, fuid=] +Seen: [indicator=192.168.1.1, indicator_type=Intel::ADDR, host=192.168.1.1, where=SOMEWHERE, node=zeek, conn=, uid=, f=, fuid=] Item: [indicator=192.168.1.1, indicator_type=Intel::ADDR, meta=[source=source1, desc=this host is just plain baaad, url=http://some-data-distributor.com/1]] -Seen: [indicator=192.168.2.1, indicator_type=Intel::ADDR, host=192.168.2.1, where=SOMEWHERE, node=bro, conn=, uid=, f=, fuid=] +Seen: [indicator=192.168.2.1, indicator_type=Intel::ADDR, host=192.168.2.1, where=SOMEWHERE, node=zeek, conn=, uid=, f=, fuid=] Item: [indicator=192.168.2.0/24, 
indicator_type=Intel::SUBNET, meta=[source=source1, desc=this subnetwork is just plain baaad, url=http://some-data-distributor.com/2]] -Seen: [indicator=192.168.142.1, indicator_type=Intel::ADDR, host=192.168.142.1, where=SOMEWHERE, node=bro, conn=, uid=, f=, fuid=] +Seen: [indicator=192.168.142.1, indicator_type=Intel::ADDR, host=192.168.142.1, where=SOMEWHERE, node=zeek, conn=, uid=, f=, fuid=] Item: [indicator=192.168.142.1, indicator_type=Intel::ADDR, meta=[source=source1, desc=this host is just plain baaad, url=http://some-data-distributor.com/3]] Item: [indicator=192.168.128.0/18, indicator_type=Intel::SUBNET, meta=[source=source1, desc=this subnetwork might be baaad, url=http://some-data-distributor.com/5]] Item: [indicator=192.168.142.0/26, indicator_type=Intel::SUBNET, meta=[source=source1, desc=this subnetwork is inside, url=http://some-data-distributor.com/4]] diff --git a/testing/btest/Baseline/scripts.base.frameworks.intel.remove-non-existing/output b/testing/btest/Baseline/scripts.base.frameworks.intel.remove-non-existing/output index 90d390518f..c6dec0f9aa 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.intel.remove-non-existing/output +++ b/testing/btest/Baseline/scripts.base.frameworks.intel.remove-non-existing/output @@ -6,6 +6,6 @@ #open 2019-03-24-20-20-10 #fields ts level message location #types time enum string string -0.000000 Reporter::INFO Tried to remove non-existing item '192.168.1.1' (Intel::ADDR). /home/jgras/devel/zeek/scripts/base/frameworks/intel/./main.bro, lines 563-564 +0.000000 Reporter::INFO Tried to remove non-existing item '192.168.1.1' (Intel::ADDR). /home/jgras/devel/zeek/scripts/base/frameworks/intel/./main.zeek, lines 563-564 0.000000 Reporter::INFO received termination signal (empty) #close 2019-03-24-20-20-10 diff --git a/testing/btest/Baseline/scripts.base.frameworks.intel.updated-match/output b/testing/btest/Baseline/scripts.base.frameworks.intel.updated-match/output index c6f6e14fdd..1e065f2673 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.intel.updated-match/output +++ b/testing/btest/Baseline/scripts.base.frameworks.intel.updated-match/output @@ -3,23 +3,23 @@ #empty_field (empty) #unset_field - #path intel -#open 2017-12-21-02-28-27 +#open 2019-06-07-02-20-04 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p seen.indicator seen.indicator_type seen.where seen.node matched sources fuid file_mime_type file_desc #types time string addr port addr port string enum enum string set[enum] set[string] string string string -1513823307.655824 - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE bro Intel::ADDR source1 - - - -1513823310.680693 - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE bro Intel::ADDR source1,source2 - - - -1513823310.680693 - - - - - 4.3.2.1 Intel::ADDR SOMEWHERE bro Intel::ADDR source2 - - - -1513823313.736551 - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE bro Intel::ADDR source1,source2 - - - -1513823313.736551 - - - - - 4.3.2.1 Intel::ADDR SOMEWHERE bro Intel::ADDR source2 - - - -#close 2017-12-21-02-28-33 +1559874004.005095 - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE zeek Intel::ADDR source1 - - - +1559874005.130958 - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE zeek Intel::ADDR source1,source2 - - - +1559874005.130958 - - - - - 4.3.2.1 Intel::ADDR SOMEWHERE zeek Intel::ADDR source2 - - - +1559874006.142023 - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE zeek Intel::ADDR source1,source2 - - - +1559874006.142023 - - - - - 4.3.2.1 Intel::ADDR SOMEWHERE zeek Intel::ADDR source2 - - - +#close 2019-06-07-02-20-06 #separator \x09 #set_separator , 
#empty_field (empty) #unset_field - #path notice -#open 2017-12-21-02-28-33 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude -#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval bool string string string double double -1513823313.736551 - - - - - - - - - Intel::Notice Intel hit on 1.2.3.4 at SOMEWHERE 1.2.3.4 - - - - - Notice::ACTION_LOG 3600.000000 F - - - - - -1513823313.736551 - - - - - - - - - Intel::Notice Intel hit on 4.3.2.1 at SOMEWHERE 4.3.2.1 - - - - - Notice::ACTION_LOG 3600.000000 F - - - - - -#close 2017-12-21-02-28-33 +#open 2019-06-07-02-20-06 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude +#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval string string string double double +1559874006.142023 - - - - - - - - - Intel::Notice Intel hit on 1.2.3.4 at SOMEWHERE 1.2.3.4 - - - - - Notice::ACTION_LOG 3600.000000 - - - - - +1559874006.142023 - - - - - - - - - Intel::Notice Intel hit on 4.3.2.1 at SOMEWHERE 4.3.2.1 - - - - - Notice::ACTION_LOG 3600.000000 - - - - - +#close 2019-06-07-02-20-06 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.field-extension-cluster-error/manager-reporter.log b/testing/btest/Baseline/scripts.base.frameworks.logging.field-extension-cluster-error/manager-reporter.log index f4b240d619..a58380f26c 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.field-extension-cluster-error/manager-reporter.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.field-extension-cluster-error/manager-reporter.log @@ -1,2 +1,2 @@ -1535139819.649067 Reporter::INFO qux /home/jon/projects/bro/bro/testing/btest/.tmp/scripts.base.frameworks.logging.field-extension-cluster-error/field-extension-cluster-error.bro, line XX -1535139821.906059 bah manager-1 0.000000 Reporter::INFO qux /home/jon/projects/bro/bro/testing/btest/.tmp/scripts.base.frameworks.logging.field-extension-cluster-error/field-extension-cluster-error.bro, line XX +1535139819.649067 Reporter::INFO qux /home/jon/projects/bro/bro/testing/btest/.tmp/scripts.base.frameworks.logging.field-extension-cluster-error/field-extension-cluster-error.zeek, line XX +1535139821.906059 bah manager-1 0.000000 Reporter::INFO qux /home/jon/projects/bro/bro/testing/btest/.tmp/scripts.base.frameworks.logging.field-extension-cluster-error/field-extension-cluster-error.zeek, line XX diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.field-extension-optional/conn.log b/testing/btest/Baseline/scripts.base.frameworks.logging.field-extension-optional/conn.log index 867ba696d8..63c348ce42 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.field-extension-optional/conn.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.field-extension-optional/conn.log @@ -3,41 +3,41 @@ #empty_field (empty) #unset_field - #path conn -#open 2016-08-10-20-27-56 +#open 2019-06-07-02-20-04 #fields _write_ts _system_name _undefined_string ts uid id.orig_h id.orig_p 
id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig local_resp missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string string time string addr port addr port enum string interval count count string bool bool count string count count count count set[string] -1300475173.475401 bro - 1300475173.475401 C3eiCBGOLw3VtHfOj 173.192.163.128 80 141.142.220.235 6705 tcp - - - - OTH - - 0 H 1 48 0 0 - -1300475173.475401 bro - 1300475173.475401 CmES5u32sYpV7JYN 141.142.220.118 49999 208.80.152.3 80 tcp - 0.220961 1137 733 S1 - - 0 ShADad 6 1457 4 949 - -1300475173.475401 bro - 1300475173.475401 CHhAvVGS1DHFjwGM9 141.142.220.118 48649 208.80.152.118 80 tcp - 0.119905 525 232 S1 - - 0 ShADad 4 741 3 396 - -1300475173.475401 bro - 1300475173.475401 ClEkJM2Vm5giqnMf4h 141.142.220.118 49997 208.80.152.3 80 tcp - 0.219720 1125 734 S1 - - 0 ShADad 6 1445 4 950 - -1300475173.475401 bro - 1300475173.475401 C4J4Th3PJpwUYZZ6gc 141.142.220.118 49996 208.80.152.3 80 tcp - 0.218501 1171 733 S1 - - 0 ShADad 6 1491 4 949 - -1300475173.475401 bro - 1300475173.475401 CwjjYJ2WqgTbAqiHl6 141.142.220.118 35634 208.80.152.2 80 tcp - 0.061329 463 350 OTH - - 0 DdA 2 567 1 402 - -1300475173.475401 bro - 1300475173.475401 C37jN32gN3y3AZzyf6 141.142.220.118 35642 208.80.152.2 80 tcp - 0.120041 534 412 S1 - - 0 ShADad 4 750 3 576 - -1300475173.475401 bro - 1300475173.475401 CtPZjS20MLrsMUOJi2 141.142.220.118 49998 208.80.152.3 80 tcp - 0.215893 1130 734 S1 - - 0 ShADad 6 1450 4 950 - -1300475173.475401 bro - 1300475173.475401 CUM0KZ3MLUfNB0cl11 141.142.220.118 50000 208.80.152.3 80 tcp - 0.229603 1148 734 S1 - - 0 ShADad 6 1468 4 950 - -1300475173.475401 bro - 1300475173.475401 CP5puj4I8PtEU4qzYg 141.142.220.118 50001 208.80.152.3 80 tcp - 0.227284 1178 734 S1 - - 0 ShADad 6 1498 4 950 - -1300475173.475401 bro - 1300475173.475401 C0LAHyvtKSQHyJxIl 141.142.220.118 43927 141.142.2.2 53 udp - 0.000435 38 89 SF - - 0 Dd 1 66 1 117 - -1300475173.475401 bro - 1300475173.475401 CFLRIC3zaTU1loLGxh 141.142.220.118 56056 141.142.2.2 53 udp - 0.000402 36 131 SF - - 0 Dd 1 64 1 159 - -1300475173.475401 bro - 1300475173.475401 C9rXSW3KSpTYvPrlI1 141.142.220.118 55092 141.142.2.2 53 udp - 0.000374 36 198 SF - - 0 Dd 1 64 1 226 - -1300475173.475401 bro - 1300475173.475401 Ck51lg1bScffFj34Ri 141.142.220.118 59714 141.142.2.2 53 udp - 0.000375 38 183 SF - - 0 Dd 1 66 1 211 - -1300475173.475401 bro - 1300475173.475401 C9mvWx3ezztgzcexV7 141.142.220.50 5353 224.0.0.251 5353 udp - - - - S0 - - 0 D 1 179 0 0 - -1300475173.475401 bro - 1300475173.475401 CNnMIj2QSd84NKf7U3 141.142.220.118 40526 141.142.2.2 53 udp - 0.000392 38 183 SF - - 0 Dd 1 66 1 211 - -1300475173.475401 bro - 1300475173.475401 C7fIlMZDuRiqjpYbb 141.142.220.118 48128 141.142.2.2 53 udp - 0.000423 38 183 SF - - 0 Dd 1 66 1 211 - -1300475173.475401 bro - 1300475173.475401 CykQaM33ztNt0csB9a 141.142.220.118 48479 141.142.2.2 53 udp - 0.000317 52 99 SF - - 0 Dd 1 80 1 127 - -1300475173.475401 bro - 1300475173.475401 CtxTCR2Yer0FR1tIBg 141.142.220.44 5353 224.0.0.251 5353 udp - - - - S0 - - 0 D 1 85 0 0 - -1300475173.475401 bro - 1300475173.475401 CpmdRlaUoJLN3uIRa 141.142.220.226 137 141.142.220.255 137 udp - 2.613017 350 0 S0 - - 0 D 7 546 0 0 - -1300475173.475401 bro - 1300475173.475401 C1Xkzz2MaGtLrc1Tla 141.142.220.118 59746 141.142.2.2 53 udp - 0.000421 38 183 SF - - 0 Dd 1 66 1 211 - -1300475173.475401 bro - 1300475173.475401 CqlVyW1YwZ15RhTBc4 141.142.220.118 59816 141.142.2.2 53 udp - 0.000343 52 
99 SF - - 0 Dd 1 80 1 127 - -1300475173.475401 bro - 1300475173.475401 CLNN1k2QMum1aexUK7 fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp - - - - S0 - - 0 D 1 199 0 0 - -1300475173.475401 bro - 1300475173.475401 CBA8792iHmnhPLksKa 141.142.220.226 55671 224.0.0.252 5355 udp - 0.099849 66 0 S0 - - 0 D 2 122 0 0 - -1300475173.475401 bro - 1300475173.475401 CGLPPc35OzDQij1XX8 141.142.220.238 56641 141.142.220.255 137 udp - - - - S0 - - 0 D 1 78 0 0 - -1300475173.475401 bro - 1300475173.475401 CiyBAq1bBLNaTiTAc 141.142.220.118 38911 141.142.2.2 53 udp - 0.000335 52 99 SF - - 0 Dd 1 80 1 127 - -1300475173.475401 bro - 1300475173.475401 CFSwNi4CNGxcuffo49 fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp - 0.100096 66 0 S0 - - 0 D 2 162 0 0 - -1300475173.475401 bro - 1300475173.475401 Cipfzj1BEnhejw8cGf 141.142.220.202 5353 224.0.0.251 5353 udp - - - - S0 - - 0 D 1 73 0 0 - -1300475173.475401 bro - 1300475173.475401 CV5WJ42jPYbNW9JNWf 141.142.220.118 37676 141.142.2.2 53 udp - 0.000420 52 99 SF - - 0 Dd 1 80 1 127 - -1300475173.475401 bro - 1300475173.475401 CPhDKt12KQPUVbQz06 141.142.220.226 55131 224.0.0.252 5355 udp - 0.100021 66 0 S0 - - 0 D 2 122 0 0 - -1300475173.475401 bro - 1300475173.475401 CAnFrb2Cvxr5T7quOc fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp - 0.099801 66 0 S0 - - 0 D 2 162 0 0 - -1300475173.475401 bro - 1300475173.475401 C8rquZ3DjgNW06JGLl 141.142.220.118 45000 141.142.2.2 53 udp - 0.000384 38 89 SF - - 0 Dd 1 66 1 117 - -1300475173.475401 bro - 1300475173.475401 CzrZOtXqhwwndQva3 141.142.220.118 32902 141.142.2.2 53 udp - 0.000317 38 89 SF - - 0 Dd 1 66 1 117 - -1300475173.475401 bro - 1300475173.475401 CaGCc13FffXe6RkQl9 141.142.220.118 58206 141.142.2.2 53 udp - 0.000339 38 89 SF - - 0 Dd 1 66 1 117 - -#close 2016-08-10-20-27-56 +1300475173.475401 zeek - 1300475173.475401 C3eiCBGOLw3VtHfOj 173.192.163.128 80 141.142.220.235 6705 tcp - - - - OTH - - 0 H 1 48 0 0 - +1300475173.475401 zeek - 1300475173.475401 CmES5u32sYpV7JYN 141.142.220.118 49999 208.80.152.3 80 tcp - 0.220961 1137 733 S1 - - 0 ShADad 6 1457 4 949 - +1300475173.475401 zeek - 1300475173.475401 CHhAvVGS1DHFjwGM9 141.142.220.118 48649 208.80.152.118 80 tcp - 0.119905 525 232 S1 - - 0 ShADad 4 741 3 396 - +1300475173.475401 zeek - 1300475173.475401 ClEkJM2Vm5giqnMf4h 141.142.220.118 49997 208.80.152.3 80 tcp - 0.219720 1125 734 S1 - - 0 ShADad 6 1445 4 950 - +1300475173.475401 zeek - 1300475173.475401 C4J4Th3PJpwUYZZ6gc 141.142.220.118 49996 208.80.152.3 80 tcp - 0.218501 1171 733 S1 - - 0 ShADad 6 1491 4 949 - +1300475173.475401 zeek - 1300475173.475401 CwjjYJ2WqgTbAqiHl6 141.142.220.118 35634 208.80.152.2 80 tcp - 0.061329 463 350 OTH - - 0 DdA 2 567 1 402 - +1300475173.475401 zeek - 1300475173.475401 C37jN32gN3y3AZzyf6 141.142.220.118 35642 208.80.152.2 80 tcp - 0.120041 534 412 S1 - - 0 ShADad 4 750 3 576 - +1300475173.475401 zeek - 1300475173.475401 CtPZjS20MLrsMUOJi2 141.142.220.118 49998 208.80.152.3 80 tcp - 0.215893 1130 734 S1 - - 0 ShADad 6 1450 4 950 - +1300475173.475401 zeek - 1300475173.475401 CUM0KZ3MLUfNB0cl11 141.142.220.118 50000 208.80.152.3 80 tcp - 0.229603 1148 734 S1 - - 0 ShADad 6 1468 4 950 - +1300475173.475401 zeek - 1300475173.475401 CP5puj4I8PtEU4qzYg 141.142.220.118 50001 208.80.152.3 80 tcp - 0.227284 1178 734 S1 - - 0 ShADad 6 1498 4 950 - +1300475173.475401 zeek - 1300475173.475401 C0LAHyvtKSQHyJxIl 141.142.220.118 43927 141.142.2.2 53 udp - 0.000435 38 89 SF - - 0 Dd 1 66 1 117 - +1300475173.475401 zeek - 1300475173.475401 CFLRIC3zaTU1loLGxh 141.142.220.118 56056 
141.142.2.2 53 udp - 0.000402 36 131 SF - - 0 Dd 1 64 1 159 - +1300475173.475401 zeek - 1300475173.475401 C9rXSW3KSpTYvPrlI1 141.142.220.118 55092 141.142.2.2 53 udp - 0.000374 36 198 SF - - 0 Dd 1 64 1 226 - +1300475173.475401 zeek - 1300475173.475401 Ck51lg1bScffFj34Ri 141.142.220.118 59714 141.142.2.2 53 udp - 0.000375 38 183 SF - - 0 Dd 1 66 1 211 - +1300475173.475401 zeek - 1300475173.475401 C9mvWx3ezztgzcexV7 141.142.220.50 5353 224.0.0.251 5353 udp - - - - S0 - - 0 D 1 179 0 0 - +1300475173.475401 zeek - 1300475173.475401 CNnMIj2QSd84NKf7U3 141.142.220.118 40526 141.142.2.2 53 udp - 0.000392 38 183 SF - - 0 Dd 1 66 1 211 - +1300475173.475401 zeek - 1300475173.475401 C7fIlMZDuRiqjpYbb 141.142.220.118 48128 141.142.2.2 53 udp - 0.000423 38 183 SF - - 0 Dd 1 66 1 211 - +1300475173.475401 zeek - 1300475173.475401 CykQaM33ztNt0csB9a 141.142.220.118 48479 141.142.2.2 53 udp - 0.000317 52 99 SF - - 0 Dd 1 80 1 127 - +1300475173.475401 zeek - 1300475173.475401 CtxTCR2Yer0FR1tIBg 141.142.220.44 5353 224.0.0.251 5353 udp - - - - S0 - - 0 D 1 85 0 0 - +1300475173.475401 zeek - 1300475173.475401 CpmdRlaUoJLN3uIRa 141.142.220.226 137 141.142.220.255 137 udp - 2.613017 350 0 S0 - - 0 D 7 546 0 0 - +1300475173.475401 zeek - 1300475173.475401 C1Xkzz2MaGtLrc1Tla 141.142.220.118 59746 141.142.2.2 53 udp - 0.000421 38 183 SF - - 0 Dd 1 66 1 211 - +1300475173.475401 zeek - 1300475173.475401 CqlVyW1YwZ15RhTBc4 141.142.220.118 59816 141.142.2.2 53 udp - 0.000343 52 99 SF - - 0 Dd 1 80 1 127 - +1300475173.475401 zeek - 1300475173.475401 CLNN1k2QMum1aexUK7 fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp - - - - S0 - - 0 D 1 199 0 0 - +1300475173.475401 zeek - 1300475173.475401 CBA8792iHmnhPLksKa 141.142.220.226 55671 224.0.0.252 5355 udp - 0.099849 66 0 S0 - - 0 D 2 122 0 0 - +1300475173.475401 zeek - 1300475173.475401 CGLPPc35OzDQij1XX8 141.142.220.238 56641 141.142.220.255 137 udp - - - - S0 - - 0 D 1 78 0 0 - +1300475173.475401 zeek - 1300475173.475401 CiyBAq1bBLNaTiTAc 141.142.220.118 38911 141.142.2.2 53 udp - 0.000335 52 99 SF - - 0 Dd 1 80 1 127 - +1300475173.475401 zeek - 1300475173.475401 CFSwNi4CNGxcuffo49 fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp - 0.100096 66 0 S0 - - 0 D 2 162 0 0 - +1300475173.475401 zeek - 1300475173.475401 Cipfzj1BEnhejw8cGf 141.142.220.202 5353 224.0.0.251 5353 udp - - - - S0 - - 0 D 1 73 0 0 - +1300475173.475401 zeek - 1300475173.475401 CV5WJ42jPYbNW9JNWf 141.142.220.118 37676 141.142.2.2 53 udp - 0.000420 52 99 SF - - 0 Dd 1 80 1 127 - +1300475173.475401 zeek - 1300475173.475401 CPhDKt12KQPUVbQz06 141.142.220.226 55131 224.0.0.252 5355 udp - 0.100021 66 0 S0 - - 0 D 2 122 0 0 - +1300475173.475401 zeek - 1300475173.475401 CAnFrb2Cvxr5T7quOc fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp - 0.099801 66 0 S0 - - 0 D 2 162 0 0 - +1300475173.475401 zeek - 1300475173.475401 C8rquZ3DjgNW06JGLl 141.142.220.118 45000 141.142.2.2 53 udp - 0.000384 38 89 SF - - 0 Dd 1 66 1 117 - +1300475173.475401 zeek - 1300475173.475401 CzrZOtXqhwwndQva3 141.142.220.118 32902 141.142.2.2 53 udp - 0.000317 38 89 SF - - 0 Dd 1 66 1 117 - +1300475173.475401 zeek - 1300475173.475401 CaGCc13FffXe6RkQl9 141.142.220.118 58206 141.142.2.2 53 udp - 0.000339 38 89 SF - - 0 Dd 1 66 1 117 - +#close 2019-06-07-02-20-04 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.field-extension-table/.stderr b/testing/btest/Baseline/scripts.base.frameworks.logging.field-extension-table/.stderr index ff76d4ea54..5efd4bac43 100644 --- 
a/testing/btest/Baseline/scripts.base.frameworks.logging.field-extension-table/.stderr +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.field-extension-table/.stderr @@ -1,2 +1,2 @@ -error in /testing/btest/.tmp/scripts.base.frameworks.logging.field-extension-table/field-extension-table.bro, line 9: &log applied to a type that cannot be logged (&log) -error in /testing/btest/.tmp/scripts.base.frameworks.logging.field-extension-table/field-extension-table.bro, line 18: syntax error, at or near "{" +error in /testing/btest/.tmp/scripts.base.frameworks.logging.field-extension-table/field-extension-table.zeek, line 9: &log applied to a type that cannot be logged (&log) +error in /testing/btest/.tmp/scripts.base.frameworks.logging.field-extension-table/field-extension-table.zeek, line 18: syntax error, at or near "{" diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.field-extension/conn.log b/testing/btest/Baseline/scripts.base.frameworks.logging.field-extension/conn.log index 5d66623de7..6447a50a69 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.field-extension/conn.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.field-extension/conn.log @@ -3,41 +3,41 @@ #empty_field (empty) #unset_field - #path conn -#open 2016-08-10-17-45-11 +#open 2019-06-07-02-20-03 #fields _write_ts _stream _system_name ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig local_resp missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string string time string addr port addr port enum string interval count count string bool bool count string count count count count set[string] -1300475173.475401 conn bro 1300475169.780331 C3eiCBGOLw3VtHfOj 173.192.163.128 80 141.142.220.235 6705 tcp - - - - OTH - - 0 H 1 48 0 0 - -1300475173.475401 conn bro 1300475168.892913 CmES5u32sYpV7JYN 141.142.220.118 49999 208.80.152.3 80 tcp - 0.220961 1137 733 S1 - - 0 ShADad 6 1457 4 949 - -1300475173.475401 conn bro 1300475168.724007 CHhAvVGS1DHFjwGM9 141.142.220.118 48649 208.80.152.118 80 tcp - 0.119905 525 232 S1 - - 0 ShADad 4 741 3 396 - -1300475173.475401 conn bro 1300475168.855330 ClEkJM2Vm5giqnMf4h 141.142.220.118 49997 208.80.152.3 80 tcp - 0.219720 1125 734 S1 - - 0 ShADad 6 1445 4 950 - -1300475173.475401 conn bro 1300475168.855305 C4J4Th3PJpwUYZZ6gc 141.142.220.118 49996 208.80.152.3 80 tcp - 0.218501 1171 733 S1 - - 0 ShADad 6 1491 4 949 - -1300475173.475401 conn bro 1300475168.652003 CwjjYJ2WqgTbAqiHl6 141.142.220.118 35634 208.80.152.2 80 tcp - 0.061329 463 350 OTH - - 0 DdA 2 567 1 402 - -1300475173.475401 conn bro 1300475168.902635 C37jN32gN3y3AZzyf6 141.142.220.118 35642 208.80.152.2 80 tcp - 0.120041 534 412 S1 - - 0 ShADad 4 750 3 576 - -1300475173.475401 conn bro 1300475168.859163 CtPZjS20MLrsMUOJi2 141.142.220.118 49998 208.80.152.3 80 tcp - 0.215893 1130 734 S1 - - 0 ShADad 6 1450 4 950 - -1300475173.475401 conn bro 1300475168.892936 CUM0KZ3MLUfNB0cl11 141.142.220.118 50000 208.80.152.3 80 tcp - 0.229603 1148 734 S1 - - 0 ShADad 6 1468 4 950 - -1300475173.475401 conn bro 1300475168.895267 CP5puj4I8PtEU4qzYg 141.142.220.118 50001 208.80.152.3 80 tcp - 0.227284 1178 734 S1 - - 0 ShADad 6 1498 4 950 - -1300475173.475401 conn bro 1300475168.853899 C0LAHyvtKSQHyJxIl 141.142.220.118 43927 141.142.2.2 53 udp - 0.000435 38 89 SF - - 0 Dd 1 66 1 117 - -1300475173.475401 conn bro 1300475168.901749 CFLRIC3zaTU1loLGxh 141.142.220.118 56056 141.142.2.2 53 udp - 
0.000402 36 131 SF - - 0 Dd 1 64 1 159 - -1300475173.475401 conn bro 1300475168.902195 C9rXSW3KSpTYvPrlI1 141.142.220.118 55092 141.142.2.2 53 udp - 0.000374 36 198 SF - - 0 Dd 1 64 1 226 - -1300475173.475401 conn bro 1300475168.858713 Ck51lg1bScffFj34Ri 141.142.220.118 59714 141.142.2.2 53 udp - 0.000375 38 183 SF - - 0 Dd 1 66 1 211 - -1300475173.475401 conn bro 1300475167.099816 C9mvWx3ezztgzcexV7 141.142.220.50 5353 224.0.0.251 5353 udp - - - - S0 - - 0 D 1 179 0 0 - -1300475173.475401 conn bro 1300475168.854837 CNnMIj2QSd84NKf7U3 141.142.220.118 40526 141.142.2.2 53 udp - 0.000392 38 183 SF - - 0 Dd 1 66 1 211 - -1300475173.475401 conn bro 1300475168.894787 C7fIlMZDuRiqjpYbb 141.142.220.118 48128 141.142.2.2 53 udp - 0.000423 38 183 SF - - 0 Dd 1 66 1 211 - -1300475173.475401 conn bro 1300475168.894422 CykQaM33ztNt0csB9a 141.142.220.118 48479 141.142.2.2 53 udp - 0.000317 52 99 SF - - 0 Dd 1 80 1 127 - -1300475173.475401 conn bro 1300475169.899438 CtxTCR2Yer0FR1tIBg 141.142.220.44 5353 224.0.0.251 5353 udp - - - - S0 - - 0 D 1 85 0 0 - -1300475173.475401 conn bro 1300475170.862384 CpmdRlaUoJLN3uIRa 141.142.220.226 137 141.142.220.255 137 udp - 2.613017 350 0 S0 - - 0 D 7 546 0 0 - -1300475173.475401 conn bro 1300475168.892414 C1Xkzz2MaGtLrc1Tla 141.142.220.118 59746 141.142.2.2 53 udp - 0.000421 38 183 SF - - 0 Dd 1 66 1 211 - -1300475173.475401 conn bro 1300475168.858306 CqlVyW1YwZ15RhTBc4 141.142.220.118 59816 141.142.2.2 53 udp - 0.000343 52 99 SF - - 0 Dd 1 80 1 127 - -1300475173.475401 conn bro 1300475167.097012 CLNN1k2QMum1aexUK7 fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp - - - - S0 - - 0 D 1 199 0 0 - -1300475173.475401 conn bro 1300475173.117362 CBA8792iHmnhPLksKa 141.142.220.226 55671 224.0.0.252 5355 udp - 0.099849 66 0 S0 - - 0 D 2 122 0 0 - -1300475173.475401 conn bro 1300475173.153679 CGLPPc35OzDQij1XX8 141.142.220.238 56641 141.142.220.255 137 udp - - - - S0 - - 0 D 1 78 0 0 - -1300475173.475401 conn bro 1300475168.892037 CiyBAq1bBLNaTiTAc 141.142.220.118 38911 141.142.2.2 53 udp - 0.000335 52 99 SF - - 0 Dd 1 80 1 127 - -1300475173.475401 conn bro 1300475171.675372 CFSwNi4CNGxcuffo49 fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp - 0.100096 66 0 S0 - - 0 D 2 162 0 0 - -1300475173.475401 conn bro 1300475167.096535 Cipfzj1BEnhejw8cGf 141.142.220.202 5353 224.0.0.251 5353 udp - - - - S0 - - 0 D 1 73 0 0 - -1300475173.475401 conn bro 1300475168.854378 CV5WJ42jPYbNW9JNWf 141.142.220.118 37676 141.142.2.2 53 udp - 0.000420 52 99 SF - - 0 Dd 1 80 1 127 - -1300475173.475401 conn bro 1300475171.677081 CPhDKt12KQPUVbQz06 141.142.220.226 55131 224.0.0.252 5355 udp - 0.100021 66 0 S0 - - 0 D 2 122 0 0 - -1300475173.475401 conn bro 1300475173.116749 CAnFrb2Cvxr5T7quOc fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp - 0.099801 66 0 S0 - - 0 D 2 162 0 0 - -1300475173.475401 conn bro 1300475168.893988 C8rquZ3DjgNW06JGLl 141.142.220.118 45000 141.142.2.2 53 udp - 0.000384 38 89 SF - - 0 Dd 1 66 1 117 - -1300475173.475401 conn bro 1300475168.857956 CzrZOtXqhwwndQva3 141.142.220.118 32902 141.142.2.2 53 udp - 0.000317 38 89 SF - - 0 Dd 1 66 1 117 - -1300475173.475401 conn bro 1300475168.891644 CaGCc13FffXe6RkQl9 141.142.220.118 58206 141.142.2.2 53 udp - 0.000339 38 89 SF - - 0 Dd 1 66 1 117 - -#close 2016-08-10-17-45-11 +1300475173.475401 conn zeek 1300475169.780331 C3eiCBGOLw3VtHfOj 173.192.163.128 80 141.142.220.235 6705 tcp - - - - OTH - - 0 H 1 48 0 0 - +1300475173.475401 conn zeek 1300475168.892913 CmES5u32sYpV7JYN 141.142.220.118 49999 208.80.152.3 80 tcp - 0.220961 
1137 733 S1 - - 0 ShADad 6 1457 4 949 - +1300475173.475401 conn zeek 1300475168.724007 CHhAvVGS1DHFjwGM9 141.142.220.118 48649 208.80.152.118 80 tcp - 0.119905 525 232 S1 - - 0 ShADad 4 741 3 396 - +1300475173.475401 conn zeek 1300475168.855330 ClEkJM2Vm5giqnMf4h 141.142.220.118 49997 208.80.152.3 80 tcp - 0.219720 1125 734 S1 - - 0 ShADad 6 1445 4 950 - +1300475173.475401 conn zeek 1300475168.855305 C4J4Th3PJpwUYZZ6gc 141.142.220.118 49996 208.80.152.3 80 tcp - 0.218501 1171 733 S1 - - 0 ShADad 6 1491 4 949 - +1300475173.475401 conn zeek 1300475168.652003 CwjjYJ2WqgTbAqiHl6 141.142.220.118 35634 208.80.152.2 80 tcp - 0.061329 463 350 OTH - - 0 DdA 2 567 1 402 - +1300475173.475401 conn zeek 1300475168.902635 C37jN32gN3y3AZzyf6 141.142.220.118 35642 208.80.152.2 80 tcp - 0.120041 534 412 S1 - - 0 ShADad 4 750 3 576 - +1300475173.475401 conn zeek 1300475168.859163 CtPZjS20MLrsMUOJi2 141.142.220.118 49998 208.80.152.3 80 tcp - 0.215893 1130 734 S1 - - 0 ShADad 6 1450 4 950 - +1300475173.475401 conn zeek 1300475168.892936 CUM0KZ3MLUfNB0cl11 141.142.220.118 50000 208.80.152.3 80 tcp - 0.229603 1148 734 S1 - - 0 ShADad 6 1468 4 950 - +1300475173.475401 conn zeek 1300475168.895267 CP5puj4I8PtEU4qzYg 141.142.220.118 50001 208.80.152.3 80 tcp - 0.227284 1178 734 S1 - - 0 ShADad 6 1498 4 950 - +1300475173.475401 conn zeek 1300475168.853899 C0LAHyvtKSQHyJxIl 141.142.220.118 43927 141.142.2.2 53 udp - 0.000435 38 89 SF - - 0 Dd 1 66 1 117 - +1300475173.475401 conn zeek 1300475168.901749 CFLRIC3zaTU1loLGxh 141.142.220.118 56056 141.142.2.2 53 udp - 0.000402 36 131 SF - - 0 Dd 1 64 1 159 - +1300475173.475401 conn zeek 1300475168.902195 C9rXSW3KSpTYvPrlI1 141.142.220.118 55092 141.142.2.2 53 udp - 0.000374 36 198 SF - - 0 Dd 1 64 1 226 - +1300475173.475401 conn zeek 1300475168.858713 Ck51lg1bScffFj34Ri 141.142.220.118 59714 141.142.2.2 53 udp - 0.000375 38 183 SF - - 0 Dd 1 66 1 211 - +1300475173.475401 conn zeek 1300475167.099816 C9mvWx3ezztgzcexV7 141.142.220.50 5353 224.0.0.251 5353 udp - - - - S0 - - 0 D 1 179 0 0 - +1300475173.475401 conn zeek 1300475168.854837 CNnMIj2QSd84NKf7U3 141.142.220.118 40526 141.142.2.2 53 udp - 0.000392 38 183 SF - - 0 Dd 1 66 1 211 - +1300475173.475401 conn zeek 1300475168.894787 C7fIlMZDuRiqjpYbb 141.142.220.118 48128 141.142.2.2 53 udp - 0.000423 38 183 SF - - 0 Dd 1 66 1 211 - +1300475173.475401 conn zeek 1300475168.894422 CykQaM33ztNt0csB9a 141.142.220.118 48479 141.142.2.2 53 udp - 0.000317 52 99 SF - - 0 Dd 1 80 1 127 - +1300475173.475401 conn zeek 1300475169.899438 CtxTCR2Yer0FR1tIBg 141.142.220.44 5353 224.0.0.251 5353 udp - - - - S0 - - 0 D 1 85 0 0 - +1300475173.475401 conn zeek 1300475170.862384 CpmdRlaUoJLN3uIRa 141.142.220.226 137 141.142.220.255 137 udp - 2.613017 350 0 S0 - - 0 D 7 546 0 0 - +1300475173.475401 conn zeek 1300475168.892414 C1Xkzz2MaGtLrc1Tla 141.142.220.118 59746 141.142.2.2 53 udp - 0.000421 38 183 SF - - 0 Dd 1 66 1 211 - +1300475173.475401 conn zeek 1300475168.858306 CqlVyW1YwZ15RhTBc4 141.142.220.118 59816 141.142.2.2 53 udp - 0.000343 52 99 SF - - 0 Dd 1 80 1 127 - +1300475173.475401 conn zeek 1300475167.097012 CLNN1k2QMum1aexUK7 fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp - - - - S0 - - 0 D 1 199 0 0 - +1300475173.475401 conn zeek 1300475173.117362 CBA8792iHmnhPLksKa 141.142.220.226 55671 224.0.0.252 5355 udp - 0.099849 66 0 S0 - - 0 D 2 122 0 0 - +1300475173.475401 conn zeek 1300475173.153679 CGLPPc35OzDQij1XX8 141.142.220.238 56641 141.142.220.255 137 udp - - - - S0 - - 0 D 1 78 0 0 - +1300475173.475401 conn zeek 
1300475168.892037 CiyBAq1bBLNaTiTAc 141.142.220.118 38911 141.142.2.2 53 udp - 0.000335 52 99 SF - - 0 Dd 1 80 1 127 - +1300475173.475401 conn zeek 1300475171.675372 CFSwNi4CNGxcuffo49 fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp - 0.100096 66 0 S0 - - 0 D 2 162 0 0 - +1300475173.475401 conn zeek 1300475167.096535 Cipfzj1BEnhejw8cGf 141.142.220.202 5353 224.0.0.251 5353 udp - - - - S0 - - 0 D 1 73 0 0 - +1300475173.475401 conn zeek 1300475168.854378 CV5WJ42jPYbNW9JNWf 141.142.220.118 37676 141.142.2.2 53 udp - 0.000420 52 99 SF - - 0 Dd 1 80 1 127 - +1300475173.475401 conn zeek 1300475171.677081 CPhDKt12KQPUVbQz06 141.142.220.226 55131 224.0.0.252 5355 udp - 0.100021 66 0 S0 - - 0 D 2 122 0 0 - +1300475173.475401 conn zeek 1300475173.116749 CAnFrb2Cvxr5T7quOc fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp - 0.099801 66 0 S0 - - 0 D 2 162 0 0 - +1300475173.475401 conn zeek 1300475168.893988 C8rquZ3DjgNW06JGLl 141.142.220.118 45000 141.142.2.2 53 udp - 0.000384 38 89 SF - - 0 Dd 1 66 1 117 - +1300475173.475401 conn zeek 1300475168.857956 CzrZOtXqhwwndQva3 141.142.220.118 32902 141.142.2.2 53 udp - 0.000317 38 89 SF - - 0 Dd 1 66 1 117 - +1300475173.475401 conn zeek 1300475168.891644 CaGCc13FffXe6RkQl9 141.142.220.118 58206 141.142.2.2 53 udp - 0.000339 38 89 SF - - 0 Dd 1 66 1 117 - +#close 2019-06-07-02-20-03 diff --git a/testing/btest/Baseline/scripts.base.frameworks.netcontrol.acld/send.netcontrol.log b/testing/btest/Baseline/scripts.base.frameworks.netcontrol.acld/send.netcontrol.log index 6170cb6ce0..6ac98821e4 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.netcontrol.acld/send.netcontrol.log +++ b/testing/btest/Baseline/scripts.base.frameworks.netcontrol.acld/send.netcontrol.log @@ -6,20 +6,20 @@ #open 2017-04-07-17-26-05 #fields ts rule_id category cmd state action target entity_type entity mod msg priority expire location plugin #types time string enum string enum string enum string string string string int interval string string -0.000000 - NetControl::MESSAGE - - - - - - - activating plugin with priority 0 - - - Acld-bro/event/netcontroltest +0.000000 - NetControl::MESSAGE - - - - - - - activating plugin with priority 0 - - - Acld-zeek/event/netcontroltest 0.000000 - NetControl::MESSAGE - - - - - - - waiting for plugins to initialize - - - - -1491585965.002956 - NetControl::MESSAGE - - - - - - - activation finished - - - Acld-bro/event/netcontroltest +1491585965.002956 - NetControl::MESSAGE - - - - - - - activation finished - - - Acld-zeek/event/netcontroltest 1491585965.002956 - NetControl::MESSAGE - - - - - - - plugin initialization done - - - - -1491585965.027155 2 NetControl::RULE ADD NetControl::REQUESTED NetControl::DROP NetControl::FORWARD NetControl::FLOW 192.168.18.50/32/*->74.125.239.97/32/* - - 0 36000.000000 here Acld-bro/event/netcontroltest -1491585965.027155 3 NetControl::RULE ADD NetControl::REQUESTED NetControl::DROP NetControl::FORWARD NetControl::FLOW */*->*/443 - - 0 36000.000000 there Acld-bro/event/netcontroltest -1491585965.027155 4 NetControl::RULE ADD NetControl::REQUESTED NetControl::DROP NetControl::FORWARD NetControl::ADDRESS 192.168.18.50/32 - - 0 36000.000000 - Acld-bro/event/netcontroltest -1491585965.027706 2 NetControl::RULE ADD NetControl::SUCCEEDED NetControl::DROP NetControl::FORWARD NetControl::FLOW 192.168.18.50/32/*->74.125.239.97/32/* - blockhosthost 0 36000.000000 here Acld-bro/event/netcontroltest -1491585965.027706 2 NetControl::RULE REMOVE NetControl::REQUESTED NetControl::DROP NetControl::FORWARD NetControl::FLOW 
192.168.18.50/32/*->74.125.239.97/32/* - - 0 36000.000000 here Acld-bro/event/netcontroltest -1491585965.027706 3 NetControl::RULE ADD NetControl::EXISTS NetControl::DROP NetControl::FORWARD NetControl::FLOW */*->*/443 - droptcpport 0 36000.000000 there Acld-bro/event/netcontroltest -1491585965.027706 3 NetControl::RULE REMOVE NetControl::REQUESTED NetControl::DROP NetControl::FORWARD NetControl::FLOW */*->*/443 - - 0 36000.000000 there Acld-bro/event/netcontroltest -1491585965.027706 4 NetControl::RULE ADD NetControl::SUCCEEDED NetControl::DROP NetControl::FORWARD NetControl::ADDRESS 192.168.18.50/32 - drop 0 36000.000000 - Acld-bro/event/netcontroltest -1491585965.027706 4 NetControl::RULE REMOVE NetControl::REQUESTED NetControl::DROP NetControl::FORWARD NetControl::ADDRESS 192.168.18.50/32 - - 0 36000.000000 - Acld-bro/event/netcontroltest -1491585965.027706 2 NetControl::ERROR - - NetControl::DROP NetControl::FORWARD NetControl::FLOW 192.168.18.50/32/*->74.125.239.97/32/* - restorehosthost 0 36000.000000 here Acld-bro/event/netcontroltest -1491585965.027706 3 NetControl::RULE REMOVE NetControl::SUCCEEDED NetControl::DROP NetControl::FORWARD NetControl::FLOW */*->*/443 - restoretcpport 0 36000.000000 there Acld-bro/event/netcontroltest -1491585965.027706 4 NetControl::RULE REMOVE NetControl::SUCCEEDED NetControl::DROP NetControl::FORWARD NetControl::ADDRESS 192.168.18.50/32 - restore 0 36000.000000 - Acld-bro/event/netcontroltest +1491585965.027155 2 NetControl::RULE ADD NetControl::REQUESTED NetControl::DROP NetControl::FORWARD NetControl::FLOW 192.168.18.50/32/*->74.125.239.97/32/* - - 0 36000.000000 here Acld-zeek/event/netcontroltest +1491585965.027155 3 NetControl::RULE ADD NetControl::REQUESTED NetControl::DROP NetControl::FORWARD NetControl::FLOW */*->*/443 - - 0 36000.000000 there Acld-zeek/event/netcontroltest +1491585965.027155 4 NetControl::RULE ADD NetControl::REQUESTED NetControl::DROP NetControl::FORWARD NetControl::ADDRESS 192.168.18.50/32 - - 0 36000.000000 - Acld-zeek/event/netcontroltest +1491585965.027706 2 NetControl::RULE ADD NetControl::SUCCEEDED NetControl::DROP NetControl::FORWARD NetControl::FLOW 192.168.18.50/32/*->74.125.239.97/32/* - blockhosthost 0 36000.000000 here Acld-zeek/event/netcontroltest +1491585965.027706 2 NetControl::RULE REMOVE NetControl::REQUESTED NetControl::DROP NetControl::FORWARD NetControl::FLOW 192.168.18.50/32/*->74.125.239.97/32/* - - 0 36000.000000 here Acld-zeek/event/netcontroltest +1491585965.027706 3 NetControl::RULE ADD NetControl::EXISTS NetControl::DROP NetControl::FORWARD NetControl::FLOW */*->*/443 - droptcpport 0 36000.000000 there Acld-zeek/event/netcontroltest +1491585965.027706 3 NetControl::RULE REMOVE NetControl::REQUESTED NetControl::DROP NetControl::FORWARD NetControl::FLOW */*->*/443 - - 0 36000.000000 there Acld-zeek/event/netcontroltest +1491585965.027706 4 NetControl::RULE ADD NetControl::SUCCEEDED NetControl::DROP NetControl::FORWARD NetControl::ADDRESS 192.168.18.50/32 - drop 0 36000.000000 - Acld-zeek/event/netcontroltest +1491585965.027706 4 NetControl::RULE REMOVE NetControl::REQUESTED NetControl::DROP NetControl::FORWARD NetControl::ADDRESS 192.168.18.50/32 - - 0 36000.000000 - Acld-zeek/event/netcontroltest +1491585965.027706 2 NetControl::ERROR - - NetControl::DROP NetControl::FORWARD NetControl::FLOW 192.168.18.50/32/*->74.125.239.97/32/* - restorehosthost 0 36000.000000 here Acld-zeek/event/netcontroltest +1491585965.027706 3 NetControl::RULE REMOVE NetControl::SUCCEEDED NetControl::DROP 
NetControl::FORWARD NetControl::FLOW */*->*/443 - restoretcpport 0 36000.000000 there Acld-zeek/event/netcontroltest +1491585965.027706 4 NetControl::RULE REMOVE NetControl::SUCCEEDED NetControl::DROP NetControl::FORWARD NetControl::ADDRESS 192.168.18.50/32 - restore 0 36000.000000 - Acld-zeek/event/netcontroltest #close 2017-04-07-17-26-05 diff --git a/testing/btest/Baseline/scripts.base.frameworks.netcontrol.broker/send.netcontrol.log b/testing/btest/Baseline/scripts.base.frameworks.netcontrol.broker/send.netcontrol.log index fccd9f61f7..96edf66410 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.netcontrol.broker/send.netcontrol.log +++ b/testing/btest/Baseline/scripts.base.frameworks.netcontrol.broker/send.netcontrol.log @@ -6,15 +6,15 @@ #open 2016-08-05-17-34-55 #fields ts rule_id category cmd state action target entity_type entity mod msg priority expire location plugin #types time string enum string enum string enum string string string string int interval string string -0.000000 - NetControl::MESSAGE - - - - - - - activating plugin with priority 0 - - - Broker-bro/event/netcontroltest +0.000000 - NetControl::MESSAGE - - - - - - - activating plugin with priority 0 - - - Broker-zeek/event/netcontroltest 0.000000 - NetControl::MESSAGE - - - - - - - waiting for plugins to initialize - - - - -1470418495.661396 - NetControl::MESSAGE - - - - - - - activation finished - - - Broker-bro/event/netcontroltest +1470418495.661396 - NetControl::MESSAGE - - - - - - - activation finished - - - Broker-zeek/event/netcontroltest 1470418495.661396 - NetControl::MESSAGE - - - - - - - plugin initialization done - - - - -1470418496.045332 2 NetControl::RULE ADD NetControl::REQUESTED NetControl::DROP NetControl::MONITOR NetControl::FLOW 10.10.1.4/32/1470->74.53.140.153/32/25 - - 0 36000.000000 - Broker-bro/event/netcontroltest -1470418496.045332 3 NetControl::RULE ADD NetControl::REQUESTED NetControl::DROP NetControl::FORWARD NetControl::ADDRESS 10.10.1.4/32 - - 0 36000.000000 - Broker-bro/event/netcontroltest -1470418496.045364 2 NetControl::RULE ADD NetControl::EXISTS NetControl::DROP NetControl::MONITOR NetControl::FLOW 10.10.1.4/32/1470->74.53.140.153/32/25 - - 0 36000.000000 - Broker-bro/event/netcontroltest -1470418496.045364 2 NetControl::RULE EXPIRE NetControl::TIMEOUT NetControl::DROP NetControl::MONITOR NetControl::FLOW 10.10.1.4/32/1470->74.53.140.153/32/25 - - 0 36000.000000 - Broker-bro/event/netcontroltest -1470418496.045364 3 NetControl::RULE ADD NetControl::SUCCEEDED NetControl::DROP NetControl::FORWARD NetControl::ADDRESS 10.10.1.4/32 - - 0 36000.000000 - Broker-bro/event/netcontroltest -1470418496.045364 3 NetControl::RULE REMOVE NetControl::REQUESTED NetControl::DROP NetControl::FORWARD NetControl::ADDRESS 10.10.1.4/32 - removing 0 36000.000000 - Broker-bro/event/netcontroltest -1470418496.045364 3 NetControl::RULE REMOVE NetControl::SUCCEEDED NetControl::DROP NetControl::FORWARD NetControl::ADDRESS 10.10.1.4/32 - - 0 36000.000000 - Broker-bro/event/netcontroltest +1470418496.045332 2 NetControl::RULE ADD NetControl::REQUESTED NetControl::DROP NetControl::MONITOR NetControl::FLOW 10.10.1.4/32/1470->74.53.140.153/32/25 - - 0 36000.000000 - Broker-zeek/event/netcontroltest +1470418496.045332 3 NetControl::RULE ADD NetControl::REQUESTED NetControl::DROP NetControl::FORWARD NetControl::ADDRESS 10.10.1.4/32 - - 0 36000.000000 - Broker-zeek/event/netcontroltest +1470418496.045364 2 NetControl::RULE ADD NetControl::EXISTS NetControl::DROP NetControl::MONITOR NetControl::FLOW 
10.10.1.4/32/1470->74.53.140.153/32/25 - - 0 36000.000000 - Broker-zeek/event/netcontroltest +1470418496.045364 2 NetControl::RULE EXPIRE NetControl::TIMEOUT NetControl::DROP NetControl::MONITOR NetControl::FLOW 10.10.1.4/32/1470->74.53.140.153/32/25 - - 0 36000.000000 - Broker-zeek/event/netcontroltest +1470418496.045364 3 NetControl::RULE ADD NetControl::SUCCEEDED NetControl::DROP NetControl::FORWARD NetControl::ADDRESS 10.10.1.4/32 - - 0 36000.000000 - Broker-zeek/event/netcontroltest +1470418496.045364 3 NetControl::RULE REMOVE NetControl::REQUESTED NetControl::DROP NetControl::FORWARD NetControl::ADDRESS 10.10.1.4/32 - removing 0 36000.000000 - Broker-zeek/event/netcontroltest +1470418496.045364 3 NetControl::RULE REMOVE NetControl::SUCCEEDED NetControl::DROP NetControl::FORWARD NetControl::ADDRESS 10.10.1.4/32 - - 0 36000.000000 - Broker-zeek/event/netcontroltest #close 2016-08-05-17-34-56 diff --git a/testing/btest/Baseline/scripts.base.frameworks.netcontrol.packetfilter/conn.log b/testing/btest/Baseline/scripts.base.frameworks.netcontrol.packetfilter/conn.log index 9a673f80e2..614a90a0f7 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.netcontrol.packetfilter/conn.log +++ b/testing/btest/Baseline/scripts.base.frameworks.netcontrol.packetfilter/conn.log @@ -3,13 +3,13 @@ #empty_field (empty) #unset_field - #path conn -#open 2016-07-13-16-15-38 +#open 2019-04-17-21-00-04 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig local_resp missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool bool count string count count count count set[string] 1254722767.492060 CHhAvVGS1DHFjwGM9 10.10.1.4 56166 10.10.1.1 53 udp dns 0.034025 34 100 SF - - 0 Dd 1 62 1 128 - 1254722776.690444 C4J4Th3PJpwUYZZ6gc 10.10.1.20 138 10.10.1.255 138 udp - - - - S0 - - 0 D 1 229 0 0 - 1254722767.529046 ClEkJM2Vm5giqnMf4h 10.10.1.4 1470 74.53.140.153 25 tcp - 0.346950 0 0 S1 - - 0 Sh 1 48 1 48 - 1437831776.764391 CtPZjS20MLrsMUOJi2 192.168.133.100 49285 66.196.121.26 5050 tcp - 0.343008 41 0 OTH - - 0 Da 1 93 1 52 - -1437831787.856895 CUM0KZ3MLUfNB0cl11 192.168.133.100 49648 192.168.133.102 25 tcp - 0.048043 162 154 S1 - - 154 ShDA 3 192 1 60 - +1437831787.856895 CUM0KZ3MLUfNB0cl11 192.168.133.100 49648 192.168.133.102 25 tcp - 0.048043 162 154 S1 - - 154 ShDgA 3 192 1 60 - 1437831798.533765 CmES5u32sYpV7JYN 192.168.133.100 49336 74.125.71.189 443 tcp - - - - OTH - - 0 A 1 52 0 0 - -#close 2016-07-13-16-15-38 +#close 2019-04-17-21-00-04 diff --git a/testing/btest/Baseline/scripts.base.frameworks.notice.cluster/manager-1.notice.log b/testing/btest/Baseline/scripts.base.frameworks.notice.cluster/manager-1.notice.log index 461bcc9a92..7d2e1019d6 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.notice.cluster/manager-1.notice.log +++ b/testing/btest/Baseline/scripts.base.frameworks.notice.cluster/manager-1.notice.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path notice -#open 2014-04-01-23-15-31 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude -#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval bool 
string string string double double -1396394130.996908 - - - - - - - - - Test_Notice test notice! - - - - - worker-1 Notice::ACTION_LOG 3600.000000 F - - - - - -#close 2014-04-01-23-15-31 +#open 2019-06-05-19-55-00 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude +#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval string string string double double +1559764500.522055 - - - - - - - - - Test_Notice test notice! - - - - - worker-1 Notice::ACTION_LOG 3600.000000 - - - - - +#close 2019-06-05-19-55-00 diff --git a/testing/btest/Baseline/scripts.base.frameworks.notice.suppression-cluster/manager-1.notice.log b/testing/btest/Baseline/scripts.base.frameworks.notice.suppression-cluster/manager-1.notice.log index 275bf725a7..44affa1edd 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.notice.suppression-cluster/manager-1.notice.log +++ b/testing/btest/Baseline/scripts.base.frameworks.notice.suppression-cluster/manager-1.notice.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path notice -#open 2014-04-01-23-15-42 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude -#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval bool string string string double double -1396394142.353380 - - - - - - - - - Test_Notice test notice! - - - - - worker-2 Notice::ACTION_LOG 3600.000000 F - - - - - -#close 2014-04-01-23-15-45 +#open 2019-06-05-19-55-01 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude +#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval string string string double double +1559764501.477321 - - - - - - - - - Test_Notice test notice! 
- - - - - worker-2 Notice::ACTION_LOG 3600.000000 - - - - - +#close 2019-06-05-19-55-04 diff --git a/testing/btest/Baseline/scripts.base.frameworks.notice.suppression/notice.log b/testing/btest/Baseline/scripts.base.frameworks.notice.suppression/notice.log index 9b79fc7adc..a9a07fddc6 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.notice.suppression/notice.log +++ b/testing/btest/Baseline/scripts.base.frameworks.notice.suppression/notice.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path notice -#open 2017-12-20-23-33-05 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude -#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval bool string string string double double -1513812785.342226 - - - - - - - - - Test_Notice test - - - - - - Notice::ACTION_LOG 3600.000000 F - - - - - -#close 2017-12-20-23-33-05 +#open 2019-06-05-19-30-57 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude +#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval string string string double double +1559763057.706137 - - - - - - - - - Test_Notice test - - - - - - Notice::ACTION_LOG 3600.000000 - - - - - +#close 2019-06-05-19-30-57 diff --git a/testing/btest/Baseline/scripts.base.frameworks.reporter.disable-stderr/reporter.log b/testing/btest/Baseline/scripts.base.frameworks.reporter.disable-stderr/reporter.log index 144c094b2f..744f050046 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.reporter.disable-stderr/reporter.log +++ b/testing/btest/Baseline/scripts.base.frameworks.reporter.disable-stderr/reporter.log @@ -6,5 +6,5 @@ #open 2012-08-10-20-09-16 #fields ts level message location #types time enum string string -0.000000 Reporter::ERROR no such index (test[3]) /da/home/robin/bro/master/testing/btest/.tmp/scripts.base.frameworks.reporter.disable-stderr/disable-stderr.bro, line 12 +0.000000 Reporter::ERROR no such index (test[3]) /da/home/robin/bro/master/testing/btest/.tmp/scripts.base.frameworks.reporter.disable-stderr/disable-stderr.zeek, line 12 #close 2012-08-10-20-09-16 diff --git a/testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/.stderr b/testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/.stderr index ed161b2409..b01cfa1e84 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/.stderr +++ b/testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/.stderr @@ -1 +1 @@ -expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/scripts.base.frameworks.reporter.stderr/stderr.bro, line 9: no such index (test[3]) +expression error in /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/scripts.base.frameworks.reporter.stderr/stderr.zeek, line 9: no such index (test[3]) diff --git a/testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/reporter.log b/testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/reporter.log index 391cf77a00..705bb357fa 100644 --- 
a/testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/reporter.log +++ b/testing/btest/Baseline/scripts.base.frameworks.reporter.stderr/reporter.log @@ -6,5 +6,5 @@ #open 2013-01-18-18-29-30 #fields ts level message location #types time enum string string -0.000000 Reporter::ERROR no such index (test[3]) /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/scripts.base.frameworks.reporter.stderr/stderr.bro, line 9 +0.000000 Reporter::ERROR no such index (test[3]) /Users/jsiwek/Projects/bro/bro/testing/btest/.tmp/scripts.base.frameworks.reporter.stderr/stderr.zeek, line 9 #close 2013-01-18-18-29-30 diff --git a/testing/btest/Baseline/scripts.base.misc.find-filtered-trace/out1 b/testing/btest/Baseline/scripts.base.misc.find-filtered-trace/out1 index c2f791ba82..3c3f495e11 100644 --- a/testing/btest/Baseline/scripts.base.misc.find-filtered-trace/out1 +++ b/testing/btest/Baseline/scripts.base.misc.find-filtered-trace/out1 @@ -1 +1 @@ -1389719059.311687 warning in /Users/jsiwek/Projects/bro/bro/scripts/base/misc/find-filtered-trace.bro, line 48: The analyzed trace file was determined to contain only TCP control packets, which may indicate it's been pre-filtered. By default, Bro reports the missing segments for this type of trace, but the 'detect_filtered_trace' option may be toggled if that's not desired. +1389719059.311687 warning in /Users/jsiwek/Projects/bro/bro/scripts/base/misc/find-filtered-trace.zeek, line 48: The analyzed trace file was determined to contain only TCP control packets, which may indicate it's been pre-filtered. By default, Zeek reports the missing segments for this type of trace, but the 'detect_filtered_trace' option may be toggled if that's not desired. diff --git a/testing/btest/Baseline/scripts.base.misc.version/.stderr b/testing/btest/Baseline/scripts.base.misc.version/.stderr index bfae6163df..28da0b203a 100644 --- a/testing/btest/Baseline/scripts.base.misc.version/.stderr +++ b/testing/btest/Baseline/scripts.base.misc.version/.stderr @@ -1,4 +1,4 @@ -error in /home/robin/bro/master/scripts/base/misc/version.bro, line 54: Version string 1 cannot be parsed -error in /home/robin/bro/master/scripts/base/misc/version.bro, line 54: Version string 12.5 cannot be parsed -error in /home/robin/bro/master/scripts/base/misc/version.bro, line 54: Version string 1.12-beta-drunk cannot be parsed -error in /home/robin/bro/master/scripts/base/misc/version.bro, line 54: Version string JustARandomString cannot be parsed +error in /home/robin/bro/master/scripts/base/misc/version.zeek, line 54: Version string 1 cannot be parsed +error in /home/robin/bro/master/scripts/base/misc/version.zeek, line 54: Version string 12.5 cannot be parsed +error in /home/robin/bro/master/scripts/base/misc/version.zeek, line 54: Version string 1.12-beta-drunk cannot be parsed +error in /home/robin/bro/master/scripts/base/misc/version.zeek, line 54: Version string JustARandomString cannot be parsed diff --git a/testing/btest/Baseline/scripts.base.protocols.dhcp.dhcp-time-nameserver-events/.stdout b/testing/btest/Baseline/scripts.base.protocols.dhcp.dhcp-time-nameserver-events/.stdout new file mode 100644 index 0000000000..7e274f0c83 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.dhcp.dhcp-time-nameserver-events/.stdout @@ -0,0 +1,4 @@ +time_offset, -18000 +timeserver_list, [192.168.15.101] +nameserver_list, [192.168.15.101] +ntpserver_list, [192.168.15.101] diff --git a/testing/btest/Baseline/scripts.base.protocols.dns.spf/dns.log 
b/testing/btest/Baseline/scripts.base.protocols.dns.spf/dns.log new file mode 100644 index 0000000000..ebec6a3979 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.dns.spf/dns.log @@ -0,0 +1,10 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path dns +#open 2019-06-14-15-15-00 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto trans_id rtt query qclass qclass_name qtype qtype_name rcode rcode_name AA TC RD RA Z answers TTLs rejected +#types time string addr port addr port enum count interval string count string count string count string bool bool bool bool count vector[string] vector[interval] bool +1560524739.386971 CHhAvVGS1DHFjwGM9 10.91.0.62 57806 10.91.1.59 53 udp 64161 - mail.vladg.net - - - - 0 NOERROR F F F T 0 SPF 19 v=spf1 mx -all test,SPF 14 v=spf1 mx -all 300.000000,300.000000 F +#close 2019-06-14-15-15-00 diff --git a/testing/btest/Baseline/scripts.base.protocols.ftp.cwd-navigation/conn.log b/testing/btest/Baseline/scripts.base.protocols.ftp.cwd-navigation/conn.log index 8990518008..2559f88db2 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ftp.cwd-navigation/conn.log +++ b/testing/btest/Baseline/scripts.base.protocols.ftp.cwd-navigation/conn.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path conn -#open 2016-07-13-16-16-15 +#open 2019-04-17-21-00-49 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig local_resp missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents #types time string addr port addr port enum string interval count count string bool bool count string count count count count set[string] -1464385864.999633 CHhAvVGS1DHFjwGM9 10.3.22.91 58218 10.167.25.101 21 tcp ftp 600.931043 41420 159830 S1 - - 233 ShAdDa 4139 206914 4178 326799 - -#close 2016-07-13-16-16-15 +1464385864.999633 CHhAvVGS1DHFjwGM9 10.3.22.91 58218 10.167.25.101 21 tcp ftp 600.931043 41420 159830 S1 - - 233 ShAdDaGg 4139 206914 4178 326799 - +#close 2019-04-17-21-00-50 diff --git a/testing/btest/Baseline/scripts.base.protocols.ftp.cwd-navigation/ftp.log b/testing/btest/Baseline/scripts.base.protocols.ftp.cwd-navigation/ftp.log index 4516886e52..8a2d00a6c7 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ftp.cwd-navigation/ftp.log +++ b/testing/btest/Baseline/scripts.base.protocols.ftp.cwd-navigation/ftp.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path ftp -#open 2016-07-13-16-16-15 +#open 2019-04-17-21-00-48 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p user password command arg mime_type file_size reply_code reply_msg data_channel.passive data_channel.orig_h data_channel.resp_h data_channel.resp_p fuid #types time string addr port addr port string string string string string count count string bool addr addr port string 1464385865.669674 CHhAvVGS1DHFjwGM9 10.3.22.91 58218 10.167.25.101 21 anonymous anonymous@ PASV - - - 227 Entering Passive Mode (205,167,25,101,243,251). T 10.3.22.91 205.167.25.101 62459 - @@ -1381,4 +1381,4 @@ 1464386464.737901 CHhAvVGS1DHFjwGM9 10.3.22.91 58218 10.167.25.101 21 anonymous anonymous@ RETR ftp://10.167.25.101/./pub/data/1993/722024-99999-1993.gz - 30171 226 Transfer complete - - - - - 1464386465.294490 CHhAvVGS1DHFjwGM9 10.3.22.91 58218 10.167.25.101 21 anonymous anonymous@ PASV - - - 227 Entering Passive Mode (205,167,25,101,251,88). 
T 10.3.22.91 205.167.25.101 64344 - 1464386465.471708 CHhAvVGS1DHFjwGM9 10.3.22.91 58218 10.167.25.101 21 anonymous anonymous@ RETR ftp://10.167.25.101/./pub/data/1994/722024-99999-1994.gz - 29736 226 Transfer complete - - - - - -#close 2016-07-13-16-16-15 +#close 2019-04-17-21-00-50 diff --git a/testing/btest/Baseline/scripts.base.protocols.ftp.gridftp/notice.log b/testing/btest/Baseline/scripts.base.protocols.ftp.gridftp/notice.log index c8f76ae4d0..17e6b09237 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ftp.gridftp/notice.log +++ b/testing/btest/Baseline/scripts.base.protocols.ftp.gridftp/notice.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path notice -#open 2017-12-21-02-27-54 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude -#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval bool string string string double double -1348168976.557131 ClEkJM2Vm5giqnMf4h 192.168.57.103 35391 192.168.57.101 55968 - - - tcp GridFTP::Data_Channel GridFTP data channel over threshold 2 bytes - 192.168.57.103 192.168.57.101 55968 - - Notice::ACTION_LOG 3600.000000 F - - - - - -#close 2017-12-21-02-27-54 +#open 2019-06-05-19-31-25 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude +#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval string string string double double +1348168976.557131 ClEkJM2Vm5giqnMf4h 192.168.57.103 35391 192.168.57.101 55968 - - - tcp GridFTP::Data_Channel GridFTP data channel over threshold 2 bytes - 192.168.57.103 192.168.57.101 55968 - - Notice::ACTION_LOG 3600.000000 - - - - - +#close 2019-06-05-19-31-25 diff --git a/testing/btest/Baseline/scripts.base.protocols.http.content-range-less-than-len/weird.log b/testing/btest/Baseline/scripts.base.protocols.http.content-range-less-than-len/weird.log index 7cd09fb789..a08e7d9514 100644 --- a/testing/btest/Baseline/scripts.base.protocols.http.content-range-less-than-len/weird.log +++ b/testing/btest/Baseline/scripts.base.protocols.http.content-range-less-than-len/weird.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path weird -#open 2018-05-08-20-04-16 +#open 2019-06-07-02-00-44 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1523627611.748118 CHhAvVGS1DHFjwGM9 127.0.0.1 58128 127.0.0.1 80 HTTP_range_not_matching_len - F bro -#close 2018-05-08-20-04-17 +1523627611.748118 CHhAvVGS1DHFjwGM9 127.0.0.1 58128 127.0.0.1 80 HTTP_range_not_matching_len - F zeek +#close 2019-06-07-02-00-44 diff --git a/testing/btest/Baseline/scripts.base.protocols.http.http-bad-request-with-version/weird.log b/testing/btest/Baseline/scripts.base.protocols.http.http-bad-request-with-version/weird.log index 141247a989..8245e19c81 100644 --- a/testing/btest/Baseline/scripts.base.protocols.http.http-bad-request-with-version/weird.log +++ b/testing/btest/Baseline/scripts.base.protocols.http.http-bad-request-with-version/weird.log @@ -3,8 +3,8 @@ 
#empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-16-20 +#open 2019-06-07-02-00-44 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1452204358.172926 CHhAvVGS1DHFjwGM9 192.168.122.130 49157 202.7.177.41 80 bad_HTTP_request_with_version - F bro -#close 2016-07-13-16-16-20 +1452204358.172926 CHhAvVGS1DHFjwGM9 192.168.122.130 49157 202.7.177.41 80 bad_HTTP_request_with_version - F zeek +#close 2019-06-07-02-00-45 diff --git a/testing/btest/Baseline/scripts.base.protocols.http.http-methods/weird.log b/testing/btest/Baseline/scripts.base.protocols.http.http-methods/weird.log index 5ff7cb6ab7..fe218669ca 100644 --- a/testing/btest/Baseline/scripts.base.protocols.http.http-methods/weird.log +++ b/testing/btest/Baseline/scripts.base.protocols.http.http-methods/weird.log @@ -3,34 +3,34 @@ #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-16-23 +#open 2019-06-07-02-00-45 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1354328874.237327 ClEkJM2Vm5giqnMf4h 128.2.6.136 46563 173.194.75.103 80 missing_HTTP_uri - F bro -1354328874.278822 C4J4Th3PJpwUYZZ6gc 128.2.6.136 46564 173.194.75.103 80 bad_HTTP_request - F bro -1354328874.321792 CtPZjS20MLrsMUOJi2 128.2.6.136 46565 173.194.75.103 80 bad_HTTP_request - F bro -1354328882.908690 C37jN32gN3y3AZzyf6 128.2.6.136 46569 173.194.75.103 80 bad_HTTP_request - F bro -1354328882.949510 C3eiCBGOLw3VtHfOj 128.2.6.136 46570 173.194.75.103 80 bad_HTTP_request - F bro -1354328887.094494 C0LAHyvtKSQHyJxIl 128.2.6.136 46572 173.194.75.103 80 bad_HTTP_request - F bro -1354328891.141058 CFLRIC3zaTU1loLGxh 128.2.6.136 46573 173.194.75.103 80 bad_HTTP_request - F bro -1354328891.183942 C9rXSW3KSpTYvPrlI1 128.2.6.136 46574 173.194.75.103 80 bad_HTTP_request_with_version - F bro -1354328891.226199 Ck51lg1bScffFj34Ri 128.2.6.136 46575 173.194.75.103 80 bad_HTTP_request - F bro -1354328891.267625 C9mvWx3ezztgzcexV7 128.2.6.136 46576 173.194.75.103 80 bad_HTTP_request_with_version - F bro -1354328891.309065 CNnMIj2QSd84NKf7U3 128.2.6.136 46577 173.194.75.103 80 unknown_HTTP_method CCM_POST F bro -1354328895.355012 C7fIlMZDuRiqjpYbb 128.2.6.136 46578 173.194.75.103 80 unknown_HTTP_method CCM_POST F bro -1354328895.396634 CykQaM33ztNt0csB9a 128.2.6.136 46579 173.194.75.103 80 bad_HTTP_request - F bro -1354328895.438812 CtxTCR2Yer0FR1tIBg 128.2.6.136 46580 173.194.75.103 80 bad_HTTP_request - F bro -1354328895.480865 CpmdRlaUoJLN3uIRa 128.2.6.136 46581 173.194.75.103 80 unknown_HTTP_method CCM_POST F bro -1354328903.614145 CLNN1k2QMum1aexUK7 128.2.6.136 46584 173.194.75.103 80 bad_HTTP_request - F bro -1354328903.656369 CBA8792iHmnhPLksKa 128.2.6.136 46585 173.194.75.103 80 bad_HTTP_request - F bro -1354328911.832856 Cipfzj1BEnhejw8cGf 128.2.6.136 46589 173.194.75.103 80 bad_HTTP_request - F bro -1354328911.876341 CV5WJ42jPYbNW9JNWf 128.2.6.136 46590 173.194.75.103 80 bad_HTTP_request - F bro -1354328920.052085 CzrZOtXqhwwndQva3 128.2.6.136 46594 173.194.75.103 80 bad_HTTP_request - F bro -1354328920.094072 CaGCc13FffXe6RkQl9 128.2.6.136 46595 173.194.75.103 80 bad_HTTP_request - F bro -1354328924.266693 CzmEfj4RValNyLfT58 128.2.6.136 46599 173.194.75.103 80 bad_HTTP_request - F bro -1354328924.308714 CCk2V03QgWwIurU3f 128.2.6.136 46600 173.194.75.103 80 bad_HTTP_request - F bro -1354328924.476011 CKJVAj1rNx0nolFFc4 128.2.6.136 46604 173.194.75.103 80 
bad_HTTP_request - F bro -1354328924.518204 CD7vfu1qu4YJKe1nGi 128.2.6.136 46605 173.194.75.103 80 bad_HTTP_request - F bro -1354328932.734579 CRJ9x54IaE7bkVEpad 128.2.6.136 46609 173.194.75.103 80 bad_HTTP_request - F bro -1354328932.776609 CAvUKGaEgLlR4i6t2 128.2.6.136 46610 173.194.75.103 80 bad_HTTP_request - F bro -#close 2016-07-13-16-16-23 +1354328874.237327 ClEkJM2Vm5giqnMf4h 128.2.6.136 46563 173.194.75.103 80 missing_HTTP_uri - F zeek +1354328874.278822 C4J4Th3PJpwUYZZ6gc 128.2.6.136 46564 173.194.75.103 80 bad_HTTP_request - F zeek +1354328874.321792 CtPZjS20MLrsMUOJi2 128.2.6.136 46565 173.194.75.103 80 bad_HTTP_request - F zeek +1354328882.908690 C37jN32gN3y3AZzyf6 128.2.6.136 46569 173.194.75.103 80 bad_HTTP_request - F zeek +1354328882.949510 C3eiCBGOLw3VtHfOj 128.2.6.136 46570 173.194.75.103 80 bad_HTTP_request - F zeek +1354328887.094494 C0LAHyvtKSQHyJxIl 128.2.6.136 46572 173.194.75.103 80 bad_HTTP_request - F zeek +1354328891.141058 CFLRIC3zaTU1loLGxh 128.2.6.136 46573 173.194.75.103 80 bad_HTTP_request - F zeek +1354328891.183942 C9rXSW3KSpTYvPrlI1 128.2.6.136 46574 173.194.75.103 80 bad_HTTP_request_with_version - F zeek +1354328891.226199 Ck51lg1bScffFj34Ri 128.2.6.136 46575 173.194.75.103 80 bad_HTTP_request - F zeek +1354328891.267625 C9mvWx3ezztgzcexV7 128.2.6.136 46576 173.194.75.103 80 bad_HTTP_request_with_version - F zeek +1354328891.309065 CNnMIj2QSd84NKf7U3 128.2.6.136 46577 173.194.75.103 80 unknown_HTTP_method CCM_POST F zeek +1354328895.355012 C7fIlMZDuRiqjpYbb 128.2.6.136 46578 173.194.75.103 80 unknown_HTTP_method CCM_POST F zeek +1354328895.396634 CykQaM33ztNt0csB9a 128.2.6.136 46579 173.194.75.103 80 bad_HTTP_request - F zeek +1354328895.438812 CtxTCR2Yer0FR1tIBg 128.2.6.136 46580 173.194.75.103 80 bad_HTTP_request - F zeek +1354328895.480865 CpmdRlaUoJLN3uIRa 128.2.6.136 46581 173.194.75.103 80 unknown_HTTP_method CCM_POST F zeek +1354328903.614145 CLNN1k2QMum1aexUK7 128.2.6.136 46584 173.194.75.103 80 bad_HTTP_request - F zeek +1354328903.656369 CBA8792iHmnhPLksKa 128.2.6.136 46585 173.194.75.103 80 bad_HTTP_request - F zeek +1354328911.832856 Cipfzj1BEnhejw8cGf 128.2.6.136 46589 173.194.75.103 80 bad_HTTP_request - F zeek +1354328911.876341 CV5WJ42jPYbNW9JNWf 128.2.6.136 46590 173.194.75.103 80 bad_HTTP_request - F zeek +1354328920.052085 CzrZOtXqhwwndQva3 128.2.6.136 46594 173.194.75.103 80 bad_HTTP_request - F zeek +1354328920.094072 CaGCc13FffXe6RkQl9 128.2.6.136 46595 173.194.75.103 80 bad_HTTP_request - F zeek +1354328924.266693 CzmEfj4RValNyLfT58 128.2.6.136 46599 173.194.75.103 80 bad_HTTP_request - F zeek +1354328924.308714 CCk2V03QgWwIurU3f 128.2.6.136 46600 173.194.75.103 80 bad_HTTP_request - F zeek +1354328924.476011 CKJVAj1rNx0nolFFc4 128.2.6.136 46604 173.194.75.103 80 bad_HTTP_request - F zeek +1354328924.518204 CD7vfu1qu4YJKe1nGi 128.2.6.136 46605 173.194.75.103 80 bad_HTTP_request - F zeek +1354328932.734579 CRJ9x54IaE7bkVEpad 128.2.6.136 46609 173.194.75.103 80 bad_HTTP_request - F zeek +1354328932.776609 CAvUKGaEgLlR4i6t2 128.2.6.136 46610 173.194.75.103 80 bad_HTTP_request - F zeek +#close 2019-06-07-02-00-45 diff --git a/testing/btest/Baseline/scripts.base.protocols.http.no-uri/weird.log b/testing/btest/Baseline/scripts.base.protocols.http.no-uri/weird.log index f24e35c0d4..0e7d7f91f7 100644 --- a/testing/btest/Baseline/scripts.base.protocols.http.no-uri/weird.log +++ b/testing/btest/Baseline/scripts.base.protocols.http.no-uri/weird.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path weird -#open 2016-07-13-16-16-26 
+#open 2019-06-07-02-00-45 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1362692526.939527 CHhAvVGS1DHFjwGM9 141.142.228.5 59856 192.150.187.43 80 missing_HTTP_uri - F bro -#close 2016-07-13-16-16-26 +1362692526.939527 CHhAvVGS1DHFjwGM9 141.142.228.5 59856 192.150.187.43 80 missing_HTTP_uri - F zeek +#close 2019-06-07-02-00-45 diff --git a/testing/btest/Baseline/scripts.base.protocols.http.percent-end-of-line/weird.log b/testing/btest/Baseline/scripts.base.protocols.http.percent-end-of-line/weird.log index df24831d15..8dd763e62c 100644 --- a/testing/btest/Baseline/scripts.base.protocols.http.percent-end-of-line/weird.log +++ b/testing/btest/Baseline/scripts.base.protocols.http.percent-end-of-line/weird.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path weird -#open 2017-07-28-05-03-01 +#open 2019-06-07-02-00-45 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1501217955.063524 CHhAvVGS1DHFjwGM9 192.168.0.9 57322 192.150.187.12 80 illegal_%_at_end_of_URI - F bro -1501217957.423701 ClEkJM2Vm5giqnMf4h 192.168.0.9 57323 192.150.187.12 80 partial_escape_at_end_of_URI - F bro -#close 2017-07-28-05-03-01 +1501217955.063524 CHhAvVGS1DHFjwGM9 192.168.0.9 57322 192.150.187.12 80 illegal_%_at_end_of_URI - F zeek +1501217957.423701 ClEkJM2Vm5giqnMf4h 192.168.0.9 57323 192.150.187.12 80 partial_escape_at_end_of_URI - F zeek +#close 2019-06-07-02-00-45 diff --git a/testing/btest/Baseline/scripts.base.protocols.irc.longline/weird.log b/testing/btest/Baseline/scripts.base.protocols.irc.longline/weird.log index b88f8724c5..67b7d6616e 100644 --- a/testing/btest/Baseline/scripts.base.protocols.irc.longline/weird.log +++ b/testing/btest/Baseline/scripts.base.protocols.irc.longline/weird.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path weird -#open 2017-11-03-19-17-18 +#open 2019-06-07-02-00-46 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1509735979.080381 CtPZjS20MLrsMUOJi2 127.0.0.1 50164 127.0.0.1 6667 contentline_size_exceeded - F bro -1509735979.080381 CtPZjS20MLrsMUOJi2 127.0.0.1 50164 127.0.0.1 6667 irc_line_size_exceeded - F bro -1509735981.241042 CtPZjS20MLrsMUOJi2 127.0.0.1 50164 127.0.0.1 6667 irc_invalid_command - F bro -#close 2017-11-03-19-17-18 +1509735979.080381 CtPZjS20MLrsMUOJi2 127.0.0.1 50164 127.0.0.1 6667 contentline_size_exceeded - F zeek +1509735979.080381 CtPZjS20MLrsMUOJi2 127.0.0.1 50164 127.0.0.1 6667 irc_line_size_exceeded - F zeek +1509735981.241042 CtPZjS20MLrsMUOJi2 127.0.0.1 50164 127.0.0.1 6667 irc_invalid_command - F zeek +#close 2019-06-07-02-00-46 diff --git a/testing/btest/Baseline/scripts.base.protocols.irc.names-weird/weird.log b/testing/btest/Baseline/scripts.base.protocols.irc.names-weird/weird.log index 908df6470e..959dd8febd 100644 --- a/testing/btest/Baseline/scripts.base.protocols.irc.names-weird/weird.log +++ b/testing/btest/Baseline/scripts.base.protocols.irc.names-weird/weird.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path weird -#open 2018-09-13-00-31-10 +#open 2019-06-07-02-00-46 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer #types time string addr port addr port string string bool string -1536797872.428637 ClEkJM2Vm5giqnMf4h 127.0.0.1 65389 127.0.0.1 6666 irc_invalid_names_line - F bro -#close 
2018-09-13-00-31-10 +1536797872.428637 ClEkJM2Vm5giqnMf4h 127.0.0.1 65389 127.0.0.1 6666 irc_invalid_names_line - F zeek +#close 2019-06-07-02-00-46 diff --git a/testing/btest/Baseline/scripts.base.protocols.ntp.ntp-digest/.stdout b/testing/btest/Baseline/scripts.base.protocols.ntp.ntp-digest/.stdout new file mode 100644 index 0000000000..ce4d6750d0 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ntp.ntp-digest/.stdout @@ -0,0 +1,40 @@ +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.02623, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495804929.482904, key_id=1, digest=\xac\x01{i\x91\\xe5\xa7\xa9\xfbs\xac\x8b\xd1`;, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.0271, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495804987.483591, key_id=1, digest=3\x03\x03\xf0\xaa\x15`\xf5i\xd8=\xfa\x10S\x80\xd4, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.027374, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805006.483955, key_id=1, digest=\xbb\xdc\x86&\xa6\xfd\x0d\xd5q.4I\x04\xad\xeb\xe1, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.027939, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805043.484331, key_id=1, digest=\x97\x99\x90\x18t\xf8-k\xcdo0\x9945\xe1\xbf, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.028503, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805081.484917, key_id=1, digest=}\xb9\x12\xfe\x8fT\xd2\x0fA\x8c\x86\x90\x9b\xf9\xd1p, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.02887, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805105.48515, key_id=1, digest=\x8e\xb7\xe1\x0e\x86T\xdcsB\x05j\xe0|\xc9i\xa6, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.029678, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805159.485867, key_id=1, digest=.\x16\x11\x9e\xd1\xadJ\x16x}\x03f\x91\x11\x85_, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.030334, kiss_code=, ref_id=, ref_addr=182.165.128.219, 
ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805203.486536, key_id=1, digest=\x1bu\x87"U\xa3\xbb*\xe7-\xecu`\x88\xb8z, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.030396, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805207.486593, key_id=1, digest=\x10\x8eb\x92\x85\xc1\xd0#\x09\x93\x97\xff\x0a$&/, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.031128, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805256.487243, key_id=1, digest=\xcc\x1f~\xa1\xdf\xe0.\x9f_,\xa65\x1a[\x01s, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.031479, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805279.487564, key_id=1, digest=}\xfd\xbf\x81\xae\x16\x07\x1b\x987kd\x9255\xf9, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.032242, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805330.48824, key_id=1, digest=\x90\xfc\x8c\x02\xa6w\x1f\xf2\x0aU\xfb\x04\x10\xe9\xe3\xfb, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.032639, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805357.488596, key_id=1, digest=\x96@\x0b\x95\x99W\xd7\xa8\xf6\x06\xf9\xa0B,\x1d\xd8, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.03302, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805382.488826, key_id=1, digest=\xd3\xd7\xd1Vn\xb7\x0e\xceg\xb9C\x0b\xc2\xac\xcc\xab, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.03392, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805442.489609, key_id=1, digest=\x93E\xaa\xdaj\xfc3d\x0c\xd1\xad\xb3\x83ce\xc9, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.034058, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805451.489738, key_id=1, digest={\xaa\x86\xeb\x99\xb8H\xe4\x8b\x93\x7f\xfe\x84)%\xa4, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, 
std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.034088, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805453.489803, key_id=1, digest=1D0\x0f\x08\x16\x9fjs\xd4\xef}5\xa1\xf6\x9a, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.03421, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805461.489877, key_id=1, digest=nU~J\x8d\xe9m\xf3\.\xf1g\xb5loj, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.034454, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805478.490201, key_id=1, digest=\xa4\xf8A\x08\xbf\xda}F%\xf2Fl\x19\xff\xdd\xab, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.034851, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805504.490512, key_id=1, digest=\xb0\xeec=\xb3\x17\xa2\xc5\xab\xe5\x04\x82\xe3\xfcE\xa1, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.035263, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805532.49081, key_id=1, digest=\x0b\xa2\x86M\x0d\xdc%\xbc\xcbi\xe9\x81\xd6\x0d$\x80, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.035339, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805537.490953, key_id=1, digest=\x01\xc1~\xeeJ'\x92g\x0cYI0\x0c\xd7\x91\xb1, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.035858, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805571.491412, key_id=1, digest=i\x90_\x89B\xf0\xe8Nq.\xe2t\xf1_jn, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.036804, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805634.492157, key_id=1, digest=\x07p\xf5&\xff\x1b6\x04\xb3\x83/\xb9\xff\xcb\xf2\xc6, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.036942, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805644.492282, key_id=1, digest=M\x05\xa9\x90'a\xbajGdb\xe03ZA\xf9, num_exts=0], control_msg=, mode7_msg=] 
+ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.037735, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805696.492977, key_id=1, digest=\x17B\xcf\xb1y\x88\xdfA\xfe\xc2\x03u"J\xe9\xb0, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.038666, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805758.493804, key_id=1, digest=\x99\xe3\xa8\x0c\xca@\xc1\x0c\x1c\xc9\xec=\x07\xc0d\xc7, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.038925, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805776.494147, key_id=1, digest=T\x1bE\x16,{\x1b\x08\xc9\x05G\xce\xb9\xe2,\x9d, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.03952, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805815.494543, key_id=1, digest=\xb7\xa6\xc2'u\xdc\xcc\x86(\xe0^{\xdd\x8e\xde,, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.040253, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805864.495286, key_id=1, digest=\xfcU\x94Z\xca\x04x\xe0\x93z\xa2K\x89\xc0 \xb7, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.040268, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805865.495321, key_id=1, digest=\xf9& \x1a\xb2}O\xe5\xba\xd9\xfe\x0f\xf4J\x93P, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.041031, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805916.495973, key_id=1, digest=\x8f\x8c\xb7\xcd\xd4{\x8f\x0b\x92\xee~\xa6\x17\x0e\xa1\x0f, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.041748, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495805964.496596, key_id=1, digest=L\xe68\xb6\x1d\x1d\x8cq\x0b\xb7\xf0N\xee\x9b\xb2j, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.042175, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, 
xmt_time=1495805992.496969, key_id=1, digest=D\xa4>MCS\x90h\x0f=[4u\xd0\x1f\x9f, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.042816, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495806035.497545, key_id=1, digest=U\xcb]\xd3"NV\xb9[W\xa5!\xddl\x1f4, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.043106, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495806055.497725, key_id=1, digest=Y\xdc\xc7]#\xb3\x9bV\xf7\x1b\xb3[\x8e\x836!, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.04332, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495806069.497937, key_id=1, digest=\xd1-O\xc7-\xac\x10\x19\xa9h\xe4\x81;\xb4\x01e, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.043488, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495806080.498132, key_id=1, digest=\xe0\xa3^\x9ck*qa\x85\x1aVA|bJA, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.043793, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495806100.498395, key_id=1, digest=;\x03\xae\x8a\xf1}ABj\x87\xa0\xf4\xa8C\xd7\x9f, num_exts=0], control_msg=, mode7_msg=] +ntp_message 2003:51:6012:121::2 -> 2003:51:6012:110::dcf7:123:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.002213, root_disp=0.044235, kiss_code=, ref_id=, ref_addr=182.165.128.219, ref_time=1495804247.476651, org_time=0.0, rec_time=0.0, xmt_time=1495806130.498813, key_id=1, digest=\x1e\xa7\xc3\xd2\xe3\x0e\x08!\x8f\xe3Z$&B\x96\x13, num_exts=0], control_msg=, mode7_msg=] diff --git a/testing/btest/Baseline/scripts.base.protocols.ntp.ntp-digest/ntp.log b/testing/btest/Baseline/scripts.base.protocols.ntp.ntp-digest/ntp.log new file mode 100644 index 0000000000..cd8310105e --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ntp.ntp-digest/ntp.log @@ -0,0 +1,49 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path ntp +#open 2019-06-16-00-50-01 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version mode stratum poll precision root_delay root_disp ref_id ref_time org_time rec_time xmt_time num_exts +#types time string addr port addr port count count count interval interval interval interval string time time time time count +1495804929.483801 CHhAvVGS1DHFjwGM9 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.026230 182.165.128.219 1495804247.476651 0.000000 0.000000 1495804929.482904 0 +1495804987.484143 CHhAvVGS1DHFjwGM9 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 
64.000000 0.000000 0.002213 0.027100 182.165.128.219 1495804247.476651 0.000000 0.000000 1495804987.483591 0 +1495805006.484270 CHhAvVGS1DHFjwGM9 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.027374 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805006.483955 0 +1495805043.485165 CHhAvVGS1DHFjwGM9 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.027939 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805043.484331 0 +1495805081.485865 CHhAvVGS1DHFjwGM9 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.028503 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805081.484917 0 +1495805105.485765 CHhAvVGS1DHFjwGM9 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.028870 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805105.485150 0 +1495805159.486908 CHhAvVGS1DHFjwGM9 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.029678 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805159.485867 0 +1495805203.487540 CHhAvVGS1DHFjwGM9 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.030334 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805203.486536 0 +1495805207.487345 CHhAvVGS1DHFjwGM9 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.030396 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805207.486593 0 +1495805256.488426 CHhAvVGS1DHFjwGM9 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.031128 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805256.487243 0 +1495805279.488216 CHhAvVGS1DHFjwGM9 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.031479 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805279.487564 0 +1495805330.489109 CHhAvVGS1DHFjwGM9 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.032242 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805330.488240 0 +1495805357.489015 CHhAvVGS1DHFjwGM9 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.032639 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805357.488596 0 +1495805382.489247 CHhAvVGS1DHFjwGM9 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.033020 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805382.488826 0 +1495805442.490416 ClEkJM2Vm5giqnMf4h 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.033920 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805442.489609 0 +1495805451.490156 ClEkJM2Vm5giqnMf4h 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.034058 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805451.489738 0 +1495805453.490234 ClEkJM2Vm5giqnMf4h 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.034088 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805453.489803 0 +1495805461.490303 ClEkJM2Vm5giqnMf4h 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.034210 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805461.489877 0 +1495805478.490697 ClEkJM2Vm5giqnMf4h 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 
0.034454 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805478.490201 0 +1495805504.491041 ClEkJM2Vm5giqnMf4h 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.034851 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805504.490512 0 +1495805532.491194 ClEkJM2Vm5giqnMf4h 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.035263 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805532.490810 0 +1495805537.491291 ClEkJM2Vm5giqnMf4h 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.035339 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805537.490953 0 +1495805571.492008 ClEkJM2Vm5giqnMf4h 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.035858 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805571.491412 0 +1495805634.493022 C4J4Th3PJpwUYZZ6gc 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.036804 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805634.492157 0 +1495805644.492819 C4J4Th3PJpwUYZZ6gc 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.036942 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805644.492282 0 +1495805696.493954 C4J4Th3PJpwUYZZ6gc 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.037735 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805696.492977 0 +1495805758.494168 CtPZjS20MLrsMUOJi2 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.038666 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805758.493804 0 +1495805776.494105 CtPZjS20MLrsMUOJi2 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.038925 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805776.494147 0 +1495805815.494785 CtPZjS20MLrsMUOJi2 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.039520 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805815.494543 0 +1495805864.495551 CtPZjS20MLrsMUOJi2 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.040253 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805864.495286 0 +1495805865.495215 CtPZjS20MLrsMUOJi2 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.040268 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805865.495321 0 +1495805916.496467 CtPZjS20MLrsMUOJi2 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.041031 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805916.495973 0 +1495805964.497107 CtPZjS20MLrsMUOJi2 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.041748 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805964.496596 0 +1495805992.497281 CtPZjS20MLrsMUOJi2 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.042175 182.165.128.219 1495804247.476651 0.000000 0.000000 1495805992.496969 0 +1495806035.498089 CtPZjS20MLrsMUOJi2 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.042816 182.165.128.219 1495804247.476651 0.000000 0.000000 1495806035.497545 0 +1495806055.497837 CtPZjS20MLrsMUOJi2 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.043106 
182.165.128.219 1495804247.476651 0.000000 0.000000 1495806055.497725 0 +1495806069.497947 CtPZjS20MLrsMUOJi2 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.043320 182.165.128.219 1495804247.476651 0.000000 0.000000 1495806069.497937 0 +1495806080.498110 CtPZjS20MLrsMUOJi2 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.043488 182.165.128.219 1495804247.476651 0.000000 0.000000 1495806080.498132 0 +1495806100.498331 CtPZjS20MLrsMUOJi2 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.043793 182.165.128.219 1495804247.476651 0.000000 0.000000 1495806100.498395 0 +1495806130.498492 CtPZjS20MLrsMUOJi2 2003:51:6012:121::2 123 2003:51:6012:110::dcf7:123 123 4 3 2 64.000000 0.000000 0.002213 0.044235 182.165.128.219 1495804247.476651 0.000000 0.000000 1495806130.498813 0 +#close 2019-06-16-00-50-01 diff --git a/testing/btest/Baseline/scripts.base.protocols.ntp.ntp/.stdout b/testing/btest/Baseline/scripts.base.protocols.ntp.ntp/.stdout new file mode 100644 index 0000000000..51e74de79f --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ntp.ntp/.stdout @@ -0,0 +1,32 @@ +ntp_message 192.168.43.118 -> 80.211.52.109:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.02417, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246548.048352, rec_time=1559246548.076756, xmt_time=1559246614.027421, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 80.211.52.109:123 [version=4, mode=4, std_msg=[stratum=4, poll=64.0, precision=0, root_delay=0.048843, root_disp=0.07135, kiss_code=, ref_id=, ref_addr=105.237.207.28, ref_time=1559245852.721794, org_time=1559246614.027421, rec_time=1559246614.048376, xmt_time=1559246614.048407, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 212.45.144.88:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.024216, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246550.040662, rec_time=1559246550.063198, xmt_time=1559246617.027452, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 212.45.144.88:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.003799, root_disp=0.032959, kiss_code=, ref_id=, ref_addr=193.204.114.233, ref_time=1559245541.537424, org_time=1559246617.027452, rec_time=1559246617.040799, xmt_time=1559246617.040813, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 31.14.131.188:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.024246, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246553.074199, rec_time=1559246553.094855, xmt_time=1559246619.027384, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 31.14.131.188:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.040375, root_disp=0.001266, kiss_code=, ref_id=, ref_addr=195.113.144.238, ref_time=1559246560.207644, org_time=1559246619.027384, rec_time=1559246619.054018, xmt_time=1559246619.054053, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 188.213.165.209:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, 
root_delay=0.04628, root_disp=0.024261, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246551.034239, rec_time=1559246551.058223, xmt_time=1559246620.027408, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 185.19.184.35:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.024261, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246553.067084, rec_time=1559246553.088704, xmt_time=1559246620.027461, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 212.45.144.3:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.024261, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246554.041266, rec_time=1559246554.063055, xmt_time=1559246620.027475, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 185.19.184.35:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0.000008, root_delay=0.003235, root_disp=0.000275, kiss_code=, ref_id=, ref_addr=193.204.114.233, ref_time=1559246481.481997, org_time=1559246620.027461, rec_time=1559246620.040139, xmt_time=1559246620.040206, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 188.213.165.209:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.013397, root_disp=0.053787, kiss_code=, ref_id=, ref_addr=193.204.114.233, ref_time=1559244627.070973, org_time=1559246620.027408, rec_time=1559246620.043959, xmt_time=1559246620.043985, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 212.45.144.3:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0.000001, root_delay=0.00351, root_disp=0.036545, kiss_code=, ref_id=, ref_addr=193.204.114.232, ref_time=1559245278.44239, org_time=1559246620.027475, rec_time=1559246620.048058, xmt_time=1559246620.048074, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 31.14.133.122:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.024277, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246553.072776, rec_time=1559246553.094814, xmt_time=1559246621.027421, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 31.14.133.122:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.01091, root_disp=0.033463, kiss_code=, ref_id=, ref_addr=193.204.114.233, ref_time=1559245577.265702, org_time=1559246621.027421, rec_time=1559246621.073143, xmt_time=1559246621.073172, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 85.199.214.99:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.024292, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246556.043833, rec_time=1559246556.073681, xmt_time=1559246622.027384, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 94.177.187.22:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.024292, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246553.078959, rec_time=1559246553.100708, xmt_time=1559246622.027446, key_id=, digest=, num_exts=0], 
control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 147.135.207.214:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.024292, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246553.085177, rec_time=1559246553.102587, xmt_time=1559246622.027464, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 212.45.144.206:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.024292, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246554.041367, rec_time=1559246554.069181, xmt_time=1559246622.027478, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 94.177.187.22:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.013733, root_disp=0.041672, kiss_code=, ref_id=, ref_addr=193.204.114.233, ref_time=1559245709.302032, org_time=1559246622.027446, rec_time=1559246622.071899, xmt_time=1559246622.071924, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 212.45.144.206:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0.000002, root_delay=0.00351, root_disp=0.038559, kiss_code=, ref_id=, ref_addr=193.204.114.232, ref_time=1559245178.020777, org_time=1559246622.027478, rec_time=1559246622.068521, xmt_time=1559246622.06856, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 85.199.214.99:123 [version=4, mode=4, std_msg=[stratum=1, poll=16.0, precision=0, root_delay=0.0, root_disp=0.0, kiss_code=, ref_id=GPS\x00, ref_addr=, ref_time=1559246622.0, org_time=1559246622.027384, rec_time=1559246622.073734, xmt_time=1559246622.07374, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 147.135.207.214:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0.000008, root_delay=0.042236, root_disp=0.03743, kiss_code=, ref_id=, ref_addr=212.7.1.132, ref_time=1559245356.576177, org_time=1559246622.027464, rec_time=1559246622.086267, xmt_time=1559246622.086348, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 93.41.196.243:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.024307, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246556.032041, rec_time=1559246556.054612, xmt_time=1559246623.027478, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 80.211.171.177:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.024307, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246555.051459, rec_time=1559246555.077253, xmt_time=1559246623.027521, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 93.41.196.243:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.025391, root_disp=0.011642, kiss_code=, ref_id=, ref_addr=193.204.114.233, ref_time=1559246412.455332, org_time=1559246623.027478, rec_time=1559246623.041209, xmt_time=1559246623.04122, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 80.211.171.177:123 [version=4, mode=4, std_msg=[stratum=4, poll=64.0, precision=0, root_delay=0.036835, root_disp=0.046951, kiss_code=, ref_id=, ref_addr=73.98.4.223, 
ref_time=1559245789.870424, org_time=1559246623.027521, rec_time=1559246623.04836, xmt_time=1559246623.048416, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 147.135.207.213:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.024353, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246557.07812, rec_time=1559246557.097844, xmt_time=1559246626.027432, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 80.211.155.206:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.024353, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246558.043947, rec_time=1559246558.067904, xmt_time=1559246626.027514, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 80.211.155.206:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.013535, root_disp=0.025497, kiss_code=, ref_id=, ref_addr=193.204.114.232, ref_time=1559246283.180069, org_time=1559246626.027514, rec_time=1559246626.044105, xmt_time=1559246626.044139, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 147.135.207.213:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0.000008, root_delay=0.042236, root_disp=0.037491, kiss_code=, ref_id=, ref_addr=212.7.1.132, ref_time=1559245356.576177, org_time=1559246626.027432, rec_time=1559246626.058084, xmt_time=1559246626.058151, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 80.211.88.132:123 [version=3, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.024368, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246560.040576, rec_time=1559246560.064668, xmt_time=1559246627.027459, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 80.211.88.132:123 [version=3, mode=4, std_msg=[stratum=3, poll=64.0, precision=0.000001, root_delay=0.011765, root_disp=0.001526, kiss_code=, ref_id=, ref_addr=185.19.184.35, ref_time=1559245638.390748, org_time=1559246627.027459, rec_time=1559246627.050401, xmt_time=1559246627.050438, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] diff --git a/testing/btest/Baseline/scripts.base.protocols.ntp.ntp/ntp.log b/testing/btest/Baseline/scripts.base.protocols.ntp.ntp/ntp.log new file mode 100644 index 0000000000..9833b09aef --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ntp.ntp/ntp.log @@ -0,0 +1,41 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path ntp +#open 2019-06-16-00-50-01 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version mode stratum poll precision root_delay root_disp ref_id ref_time org_time rec_time xmt_time num_exts +#types time string addr port addr port count count count interval interval interval interval string time time time time count +1559246614.027454 CHhAvVGS1DHFjwGM9 192.168.43.118 123 80.211.52.109 123 4 3 2 64.000000 0.000000 0.046280 0.024170 85.199.214.99 1559246556.073681 1559246548.048352 1559246548.076756 1559246614.027421 0 +1559246614.074475 CHhAvVGS1DHFjwGM9 192.168.43.118 123 80.211.52.109 123 4 4 4 64.000000 0.000000 0.048843 0.071350 105.237.207.28 1559245852.721794 1559246614.027421 1559246614.048376 1559246614.048407 0 +1559246617.027486 ClEkJM2Vm5giqnMf4h 192.168.43.118 123 
212.45.144.88 123 4 3 2 64.000000 0.000000 0.046280 0.024216 85.199.214.99 1559246556.073681 1559246550.040662 1559246550.063198 1559246617.027452 0 +1559246617.063504 ClEkJM2Vm5giqnMf4h 192.168.43.118 123 212.45.144.88 123 4 4 2 64.000000 0.000000 0.003799 0.032959 193.204.114.233 1559245541.537424 1559246617.027452 1559246617.040799 1559246617.040813 0 +1559246619.027413 C4J4Th3PJpwUYZZ6gc 192.168.43.118 123 31.14.131.188 123 4 3 2 64.000000 0.000000 0.046280 0.024246 85.199.214.99 1559246556.073681 1559246553.074199 1559246553.094855 1559246619.027384 0 +1559246619.074513 C4J4Th3PJpwUYZZ6gc 192.168.43.118 123 31.14.131.188 123 4 4 2 64.000000 0.000000 0.040375 0.001266 195.113.144.238 1559246560.207644 1559246619.027384 1559246619.054018 1559246619.054053 0 +1559246620.027437 CtPZjS20MLrsMUOJi2 192.168.43.118 123 188.213.165.209 123 4 3 2 64.000000 0.000000 0.046280 0.024261 85.199.214.99 1559246556.073681 1559246551.034239 1559246551.058223 1559246620.027408 0 +1559246620.027466 CUM0KZ3MLUfNB0cl11 192.168.43.118 123 185.19.184.35 123 4 3 2 64.000000 0.000000 0.046280 0.024261 85.199.214.99 1559246556.073681 1559246553.067084 1559246553.088704 1559246620.027461 0 +1559246620.027480 CmES5u32sYpV7JYN 192.168.43.118 123 212.45.144.3 123 4 3 2 64.000000 0.000000 0.046280 0.024261 85.199.214.99 1559246556.073681 1559246554.041266 1559246554.063055 1559246620.027475 0 +1559246620.059693 CUM0KZ3MLUfNB0cl11 192.168.43.118 123 185.19.184.35 123 4 4 2 64.000000 0.000008 0.003235 0.000275 193.204.114.233 1559246481.481997 1559246620.027461 1559246620.040139 1559246620.040206 0 +1559246620.065302 CtPZjS20MLrsMUOJi2 192.168.43.118 123 188.213.165.209 123 4 4 2 64.000000 0.000000 0.013397 0.053787 193.204.114.233 1559244627.070973 1559246620.027408 1559246620.043959 1559246620.043985 0 +1559246620.065335 CmES5u32sYpV7JYN 192.168.43.118 123 212.45.144.3 123 4 4 2 64.000000 0.000001 0.003510 0.036545 193.204.114.232 1559245278.442390 1559246620.027475 1559246620.048058 1559246620.048074 0 +1559246621.027458 CP5puj4I8PtEU4qzYg 192.168.43.118 123 31.14.133.122 123 4 3 2 64.000000 0.000000 0.046280 0.024277 85.199.214.99 1559246556.073681 1559246553.072776 1559246553.094814 1559246621.027421 0 +1559246621.095645 CP5puj4I8PtEU4qzYg 192.168.43.118 123 31.14.133.122 123 4 4 2 64.000000 0.000000 0.010910 0.033463 193.204.114.233 1559245577.265702 1559246621.027421 1559246621.073143 1559246621.073172 0 +1559246622.027418 C37jN32gN3y3AZzyf6 192.168.43.118 123 85.199.214.99 123 4 3 2 64.000000 0.000000 0.046280 0.024292 85.199.214.99 1559246556.073681 1559246556.043833 1559246556.073681 1559246622.027384 0 +1559246622.027454 C3eiCBGOLw3VtHfOj 192.168.43.118 123 94.177.187.22 123 4 3 2 64.000000 0.000000 0.046280 0.024292 85.199.214.99 1559246556.073681 1559246553.078959 1559246553.100708 1559246622.027446 0 +1559246622.027471 CwjjYJ2WqgTbAqiHl6 192.168.43.118 123 147.135.207.214 123 4 3 2 64.000000 0.000000 0.046280 0.024292 85.199.214.99 1559246556.073681 1559246553.085177 1559246553.102587 1559246622.027464 0 +1559246622.027484 C0LAHyvtKSQHyJxIl 192.168.43.118 123 212.45.144.206 123 4 3 2 64.000000 0.000000 0.046280 0.024292 85.199.214.99 1559246556.073681 1559246554.041367 1559246554.069181 1559246622.027478 0 +1559246622.092519 C3eiCBGOLw3VtHfOj 192.168.43.118 123 94.177.187.22 123 4 4 2 64.000000 0.000000 0.013733 0.041672 193.204.114.233 1559245709.302032 1559246622.027446 1559246622.071899 1559246622.071924 0 +1559246622.092556 C0LAHyvtKSQHyJxIl 192.168.43.118 123 212.45.144.206 123 4 4 2 64.000000 
0.000002 0.003510 0.038559 193.204.114.232 1559245178.020777 1559246622.027478 1559246622.068521 1559246622.068560 0 +1559246622.100109 C37jN32gN3y3AZzyf6 192.168.43.118 123 85.199.214.99 123 4 4 1 16.000000 0.000000 0.000000 0.000000 GPS\x00 1559246622.000000 1559246622.027384 1559246622.073734 1559246622.073740 0 +1559246622.100152 CwjjYJ2WqgTbAqiHl6 192.168.43.118 123 147.135.207.214 123 4 4 2 64.000000 0.000008 0.042236 0.037430 212.7.1.132 1559245356.576177 1559246622.027464 1559246622.086267 1559246622.086348 0 +1559246623.027502 CFLRIC3zaTU1loLGxh 192.168.43.118 123 93.41.196.243 123 4 3 2 64.000000 0.000000 0.046280 0.024307 85.199.214.99 1559246556.073681 1559246556.032041 1559246556.054612 1559246623.027478 0 +1559246623.027531 C9rXSW3KSpTYvPrlI1 192.168.43.118 123 80.211.171.177 123 4 3 2 64.000000 0.000000 0.046280 0.024307 85.199.214.99 1559246556.073681 1559246555.051459 1559246555.077253 1559246623.027521 0 +1559246623.062844 CFLRIC3zaTU1loLGxh 192.168.43.118 123 93.41.196.243 123 4 4 2 64.000000 0.000000 0.025391 0.011642 193.204.114.233 1559246412.455332 1559246623.027478 1559246623.041209 1559246623.041220 0 +1559246623.070217 C9rXSW3KSpTYvPrlI1 192.168.43.118 123 80.211.171.177 123 4 4 4 64.000000 0.000000 0.036835 0.046951 73.98.4.223 1559245789.870424 1559246623.027521 1559246623.048360 1559246623.048416 0 +1559246626.027461 Ck51lg1bScffFj34Ri 192.168.43.118 123 147.135.207.213 123 4 3 2 64.000000 0.000000 0.046280 0.024353 85.199.214.99 1559246556.073681 1559246557.078120 1559246557.097844 1559246626.027432 0 +1559246626.027518 C9mvWx3ezztgzcexV7 192.168.43.118 123 80.211.155.206 123 4 3 2 64.000000 0.000000 0.046280 0.024353 85.199.214.99 1559246556.073681 1559246558.043947 1559246558.067904 1559246626.027514 0 +1559246626.065984 C9mvWx3ezztgzcexV7 192.168.43.118 123 80.211.155.206 123 4 4 2 64.000000 0.000000 0.013535 0.025497 193.204.114.232 1559246283.180069 1559246626.027514 1559246626.044105 1559246626.044139 0 +1559246626.075079 Ck51lg1bScffFj34Ri 192.168.43.118 123 147.135.207.213 123 4 4 2 64.000000 0.000008 0.042236 0.037491 212.7.1.132 1559245356.576177 1559246626.027432 1559246626.058084 1559246626.058151 0 +1559246627.027502 CNnMIj2QSd84NKf7U3 192.168.43.118 123 80.211.88.132 123 3 3 2 64.000000 0.000000 0.046280 0.024368 85.199.214.99 1559246556.073681 1559246560.040576 1559246560.064668 1559246627.027459 0 +1559246627.073485 CNnMIj2QSd84NKf7U3 192.168.43.118 123 80.211.88.132 123 3 4 3 64.000000 0.000001 0.011765 0.001526 185.19.184.35 1559245638.390748 1559246627.027459 1559246627.050401 1559246627.050438 0 +#close 2019-06-16-00-50-01 diff --git a/testing/btest/Baseline/scripts.base.protocols.ntp.ntp2/.stdout b/testing/btest/Baseline/scripts.base.protocols.ntp.ntp2/.stdout new file mode 100644 index 0000000000..8a22c8d0e2 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ntp.ntp2/.stdout @@ -0,0 +1,35 @@ +ntp_message 192.168.43.118 -> 80.211.52.109:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.028229, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246818.058351, rec_time=1559246818.079217, xmt_time=1559246885.027449, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 80.211.52.109:123 [version=4, mode=4, std_msg=[stratum=4, poll=64.0, precision=0, root_delay=0.048843, root_disp=0.075409, kiss_code=, ref_id=, ref_addr=105.237.207.28, ref_time=1559245852.721794, org_time=1559246885.027449, 
rec_time=1559246885.069212, xmt_time=1559246885.069247, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 212.45.144.88:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.028259, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246820.060608, rec_time=1559246820.081498, xmt_time=1559246887.027425, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 212.45.144.88:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.003799, root_disp=0.037018, kiss_code=, ref_id=, ref_addr=193.204.114.233, ref_time=1559245541.537424, org_time=1559246887.027425, rec_time=1559246887.050758, xmt_time=1559246887.050774, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 31.14.131.188:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.028275, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246819.064014, rec_time=1559246819.079147, xmt_time=1559246888.027454, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 185.19.184.35:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.028275, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246822.050275, rec_time=1559246822.064562, xmt_time=1559246888.030021, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 185.19.184.35:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0.000008, root_delay=0.003235, root_disp=0.000565, kiss_code=, ref_id=, ref_addr=193.204.114.233, ref_time=1559246481.481997, org_time=1559246888.030021, rec_time=1559246888.22033, xmt_time=1559246888.220401, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 31.14.131.188:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.040375, root_disp=0.001236, kiss_code=, ref_id=, ref_addr=195.113.144.238, ref_time=1559246882.967102, org_time=1559246888.027454, rec_time=1559246888.234035, xmt_time=1559246888.234061, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 212.45.144.3:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.02829, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246822.05809, rec_time=1559246822.069097, xmt_time=1559246889.027449, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 212.45.144.3:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0.000001, root_delay=0.00351, root_disp=0.040573, kiss_code=, ref_id=, ref_addr=193.204.114.232, ref_time=1559245278.44239, org_time=1559246889.027449, rec_time=1559246889.061203, xmt_time=1559246889.06122, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 85.199.214.99:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.028305, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246824.073855, rec_time=1559246824.095227, xmt_time=1559246890.027469, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 31.14.133.122:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, 
root_delay=0.04628, root_disp=0.028305, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246821.052836, rec_time=1559246821.069165, xmt_time=1559246890.027512, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 188.213.165.209:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.028305, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246823.12395, rec_time=1559246823.295751, xmt_time=1559246890.027523, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 188.213.165.209:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.013596, root_disp=0.025208, kiss_code=, ref_id=, ref_addr=193.204.114.232, ref_time=1559246828.086879, org_time=1559246890.027523, rec_time=1559246890.060644, xmt_time=1559246890.060687, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 31.14.133.122:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.01091, root_disp=0.037491, kiss_code=, ref_id=, ref_addr=193.204.114.233, ref_time=1559245577.265702, org_time=1559246890.027512, rec_time=1559246890.069012, xmt_time=1559246890.069048, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 85.199.214.99:123 [version=4, mode=4, std_msg=[stratum=1, poll=16.0, precision=0, root_delay=0.0, root_disp=0.0, kiss_code=, ref_id=GPS\x00, ref_addr=, ref_time=1559246890.0, org_time=1559246890.027469, rec_time=1559246890.070262, xmt_time=1559246890.070268, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 93.41.196.243:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.02832, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246822.05173, rec_time=1559246822.067161, xmt_time=1559246891.027395, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 94.177.187.22:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.02832, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246825.052045, rec_time=1559246825.066358, xmt_time=1559246891.029953, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 93.41.196.243:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.025391, root_disp=0.015671, kiss_code=, ref_id=, ref_addr=193.204.114.233, ref_time=1559246412.455332, org_time=1559246891.027395, rec_time=1559246891.051818, xmt_time=1559246891.051827, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 94.177.187.22:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.013657, root_disp=0.025818, kiss_code=, ref_id=, ref_addr=193.204.114.233, ref_time=1559246788.670839, org_time=1559246891.029953, rec_time=1559246891.061992, xmt_time=1559246891.062023, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 212.45.144.206:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.028336, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246824.061335, rec_time=1559246824.08282, xmt_time=1559246892.027401, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] 
+ntp_message 192.168.43.118 -> 212.45.144.206:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0.000002, root_delay=0.00351, root_disp=0.042603, kiss_code=, ref_id=, ref_addr=193.204.114.232, ref_time=1559245178.020777, org_time=1559246892.027401, rec_time=1559246892.05139, xmt_time=1559246892.051436, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 147.135.207.214:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.028366, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246825.064608, rec_time=1559246825.074985, xmt_time=1559246894.027491, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 147.135.207.214:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0.000008, root_delay=0.042236, root_disp=0.041504, kiss_code=, ref_id=, ref_addr=212.7.1.132, ref_time=1559245356.576177, org_time=1559246894.027491, rec_time=1559246894.059304, xmt_time=1559246894.05938, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 80.211.88.132:123 [version=3, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.028427, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246829.060691, rec_time=1559246829.079018, xmt_time=1559246898.027403, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 80.211.171.177:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.028427, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246830.0714, rec_time=1559246830.08971, xmt_time=1559246898.029953, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 80.211.88.132:123 [version=3, mode=4, std_msg=[stratum=3, poll=64.0, precision=0.000001, root_delay=0.011917, root_disp=0.000565, kiss_code=, ref_id=, ref_addr=185.19.184.35, ref_time=1559246667.219303, org_time=1559246898.027403, rec_time=1559246898.077958, xmt_time=1559246898.078029, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 80.211.171.177:123 [version=4, mode=4, std_msg=[stratum=4, poll=64.0, precision=0, root_delay=0.036819, root_disp=0.050842, kiss_code=, ref_id=, ref_addr=73.98.4.223, ref_time=1559246822.40751, org_time=1559246898.029953, rec_time=1559246898.078347, xmt_time=1559246898.07843, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 147.135.207.213:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.028458, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246833.067975, rec_time=1559246833.07965, xmt_time=1559246900.027439, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 80.211.155.206:123 [version=4, mode=3, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.04628, root_disp=0.028458, kiss_code=, ref_id=, ref_addr=85.199.214.99, ref_time=1559246556.073681, org_time=1559246831.053954, rec_time=1559246831.069547, xmt_time=1559246900.030036, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 80.211.155.206:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0, root_delay=0.013535, root_disp=0.029602, kiss_code=, ref_id=, ref_addr=193.204.114.232, 
ref_time=1559246283.180069, org_time=1559246900.030036, rec_time=1559246900.08881, xmt_time=1559246900.088844, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 147.135.207.213:123 [version=4, mode=4, std_msg=[stratum=2, poll=64.0, precision=0.000008, root_delay=0.042236, root_disp=0.041595, kiss_code=, ref_id=, ref_addr=212.7.1.132, ref_time=1559245356.576177, org_time=1559246900.027439, rec_time=1559246900.103765, xmt_time=1559246900.103887, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 193.204.114.232:123 [version=4, mode=3, std_msg=[stratum=0, poll=16.0, precision=0.015625, root_delay=1.0, root_disp=1.0, kiss_code=\x00\x00\x00\x00, ref_id=, ref_addr=, ref_time=0.0, org_time=0.0, rec_time=0.0, xmt_time=1101309131.444112, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 193.204.114.232:123 [version=4, mode=4, std_msg=[stratum=1, poll=16.0, precision=0, root_delay=0.0, root_disp=0.000122, kiss_code=, ref_id=CTD\x00, ref_addr=, ref_time=1559246910.937978, org_time=1101309131.444112, rec_time=1559246940.281161, xmt_time=1559246940.281191, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.43.118 -> 193.204.114.232:123 [version=2, mode=7, std_msg=, control_msg=, mode7_msg=[req_code=42, auth_bit=F, sequence=0, implementation=3, err=0, data=]] diff --git a/testing/btest/Baseline/scripts.base.protocols.ntp.ntp2/ntp.log b/testing/btest/Baseline/scripts.base.protocols.ntp.ntp2/ntp.log new file mode 100644 index 0000000000..53fac9df63 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ntp.ntp2/ntp.log @@ -0,0 +1,43 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path ntp +#open 2019-06-16-00-59-58 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version mode stratum poll precision root_delay root_disp ref_id ref_time org_time rec_time xmt_time num_exts +#types time string addr port addr port count count count interval interval interval interval string time time time time count +1559246885.027478 CHhAvVGS1DHFjwGM9 192.168.43.118 123 80.211.52.109 123 4 3 2 64.000000 0.000000 0.046280 0.028229 85.199.214.99 1559246556.073681 1559246818.058351 1559246818.079217 1559246885.027449 0 +1559246885.088815 CHhAvVGS1DHFjwGM9 192.168.43.118 123 80.211.52.109 123 4 4 4 64.000000 0.000000 0.048843 0.075409 105.237.207.28 1559245852.721794 1559246885.027449 1559246885.069212 1559246885.069247 0 +1559246887.027467 ClEkJM2Vm5giqnMf4h 192.168.43.118 123 212.45.144.88 123 4 3 2 64.000000 0.000000 0.046280 0.028259 85.199.214.99 1559246556.073681 1559246820.060608 1559246820.081498 1559246887.027425 0 +1559246887.060766 ClEkJM2Vm5giqnMf4h 192.168.43.118 123 212.45.144.88 123 4 4 2 64.000000 0.000000 0.003799 0.037018 193.204.114.233 1559245541.537424 1559246887.027425 1559246887.050758 1559246887.050774 0 +1559246888.027489 C4J4Th3PJpwUYZZ6gc 192.168.43.118 123 31.14.131.188 123 4 3 2 64.000000 0.000000 0.046280 0.028275 85.199.214.99 1559246556.073681 1559246819.064014 1559246819.079147 1559246888.027454 0 +1559246888.030028 CtPZjS20MLrsMUOJi2 192.168.43.118 123 185.19.184.35 123 4 3 2 64.000000 0.000000 0.046280 0.028275 85.199.214.99 1559246556.073681 1559246822.050275 1559246822.064562 1559246888.030021 0 +1559246888.422200 CtPZjS20MLrsMUOJi2 192.168.43.118 123 185.19.184.35 123 4 4 2 64.000000 0.000008 0.003235 0.000565 193.204.114.233 1559246481.481997 1559246888.030021 1559246888.220330 1559246888.220401 0 
+1559246888.422229 C4J4Th3PJpwUYZZ6gc 192.168.43.118 123 31.14.131.188 123 4 4 2 64.000000 0.000000 0.040375 0.001236 195.113.144.238 1559246882.967102 1559246888.027454 1559246888.234035 1559246888.234061 0 +1559246889.027482 CUM0KZ3MLUfNB0cl11 192.168.43.118 123 212.45.144.3 123 4 3 2 64.000000 0.000000 0.046280 0.028290 85.199.214.99 1559246556.073681 1559246822.058090 1559246822.069097 1559246889.027449 0 +1559246889.075261 CUM0KZ3MLUfNB0cl11 192.168.43.118 123 212.45.144.3 123 4 4 2 64.000000 0.000001 0.003510 0.040573 193.204.114.232 1559245278.442390 1559246889.027449 1559246889.061203 1559246889.061220 0 +1559246890.027493 CmES5u32sYpV7JYN 192.168.43.118 123 85.199.214.99 123 4 3 2 64.000000 0.000000 0.046280 0.028305 85.199.214.99 1559246556.073681 1559246824.073855 1559246824.095227 1559246890.027469 0 +1559246890.027517 CP5puj4I8PtEU4qzYg 192.168.43.118 123 31.14.133.122 123 4 3 2 64.000000 0.000000 0.046280 0.028305 85.199.214.99 1559246556.073681 1559246821.052836 1559246821.069165 1559246890.027512 0 +1559246890.027528 C37jN32gN3y3AZzyf6 192.168.43.118 123 188.213.165.209 123 4 3 2 64.000000 0.000000 0.046280 0.028305 85.199.214.99 1559246556.073681 1559246823.123950 1559246823.295751 1559246890.027523 0 +1559246890.076319 C37jN32gN3y3AZzyf6 192.168.43.118 123 188.213.165.209 123 4 4 2 64.000000 0.000000 0.013596 0.025208 193.204.114.232 1559246828.086879 1559246890.027523 1559246890.060644 1559246890.060687 0 +1559246890.082370 CP5puj4I8PtEU4qzYg 192.168.43.118 123 31.14.133.122 123 4 4 2 64.000000 0.000000 0.010910 0.037491 193.204.114.233 1559245577.265702 1559246890.027512 1559246890.069012 1559246890.069048 0 +1559246890.094824 CmES5u32sYpV7JYN 192.168.43.118 123 85.199.214.99 123 4 4 1 16.000000 0.000000 0.000000 0.000000 GPS\x00 1559246890.000000 1559246890.027469 1559246890.070262 1559246890.070268 0 +1559246891.027431 C3eiCBGOLw3VtHfOj 192.168.43.118 123 93.41.196.243 123 4 3 2 64.000000 0.000000 0.046280 0.028320 85.199.214.99 1559246556.073681 1559246822.051730 1559246822.067161 1559246891.027395 0 +1559246891.029967 CwjjYJ2WqgTbAqiHl6 192.168.43.118 123 94.177.187.22 123 4 3 2 64.000000 0.000000 0.046280 0.028320 85.199.214.99 1559246556.073681 1559246825.052045 1559246825.066358 1559246891.029953 0 +1559246891.068733 C3eiCBGOLw3VtHfOj 192.168.43.118 123 93.41.196.243 123 4 4 2 64.000000 0.000000 0.025391 0.015671 193.204.114.233 1559246412.455332 1559246891.027395 1559246891.051818 1559246891.051827 0 +1559246891.075965 CwjjYJ2WqgTbAqiHl6 192.168.43.118 123 94.177.187.22 123 4 4 2 64.000000 0.000000 0.013657 0.025818 193.204.114.233 1559246788.670839 1559246891.029953 1559246891.061992 1559246891.062023 0 +1559246892.027415 C0LAHyvtKSQHyJxIl 192.168.43.118 123 212.45.144.206 123 4 3 2 64.000000 0.000000 0.046280 0.028336 85.199.214.99 1559246556.073681 1559246824.061335 1559246824.082820 1559246892.027401 0 +1559246892.077560 C0LAHyvtKSQHyJxIl 192.168.43.118 123 212.45.144.206 123 4 4 2 64.000000 0.000002 0.003510 0.042603 193.204.114.232 1559245178.020777 1559246892.027401 1559246892.051390 1559246892.051436 0 +1559246894.027523 CFLRIC3zaTU1loLGxh 192.168.43.118 123 147.135.207.214 123 4 3 2 64.000000 0.000000 0.046280 0.028366 85.199.214.99 1559246556.073681 1559246825.064608 1559246825.074985 1559246894.027491 0 +1559246894.070325 CFLRIC3zaTU1loLGxh 192.168.43.118 123 147.135.207.214 123 4 4 2 64.000000 0.000008 0.042236 0.041504 212.7.1.132 1559245356.576177 1559246894.027491 1559246894.059304 1559246894.059380 0 +1559246898.027422 C9rXSW3KSpTYvPrlI1 
192.168.43.118 123 80.211.88.132 123 3 3 2 64.000000 0.000000 0.046280 0.028427 85.199.214.99 1559246556.073681 1559246829.060691 1559246829.079018 1559246898.027403 0 +1559246898.029960 Ck51lg1bScffFj34Ri 192.168.43.118 123 80.211.171.177 123 4 3 2 64.000000 0.000000 0.046280 0.028427 85.199.214.99 1559246556.073681 1559246830.071400 1559246830.089710 1559246898.029953 0 +1559246898.094782 C9rXSW3KSpTYvPrlI1 192.168.43.118 123 80.211.88.132 123 3 4 3 64.000000 0.000001 0.011917 0.000565 185.19.184.35 1559246667.219303 1559246898.027403 1559246898.077958 1559246898.078029 0 +1559246898.094827 Ck51lg1bScffFj34Ri 192.168.43.118 123 80.211.171.177 123 4 4 4 64.000000 0.000000 0.036819 0.050842 73.98.4.223 1559246822.407510 1559246898.029953 1559246898.078347 1559246898.078430 0 +1559246900.027467 C9mvWx3ezztgzcexV7 192.168.43.118 123 147.135.207.213 123 4 3 2 64.000000 0.000000 0.046280 0.028458 85.199.214.99 1559246556.073681 1559246833.067975 1559246833.079650 1559246900.027439 0 +1559246900.030051 CNnMIj2QSd84NKf7U3 192.168.43.118 123 80.211.155.206 123 4 3 2 64.000000 0.000000 0.046280 0.028458 85.199.214.99 1559246556.073681 1559246831.053954 1559246831.069547 1559246900.030036 0 +1559246900.102991 CNnMIj2QSd84NKf7U3 192.168.43.118 123 80.211.155.206 123 4 4 2 64.000000 0.000000 0.013535 0.029602 193.204.114.232 1559246283.180069 1559246900.030036 1559246900.088810 1559246900.088844 0 +1559246900.111834 C9mvWx3ezztgzcexV7 192.168.43.118 123 147.135.207.213 123 4 4 2 64.000000 0.000008 0.042236 0.041595 212.7.1.132 1559245356.576177 1559246900.027439 1559246900.103765 1559246900.103887 0 +1559246940.262220 C7fIlMZDuRiqjpYbb 192.168.43.118 58229 193.204.114.232 123 4 3 0 16.000000 0.015625 1.000000 1.000000 \x00\x00\x00\x00 0.000000 0.000000 0.000000 1101309131.444112 0 +1559246940.304152 C7fIlMZDuRiqjpYbb 192.168.43.118 58229 193.204.114.232 123 4 4 1 16.000000 0.000000 0.000000 0.000122 CTD\x00 1559246910.937978 1101309131.444112 1559246940.281161 1559246940.281191 0 +#close 2019-06-16-00-59-58 diff --git a/testing/btest/Baseline/scripts.base.protocols.ntp.ntp3/.stdout b/testing/btest/Baseline/scripts.base.protocols.ntp.ntp3/.stdout new file mode 100644 index 0000000000..c37665c5d7 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ntp.ntp3/.stdout @@ -0,0 +1,30 @@ +ntp_message 192.168.50.50 -> 67.129.68.9:123 [version=3, mode=1, std_msg=[stratum=0, poll=1024.0, precision=0.015625, root_delay=0.0, root_disp=1.01001, kiss_code=\x00\x00\x00\x00, ref_id=, ref_addr=, ref_time=0.0, org_time=0.0, rec_time=0.0, xmt_time=1096255084.922896, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 69.44.57.60:123 [version=3, mode=1, std_msg=[stratum=0, poll=1024.0, precision=0.015625, root_delay=0.0, root_disp=1.01001, kiss_code=\x00\x00\x00\x00, ref_id=, ref_addr=, ref_time=0.0, org_time=0.0, rec_time=0.0, xmt_time=1096255084.922896, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 207.234.209.181:123 [version=3, mode=1, std_msg=[stratum=0, poll=1024.0, precision=0.015625, root_delay=0.0, root_disp=1.01001, kiss_code=\x00\x00\x00\x00, ref_id=, ref_addr=, ref_time=0.0, org_time=0.0, rec_time=0.0, xmt_time=1096255084.922896, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 209.132.176.4:123 [version=3, mode=1, std_msg=[stratum=0, poll=1024.0, precision=0.015625, root_delay=0.0, root_disp=1.01001, kiss_code=\x00\x00\x00\x00, ref_id=, ref_addr=, ref_time=0.0, org_time=0.0, 
rec_time=0.0, xmt_time=1096255084.922896, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 216.27.185.42:123 [version=3, mode=1, std_msg=[stratum=0, poll=1024.0, precision=0.015625, root_delay=0.0, root_disp=1.01001, kiss_code=\x00\x00\x00\x00, ref_id=, ref_addr=, ref_time=0.0, org_time=0.0, rec_time=0.0, xmt_time=1096255084.922896, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 24.34.79.42:123 [version=3, mode=1, std_msg=[stratum=0, poll=1024.0, precision=0.015625, root_delay=0.0, root_disp=1.01001, kiss_code=\x00\x00\x00\x00, ref_id=, ref_addr=, ref_time=0.0, org_time=0.0, rec_time=0.0, xmt_time=1096255084.922896, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 24.123.202.230:123 [version=3, mode=1, std_msg=[stratum=0, poll=1024.0, precision=0.015625, root_delay=0.0, root_disp=1.01001, kiss_code=\x00\x00\x00\x00, ref_id=, ref_addr=, ref_time=0.0, org_time=0.0, rec_time=0.0, xmt_time=1096255084.922896, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 63.164.62.249:123 [version=3, mode=1, std_msg=[stratum=0, poll=1024.0, precision=0.015625, root_delay=0.0, root_disp=1.01001, kiss_code=\x00\x00\x00\x00, ref_id=, ref_addr=, ref_time=0.0, org_time=0.0, rec_time=0.0, xmt_time=1096255084.922896, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 64.112.189.11:123 [version=3, mode=1, std_msg=[stratum=0, poll=1024.0, precision=0.015625, root_delay=0.0, root_disp=1.01001, kiss_code=\x00\x00\x00\x00, ref_id=, ref_addr=, ref_time=0.0, org_time=0.0, rec_time=0.0, xmt_time=1096255084.922896, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 65.125.233.206:123 [version=3, mode=1, std_msg=[stratum=0, poll=1024.0, precision=0.015625, root_delay=0.0, root_disp=1.01001, kiss_code=\x00\x00\x00\x00, ref_id=, ref_addr=, ref_time=0.0, org_time=0.0, rec_time=0.0, xmt_time=1096255084.922896, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 66.33.206.5:123 [version=3, mode=1, std_msg=[stratum=0, poll=1024.0, precision=0.015625, root_delay=0.0, root_disp=1.01001, kiss_code=\x00\x00\x00\x00, ref_id=, ref_addr=, ref_time=0.0, org_time=0.0, rec_time=0.0, xmt_time=1096255084.932911, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 66.33.216.11:123 [version=3, mode=1, std_msg=[stratum=0, poll=1024.0, precision=0.015625, root_delay=0.0, root_disp=1.01001, kiss_code=\x00\x00\x00\x00, ref_id=, ref_addr=, ref_time=0.0, org_time=0.0, rec_time=0.0, xmt_time=1096255084.932911, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 66.92.68.246:123 [version=3, mode=1, std_msg=[stratum=0, poll=1024.0, precision=0.015625, root_delay=0.0, root_disp=1.01001, kiss_code=\x00\x00\x00\x00, ref_id=, ref_addr=, ref_time=0.0, org_time=0.0, rec_time=0.0, xmt_time=1096255084.932911, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 66.111.46.200:123 [version=3, mode=1, std_msg=[stratum=0, poll=1024.0, precision=0.015625, root_delay=0.0, root_disp=1.01001, kiss_code=\x00\x00\x00\x00, ref_id=, ref_addr=, ref_time=0.0, org_time=0.0, rec_time=0.0, xmt_time=1096255084.932911, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 66.115.136.4:123 [version=3, mode=1, std_msg=[stratum=0, poll=1024.0, precision=0.015625, root_delay=0.0, 
root_disp=1.01001, kiss_code=\x00\x00\x00\x00, ref_id=, ref_addr=, ref_time=0.0, org_time=0.0, rec_time=0.0, xmt_time=1096255084.932911, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 69.44.57.60:123 [version=3, mode=2, std_msg=[stratum=3, poll=1024.0, precision=0.000004, root_delay=0.109238, root_disp=0.081726, kiss_code=, ref_id=, ref_addr=81.174.128.183, ref_time=1096254668.551001, org_time=1096255084.922896, rec_time=1096255083.809713, xmt_time=1096255083.80976, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 24.123.202.230:123 [version=3, mode=2, std_msg=[stratum=2, poll=1024.0, precision=0.000001, root_delay=0.030319, root_disp=0.185547, kiss_code=, ref_id=, ref_addr=198.30.92.2, ref_time=1096252181.259041, org_time=1096255084.922896, rec_time=1096255083.821124, xmt_time=1096255083.821134, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 67.129.68.9:123 [version=3, mode=2, std_msg=[stratum=2, poll=1024.0, precision=0.000008, root_delay=0.060455, root_disp=7.46431, kiss_code=, ref_id=, ref_addr=17.254.0.49, ref_time=1095788645.064548, org_time=1096255084.922896, rec_time=1096255083.848508, xmt_time=1096255083.848601, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 65.125.233.206:123 [version=3, mode=2, std_msg=[stratum=2, poll=1024.0, precision=0.000031, root_delay=0.023254, root_disp=0.012848, kiss_code=, ref_id=, ref_addr=130.207.244.240, ref_time=1096254901.858123, org_time=1096255084.922896, rec_time=1096255083.828025, xmt_time=1096255083.828189, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 63.164.62.249:123 [version=3, mode=2, std_msg=[stratum=2, poll=1024.0, precision=0.000001, root_delay=0.015015, root_disp=0.037491, kiss_code=, ref_id=, ref_addr=18.145.0.30, ref_time=1096254668.213801, org_time=1096255084.922896, rec_time=1096255083.829249, xmt_time=1096255083.829301, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 207.234.209.181:123 [version=3, mode=2, std_msg=[stratum=3, poll=1024.0, precision=0.000008, root_delay=0.072678, root_disp=0.035049, kiss_code=, ref_id=, ref_addr=198.82.1.203, ref_time=1096254326.1896, org_time=1096255084.922896, rec_time=1096255083.824154, xmt_time=1096255083.824174, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 66.92.68.246:123 [version=3, mode=2, std_msg=[stratum=1, poll=1024.0, precision=0.000015, root_delay=0.0, root_disp=0.00032, kiss_code=, ref_id=GPS\x00, ref_addr=, ref_time=1096255078.223498, org_time=1096255084.932911, rec_time=1096255083.836845, xmt_time=1096255083.83687, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 24.34.79.42:123 [version=3, mode=2, std_msg=[stratum=2, poll=1024.0, precision=0.000031, root_delay=0.123322, root_disp=0.039917, kiss_code=, ref_id=, ref_addr=131.107.1.10, ref_time=1096254970.010788, org_time=1096255084.922896, rec_time=1096255083.825662, xmt_time=1096255083.825692, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 66.115.136.4:123 [version=3, mode=2, std_msg=[stratum=2, poll=1024.0, precision=0.000008, root_delay=0.016632, root_disp=0.028641, kiss_code=, ref_id=, ref_addr=130.207.244.240, ref_time=1096254406.517429, org_time=1096255084.932911, rec_time=1096255083.853291, xmt_time=1096255083.853336, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] 
+ntp_message 192.168.50.50 -> 66.33.206.5:123 [version=3, mode=2, std_msg=[stratum=2, poll=1024.0, precision=0.000004, root_delay=0.01236, root_disp=0.022202, kiss_code=, ref_id=, ref_addr=192.12.19.20, ref_time=1096255027.694744, org_time=1096255084.932911, rec_time=1096255083.850895, xmt_time=1096255083.850907, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 66.33.216.11:123 [version=3, mode=2, std_msg=[stratum=2, poll=1024.0, precision=0.000001, root_delay=0.009857, root_disp=0.043747, kiss_code=, ref_id=, ref_addr=204.123.2.72, ref_time=1096254508.255586, org_time=1096255084.932911, rec_time=1096255083.850965, xmt_time=1096255083.851024, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 66.111.46.200:123 [version=3, mode=2, std_msg=[stratum=2, poll=1024.0, precision=0.000001, root_delay=0.056396, root_disp=0.062164, kiss_code=, ref_id=, ref_addr=198.30.92.2, ref_time=1096253376.841474, org_time=1096255084.932911, rec_time=1096255083.847619, xmt_time=1096255083.847644, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 64.112.189.11:123 [version=3, mode=2, std_msg=[stratum=2, poll=1024.0, precision=0.000015, root_delay=0.081268, root_disp=0.029877, kiss_code=, ref_id=, ref_addr=128.10.252.6, ref_time=1096254706.14029, org_time=1096255084.922896, rec_time=1096255083.850451, xmt_time=1096255083.850465, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 216.27.185.42:123 [version=3, mode=2, std_msg=[stratum=2, poll=1024.0, precision=0.000004, root_delay=0.029846, root_disp=0.045456, kiss_code=, ref_id=, ref_addr=164.67.62.194, ref_time=1096254209.896379, org_time=1096255084.922896, rec_time=1096255083.849099, xmt_time=1096255083.849269, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] +ntp_message 192.168.50.50 -> 209.132.176.4:123 [version=3, mode=2, std_msg=[stratum=1, poll=1024.0, precision=0.000015, root_delay=0.0, root_disp=0.000504, kiss_code=, ref_id=CDMA, ref_addr=, ref_time=1096255068.944018, org_time=1096255084.922896, rec_time=1096255083.827772, xmt_time=1096255083.828313, key_id=, digest=, num_exts=0], control_msg=, mode7_msg=] diff --git a/testing/btest/Baseline/scripts.base.protocols.ntp.ntp3/ntp.log b/testing/btest/Baseline/scripts.base.protocols.ntp.ntp3/ntp.log new file mode 100644 index 0000000000..2f1c9cfbb2 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ntp.ntp3/ntp.log @@ -0,0 +1,39 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path ntp +#open 2019-06-16-00-50-01 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version mode stratum poll precision root_delay root_disp ref_id ref_time org_time rec_time xmt_time num_exts +#types time string addr port addr port count count count interval interval interval interval string time time time time count +1096255084.954975 ClEkJM2Vm5giqnMf4h 192.168.50.50 123 67.129.68.9 123 3 1 0 1024.000000 0.015625 0.000000 1.010010 \x00\x00\x00\x00 0.000000 0.000000 0.000000 1096255084.922896 0 +1096255084.955306 C4J4Th3PJpwUYZZ6gc 192.168.50.50 123 69.44.57.60 123 3 1 0 1024.000000 0.015625 0.000000 1.010010 \x00\x00\x00\x00 0.000000 0.000000 0.000000 1096255084.922896 0 +1096255084.955760 CtPZjS20MLrsMUOJi2 192.168.50.50 123 207.234.209.181 123 3 1 0 1024.000000 0.015625 0.000000 1.010010 \x00\x00\x00\x00 0.000000 0.000000 0.000000 1096255084.922896 0 +1096255084.956155 CUM0KZ3MLUfNB0cl11 192.168.50.50 123 209.132.176.4 123 3 1 0 
1024.000000 0.015625 0.000000 1.010010 \x00\x00\x00\x00 0.000000 0.000000 0.000000 1096255084.922896 0 +1096255084.956577 CmES5u32sYpV7JYN 192.168.50.50 123 216.27.185.42 123 3 1 0 1024.000000 0.015625 0.000000 1.010010 \x00\x00\x00\x00 0.000000 0.000000 0.000000 1096255084.922896 0 +1096255084.956975 CP5puj4I8PtEU4qzYg 192.168.50.50 123 24.34.79.42 123 3 1 0 1024.000000 0.015625 0.000000 1.010010 \x00\x00\x00\x00 0.000000 0.000000 0.000000 1096255084.922896 0 +1096255084.957457 C37jN32gN3y3AZzyf6 192.168.50.50 123 24.123.202.230 123 3 1 0 1024.000000 0.015625 0.000000 1.010010 \x00\x00\x00\x00 0.000000 0.000000 0.000000 1096255084.922896 0 +1096255084.957903 C3eiCBGOLw3VtHfOj 192.168.50.50 123 63.164.62.249 123 3 1 0 1024.000000 0.015625 0.000000 1.010010 \x00\x00\x00\x00 0.000000 0.000000 0.000000 1096255084.922896 0 +1096255084.958625 CwjjYJ2WqgTbAqiHl6 192.168.50.50 123 64.112.189.11 123 3 1 0 1024.000000 0.015625 0.000000 1.010010 \x00\x00\x00\x00 0.000000 0.000000 0.000000 1096255084.922896 0 +1096255084.959273 C0LAHyvtKSQHyJxIl 192.168.50.50 123 65.125.233.206 123 3 1 0 1024.000000 0.015625 0.000000 1.010010 \x00\x00\x00\x00 0.000000 0.000000 0.000000 1096255084.922896 0 +1096255084.960065 CFLRIC3zaTU1loLGxh 192.168.50.50 123 66.33.206.5 123 3 1 0 1024.000000 0.015625 0.000000 1.010010 \x00\x00\x00\x00 0.000000 0.000000 0.000000 1096255084.932911 0 +1096255084.960866 C9rXSW3KSpTYvPrlI1 192.168.50.50 123 66.33.216.11 123 3 1 0 1024.000000 0.015625 0.000000 1.010010 \x00\x00\x00\x00 0.000000 0.000000 0.000000 1096255084.932911 0 +1096255084.961475 Ck51lg1bScffFj34Ri 192.168.50.50 123 66.92.68.246 123 3 1 0 1024.000000 0.015625 0.000000 1.010010 \x00\x00\x00\x00 0.000000 0.000000 0.000000 1096255084.932911 0 +1096255084.962222 C9mvWx3ezztgzcexV7 192.168.50.50 123 66.111.46.200 123 3 1 0 1024.000000 0.015625 0.000000 1.010010 \x00\x00\x00\x00 0.000000 0.000000 0.000000 1096255084.932911 0 +1096255084.962915 CNnMIj2QSd84NKf7U3 192.168.50.50 123 66.115.136.4 123 3 1 0 1024.000000 0.015625 0.000000 1.010010 \x00\x00\x00\x00 0.000000 0.000000 0.000000 1096255084.932911 0 +1096255085.012029 C4J4Th3PJpwUYZZ6gc 192.168.50.50 123 69.44.57.60 123 3 2 3 1024.000000 0.000004 0.109238 0.081726 81.174.128.183 1096254668.551001 1096255084.922896 1096255083.809713 1096255083.809760 0 +1096255085.049280 C37jN32gN3y3AZzyf6 192.168.50.50 123 24.123.202.230 123 3 2 2 1024.000000 0.000001 0.030319 0.185547 198.30.92.2 1096252181.259041 1096255084.922896 1096255083.821124 1096255083.821134 0 +1096255085.092991 ClEkJM2Vm5giqnMf4h 192.168.50.50 123 67.129.68.9 123 3 2 2 1024.000000 0.000008 0.060455 7.464310 17.254.0.49 1095788645.064548 1096255084.922896 1096255083.848508 1096255083.848601 0 +1096255085.120557 C0LAHyvtKSQHyJxIl 192.168.50.50 123 65.125.233.206 123 3 2 2 1024.000000 0.000031 0.023254 0.012848 130.207.244.240 1096254901.858123 1096255084.922896 1096255083.828025 1096255083.828189 0 +1096255085.185955 C3eiCBGOLw3VtHfOj 192.168.50.50 123 63.164.62.249 123 3 2 2 1024.000000 0.000001 0.015015 0.037491 18.145.0.30 1096254668.213801 1096255084.922896 1096255083.829249 1096255083.829301 0 +1096255085.223026 CtPZjS20MLrsMUOJi2 192.168.50.50 123 207.234.209.181 123 3 2 3 1024.000000 0.000008 0.072678 0.035049 198.82.1.203 1096254326.189600 1096255084.922896 1096255083.824154 1096255083.824174 0 +1096255085.280949 Ck51lg1bScffFj34Ri 192.168.50.50 123 66.92.68.246 123 3 2 1 1024.000000 0.000015 0.000000 0.000320 GPS\x00 1096255078.223498 1096255084.932911 1096255083.836845 1096255083.836870 0 
+1096255085.304774 CP5puj4I8PtEU4qzYg 192.168.50.50 123 24.34.79.42 123 3 2 2 1024.000000 0.000031 0.123322 0.039917 131.107.1.10 1096254970.010788 1096255084.922896 1096255083.825662 1096255083.825692 0 +1096255085.353360 CNnMIj2QSd84NKf7U3 192.168.50.50 123 66.115.136.4 123 3 2 2 1024.000000 0.000008 0.016632 0.028641 130.207.244.240 1096254406.517429 1096255084.932911 1096255083.853291 1096255083.853336 0 +1096255085.406368 CFLRIC3zaTU1loLGxh 192.168.50.50 123 66.33.206.5 123 3 2 2 1024.000000 0.000004 0.012360 0.022202 192.12.19.20 1096255027.694744 1096255084.932911 1096255083.850895 1096255083.850907 0 +1096255085.439833 C9rXSW3KSpTYvPrlI1 192.168.50.50 123 66.33.216.11 123 3 2 2 1024.000000 0.000001 0.009857 0.043747 204.123.2.72 1096254508.255586 1096255084.932911 1096255083.850965 1096255083.851024 0 +1096255085.480955 C9mvWx3ezztgzcexV7 192.168.50.50 123 66.111.46.200 123 3 2 2 1024.000000 0.000001 0.056396 0.062164 198.30.92.2 1096253376.841474 1096255084.932911 1096255083.847619 1096255083.847644 0 +1096255085.522297 CwjjYJ2WqgTbAqiHl6 192.168.50.50 123 64.112.189.11 123 3 2 2 1024.000000 0.000015 0.081268 0.029877 128.10.252.6 1096254706.140290 1096255084.922896 1096255083.850451 1096255083.850465 0 +1096255085.562197 CmES5u32sYpV7JYN 192.168.50.50 123 216.27.185.42 123 3 2 2 1024.000000 0.000004 0.029846 0.045456 164.67.62.194 1096254209.896379 1096255084.922896 1096255083.849099 1096255083.849269 0 +1096255085.599961 CUM0KZ3MLUfNB0cl11 192.168.50.50 123 209.132.176.4 123 3 2 1 1024.000000 0.000015 0.000000 0.000504 CDMA 1096255068.944018 1096255084.922896 1096255083.827772 1096255083.828313 0 +#close 2019-06-16-00-50-01 diff --git a/testing/btest/Baseline/scripts.base.protocols.ntp.ntpmode67/.stdout b/testing/btest/Baseline/scripts.base.protocols.ntp.ntpmode67/.stdout new file mode 100644 index 0000000000..371eb7dbb4 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ntp.ntpmode67/.stdout @@ -0,0 +1,9 @@ +ntp_message 127.0.0.1 -> 127.0.0.1:123 [version=2, mode=6, std_msg=, control_msg=[op_code=1, resp_bit=F, err_bit=F, more_bit=F, sequence=1, status=0, association_id=0, data=, key_id=, crypto_checksum=], mode7_msg=] +ntp_message 127.0.0.1 -> 127.0.0.1:123 [version=2, mode=6, std_msg=, control_msg=[op_code=2, resp_bit=F, err_bit=F, more_bit=F, sequence=2, status=0, association_id=19183, data=, key_id=, crypto_checksum=], mode7_msg=] +ntp_message 127.0.0.1 -> 127.0.0.1:123 [version=2, mode=6, std_msg=, control_msg=[op_code=2, resp_bit=F, err_bit=F, more_bit=F, sequence=3, status=0, association_id=19184, data=, key_id=, crypto_checksum=], mode7_msg=] +ntp_message 127.0.0.1 -> 127.0.0.1:123 [version=2, mode=7, std_msg=, control_msg=, mode7_msg=[req_code=1, auth_bit=F, sequence=0, implementation=3, err=0, data=]] +ntp_message 127.0.0.1 -> 127.0.0.1:123 [version=2, mode=7, std_msg=, control_msg=, mode7_msg=[req_code=42, auth_bit=F, sequence=0, implementation=3, err=0, data=]] +ntp_message 127.0.0.1 -> 127.0.0.1:123 [version=2, mode=6, std_msg=, control_msg=[op_code=1, resp_bit=F, err_bit=F, more_bit=F, sequence=1, status=0, association_id=0, data=, key_id=, crypto_checksum=], mode7_msg=] +ntp_message 127.0.0.1 -> 127.0.0.1:123 [version=2, mode=6, std_msg=, control_msg=[op_code=2, resp_bit=F, err_bit=F, more_bit=F, sequence=2, status=0, association_id=19183, data=, key_id=, crypto_checksum=], mode7_msg=] +ntp_message 127.0.0.1 -> 127.0.0.1:123 [version=2, mode=6, std_msg=, control_msg=[op_code=2, resp_bit=F, err_bit=F, more_bit=F, sequence=3, status=0, 
association_id=19184, data=, key_id=, crypto_checksum=], mode7_msg=] +ntp_message 127.0.0.1 -> 127.0.0.1:123 [version=2, mode=7, std_msg=, control_msg=, mode7_msg=[req_code=1, auth_bit=F, sequence=0, implementation=3, err=0, data=]] diff --git a/testing/btest/Baseline/scripts.base.protocols.radius.auth/radius.log b/testing/btest/Baseline/scripts.base.protocols.radius.auth/radius.log index bd536ecca2..a681496944 100644 --- a/testing/btest/Baseline/scripts.base.protocols.radius.auth/radius.log +++ b/testing/btest/Baseline/scripts.base.protocols.radius.auth/radius.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path radius -#open 2017-02-20-04-53-55 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p username mac framed_addr remote_ip connect_info reply_msg result ttl -#types time string addr port addr port string string addr addr string string string interval +#open 2019-06-05-18-03-41 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p username mac framed_addr tunnel_client connect_info reply_msg result ttl +#types time string addr port addr port string string addr string string string string interval 1217631137.872968 CHhAvVGS1DHFjwGM9 10.0.0.1 1645 10.0.0.100 1812 John.McGuirk 00:14:22:e9:54:5e 255.255.255.254 - - Hello, %u success 0.043882 -#close 2017-02-20-04-53-55 +#close 2019-06-05-18-03-41 diff --git a/testing/btest/Baseline/scripts.base.protocols.radius.radius-multiple-attempts/radius.log b/testing/btest/Baseline/scripts.base.protocols.radius.radius-multiple-attempts/radius.log index 8dac83de65..510c7ef503 100644 --- a/testing/btest/Baseline/scripts.base.protocols.radius.radius-multiple-attempts/radius.log +++ b/testing/btest/Baseline/scripts.base.protocols.radius.radius-multiple-attempts/radius.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path radius -#open 2017-02-20-04-56-31 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p username mac framed_addr remote_ip connect_info reply_msg result ttl -#types time string addr port addr port string string addr addr string string string interval +#open 2019-06-05-18-04-34 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p username mac framed_addr tunnel_client connect_info reply_msg result ttl +#types time string addr port addr port string string addr string string string string interval 1440447766.440305 CHhAvVGS1DHFjwGM9 127.0.0.1 53031 127.0.0.1 1812 steve - 172.16.3.33 - - - failed 1.005906 1440447839.947454 ClEkJM2Vm5giqnMf4h 127.0.0.1 65443 127.0.0.1 1812 steve - 172.16.3.33 - - - success 0.000779 1440447848.196115 C4J4Th3PJpwUYZZ6gc 127.0.0.1 57717 127.0.0.1 1812 steve - - - - - success 0.000275 @@ -13,4 +13,4 @@ 1440447880.931272 CUM0KZ3MLUfNB0cl11 127.0.0.1 52178 127.0.0.1 1812 steve - - - - - failed 1.001459 1440447904.122012 CmES5u32sYpV7JYN 127.0.0.1 62956 127.0.0.1 1812 steve - - - - - unknown - 1440448190.335333 CP5puj4I8PtEU4qzYg 127.0.0.1 53127 127.0.0.1 1812 steve - - - - - success 0.000517 -#close 2017-02-20-04-56-31 +#close 2019-06-05-18-04-34 diff --git a/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-client-cluster-data/out b/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-client-cluster-data/out new file mode 100644 index 0000000000..53973a2324 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-client-cluster-data/out @@ -0,0 +1,12 @@ +RDP Client Cluster Data +Flags: 0000000d +RedirSessionId: 00000000 +Redirection Supported: 00000000 +ServerSessionRedirectionVersionMask: 00000000 +RedirectionSessionIDFieldValid: 00000000 +RedirectedSmartCard: 
00000000 +RDP Client Channel List Options +80800000 +c0000000 +c0800000 +c0a00000 diff --git a/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-client-security-data/out b/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-client-security-data/out new file mode 100644 index 0000000000..0c7563f5a4 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-client-security-data/out @@ -0,0 +1,5 @@ +rdp_client_security_data, [encryption_methods=27, ext_encryption_methods=0] + 40-bit flag, T + 128-bit flag, T + 56-bit flag, T + fips flag, T diff --git a/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-native-encrypted-data/out b/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-native-encrypted-data/out new file mode 100644 index 0000000000..98d86f4d55 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-native-encrypted-data/out @@ -0,0 +1,3 @@ +rdp native encrypted data, T, 12 +rdp native encrypted data, T, 8 +rdp native encrypted data, F, 11 diff --git a/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-proprietary-encryption/rdp.log b/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-proprietary-encryption/rdp.log index c32ebd3fbb..bf8b038223 100644 --- a/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-proprietary-encryption/rdp.log +++ b/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-proprietary-encryption/rdp.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path rdp -#open 2016-07-13-16-16-47 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p cookie result security_protocol keyboard_layout client_build client_name client_dig_product_id desktop_width desktop_height requested_color_depth cert_type cert_count cert_permanent encryption_level encryption_method -#types time string addr port addr port string string string string string string string count count string string count bool string string -1193369795.014346 CHhAvVGS1DHFjwGM9 172.21.128.16 1311 10.226.24.52 3389 FTBCO\\A70 SSL_NOT_ALLOWED_BY_SERVER - - - - - - - - - 0 - - - -1193369797.582740 ClEkJM2Vm5giqnMf4h 172.21.128.16 1312 10.226.24.52 3389 FTBCO\\A70 Success RDP English - United States RDP 6.0 FROG-POND (empty) 1152 864 32bit RSA 1 T High 128bit -#close 2016-07-13-16-16-48 +#open 2019-05-28-14-29-19 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p cookie result security_protocol client_channels keyboard_layout client_build client_name client_dig_product_id desktop_width desktop_height requested_color_depth cert_type cert_count cert_permanent encryption_level encryption_method +#types time string addr port addr port string string string vector[string] string string string string count count string string count bool string string +1193369795.014346 CHhAvVGS1DHFjwGM9 172.21.128.16 1311 10.226.24.52 3389 FTBCO\\A70 SSL_NOT_ALLOWED_BY_SERVER - - - - - - - - - - 0 - - - +1193369797.582740 ClEkJM2Vm5giqnMf4h 172.21.128.16 1312 10.226.24.52 3389 FTBCO\\A70 Success RDP rdpdr,rdpsnd,drdynvc,cliprdr English - United States RDP 6.0 FROG-POND (empty) 1152 864 32bit RSA 1 T High 128bit +#close 2019-05-28-14-29-19 diff --git a/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-to-ssl/rdp.log b/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-to-ssl/rdp.log index 247e2e56f3..d6be31000c 100644 --- a/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-to-ssl/rdp.log +++ b/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-to-ssl/rdp.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path rdp -#open 2016-07-13-16-16-48 -#fields ts uid 
id.orig_h id.orig_p id.resp_h id.resp_p cookie result security_protocol keyboard_layout client_build client_name client_dig_product_id desktop_width desktop_height requested_color_depth cert_type cert_count cert_permanent encryption_level encryption_method -#types time string addr port addr port string string string string string string string count count string string count bool string string -1297551041.284715 CHhAvVGS1DHFjwGM9 192.168.1.200 49206 192.168.1.150 3389 AWAKECODI encrypted HYBRID - - - - - - - - 0 - - - -1297551078.958821 ClEkJM2Vm5giqnMf4h 192.168.1.200 49207 192.168.1.150 3389 AWAKECODI encrypted HYBRID - - - - - - - - 0 - - - -#close 2016-07-13-16-16-48 +#open 2019-05-28-14-29-20 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p cookie result security_protocol client_channels keyboard_layout client_build client_name client_dig_product_id desktop_width desktop_height requested_color_depth cert_type cert_count cert_permanent encryption_level encryption_method +#types time string addr port addr port string string string vector[string] string string string string count count string string count bool string string +1297551041.284715 CHhAvVGS1DHFjwGM9 192.168.1.200 49206 192.168.1.150 3389 AWAKECODI encrypted HYBRID - - - - - - - - - 0 - - - +1297551078.958821 ClEkJM2Vm5giqnMf4h 192.168.1.200 49207 192.168.1.150 3389 AWAKECODI encrypted HYBRID - - - - - - - - - 0 - - - +#close 2019-05-28-14-29-20 diff --git a/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-x509/rdp.log b/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-x509/rdp.log index 2ee8af8095..e3a87b0edd 100644 --- a/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-x509/rdp.log +++ b/testing/btest/Baseline/scripts.base.protocols.rdp.rdp-x509/rdp.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path rdp -#open 2016-07-13-16-16-49 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p cookie result security_protocol keyboard_layout client_build client_name client_dig_product_id desktop_width desktop_height requested_color_depth cert_type cert_count cert_permanent encryption_level encryption_method -#types time string addr port addr port string string string string string string string count count string string count bool string string -1423755598.202845 CHhAvVGS1DHFjwGM9 192.168.1.1 54990 192.168.1.2 3389 JOHN-PC Success RDP English - United States RDP 8.1 JOHN-PC-LAPTOP 3c571ed0-3415-474b-ae94-74e151b 1920 1080 16bit X.509 2 F Client compatible 128bit -#close 2016-07-13-16-16-49 +#open 2019-05-28-14-29-20 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p cookie result security_protocol client_channels keyboard_layout client_build client_name client_dig_product_id desktop_width desktop_height requested_color_depth cert_type cert_count cert_permanent encryption_level encryption_method +#types time string addr port addr port string string string vector[string] string string string string count count string string count bool string string +1423755598.202845 CHhAvVGS1DHFjwGM9 192.168.1.1 54990 192.168.1.2 3389 JOHN-PC Success RDP rdpdr,rdpsnd,cliprdr,drdynvc English - United States RDP 8.1 JOHN-PC-LAPTOP 3c571ed0-3415-474b-ae94-74e151b 1920 1080 16bit X.509 2 F Client compatible 128bit +#close 2019-05-28-14-29-20 diff --git a/testing/btest/Baseline/scripts.base.protocols.smb.smb2-write-response/.stdout b/testing/btest/Baseline/scripts.base.protocols.smb.smb2-write-response/.stdout new file mode 100644 index 0000000000..87e558461b --- /dev/null +++ 
b/testing/btest/Baseline/scripts.base.protocols.smb.smb2-write-response/.stdout @@ -0,0 +1,2 @@ +smb2_write_response 169.254.128.18 -> 169.254.128.15:445, length: 7000 +[credit_charge=1, status=0, command=9, credits=1, flags=1, message_id=1238, process_id=65279, tree_id=1394175098, session_id=1008934080, signature=\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00] diff --git a/testing/btest/Baseline/doc.manual.data_type_const_simple/.stdout b/testing/btest/Baseline/scripts.base.protocols.ssl.dtls-no-dtls/.stdout similarity index 100% rename from testing/btest/Baseline/doc.manual.data_type_const_simple/.stdout rename to testing/btest/Baseline/scripts.base.protocols.ssl.dtls-no-dtls/.stdout diff --git a/testing/btest/Baseline/scripts.base.protocols.ssl.keyexchange/ssl-all.log b/testing/btest/Baseline/scripts.base.protocols.ssl.keyexchange/ssl-all.log index 7d15c707b3..506c3a5945 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ssl.keyexchange/ssl-all.log +++ b/testing/btest/Baseline/scripts.base.protocols.ssl.keyexchange/ssl-all.log @@ -3,60 +3,60 @@ #empty_field (empty) #unset_field - #path ssl -#open 2018-08-27-22-38-51 +#open 2019-04-29-19-23-03 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version cipher curve server_name resumed last_alert next_protocol established cert_chain_fuids client_cert_chain_fuids subject issuer client_subject client_issuer client_record_version client_random client_cipher_suites server_record_version server_random server_dh_p server_dh_q server_dh_Ys server_ecdh_point server_signature_sig_alg server_signature_hash_alg server_signature server_cert_sha1 client_rsa_pms client_dh_Yc client_ecdh_point #types time string addr port addr port string string string string bool string string bool vector[string] vector[string] string string string string string string string string string string string string string count count string string string string string -1398558136.319509 CHhAvVGS1DHFjwGM9 192.168.18.50 62277 162.219.2.166 443 TLSv12 TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA - - F - - T F6fLv13PBYz8MNqx68,F8cTDl1penwXxGu4K7 (empty) emailAddress=denicadmmail@arcor.de,CN=www.lilawelt.net,C=US CN=StartCom Class 1 Primary Intermediate Server CA,OU=Secure Digital Certificate Signing,O=StartCom Ltd.,C=IL - - TLSv10 1f7f8ae4d8dd45f31ed2e158f5f9ee676b7cb2c92585d8a3e1c2da7e TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_EMPTY_RENEGOTIATION_INFO_SCSV TLSv12 5c3660849d1ba4081e9c5863f11c64233c045d58380ea393bdca5322 bbbc2dcad84674907c43fcf580e9cfdbd958a3f568b42d4b08eed4eb0fb3504c6c030276e710800c5ccbbaa8922614c5beeca565a5fdf1d287a2bc049be6778060e91a92a757e3048f68b076f7d36cc8f29ba5df81dc2ca725ece66270cc9a5035d8ceceef9ea0274a63ab1e58fafd4988d0f65d146757da071df045cfe16b9b 02 af5e4cde6c7ac4ad3f62f9df82e6a378a1c80fccf26abcbd13120339707baae172c0381abde73c3d607c14706bb8ab4d09dd39c5961ea86114c37f6b803554925a3e4c64c54ed1ba171e52f97fa2df2ef7e52725c62635e4c3ab625a018bfa75b266446f24b8e0c13dcc258db35b52e8ed5add68ca54de905395304cf3e1eeac - 1 6 18fd31815d4c5316d23bddb61ada198272ffa76ab0a4f7505b2a232150bd79874a9e5aabd7e39aef8cccdd2eba9cfcef6cc77842c7b359899b976f930e671d9308c3a07eeb6eaf1411a4c91a3523e4a8a4ccf523df2b70d56da5173e862643ad894495f14a94bda7115cedda87d520e524f917197dec625ffa1283d156e39f2daa49424a42aba2e0d0e43ebd537f348db770fe6c901a17432c7dd1a9146a2039c383f293cab5b04cb653332454883396863dd419b63745cc65bfc905c06a5d4283f5ff33a1d59584610293a1b08da9a6884c8e67675568e2c357b3d040350694c8b1c74c4be16e76eafeb8efe69dc501154fd36347d04ffc0c6e9a5646a1902a 
c3d48226a8f94d3bbb49918ac02187493258e74e - 0080545ca1e5a9978e411a23f7ce3b50d2919cb7da2dfd4c97d1dd20db9535d6240b684751b08845d44b780750371c5f229903cf59216bcfbe255de370f9a801177fa0dd11061a0173cd7fe4d740e3a74cc594a8c2510d03039126388730c2c73ca0db5fdad2a2021e9ea025b86dc0ba87aea5629246a4cf0f98726fcda9c89d4483 - -#close 2018-08-27-22-38-51 +1398558136.319509 CHhAvVGS1DHFjwGM9 192.168.18.50 62277 162.219.2.166 443 TLSv12 TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA - - F - - T F6fLv13PBYz8MNqx68,F8cTDl1penwXxGu4K7 (empty) emailAddress=denicadmmail@arcor.de,CN=www.lilawelt.net,C=US CN=StartCom Class 1 Primary Intermediate Server CA,OU=Secure Digital Certificate Signing,O=StartCom Ltd.,C=IL - - TLSv10 1f7f8ae4d8dd45f31ed2e158f5f9ee676b7cb2c92585d8a3e1c2da7e TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_EMPTY_RENEGOTIATION_INFO_SCSV TLSv12 535c4db35c3660849d1ba4081e9c5863f11c64233c045d58380ea393bdca5322 bbbc2dcad84674907c43fcf580e9cfdbd958a3f568b42d4b08eed4eb0fb3504c6c030276e710800c5ccbbaa8922614c5beeca565a5fdf1d287a2bc049be6778060e91a92a757e3048f68b076f7d36cc8f29ba5df81dc2ca725ece66270cc9a5035d8ceceef9ea0274a63ab1e58fafd4988d0f65d146757da071df045cfe16b9b 02 af5e4cde6c7ac4ad3f62f9df82e6a378a1c80fccf26abcbd13120339707baae172c0381abde73c3d607c14706bb8ab4d09dd39c5961ea86114c37f6b803554925a3e4c64c54ed1ba171e52f97fa2df2ef7e52725c62635e4c3ab625a018bfa75b266446f24b8e0c13dcc258db35b52e8ed5add68ca54de905395304cf3e1eeac - 1 6 18fd31815d4c5316d23bddb61ada198272ffa76ab0a4f7505b2a232150bd79874a9e5aabd7e39aef8cccdd2eba9cfcef6cc77842c7b359899b976f930e671d9308c3a07eeb6eaf1411a4c91a3523e4a8a4ccf523df2b70d56da5173e862643ad894495f14a94bda7115cedda87d520e524f917197dec625ffa1283d156e39f2daa49424a42aba2e0d0e43ebd537f348db770fe6c901a17432c7dd1a9146a2039c383f293cab5b04cb653332454883396863dd419b63745cc65bfc905c06a5d4283f5ff33a1d59584610293a1b08da9a6884c8e67675568e2c357b3d040350694c8b1c74c4be16e76eafeb8efe69dc501154fd36347d04ffc0c6e9a5646a1902a c3d48226a8f94d3bbb49918ac02187493258e74e - 0080545ca1e5a9978e411a23f7ce3b50d2919cb7da2dfd4c97d1dd20db9535d6240b684751b08845d44b780750371c5f229903cf59216bcfbe255de370f9a801177fa0dd11061a0173cd7fe4d740e3a74cc594a8c2510d03039126388730c2c73ca0db5fdad2a2021e9ea025b86dc0ba87aea5629246a4cf0f98726fcda9c89d4483 - +#close 2019-04-29-19-23-03 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path ssl -#open 2018-08-27-22-38-52 +#open 2019-04-29-19-23-03 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version cipher curve server_name resumed last_alert next_protocol established cert_chain_fuids client_cert_chain_fuids subject issuer client_subject client_issuer client_record_version client_random client_cipher_suites server_record_version server_random server_dh_p server_dh_q server_dh_Ys server_ecdh_point server_signature_sig_alg server_signature_hash_alg server_signature server_cert_sha1 client_rsa_pms client_dh_Yc client_ecdh_point #types time string addr port addr port string string string string bool string string bool vector[string] vector[string] string string string string string string string string string string string string string count count string string string string string -1398529018.678827 CHhAvVGS1DHFjwGM9 192.168.18.50 56981 74.125.239.97 443 TLSv12 TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA secp256r1 - F - - T FDy6ve1m58lwPRfhE9,FnGjwc1EVGk5x0WZk5,F2T07R1XZFCmeWafv2 (empty) CN=*.google.com,O=Google Inc,L=Mountain View,ST=California,C=US CN=Google Internet Authority G2,O=Google Inc,C=US - - TLSv10 d170a048a025925479f1a573610851d30a1f3e7267836932797def95 
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_EMPTY_RENEGOTIATION_INFO_SCSV TLSv12 5cb1fbd2e5c1f3605984d826eca11a8562b3c36d1f70fa44ba2f723c - - - 04c177ab173fed188d8455b2bd0eeac7c1fc334b5d9d38e651b6a31cbda4a7b62a4a222493711e6aec7590d27292ba300d722841ca52795ca55b9b26d12730b807 1 6 bb8ed698a89f33367af245236d1483c2caa406f61a6e3639a6483c8ed3baadaf18bfdfd967697ad29497dd7f16fde1b5d8933b6f5d72e63f0e0dfd416785a3ee3ad7b6d65e71c67c219740723695136678feaca0db5f1cd00a2f2c5b1a0b83098e796bb6539b486639ab02a288d0f0bf68123151437e1b2ef610af17993a107acfcb3791d00b509a5271ddcf60b31b202571c06ceaf51b846a0ff8fd85cf1bc99f82bb936bae69a13f81727f0810280306abb942fd80e0fdf93a51e7e036c26e429295aa60e36506ab1762d49e31152d02bd7850fcaa251219b3dde81ea5fc61c4c63b940120fa6847ccc43fad0a2ac252153254baa03b0baebb6db899ade45e e2fb0771ee6fc0d0e324bc863c02b57921257c86 - - 4104a92b630b25f4404c632dcf9cf454d1cf685a95f4d7c34e1bed244d1051c6bf9fda52edd0c840620b6ddf7941f9ee8a2684eec11a5a2131a0a3389d1e49122472 -#close 2018-08-27-22-38-52 +1398529018.678827 CHhAvVGS1DHFjwGM9 192.168.18.50 56981 74.125.239.97 443 TLSv12 TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA secp256r1 - F - - T FDy6ve1m58lwPRfhE9,FnGjwc1EVGk5x0WZk5,F2T07R1XZFCmeWafv2 (empty) CN=*.google.com,O=Google Inc,L=Mountain View,ST=California,C=US CN=Google Internet Authority G2,O=Google Inc,C=US - - TLSv10 d170a048a025925479f1a573610851d30a1f3e7267836932797def95 TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_EMPTY_RENEGOTIATION_INFO_SCSV TLSv12 535bdbf95cb1fbd2e5c1f3605984d826eca11a8562b3c36d1f70fa44ba2f723c - - - 04c177ab173fed188d8455b2bd0eeac7c1fc334b5d9d38e651b6a31cbda4a7b62a4a222493711e6aec7590d27292ba300d722841ca52795ca55b9b26d12730b807 1 6 bb8ed698a89f33367af245236d1483c2caa406f61a6e3639a6483c8ed3baadaf18bfdfd967697ad29497dd7f16fde1b5d8933b6f5d72e63f0e0dfd416785a3ee3ad7b6d65e71c67c219740723695136678feaca0db5f1cd00a2f2c5b1a0b83098e796bb6539b486639ab02a288d0f0bf68123151437e1b2ef610af17993a107acfcb3791d00b509a5271ddcf60b31b202571c06ceaf51b846a0ff8fd85cf1bc99f82bb936bae69a13f81727f0810280306abb942fd80e0fdf93a51e7e036c26e429295aa60e36506ab1762d49e31152d02bd7850fcaa251219b3dde81ea5fc61c4c63b940120fa6847ccc43fad0a2ac252153254baa03b0baebb6db899ade45e e2fb0771ee6fc0d0e324bc863c02b57921257c86 - - 4104a92b630b25f4404c632dcf9cf454d1cf685a95f4d7c34e1bed244d1051c6bf9fda52edd0c840620b6ddf7941f9ee8a2684eec11a5a2131a0a3389d1e49122472 +#close 2019-04-29-19-23-03 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path ssl -#open 2018-08-27-22-38-52 +#open 2019-04-29-19-23-04 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version cipher curve server_name resumed last_alert next_protocol established cert_chain_fuids client_cert_chain_fuids subject issuer client_subject client_issuer client_record_version client_random client_cipher_suites server_record_version server_random server_dh_p server_dh_q server_dh_Ys server_ecdh_point server_signature_sig_alg server_signature_hash_alg server_signature server_cert_sha1 client_rsa_pms client_dh_Yc client_ecdh_point #types time string addr port addr port string string string string bool string string bool vector[string] vector[string] string string string string string string string string string string string string string count count string string string string string -1170717505.549109 CHhAvVGS1DHFjwGM9 192.150.187.164 58868 194.127.84.106 443 TLSv10 TLS_RSA_WITH_RC4_128_MD5 - - F - - T FeCwNK3rzqPnZ7eBQ5,FfqS7r3rymnsSKq0m2 (empty) CN=www.dresdner-privat.de,OU=Terms of use at www.verisign.com/rpa (c)00,O=AGIS Allianz Dresdner Informationssysteme 
GmbH,L=Muenchen,ST=Bayern,C=DE OU=www.verisign.com/CPS Incorp.by Ref. LIABILITY LTD.(c)97 VeriSign,OU=VeriSign International Server CA - Class 3,OU=VeriSign\\, Inc.,O=VeriSign Trust Network - - unknown-0 e6b8efdf91cf44f7eae43c83398fdcb2 TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_DSS_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_DSS_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_RC4_128_MD5,TLS_RSA_WITH_RC4_128_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_RSA_WITH_DES_CBC_SHA,TLS_DHE_DSS_WITH_DES_CBC_SHA,SSL_RSA_FIPS_WITH_DES_CBC_SHA,TLS_RSA_WITH_DES_CBC_SHA,TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,TLS_RSA_EXPORT_WITH_RC4_40_MD5,TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 TLSv10 2b658d5183bbaedbf35e8f126ff926b14979cd703d242aea996a5fda - - - - - - - 2c322ae2b7fe91391345e070b63668978bb1c9da 008057aaeea52e6d030e54fa9328781fda6f8de80ed8531946bfa8adc4b51ca7502cbce62bae6949f6b865d7125e256643b5ede4dd4cf42107cfa73c418f10881edf38a75f968b507f08f9c1089ef26bfd322cf44c0b746b8e3dff731f2585dcf26abb048d55e661e1d2868ccc9c338e451c30431239f96a00e4843b6aa00ba51785 - - -1170717508.697180 ClEkJM2Vm5giqnMf4h 192.150.187.164 58869 194.127.84.106 443 TLSv10 TLS_RSA_WITH_RC4_128_MD5 - - F - - T FjkLnG4s34DVZlaBNc,FpMjNF4snD7UDqI5sk (empty) CN=www.dresdner-privat.de,OU=Terms of use at www.verisign.com/rpa (c)00,O=AGIS Allianz Dresdner Informationssysteme GmbH,L=Muenchen,ST=Bayern,C=DE OU=www.verisign.com/CPS Incorp.by Ref. LIABILITY LTD.(c)97 VeriSign,OU=VeriSign International Server CA - Class 3,OU=VeriSign\\, Inc.,O=VeriSign Trust Network - - TLSv10 a8a2ab739a64abb4e68cfcfc3470ff6269b1a86858501fbbd1327ed8 TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_DSS_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_DSS_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_RC4_128_MD5,TLS_RSA_WITH_RC4_128_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_RSA_WITH_DES_CBC_SHA,TLS_DHE_DSS_WITH_DES_CBC_SHA,SSL_RSA_FIPS_WITH_DES_CBC_SHA,TLS_RSA_WITH_DES_CBC_SHA,TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,TLS_RSA_EXPORT_WITH_RC4_40_MD5,TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 TLSv10 0fac7f7823587c68438c87876533af7b0baa2a8f1078eb8d182247e9 - - - - - - - 2c322ae2b7fe91391345e070b63668978bb1c9da 0080891c1b6b5f0ec9da1b38d5ba6efe9c0380219d1ac4e63a0e8993306cddc6944a57c9292beb5652794181f747d0e868b84dca7dfe9783d1baa2ef3bb68d929b2818c5b58b8f47663220f9781fa469fea7e7d17d410d3979aa15a7be651c9f16fbf1a04f87a95e742c3fe20ca6faf0d2e950708533fd3346e17e410f0f86c01f52 - - -1170717511.722913 C4J4Th3PJpwUYZZ6gc 192.150.187.164 58870 194.127.84.106 443 TLSv10 TLS_RSA_WITH_RC4_128_MD5 - - F - - T FQXAWgI2FB5STbrff,FUmSiM3TCtsyMGhcd (empty) CN=www.dresdner-privat.de,OU=Terms of use at www.verisign.com/rpa (c)00,O=AGIS Allianz Dresdner Informationssysteme GmbH,L=Muenchen,ST=Bayern,C=DE OU=www.verisign.com/CPS Incorp.by Ref. 
LIABILITY LTD.(c)97 VeriSign,OU=VeriSign International Server CA - Class 3,OU=VeriSign\\, Inc.,O=VeriSign Trust Network - - TLSv10 240604be2f5644c8dfd2e51cc2b3a30171bd58853ed7c6e3fcd18846 TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_DSS_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_DSS_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_RC4_128_MD5,TLS_RSA_WITH_RC4_128_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_RSA_WITH_DES_CBC_SHA,TLS_DHE_DSS_WITH_DES_CBC_SHA,SSL_RSA_FIPS_WITH_DES_CBC_SHA,TLS_RSA_WITH_DES_CBC_SHA,TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,TLS_RSA_EXPORT_WITH_RC4_40_MD5,TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 TLSv10 fd1b8c1308a2caac010fcb76e9bd21987d897cb6c028cdb3176d5904 - - - - - - - 2c322ae2b7fe91391345e070b63668978bb1c9da 008032a6f5fd530f342e4d5b4043765005ba018f488800f897c259b005ad2a544f5800e99812d9a6336e84b07e4595d1b8ae00a582d91804fe715c132d1bdb112e66361db80a57a441fc8ea784ea76ec44b9f3a0f9ddc29be68010ff3bcfffc285a294511991d7952cbbfee88a869818bae31f32f7099b0754d9ce75b8fea887e1b8 - - -#close 2018-08-27-22-38-52 +1170717505.549109 CHhAvVGS1DHFjwGM9 192.150.187.164 58868 194.127.84.106 443 TLSv10 TLS_RSA_WITH_RC4_128_MD5 - - F - - T FeCwNK3rzqPnZ7eBQ5,FfqS7r3rymnsSKq0m2 (empty) CN=www.dresdner-privat.de,OU=Terms of use at www.verisign.com/rpa (c)00,O=AGIS Allianz Dresdner Informationssysteme GmbH,L=Muenchen,ST=Bayern,C=DE OU=www.verisign.com/CPS Incorp.by Ref. LIABILITY LTD.(c)97 VeriSign,OU=VeriSign International Server CA - Class 3,OU=VeriSign\\, Inc.,O=VeriSign Trust Network - - unknown-0 e6b8efdf91cf44f7eae43c83398fdcb2 TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_DSS_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_DSS_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_RC4_128_MD5,TLS_RSA_WITH_RC4_128_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_RSA_WITH_DES_CBC_SHA,TLS_DHE_DSS_WITH_DES_CBC_SHA,SSL_RSA_FIPS_WITH_DES_CBC_SHA,TLS_RSA_WITH_DES_CBC_SHA,TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,TLS_RSA_EXPORT_WITH_RC4_40_MD5,TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 TLSv10 45c7bb492b658d5183bbaedbf35e8f126ff926b14979cd703d242aea996a5fda - - - - - - - 2c322ae2b7fe91391345e070b63668978bb1c9da 008057aaeea52e6d030e54fa9328781fda6f8de80ed8531946bfa8adc4b51ca7502cbce62bae6949f6b865d7125e256643b5ede4dd4cf42107cfa73c418f10881edf38a75f968b507f08f9c1089ef26bfd322cf44c0b746b8e3dff731f2585dcf26abb048d55e661e1d2868ccc9c338e451c30431239f96a00e4843b6aa00ba51785 - - +1170717508.697180 ClEkJM2Vm5giqnMf4h 192.150.187.164 58869 194.127.84.106 443 TLSv10 TLS_RSA_WITH_RC4_128_MD5 - - F - - T FjkLnG4s34DVZlaBNc,FpMjNF4snD7UDqI5sk (empty) CN=www.dresdner-privat.de,OU=Terms of use at www.verisign.com/rpa (c)00,O=AGIS Allianz Dresdner Informationssysteme GmbH,L=Muenchen,ST=Bayern,C=DE OU=www.verisign.com/CPS Incorp.by Ref. 
LIABILITY LTD.(c)97 VeriSign,OU=VeriSign International Server CA - Class 3,OU=VeriSign\\, Inc.,O=VeriSign Trust Network - - TLSv10 a8a2ab739a64abb4e68cfcfc3470ff6269b1a86858501fbbd1327ed8 TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_DSS_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_DSS_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_RC4_128_MD5,TLS_RSA_WITH_RC4_128_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_RSA_WITH_DES_CBC_SHA,TLS_DHE_DSS_WITH_DES_CBC_SHA,SSL_RSA_FIPS_WITH_DES_CBC_SHA,TLS_RSA_WITH_DES_CBC_SHA,TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,TLS_RSA_EXPORT_WITH_RC4_40_MD5,TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 TLSv10 45c7bb4c0fac7f7823587c68438c87876533af7b0baa2a8f1078eb8d182247e9 - - - - - - - 2c322ae2b7fe91391345e070b63668978bb1c9da 0080891c1b6b5f0ec9da1b38d5ba6efe9c0380219d1ac4e63a0e8993306cddc6944a57c9292beb5652794181f747d0e868b84dca7dfe9783d1baa2ef3bb68d929b2818c5b58b8f47663220f9781fa469fea7e7d17d410d3979aa15a7be651c9f16fbf1a04f87a95e742c3fe20ca6faf0d2e950708533fd3346e17e410f0f86c01f52 - - +1170717511.722913 C4J4Th3PJpwUYZZ6gc 192.150.187.164 58870 194.127.84.106 443 TLSv10 TLS_RSA_WITH_RC4_128_MD5 - - F - - T FQXAWgI2FB5STbrff,FUmSiM3TCtsyMGhcd (empty) CN=www.dresdner-privat.de,OU=Terms of use at www.verisign.com/rpa (c)00,O=AGIS Allianz Dresdner Informationssysteme GmbH,L=Muenchen,ST=Bayern,C=DE OU=www.verisign.com/CPS Incorp.by Ref. LIABILITY LTD.(c)97 VeriSign,OU=VeriSign International Server CA - Class 3,OU=VeriSign\\, Inc.,O=VeriSign Trust Network - - TLSv10 240604be2f5644c8dfd2e51cc2b3a30171bd58853ed7c6e3fcd18846 TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_DSS_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_DSS_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_RC4_128_MD5,TLS_RSA_WITH_RC4_128_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_RSA_WITH_DES_CBC_SHA,TLS_DHE_DSS_WITH_DES_CBC_SHA,SSL_RSA_FIPS_WITH_DES_CBC_SHA,TLS_RSA_WITH_DES_CBC_SHA,TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,TLS_RSA_EXPORT_WITH_RC4_40_MD5,TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 TLSv10 45c7bb4ffd1b8c1308a2caac010fcb76e9bd21987d897cb6c028cdb3176d5904 - - - - - - - 2c322ae2b7fe91391345e070b63668978bb1c9da 008032a6f5fd530f342e4d5b4043765005ba018f488800f897c259b005ad2a544f5800e99812d9a6336e84b07e4595d1b8ae00a582d91804fe715c132d1bdb112e66361db80a57a441fc8ea784ea76ec44b9f3a0f9ddc29be68010ff3bcfffc285a294511991d7952cbbfee88a869818bae31f32f7099b0754d9ce75b8fea887e1b8 - - +#close 2019-04-29-19-23-04 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path ssl -#open 2018-08-27-22-38-53 +#open 2019-04-29-19-23-05 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version cipher curve server_name resumed last_alert next_protocol established cert_chain_fuids client_cert_chain_fuids subject issuer client_subject client_issuer client_record_version client_random client_cipher_suites server_record_version server_random server_dh_p server_dh_q server_dh_Ys server_ecdh_point server_signature_sig_alg server_signature_hash_alg server_signature server_cert_sha1 client_rsa_pms client_dh_Yc client_ecdh_point #types time string addr port addr port string string string string bool string string bool vector[string] vector[string] string string 
string string string string string string string string string string string count count string string string string string -1512072318.429417 CHhAvVGS1DHFjwGM9 192.168.17.58 62987 216.58.192.14 443 TLSv11 TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA secp256r1 - F - - T F1uIRd10FHM79akjJ1,FBy2pg1ix88ibHSEEf,FlfUEZ3rbay3xxsd9i (empty) CN=*.google.com,O=Google Inc,L=Mountain View,ST=California,C=US CN=Google Internet Authority G2,O=Google Inc,C=US - - TLSv10 ae1b693f91b97315fc38b4b19f600e2aff7f24ce9b11bf538b1667e5 TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_DSS_WITH_AES_256_CBC_SHA,TLS_DH_RSA_WITH_AES_256_CBC_SHA,TLS_DH_DSS_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_DSS_WITH_AES_128_CBC_SHA,TLS_DH_RSA_WITH_AES_128_CBC_SHA,TLS_DH_DSS_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_SEED_CBC_SHA,TLS_DHE_DSS_WITH_SEED_CBC_SHA,TLS_DH_RSA_WITH_SEED_CBC_SHA,TLS_DH_DSS_WITH_SEED_CBC_SHA,TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_SEED_CBC_SHA,TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,TLS_RSA_WITH_IDEA_CBC_SHA,TLS_ECDHE_RSA_WITH_RC4_128_SHA,TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,TLS_ECDH_RSA_WITH_RC4_128_SHA,TLS_ECDH_ECDSA_WITH_RC4_128_SHA,TLS_RSA_WITH_RC4_128_SHA,TLS_RSA_WITH_RC4_128_MD5,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_EMPTY_RENEGOTIATION_INFO_SCSV TLSv11 0bdeb3f9e87d53e65a458a89d647f40fab7658f9d4a6ac93a5a65d71 - - - 04c8dd2cfb5dce034588f47acea36d8a0443857ec302c7be2974ce2a5a6d8db18e6161b1ee657dacc3b6ceb92f52dd122f0d466e01f21a39dfe35d48143e41d3cb 256 256 72abf64adf8d025394e3dddab15681f669efc25301458e20a35d2c0c8aa696992c49baca5096656dbae6acd79374aaec2c0be0b85614d8d647f4e56e956d52d959761f3a18ef80a695e6cd549ba4f2802e44983382b07d0fde27296bbb1fa72bb7ceb1b0ae1959bbcf9e4560d9771c2267518b44b9e6f472fa6b9fe6c60d41a57dc0de81d9cc57706a80e0818170e503dd44f221160096593ea2f83bd8755e0ae4a3380b5c52811eb33d95944535148bed5f16817df4b9938be40b4bc8f55f86ded30efe48a0f37fd66316fba484f62dd2f7e1c0825b59b84aa5cbee6c0fd09779023f3e5ea6e7ec337d9acc1cb831c5df5f6499ed97c1f454d31e5a323b541a b453697b78df7c522c3e2bfc889b7fa6674903ca - - 4104887d740719eb306e32bf94ba4b9bf31ecabf9cca860e12f7fa55ac95c6676b0da90513aa453b18b82bf424bf2654a72a46b8d3d19210502a88381ba146533792 -#close 2018-08-27-22-38-53 +1512072318.429417 CHhAvVGS1DHFjwGM9 192.168.17.58 62987 216.58.192.14 443 TLSv11 TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA secp256r1 - F - - T F1uIRd10FHM79akjJ1,FBy2pg1ix88ibHSEEf,FlfUEZ3rbay3xxsd9i (empty) CN=*.google.com,O=Google Inc,L=Mountain View,ST=California,C=US CN=Google Internet Authority G2,O=Google Inc,C=US - - TLSv10 ae1b693f91b97315fc38b4b19f600e2aff7f24ce9b11bf538b1667e5 
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_DSS_WITH_AES_256_CBC_SHA,TLS_DH_RSA_WITH_AES_256_CBC_SHA,TLS_DH_DSS_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_DSS_WITH_AES_128_CBC_SHA,TLS_DH_RSA_WITH_AES_128_CBC_SHA,TLS_DH_DSS_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_SEED_CBC_SHA,TLS_DHE_DSS_WITH_SEED_CBC_SHA,TLS_DH_RSA_WITH_SEED_CBC_SHA,TLS_DH_DSS_WITH_SEED_CBC_SHA,TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_SEED_CBC_SHA,TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,TLS_RSA_WITH_IDEA_CBC_SHA,TLS_ECDHE_RSA_WITH_RC4_128_SHA,TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,TLS_ECDH_RSA_WITH_RC4_128_SHA,TLS_ECDH_ECDSA_WITH_RC4_128_SHA,TLS_RSA_WITH_RC4_128_SHA,TLS_RSA_WITH_RC4_128_MD5,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_EMPTY_RENEGOTIATION_INFO_SCSV TLSv11 5a20647e0bdeb3f9e87d53e65a458a89d647f40fab7658f9d4a6ac93a5a65d71 - - - 04c8dd2cfb5dce034588f47acea36d8a0443857ec302c7be2974ce2a5a6d8db18e6161b1ee657dacc3b6ceb92f52dd122f0d466e01f21a39dfe35d48143e41d3cb 256 256 72abf64adf8d025394e3dddab15681f669efc25301458e20a35d2c0c8aa696992c49baca5096656dbae6acd79374aaec2c0be0b85614d8d647f4e56e956d52d959761f3a18ef80a695e6cd549ba4f2802e44983382b07d0fde27296bbb1fa72bb7ceb1b0ae1959bbcf9e4560d9771c2267518b44b9e6f472fa6b9fe6c60d41a57dc0de81d9cc57706a80e0818170e503dd44f221160096593ea2f83bd8755e0ae4a3380b5c52811eb33d95944535148bed5f16817df4b9938be40b4bc8f55f86ded30efe48a0f37fd66316fba484f62dd2f7e1c0825b59b84aa5cbee6c0fd09779023f3e5ea6e7ec337d9acc1cb831c5df5f6499ed97c1f454d31e5a323b541a b453697b78df7c522c3e2bfc889b7fa6674903ca - - 4104887d740719eb306e32bf94ba4b9bf31ecabf9cca860e12f7fa55ac95c6676b0da90513aa453b18b82bf424bf2654a72a46b8d3d19210502a88381ba146533792 +#close 2019-04-29-19-23-05 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path ssl -#open 2018-08-27-22-38-54 +#open 2019-04-29-19-23-06 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version cipher curve server_name resumed last_alert next_protocol established cert_chain_fuids client_cert_chain_fuids subject issuer client_subject client_issuer client_record_version client_random client_cipher_suites server_record_version server_random server_dh_p server_dh_q server_dh_Ys server_ecdh_point server_signature_sig_alg server_signature_hash_alg server_signature server_cert_sha1 client_rsa_pms client_dh_Yc client_ecdh_point #types time string addr port addr port string string string string bool string string bool vector[string] vector[string] string string string string string string string string string string string string string count count string string string string string -1425932016.520029 CHhAvVGS1DHFjwGM9 192.168.6.86 63721 
104.236.167.107 4433 DTLSv10 TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA secp256r1 - F - - T FZi2Ct2AcCswhiIjKe (empty) CN=bro CN=bro - - DTLSv10 543f24d1a377e53b63d935157e76c81e2067b1333bccaad6c24ce92d TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_DSS_WITH_AES_256_CBC_SHA,TLS_DH_RSA_WITH_AES_256_CBC_SHA,TLS_DH_DSS_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_DSS_WITH_AES_128_CBC_SHA,TLS_DH_RSA_WITH_AES_128_CBC_SHA,TLS_DH_DSS_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_SEED_CBC_SHA,TLS_DHE_DSS_WITH_SEED_CBC_SHA,TLS_DH_RSA_WITH_SEED_CBC_SHA,TLS_DH_DSS_WITH_SEED_CBC_SHA,TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_SEED_CBC_SHA,TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,TLS_RSA_WITH_IDEA_CBC_SHA,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_RSA_WITH_DES_CBC_SHA,TLS_DHE_DSS_WITH_DES_CBC_SHA,TLS_DH_RSA_WITH_DES_CBC_SHA,TLS_DH_DSS_WITH_DES_CBC_SHA,TLS_RSA_WITH_DES_CBC_SHA,TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,TLS_EMPTY_RENEGOTIATION_INFO_SCSV DTLSv10 e29e9780bd73e567dba0ae66ed5b7fb1ee86efba4b09f98bd7b03ad2 - - - 043c5e4b4508b840ef8ac34f592fba8716445aeb9ab2028695541ea62eb79b735da9dbfdbdd01a7beab2c832a633b7fd1ce278659355d7b8a1c88503bfb938b7ef 256 256 17569f292088d5383ffa009ffd5ae4a34b5aec68a206d68eea910b808831c098e5385b2fcf49bbd5df914d2b9d7efcd67a493c324daf48c929bdb3838e56fef25d67f45d6f03f7b195a9d688ec5efe96f1ffe0d88e73458b87175fac7073ca8d8e340657e805cb1e91db02ee687fe5ce37c57fb177368bf3ac787971591a67eaf1880eabac8307ec74e269539b9894781c0026ea61101dafbac1995bc32d39584a03ef82d413731df06dae085dc5984b7fcbedd860715fb84ebb75e74406b88bee23533eba46fe5b3f0936c130e262dcc48d3809f5e208719a70a2a918c0e9fe60b4e992ac555048ff6c2cd077ca2afdc0c36cde432a38c1058fb6bd9cb2cc39 fa6d780625219f5e1ae0b4c863e8321328241134 - - 4104093d316a7b6bdfdbc28c02516e145b8f52881cbb7a5f327e3d0967fc4303617d03d423277420024e6f89b9ab16414681d47a221998a2ba85c4e2f625a0ad7c49 -#close 2018-08-27-22-38-54 +1425932016.520029 CHhAvVGS1DHFjwGM9 192.168.6.86 63721 104.236.167.107 4433 DTLSv10 TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA secp256r1 - F - - T FZi2Ct2AcCswhiIjKe (empty) CN=bro CN=bro - - DTLSv10 543f24d1a377e53b63d935157e76c81e2067b1333bccaad6c24ce92d 
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_DSS_WITH_AES_256_CBC_SHA,TLS_DH_RSA_WITH_AES_256_CBC_SHA,TLS_DH_DSS_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_DSS_WITH_AES_128_CBC_SHA,TLS_DH_RSA_WITH_AES_128_CBC_SHA,TLS_DH_DSS_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_SEED_CBC_SHA,TLS_DHE_DSS_WITH_SEED_CBC_SHA,TLS_DH_RSA_WITH_SEED_CBC_SHA,TLS_DH_DSS_WITH_SEED_CBC_SHA,TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_SEED_CBC_SHA,TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,TLS_RSA_WITH_IDEA_CBC_SHA,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_RSA_WITH_DES_CBC_SHA,TLS_DHE_DSS_WITH_DES_CBC_SHA,TLS_DH_RSA_WITH_DES_CBC_SHA,TLS_DH_DSS_WITH_DES_CBC_SHA,TLS_RSA_WITH_DES_CBC_SHA,TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,TLS_EMPTY_RENEGOTIATION_INFO_SCSV DTLSv10 54fdfee7e29e9780bd73e567dba0ae66ed5b7fb1ee86efba4b09f98bd7b03ad2 - - - 043c5e4b4508b840ef8ac34f592fba8716445aeb9ab2028695541ea62eb79b735da9dbfdbdd01a7beab2c832a633b7fd1ce278659355d7b8a1c88503bfb938b7ef 256 256 17569f292088d5383ffa009ffd5ae4a34b5aec68a206d68eea910b808831c098e5385b2fcf49bbd5df914d2b9d7efcd67a493c324daf48c929bdb3838e56fef25d67f45d6f03f7b195a9d688ec5efe96f1ffe0d88e73458b87175fac7073ca8d8e340657e805cb1e91db02ee687fe5ce37c57fb177368bf3ac787971591a67eaf1880eabac8307ec74e269539b9894781c0026ea61101dafbac1995bc32d39584a03ef82d413731df06dae085dc5984b7fcbedd860715fb84ebb75e74406b88bee23533eba46fe5b3f0936c130e262dcc48d3809f5e208719a70a2a918c0e9fe60b4e992ac555048ff6c2cd077ca2afdc0c36cde432a38c1058fb6bd9cb2cc39 fa6d780625219f5e1ae0b4c863e8321328241134 - - 4104093d316a7b6bdfdbc28c02516e145b8f52881cbb7a5f327e3d0967fc4303617d03d423277420024e6f89b9ab16414681d47a221998a2ba85c4e2f625a0ad7c49 +#close 2019-04-29-19-23-06 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path ssl -#open 2018-08-27-22-38-54 +#open 2019-04-29-19-23-07 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version cipher curve server_name resumed last_alert next_protocol established cert_chain_fuids client_cert_chain_fuids subject issuer client_subject client_issuer client_record_version client_random client_cipher_suites server_record_version server_random server_dh_p server_dh_q server_dh_Ys server_ecdh_point server_signature_sig_alg server_signature_hash_alg server_signature server_cert_sha1 client_rsa_pms client_dh_Yc client_ecdh_point #types time string addr port addr port string string string string bool string string bool vector[string] vector[string] string string string 
string string string string string string string string string string count count string string string string string -1512070268.982498 CHhAvVGS1DHFjwGM9 192.168.17.58 60934 165.227.57.17 4400 DTLSv12 TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 secp256r1 - F - - T Fox0Fc3MY8kLKfhNK6 (empty) O=Internet Widgits Pty Ltd,ST=Some-State,C=AU O=Internet Widgits Pty Ltd,ST=Some-State,C=AU - - DTLSv12 e701fd74cac15bdb8d0fb735dca354f8e4cc1e65944f8d443a1af9b2 TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_DH_DSS_WITH_AES_256_GCM_SHA384,TLS_DHE_DSS_WITH_AES_256_GCM_SHA384,TLS_DH_RSA_WITH_AES_256_GCM_SHA384,TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,TLS_DH_RSA_WITH_AES_256_CBC_SHA256,TLS_DH_DSS_WITH_AES_256_CBC_SHA256,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_DSS_WITH_AES_256_CBC_SHA,TLS_DH_RSA_WITH_AES_256_CBC_SHA,TLS_DH_DSS_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_CBC_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_DH_DSS_WITH_AES_128_GCM_SHA256,TLS_DHE_DSS_WITH_AES_128_GCM_SHA256,TLS_DH_RSA_WITH_AES_128_GCM_SHA256,TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,TLS_DH_RSA_WITH_AES_128_CBC_SHA256,TLS_DH_DSS_WITH_AES_128_CBC_SHA256,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_DSS_WITH_AES_128_CBC_SHA,TLS_DH_RSA_WITH_AES_128_CBC_SHA,TLS_DH_DSS_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_SEED_CBC_SHA,TLS_DHE_DSS_WITH_SEED_CBC_SHA,TLS_DH_RSA_WITH_SEED_CBC_SHA,TLS_DH_DSS_WITH_SEED_CBC_SHA,TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_SEED_CBC_SHA,TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,TLS_RSA_WITH_IDEA_CBC_SHA,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_EMPTY_RENEGOTIATION_INFO_SCSV DTLSv12 1fea3e397e8a4533a9f4fd6e82cd650533269d28dc7b2d62496dc490 - - - 049e5bb8781f90c66cae6b86d7a74977bccd02963bb55631fe7d916ba91c9af9a9562dec1c71b66005503523fbb72a95874bc77394aed429093ad69d7971fb13a9 1 6 
e55f866f29d42c23dc5e87acaccff3fd5da17f001fbfcc1060188cc4351101bb53355ee7015edec32874dad840669578101ec98f898b87d1ce5f045ed990e1655dc9562dc83193ec2b6fbcb9410af9efd6d04c434d29cf809ee0be4bde51674ccfc2c662f76a6c2092cae471c0560f3cc358ed4211b8c6da4f2350ed479f82da84ec6d072e2b31cc0b982c2181af2066b502f5cb1b2e6becdd1e8bbd897a1038939121491c39294e3b584b618d5f9ae7dbc4b36b1a6ac99b92799ab2c8600f1698423bdde64e7476db84afaef919655f6b3dda48400995cf9334564ba70606004d805f4d9aeb4f0df42cea6034d42261d03544efeee721204c30de62268a217c 1cb43b5f1de3fe36d595da76210bbf5572a721be - - 41049c7a642fbbd5847c306ee295360442e353d78aef43297523f92be70b68b882ac708aefcb7a224b34130d6c6041030e5b62fc3def72d7774fd61043a0a430a416 -#close 2018-08-27-22-38-54 +1512070268.982498 CHhAvVGS1DHFjwGM9 192.168.17.58 60934 165.227.57.17 4400 DTLSv12 TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 secp256r1 - F - - T Fox0Fc3MY8kLKfhNK6 (empty) O=Internet Widgits Pty Ltd,ST=Some-State,C=AU O=Internet Widgits Pty Ltd,ST=Some-State,C=AU - - DTLSv12 e701fd74cac15bdb8d0fb735dca354f8e4cc1e65944f8d443a1af9b2 TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_DH_DSS_WITH_AES_256_GCM_SHA384,TLS_DHE_DSS_WITH_AES_256_GCM_SHA384,TLS_DH_RSA_WITH_AES_256_GCM_SHA384,TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,TLS_DH_RSA_WITH_AES_256_CBC_SHA256,TLS_DH_DSS_WITH_AES_256_CBC_SHA256,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_DSS_WITH_AES_256_CBC_SHA,TLS_DH_RSA_WITH_AES_256_CBC_SHA,TLS_DH_DSS_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_CBC_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_DH_DSS_WITH_AES_128_GCM_SHA256,TLS_DHE_DSS_WITH_AES_128_GCM_SHA256,TLS_DH_RSA_WITH_AES_128_GCM_SHA256,TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,TLS_DH_RSA_WITH_AES_128_CBC_SHA256,TLS_DH_DSS_WITH_AES_128_CBC_SHA256,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_DSS_WITH_AES_128_CBC_SHA,TLS_DH_RSA_WITH_AES_128_CBC_SHA,TLS_DH_DSS_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_SEED_CBC_SHA,TLS_DHE_DSS_WITH_SEED_CBC_SHA,TLS_DH_RSA_WITH_SEED_CBC_SHA,TLS_DH_DSS_WITH_SEED_CBC_SHA,TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_SEED_CBC_SHA,TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,TLS_RSA_WITH_IDEA_CBC_SHA,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_ECDSA_WI
TH_3DES_EDE_CBC_SHA,TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_EMPTY_RENEGOTIATION_INFO_SCSV DTLSv12 07c5aefa1fea3e397e8a4533a9f4fd6e82cd650533269d28dc7b2d62496dc490 - - - 049e5bb8781f90c66cae6b86d7a74977bccd02963bb55631fe7d916ba91c9af9a9562dec1c71b66005503523fbb72a95874bc77394aed429093ad69d7971fb13a9 1 6 e55f866f29d42c23dc5e87acaccff3fd5da17f001fbfcc1060188cc4351101bb53355ee7015edec32874dad840669578101ec98f898b87d1ce5f045ed990e1655dc9562dc83193ec2b6fbcb9410af9efd6d04c434d29cf809ee0be4bde51674ccfc2c662f76a6c2092cae471c0560f3cc358ed4211b8c6da4f2350ed479f82da84ec6d072e2b31cc0b982c2181af2066b502f5cb1b2e6becdd1e8bbd897a1038939121491c39294e3b584b618d5f9ae7dbc4b36b1a6ac99b92799ab2c8600f1698423bdde64e7476db84afaef919655f6b3dda48400995cf9334564ba70606004d805f4d9aeb4f0df42cea6034d42261d03544efeee721204c30de62268a217c 1cb43b5f1de3fe36d595da76210bbf5572a721be - - 41049c7a642fbbd5847c306ee295360442e353d78aef43297523f92be70b68b882ac708aefcb7a224b34130d6c6041030e5b62fc3def72d7774fd61043a0a430a416 +#close 2019-04-29-19-23-07 diff --git a/testing/btest/Baseline/scripts.base.protocols.ssl.tls-1.2-random/.stdout b/testing/btest/Baseline/scripts.base.protocols.ssl.tls-1.2-random/.stdout index 85bc19633e..7482c0c3b4 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ssl.tls-1.2-random/.stdout +++ b/testing/btest/Baseline/scripts.base.protocols.ssl.tls-1.2-random/.stdout @@ -1,2 +1,2 @@ 8\xd0U@\xf1\xaamI\xb5SE\x0b\x82\xa4\xe0\x9eG\xf3\xdd\x1f\xeey\xa6[\xcc\xd7\x04\x90 -\xa7\x02\xf4'&\x05]|c\x83KN\xb0\x0e6F\xbez\xbb\x0ey\xbf\x0f\x85p\x83\x8dX +R\xc1\xf4\xef\xa7\x02\xf4'&\x05]|c\x83KN\xb0\x0e6F\xbez\xbb\x0ey\xbf\x0f\x85p\x83\x8dX diff --git a/testing/btest/Baseline/scripts.base.protocols.ssl.tls-extension-events/.stdout b/testing/btest/Baseline/scripts.base.protocols.ssl.tls-extension-events/.stdout index d5ab2cf618..a840e43bf4 100644 --- a/testing/btest/Baseline/scripts.base.protocols.ssl.tls-extension-events/.stdout +++ b/testing/btest/Baseline/scripts.base.protocols.ssl.tls-extension-events/.stdout @@ -33,9 +33,9 @@ signature_algorithm, 192.168.6.240, 139.162.123.134 sha256, ecdsa sha384, ecdsa sha512, ecdsa -unknown-8, unknown-4 -unknown-8, unknown-5 -unknown-8, unknown-6 +Intrinsic, rsa_pss_sha256 +Intrinsic, rsa_pss_sha384 +Intrinsic, rsa_pss_sha512 sha256, rsa sha384, rsa sha512, rsa @@ -45,7 +45,7 @@ sha1, dsa sha256, dsa sha384, dsa sha512, dsa -supported_versions(, 192.168.6.240, 139.162.123.134 +supported_versions, 192.168.6.240, 139.162.123.134 TLSv13-draft19 TLSv12 TLSv11 @@ -66,9 +66,9 @@ signature_algorithm, 192.168.6.240, 139.162.123.134 sha256, ecdsa sha384, ecdsa sha512, ecdsa -unknown-8, unknown-4 -unknown-8, unknown-5 -unknown-8, unknown-6 +Intrinsic, rsa_pss_sha256 +Intrinsic, rsa_pss_sha384 +Intrinsic, rsa_pss_sha512 sha256, rsa sha384, rsa sha512, rsa @@ -78,7 +78,7 @@ sha1, dsa sha256, dsa sha384, dsa sha512, dsa -supported_versions(, 192.168.6.240, 139.162.123.134 +supported_versions, 192.168.6.240, 139.162.123.134 TLSv13-draft19 TLSv12 TLSv11 @@ -86,3 +86,50 @@ TLSv10 psk_key_exchange_modes, 192.168.6.240, 139.162.123.134 1 0 +pre_shared_key client hello, 192.168.6.240, 139.162.123.134, 
[[identity=\x01\xf3\x88\x12\xae\xeb\x13\x01\xed]\xcf\x0b\x8f\xad\xf2\xc1I\x9f-\xfa\xe1\x98\x9f\xb7\x82@\x81Or\x0e\xbe\xfc\xa3\xbc\x8f\x03\x86\xf1\x8e\xae\xd7\xe5\xa2\xee\xf3\xde\xb7\xa5\xf6\\xeb\x18^ICPm!|\x09\xe0NE\xe8\x0f\xda\xf8\xf2\xa8s\x84\x17>\xe5\xd9!\x19\x09\xfe\xdb\xa87\x05\xd7\xd06JG\xeb\xad\xf9\xf8\x13?#\xdc\xe7J\xad\x14\xbfS.\x98\xd8\xd2r\x01\xef\xc5\x0c_\xdf\xc9[7\xa7l\xa7\xa0\xb5\xda\x83\x16\x10\xa1\xdb\xe2, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], orig=[size=34, state=1, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.49206, duration=0.0, service={\x0a\x0a}, history=D, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] 1254722767.492060 dns_message - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], orig=[size=34, state=1, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.49206, duration=0.0, service={\x0a\x0a}, history=D, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], orig=[size=34, state=1, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.49206, duration=0.0, service={\x0a\x0a}, history=D, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = T [2] msg: dns_msg = [id=31062, opcode=0, rcode=0, QR=F, AA=F, TC=F, RD=T, RA=F, Z=0, num_queries=1, num_answers=0, num_auth=0, num_addl=0] [3] len: count = 34 1254722767.492060 dns_request - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], orig=[size=34, state=1, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.49206, duration=0.0, 
service={\x0a\x0a}, history=D, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=, query=, qclass=, qclass_name=, qtype=, qtype_name=, rcode=, rcode_name=, AA=F, TC=F, RD=F, RA=F, Z=0, answers=, TTLs=, rejected=F, total_answers=, total_replies=, saw_query=F, saw_reply=F], dns_state=[pending_query=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=, query=, qclass=, qclass_name=, qtype=, qtype_name=, rcode=, rcode_name=, AA=F, TC=F, RD=F, RA=F, Z=0, answers=, TTLs=, rejected=F, total_answers=, total_replies=, saw_query=F, saw_reply=F], pending_queries=, pending_replies=], ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], orig=[size=34, state=1, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.49206, duration=0.0, service={\x0a\x0a}, history=D, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=, query=, qclass=, qclass_name=, qtype=, qtype_name=, rcode=, rcode_name=, AA=F, TC=F, RD=F, RA=F, Z=0, answers=, TTLs=, rejected=F, total_answers=, total_replies=, saw_query=F, saw_reply=F], dns_state=[pending_query=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=, query=, qclass=, qclass_name=, qtype=, qtype_name=, rcode=, rcode_name=, AA=F, TC=F, RD=F, RA=F, Z=0, answers=, TTLs=, rejected=F, total_answers=, total_replies=, saw_query=F, saw_reply=F], pending_queries=, pending_replies=], ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] msg: dns_msg = [id=31062, opcode=0, rcode=0, QR=F, AA=F, TC=F, RD=T, RA=F, Z=0, num_queries=1, num_answers=0, num_auth=0, num_addl=0] [2] query: string = mail.patriots.in [3] qtype: count = 1 [4] qclass: count = 1 1254722767.492060 protocol_confirmation - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], orig=[size=34, state=1, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.49206, duration=0.0, service={\x0a\x0a}, history=D, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=, 
query=mail.patriots.in, qclass=1, qclass_name=C_INTERNET, qtype=1, qtype_name=A, rcode=, rcode_name=, AA=F, TC=F, RD=T, RA=F, Z=0, answers=, TTLs=, rejected=F, total_answers=, total_replies=, saw_query=F, saw_reply=F], dns_state=[pending_query=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=, query=mail.patriots.in, qclass=1, qclass_name=C_INTERNET, qtype=1, qtype_name=A, rcode=, rcode_name=, AA=F, TC=F, RD=T, RA=F, Z=0, answers=, TTLs=, rejected=F, total_answers=, total_replies=, saw_query=F, saw_reply=F], pending_queries=, pending_replies=], ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], orig=[size=34, state=1, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.49206, duration=0.0, service={\x0a\x0a}, history=D, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=, query=mail.patriots.in, qclass=1, qclass_name=C_INTERNET, qtype=1, qtype_name=A, rcode=, rcode_name=, AA=F, TC=F, RD=T, RA=F, Z=0, answers=, TTLs=, rejected=F, total_answers=, total_replies=, saw_query=F, saw_reply=F], dns_state=[pending_query=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=, query=mail.patriots.in, qclass=1, qclass_name=C_INTERNET, qtype=1, qtype_name=A, rcode=, rcode_name=, AA=F, TC=F, RD=T, RA=F, Z=0, answers=, TTLs=, rejected=F, total_answers=, total_replies=, saw_query=F, saw_reply=F], pending_queries=, pending_replies=], ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] atype: enum = Analyzer::ANALYZER_DNS [2] aid: count = 3 1254722767.492060 dns_end - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], orig=[size=34, state=1, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.49206, duration=0.0, service={\x0aDNS\x0a}, history=D, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=, query=mail.patriots.in, qclass=1, qclass_name=C_INTERNET, qtype=1, qtype_name=A, rcode=, rcode_name=, AA=F, TC=F, RD=T, RA=F, Z=0, answers=, TTLs=, rejected=F, total_answers=, total_replies=, saw_query=F, saw_reply=F], dns_state=[pending_query=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=, 
query=mail.patriots.in, qclass=1, qclass_name=C_INTERNET, qtype=1, qtype_name=A, rcode=, rcode_name=, AA=F, TC=F, RD=T, RA=F, Z=0, answers=, TTLs=, rejected=F, total_answers=, total_replies=, saw_query=F, saw_reply=F], pending_queries=, pending_replies=], ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], orig=[size=34, state=1, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.49206, duration=0.0, service={\x0aDNS\x0a}, history=D, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=, query=mail.patriots.in, qclass=1, qclass_name=C_INTERNET, qtype=1, qtype_name=A, rcode=, rcode_name=, AA=F, TC=F, RD=T, RA=F, Z=0, answers=, TTLs=, rejected=F, total_answers=, total_replies=, saw_query=F, saw_reply=F], dns_state=[pending_query=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=, query=mail.patriots.in, qclass=1, qclass_name=C_INTERNET, qtype=1, qtype_name=A, rcode=, rcode_name=, AA=F, TC=F, RD=T, RA=F, Z=0, answers=, TTLs=, rejected=F, total_answers=, total_replies=, saw_query=F, saw_reply=F], pending_queries=, pending_replies=], ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] msg: dns_msg = [id=31062, opcode=0, rcode=0, QR=F, AA=F, TC=F, RD=T, RA=F, Z=0, num_queries=1, num_answers=0, num_auth=0, num_addl=0] 1254722767.526085 dns_message - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], orig=[size=34, state=1, num_pkts=1, num_bytes_ip=62, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=100, state=1, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.49206, duration=0.034025, service={\x0aDNS\x0a}, history=Dd, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=, query=mail.patriots.in, qclass=1, qclass_name=C_INTERNET, qtype=1, qtype_name=A, rcode=, rcode_name=, AA=F, TC=F, RD=T, RA=F, Z=0, answers=, TTLs=, rejected=F, total_answers=, total_replies=, saw_query=T, saw_reply=F], dns_state=[pending_query=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=, query=mail.patriots.in, qclass=1, qclass_name=C_INTERNET, qtype=1, qtype_name=A, rcode=, rcode_name=, AA=F, TC=F, RD=T, RA=F, Z=0, answers=, TTLs=, rejected=F, total_answers=, total_replies=, saw_query=T, saw_reply=F], pending_queries=, pending_replies=], ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, 
modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], orig=[size=34, state=1, num_pkts=1, num_bytes_ip=62, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=100, state=1, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.49206, duration=0.034025, service={\x0aDNS\x0a}, history=Dd, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=, query=mail.patriots.in, qclass=1, qclass_name=C_INTERNET, qtype=1, qtype_name=A, rcode=, rcode_name=, AA=F, TC=F, RD=T, RA=F, Z=0, answers=, TTLs=, rejected=F, total_answers=, total_replies=, saw_query=T, saw_reply=F], dns_state=[pending_query=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=, query=mail.patriots.in, qclass=1, qclass_name=C_INTERNET, qtype=1, qtype_name=A, rcode=, rcode_name=, AA=F, TC=F, RD=T, RA=F, Z=0, answers=, TTLs=, rejected=F, total_answers=, total_replies=, saw_query=T, saw_reply=F], pending_queries=, pending_replies=], ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = F [2] msg: dns_msg = [id=31062, opcode=0, rcode=0, QR=T, AA=F, TC=F, RD=T, RA=T, Z=0, num_queries=1, num_answers=2, num_auth=2, num_addl=0] [3] len: count = 100 1254722767.526085 dns_CNAME_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], orig=[size=34, state=1, num_pkts=1, num_bytes_ip=62, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=100, state=1, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.49206, duration=0.034025, service={\x0aDNS\x0a}, history=Dd, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=, query=mail.patriots.in, qclass=1, qclass_name=C_INTERNET, qtype=1, qtype_name=A, rcode=0, rcode_name=NOERROR, AA=F, TC=F, RD=T, RA=F, Z=0, answers=, TTLs=, rejected=F, total_answers=2, total_replies=4, saw_query=T, saw_reply=F], dns_state=[pending_query=, pending_queries=, pending_replies=], ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], orig=[size=34, state=1, num_pkts=1, num_bytes_ip=62, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=100, state=1, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.49206, duration=0.034025, service={\x0aDNS\x0a}, history=Dd, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, 
dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=, query=mail.patriots.in, qclass=1, qclass_name=C_INTERNET, qtype=1, qtype_name=A, rcode=0, rcode_name=NOERROR, AA=F, TC=F, RD=T, RA=F, Z=0, answers=, TTLs=, rejected=F, total_answers=2, total_replies=4, saw_query=T, saw_reply=F], dns_state=[pending_query=, pending_queries=, pending_replies=], ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] msg: dns_msg = [id=31062, opcode=0, rcode=0, QR=T, AA=F, TC=F, RD=T, RA=T, Z=0, num_queries=1, num_answers=2, num_auth=2, num_addl=0] [2] ans: dns_answer = [answer_type=1, query=mail.patriots.in, qtype=5, qclass=1, TTL=3.0 hrs 27.0 secs] [3] name: string = patriots.in 1254722767.526085 dns_A_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], orig=[size=34, state=1, num_pkts=1, num_bytes_ip=62, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=100, state=1, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.49206, duration=0.034025, service={\x0aDNS\x0a}, history=Dd, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=34.0 msecs 24.0 usecs, query=mail.patriots.in, qclass=1, qclass_name=C_INTERNET, qtype=1, qtype_name=A, rcode=0, rcode_name=NOERROR, AA=F, TC=F, RD=T, RA=T, Z=0, answers=[patriots.in], TTLs=[3.0 hrs 27.0 secs], rejected=F, total_answers=2, total_replies=4, saw_query=T, saw_reply=F], dns_state=[pending_query=, pending_queries=, pending_replies=], ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], orig=[size=34, state=1, num_pkts=1, num_bytes_ip=62, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=100, state=1, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.49206, duration=0.034025, service={\x0aDNS\x0a}, history=Dd, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=34.0 msecs 24.0 usecs, query=mail.patriots.in, qclass=1, qclass_name=C_INTERNET, qtype=1, qtype_name=A, rcode=0, rcode_name=NOERROR, AA=F, TC=F, RD=T, RA=T, Z=0, answers=[patriots.in], TTLs=[3.0 hrs 27.0 secs], rejected=F, total_answers=2, total_replies=4, saw_query=T, saw_reply=F], dns_state=[pending_query=, pending_queries=, pending_replies=], ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] msg: dns_msg = [id=31062, opcode=0, rcode=0, QR=T, AA=F, TC=F, RD=T, RA=T, Z=0, num_queries=1, 
num_answers=2, num_auth=2, num_addl=0] [2] ans: dns_answer = [answer_type=1, query=patriots.in, qtype=1, qclass=1, TTL=3.0 hrs 28.0 secs] [3] a: addr = 74.53.140.153 1254722767.526085 dns_end - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], orig=[size=34, state=1, num_pkts=1, num_bytes_ip=62, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=100, state=1, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.49206, duration=0.034025, service={\x0aDNS\x0a}, history=Dd, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=34.0 msecs 24.0 usecs, query=mail.patriots.in, qclass=1, qclass_name=C_INTERNET, qtype=1, qtype_name=A, rcode=0, rcode_name=NOERROR, AA=F, TC=F, RD=T, RA=T, Z=0, answers=[patriots.in, 74.53.140.153], TTLs=[3.0 hrs 27.0 secs, 3.0 hrs 28.0 secs], rejected=F, total_answers=2, total_replies=4, saw_query=T, saw_reply=F], dns_state=[pending_query=, pending_queries=, pending_replies=], ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], orig=[size=34, state=1, num_pkts=1, num_bytes_ip=62, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=100, state=1, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.49206, duration=0.034025, service={\x0aDNS\x0a}, history=Dd, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=[ts=1254722767.49206, uid=CHhAvVGS1DHFjwGM9, id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], proto=udp, trans_id=31062, rtt=34.0 msecs 24.0 usecs, query=mail.patriots.in, qclass=1, qclass_name=C_INTERNET, qtype=1, qtype_name=A, rcode=0, rcode_name=NOERROR, AA=F, TC=F, RD=T, RA=T, Z=0, answers=[patriots.in, 74.53.140.153], TTLs=[3.0 hrs 27.0 secs, 3.0 hrs 28.0 secs], rejected=F, total_answers=2, total_replies=4, saw_query=T, saw_reply=F], dns_state=[pending_query=, pending_queries=, pending_replies=], ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] msg: dns_msg = [id=31062, opcode=0, rcode=0, QR=T, AA=F, TC=F, RD=T, RA=T, Z=0, num_queries=1, num_answers=2, num_auth=2, num_addl=0] 1254722767.529046 new_connection - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.0, service={\x0a\x0a}, history=, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, 
sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.0, service={\x0a\x0a}, history=, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] 1254722767.875996 connection_established - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=48, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.34695, service={\x0a\x0a}, history=Sh, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=48, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.34695, service={\x0a\x0a}, history=Sh, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] 1254722768.219663 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=2, num_bytes_ip=88, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=181, state=4, num_pkts=1, num_bytes_ip=48, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.690617, service={\x0a\x0a}, history=ShAd, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=2, num_bytes_ip=88, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=181, state=4, num_pkts=1, num_bytes_ip=48, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.690617, service={\x0a\x0a}, history=ShAd, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, 
conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 220 [3] cmd: string = > @@ -65,7 +65,7 @@ [5] cont_resp: bool = T 1254722768.219663 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=2, num_bytes_ip=88, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=181, state=4, num_pkts=1, num_bytes_ip=48, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.690617, service={\x0a\x0a}, history=ShAd, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 xc90.websitewelcome.com ESMTP Exim 4.69 #1 Mon, 05 Oct 2009 01:05:54 -0500 , path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=2, num_bytes_ip=88, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=181, state=4, num_pkts=1, num_bytes_ip=48, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.690617, service={\x0a\x0a}, history=ShAd, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 xc90.websitewelcome.com ESMTP Exim 4.69 #1 Mon, 05 Oct 2009 01:05:54 -0500 , path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 220 [3] cmd: string = > @@ -73,7 +73,7 @@ [5] cont_resp: bool = T 1254722768.219663 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=2, num_bytes_ip=88, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=181, state=4, num_pkts=1, num_bytes_ip=48, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, 
duration=0.690617, service={\x0a\x0a}, history=ShAd, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 We do not authorize the use of this system to transport unsolicited, , path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=2, num_bytes_ip=88, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=181, state=4, num_pkts=1, num_bytes_ip=48, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.690617, service={\x0a\x0a}, history=ShAd, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 We do not authorize the use of this system to transport unsolicited, , path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 220 [3] cmd: string = > @@ -81,18 +81,18 @@ [5] cont_resp: bool = F 1254722768.224809 protocol_confirmation - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=2, num_bytes_ip=88, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=181, state=4, num_pkts=2, num_bytes_ip=269, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.695763, service={\x0a\x0a}, history=ShAdD, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 and/or bulk e-mail., path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, 
has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=2, num_bytes_ip=88, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=181, state=4, num_pkts=2, num_bytes_ip=269, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.695763, service={\x0a\x0a}, history=ShAdD, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 and/or bulk e-mail., path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] atype: enum = Analyzer::ANALYZER_SMTP [2] aid: count = 7 1254722768.224809 smtp_request - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=2, num_bytes_ip=88, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=181, state=4, num_pkts=2, num_bytes_ip=269, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.695763, service={\x0aSMTP\x0a}, history=ShAdD, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 and/or bulk e-mail., path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=2, num_bytes_ip=88, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=181, state=4, num_pkts=2, num_bytes_ip=269, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.695763, service={\x0aSMTP\x0a}, history=ShAdD, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, 
orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 and/or bulk e-mail., path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = EHLO [3] arg: string = GP 1254722768.566183 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 and/or bulk e-mail., path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 and/or bulk e-mail., path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -100,7 +100,7 @@ [5] cont_resp: bool = T 1254722768.566183 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], 
start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 xc90.websitewelcome.com Hello GP [122.162.143.157], path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 xc90.websitewelcome.com Hello GP [122.162.143.157], path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -108,7 +108,7 @@ [5] cont_resp: bool = T 1254722768.566183 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 SIZE 52428800, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, 
process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 SIZE 52428800, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -116,7 +116,7 @@ [5] cont_resp: bool = T 1254722768.566183 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 PIPELINING, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, 
smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 PIPELINING, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -124,7 +124,7 @@ [5] cont_resp: bool = T 1254722768.566183 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 AUTH PLAIN LOGIN, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 AUTH PLAIN LOGIN, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -132,7 +132,7 @@ [5] cont_resp: bool = T 1254722768.566183 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], 
resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 STARTTLS, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 STARTTLS, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -140,13 +140,13 @@ [5] cont_resp: bool = F 1254722768.568729 smtp_request - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=21, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=4, num_bytes_ip=486, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.039683, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 HELP, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, 
process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=21, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=4, num_bytes_ip=486, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.039683, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 HELP, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = AUTH [3] arg: string = LOGIN 1254722768.911081 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=21, state=4, num_pkts=4, num_bytes_ip=189, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=336, state=4, num_pkts=4, num_bytes_ip=486, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.382035, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 HELP, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=21, state=4, num_pkts=4, num_bytes_ip=189, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=336, state=4, num_pkts=4, num_bytes_ip=486, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.382035, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, 
id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 HELP, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 334 [3] cmd: string = AUTH @@ -154,13 +154,13 @@ [5] cont_resp: bool = F 1254722768.911655 smtp_request - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=51, state=4, num_pkts=4, num_bytes_ip=189, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=336, state=4, num_pkts=5, num_bytes_ip=544, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.382609, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=334 VXNlcm5hbWU6, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=51, state=4, num_pkts=4, num_bytes_ip=189, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=336, state=4, num_pkts=5, num_bytes_ip=544, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.382609, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=334 VXNlcm5hbWU6, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = ** [3] arg: string = Z3VycGFydGFwQHBhdHJpb3RzLmlu 1254722769.253544 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=51, state=4, num_pkts=5, num_bytes_ip=259, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=354, state=4, num_pkts=5, num_bytes_ip=544, flow_label=0, 
l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.724498, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=334 VXNlcm5hbWU6, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=51, state=4, num_pkts=5, num_bytes_ip=259, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=354, state=4, num_pkts=5, num_bytes_ip=544, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.724498, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=334 VXNlcm5hbWU6, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 334 [3] cmd: string = AUTH_ANSWER @@ -168,13 +168,13 @@ [5] cont_resp: bool = F 1254722769.254118 smtp_request - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=69, state=4, num_pkts=5, num_bytes_ip=259, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=354, state=4, num_pkts=6, num_bytes_ip=602, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.725072, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=334 UGFzc3dvcmQ6, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, 
entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=69, state=4, num_pkts=5, num_bytes_ip=259, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=354, state=4, num_pkts=6, num_bytes_ip=602, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.725072, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=334 UGFzc3dvcmQ6, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = ** [3] arg: string = cHVuamFiQDEyMw== 1254722769.613798 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=69, state=4, num_pkts=6, num_bytes_ip=317, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=384, state=4, num_pkts=6, num_bytes_ip=602, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.084752, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=334 UGFzc3dvcmQ6, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=69, state=4, num_pkts=6, num_bytes_ip=317, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=384, state=4, num_pkts=6, num_bytes_ip=602, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.084752, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, 
orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=334 UGFzc3dvcmQ6, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 235 [3] cmd: string = AUTH_ANSWER @@ -182,13 +182,13 @@ [5] cont_resp: bool = F 1254722769.614414 smtp_request - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=105, state=4, num_pkts=6, num_bytes_ip=317, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=384, state=4, num_pkts=7, num_bytes_ip=672, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.085368, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=235 Authentication succeeded, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=105, state=4, num_pkts=6, num_bytes_ip=317, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=384, state=4, num_pkts=7, num_bytes_ip=672, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.085368, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=235 Authentication succeeded, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = MAIL [3] arg: string = FROM: 1254722769.956765 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=105, state=4, num_pkts=7, num_bytes_ip=393, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=392, state=4, num_pkts=7, num_bytes_ip=672, flow_label=0, 
l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.427719, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=235 Authentication succeeded, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=105, state=4, num_pkts=7, num_bytes_ip=393, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=392, state=4, num_pkts=7, num_bytes_ip=672, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.427719, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=235 Authentication succeeded, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = MAIL @@ -196,13 +196,13 @@ [5] cont_resp: bool = F 1254722769.957250 smtp_request - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=144, state=4, num_pkts=7, num_bytes_ip=393, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=392, state=4, num_pkts=8, num_bytes_ip=720, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.428204, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 OK, path=[74.53.140.153, 10.10.1.4], 
user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=144, state=4, num_pkts=7, num_bytes_ip=393, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=392, state=4, num_pkts=8, num_bytes_ip=720, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.428204, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 OK, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = RCPT [3] arg: string = TO: 1254722770.319708 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=144, state=4, num_pkts=8, num_bytes_ip=472, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=406, state=4, num_pkts=8, num_bytes_ip=720, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.790662, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 OK, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=144, state=4, num_pkts=8, num_bytes_ip=472, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=406, state=4, num_pkts=8, num_bytes_ip=720, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.790662, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, 
rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 OK, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = RCPT @@ -210,16 +210,16 @@ [5] cont_resp: bool = F 1254722770.320203 smtp_request - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=150, state=4, num_pkts=8, num_bytes_ip=472, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=406, state=4, num_pkts=9, num_bytes_ip=774, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.791157, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Accepted, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=150, state=4, num_pkts=8, num_bytes_ip=472, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=406, state=4, num_pkts=9, num_bytes_ip=774, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.791157, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Accepted, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = DATA [3] arg: string = 1254722770.320203 mime_begin_entity - [0] c: connection = 
[id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=150, state=4, num_pkts=8, num_bytes_ip=472, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=406, state=4, num_pkts=9, num_bytes_ip=774, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.791157, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Accepted, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=150, state=4, num_pkts=8, num_bytes_ip=472, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=406, state=4, num_pkts=9, num_bytes_ip=774, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.791157, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Accepted, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] 1254722770.661679 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=150, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=9, num_bytes_ip=774, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.132633, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, 
rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Accepted, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=150, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=9, num_bytes_ip=774, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.132633, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Accepted, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 354 [3] cmd: string = DATA @@ -227,243 +227,243 @@ [5] cont_resp: bool = F 1254722770.692743 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=FROM, value="Gurpartap Singh" ] 1254722770.692743 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from="Gurpartap Singh" , to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from="Gurpartap Singh" , to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=TO, value=] 1254722770.692743 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=SUBJECT, value=SMTP] 1254722770.692743 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=DATE, value=Mon, 5 Oct 2009 11:36:07 +0530] 1254722770.692743 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=MESSAGE-ID, value=<000301ca4581$ef9e57f0$cedb07d0$@in>] 1254722770.692743 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=MIME-VERSION, value=1.0] 1254722770.692743 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=CONTENT-TYPE, value=multipart/mixed;\x09boundary="----=_NextPart_000_0004_01CA45B0.095693F0"] 1254722770.692743 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=X-MAILER, value=Microsoft Office Outlook 12.0] 1254722770.692743 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=THREAD-INDEX, value=AcpFgem9BvjjZEDeR1Kh8i+hUyVo0A==] 1254722770.692743 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=CONTENT-LANGUAGE, value=en-us] 1254722770.692743 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=X-CR-HASHEDPUZZLE, value=SeA= AAR2 ADaH BpiO C4G1 D1gW FNB1 FPkR Fn+W HFCP HnYJ JO7s Kum6 KytW LFcI LjUt;1;cgBhAGoAXwBkAGUAbwBsADIAMAAwADIAaQBuAEAAeQBhAGgAbwBvAC4AYwBvAC4AaQBuAA==;Sosha1_v1;7;{CAA37F59-1850-45C7-8540-AA27696B5398};ZwB1AHIAcABhAHIAdABhAHAAQABwAGEAdAByAGkAbwB0AHMALgBpAG4A;Mon, 05 Oct 2009 06:06:01 GMT;UwBNAFQAUAA=] 1254722770.692743 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=X-CR-PUZZLEID, value={CAA37F59-1850-45C7-8540-AA27696B5398}] 1254722770.692743 mime_begin_entity - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] 1254722770.692743 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=2], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=2], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=CONTENT-TYPE, value=multipart/alternative;\x09boundary="----=_NextPart_001_0005_01CA45B0.095693F0"] 1254722770.692743 mime_begin_entity - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=2], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=2], socks=, ssh=, syslog=] 1254722770.692743 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=CONTENT-TYPE, value=text/plain;\x09charset="us-ascii"] 1254722770.692743 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=CONTENT-TRANSFER-ENCODING, value=7bit] 1254722770.692743 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] [2] is_orig: bool = T 1254722770.692743 file_new - [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692743, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692743, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=] 1254722770.692743 file_over_new_connection - [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692743, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1254722770.692743, fuid=Fel9gs4OtNEV6gUJZ5, tx_hosts={\x0a\x0a}, rx_hosts={\x0a\x0a}, conn_uids={\x0a\x0a}, source=SMTP, depth=0, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=T, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] - [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692743, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1254722770.692743, fuid=Fel9gs4OtNEV6gUJZ5, tx_hosts={\x0a\x0a}, rx_hosts={\x0a\x0a}, conn_uids={\x0a\x0a}, source=SMTP, depth=0, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=T, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, 
cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] [2] is_orig: bool = T 1254722770.692743 mime_end_entity - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] 1254722770.692743 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] [2] is_orig: bool = T 1254722770.692743 file_sniff - [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692743, seen_bytes=77, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=Hello\x0d\x0a\x0d\x0a \x0d\x0a\x0d\x0aI send u smtp pcap file \x0d\x0a\x0d\x0aFind the attachment\x0d\x0a\x0d\x0a \x0d\x0a\x0d\x0aGPS\x0d\x0a\x0d\x0a, info=[ts=1254722770.692743, fuid=Fel9gs4OtNEV6gUJZ5, tx_hosts={\x0a\x0910.10.1.4\x0a}, rx_hosts={\x0a\x0974.53.140.153\x0a}, conn_uids={\x0aClEkJM2Vm5giqnMf4h\x0a}, source=SMTP, depth=3, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=T, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, 
http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692743, seen_bytes=77, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=Hello\x0d\x0a\x0d\x0a \x0d\x0a\x0d\x0aI send u smtp pcap file \x0d\x0a\x0d\x0aFind the attachment\x0d\x0a\x0d\x0a \x0d\x0a\x0d\x0aGPS\x0d\x0a\x0d\x0a, info=[ts=1254722770.692743, fuid=Fel9gs4OtNEV6gUJZ5, tx_hosts={\x0a\x0910.10.1.4\x0a}, rx_hosts={\x0a\x0974.53.140.153\x0a}, conn_uids={\x0aClEkJM2Vm5giqnMf4h\x0a}, source=SMTP, depth=3, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=T, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] meta: fa_metadata = [mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], inferred=T] 1254722770.692743 file_state_remove - [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692743, seen_bytes=77, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=Hello\x0d\x0a\x0d\x0a \x0d\x0a\x0d\x0aI send u smtp pcap file \x0d\x0a\x0d\x0aFind the attachment\x0d\x0a\x0d\x0a \x0d\x0a\x0d\x0aGPS\x0d\x0a\x0d\x0a, info=[ts=1254722770.692743, fuid=Fel9gs4OtNEV6gUJZ5, tx_hosts={\x0a\x0910.10.1.4\x0a}, rx_hosts={\x0a\x0974.53.140.153\x0a}, conn_uids={\x0aClEkJM2Vm5giqnMf4h\x0a}, source=SMTP, depth=3, analyzers={\x0a\x0a}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=T, seen_bytes=77, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692743, seen_bytes=77, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=Hello\x0d\x0a\x0d\x0a \x0d\x0a\x0d\x0aI send u smtp pcap file \x0d\x0a\x0d\x0aFind the attachment\x0d\x0a\x0d\x0a \x0d\x0a\x0d\x0aGPS\x0d\x0a\x0d\x0a, info=[ts=1254722770.692743, fuid=Fel9gs4OtNEV6gUJZ5, tx_hosts={\x0a\x0910.10.1.4\x0a}, rx_hosts={\x0a\x0974.53.140.153\x0a}, conn_uids={\x0aClEkJM2Vm5giqnMf4h\x0a}, source=SMTP, depth=3, analyzers={\x0a\x0a}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=T, seen_bytes=77, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] 1254722770.692743 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] [2] is_orig: bool = F 1254722770.692743 mime_begin_entity - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] 1254722770.692743 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=CONTENT-TYPE, value=text/html;\x09charset="us-ascii"] 1254722770.692743 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=CONTENT-TRANSFER-ENCODING, value=quoted-printable] 1254722770.692743 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] [2] is_orig: bool = T 1254722770.692743 file_new - [0] f: fa_file = [id=Ft4M3f2yMvLlmwtbq9, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692743, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Ft4M3f2yMvLlmwtbq9, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692743, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=] 1254722770.692743 file_over_new_connection - [0] f: fa_file = [id=Ft4M3f2yMvLlmwtbq9, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692743, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1254722770.692743, fuid=Ft4M3f2yMvLlmwtbq9, tx_hosts={\x0a\x0a}, rx_hosts={\x0a\x0a}, conn_uids={\x0a\x0a}, source=SMTP, depth=0, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=T, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] - [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [0] f: fa_file = [id=Ft4M3f2yMvLlmwtbq9, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692743, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1254722770.692743, fuid=Ft4M3f2yMvLlmwtbq9, tx_hosts={\x0a\x0a}, rx_hosts={\x0a\x0a}, conn_uids={\x0a\x0a}, source=SMTP, depth=0, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=T, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163697, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, 
from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] [2] is_orig: bool = T 1254722770.692804 mime_end_entity - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] 1254722770.692804 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] [2] is_orig: bool = T 1254722770.692804 file_sniff - [0] f: fa_file = [id=Ft4M3f2yMvLlmwtbq9, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692804, seen_bytes=1868, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a
\x0d\x0a\x0d\x0a

Hello

\x0d\x0a\x0d\x0a

 

\x0d\x0a\x0d\x0a

I send u smtp pcap file

\x0d\x0a\x0d\x0a

Find the attachment

\x0d\x0a\x0d\x0a

 

\x0d\x0a\x0d\x0a

GPS

\x0d\x0a\x0d\x0a
\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a, info=[ts=1254722770.692743, fuid=Ft4M3f2yMvLlmwtbq9, tx_hosts={\x0a\x0910.10.1.4\x0a}, rx_hosts={\x0a\x0974.53.140.153\x0a}, conn_uids={\x0aClEkJM2Vm5giqnMf4h\x0a}, source=SMTP, depth=4, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=T, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Ft4M3f2yMvLlmwtbq9, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692804, seen_bytes=1868, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a
\x0d\x0a\x0d\x0a

Hello

\x0d\x0a\x0d\x0a

 

\x0d\x0a\x0d\x0a

I send u smtp pcap file

\x0d\x0a\x0d\x0a

Find the attachment

\x0d\x0a\x0d\x0a

 

\x0d\x0a\x0d\x0a

GPS

\x0d\x0a\x0d\x0a
\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a, info=[ts=1254722770.692743, fuid=Ft4M3f2yMvLlmwtbq9, tx_hosts={\x0a\x0910.10.1.4\x0a}, rx_hosts={\x0a\x0974.53.140.153\x0a}, conn_uids={\x0aClEkJM2Vm5giqnMf4h\x0a}, source=SMTP, depth=4, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=T, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] meta: fa_metadata = [mime_type=text/html, mime_types=[[strength=100, mime=text/html], [strength=20, mime=text/html], [strength=-20, mime=text/plain]], inferred=T] 1254722770.692804 file_state_remove - [0] f: fa_file = [id=Ft4M3f2yMvLlmwtbq9, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692804, seen_bytes=1868, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a
\x0d\x0a\x0d\x0a

Hello

\x0d\x0a\x0d\x0a

 

\x0d\x0a\x0d\x0a

I send u smtp pcap file

\x0d\x0a\x0d\x0a

Find the attachment

\x0d\x0a\x0d\x0a

 

\x0d\x0a\x0d\x0a

GPS

\x0d\x0a\x0d\x0a
\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a, info=[ts=1254722770.692743, fuid=Ft4M3f2yMvLlmwtbq9, tx_hosts={\x0a\x0910.10.1.4\x0a}, rx_hosts={\x0a\x0974.53.140.153\x0a}, conn_uids={\x0aClEkJM2Vm5giqnMf4h\x0a}, source=SMTP, depth=4, analyzers={\x0a\x0a}, mime_type=text/html, filename=, duration=61.0 usecs, local_orig=, is_orig=T, seen_bytes=1868, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Ft4M3f2yMvLlmwtbq9, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692804, seen_bytes=1868, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a
\x0d\x0a\x0d\x0a

Hello

\x0d\x0a\x0d\x0a

 

\x0d\x0a\x0d\x0a

I send u smtp pcap file

\x0d\x0a\x0d\x0a

Find the attachment

\x0d\x0a\x0d\x0a

 

\x0d\x0a\x0d\x0a

GPS

\x0d\x0a\x0d\x0a
\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a, info=[ts=1254722770.692743, fuid=Ft4M3f2yMvLlmwtbq9, tx_hosts={\x0a\x0910.10.1.4\x0a}, rx_hosts={\x0a\x0974.53.140.153\x0a}, conn_uids={\x0aClEkJM2Vm5giqnMf4h\x0a}, source=SMTP, depth=4, analyzers={\x0a\x0a}, mime_type=text/html, filename=, duration=61.0 usecs, local_orig=, is_orig=T, seen_bytes=1868, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] 1254722770.692804 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] [2] is_orig: bool = F 1254722770.692804 mime_end_entity - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] 1254722770.692804 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] [2] is_orig: bool = T 1254722770.692804 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] [2] is_orig: bool = F 1254722770.692804 mime_begin_entity - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] 1254722770.692804 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=CONTENT-TYPE, value=text/plain;\x09name="NEWS.txt"] 1254722770.692804 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=NEWS.txt], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=NEWS.txt], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=CONTENT-TRANSFER-ENCODING, value=quoted-printable] 1254722770.692804 mime_one_header - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=NEWS.txt], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=NEWS.txt], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=CONTENT-DISPOSITION, value=attachment;\x09filename="NEWS.txt"] 1254722770.692804 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=NEWS.txt], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=NEWS.txt], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] [2] is_orig: bool = T 1254722770.692804 file_new - [0] f: fa_file = [id=FL9Y0d45OI4LpS6fmh, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=NEWS.txt], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692804, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=FL9Y0d45OI4LpS6fmh, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, 
in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=NEWS.txt], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692804, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=] 1254722770.692804 file_over_new_connection - [0] f: fa_file = [id=FL9Y0d45OI4LpS6fmh, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=NEWS.txt], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692804, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1254722770.692804, fuid=FL9Y0d45OI4LpS6fmh, tx_hosts={\x0a\x0a}, rx_hosts={\x0a\x0a}, conn_uids={\x0a\x0a}, source=SMTP, depth=0, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=T, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] - [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=NEWS.txt], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [0] f: fa_file = [id=FL9Y0d45OI4LpS6fmh, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=NEWS.txt], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=]\x0a}, last_active=1254722770.692804, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1254722770.692804, fuid=FL9Y0d45OI4LpS6fmh, tx_hosts={\x0a\x0a}, rx_hosts={\x0a\x0a}, conn_uids={\x0a\x0a}, source=SMTP, depth=0, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=T, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, 
rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=NEWS.txt], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] [2] is_orig: bool = T 1254722770.695115 new_connection - [0] c: connection = [id=[orig_h=192.168.1.1, orig_p=3/icmp, resp_h=10.10.1.4, resp_p=4/icmp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], start_time=1254722770.695115, duration=0.0, service={\x0a\x0a}, history=, uid=C4J4Th3PJpwUYZZ6gc, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.1.1, orig_p=3/icmp, resp_h=10.10.1.4, resp_p=4/icmp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:1f:33:d9:81:60], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], start_time=1254722770.695115, duration=0.0, service={\x0a\x0a}, history=, uid=C4J4Th3PJpwUYZZ6gc, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] 1254722771.494181 file_sniff - [0] f: fa_file = [id=FL9Y0d45OI4LpS6fmh, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, 
ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=NEWS.txt], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=]\x0a}, last_active=1254722771.494181, seen_bytes=4027, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=Version 4.9.9.1\x0d\x0a* Many bug fixes\x0d\x0a* Improved editor\x0d\x0a\x0d\x0aVersion 4.9.9.0\x0d\x0a* Support for latest Mingw compiler system builds\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.9\x0d\x0a* New code tooltip display\x0d\x0a* Improved Indent/Unindent and Remove Comment\x0d\x0a* Improved automatic indent\x0d\x0a* Added support for the "interface" keyword\x0d\x0a* WebUpdate should now report installation problems from PackMan\x0d\x0a* New splash screen and association icons\x0d\x0a* Improved installer\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.7\x0d\x0a* Added support for GCC > 3.2\x0d\x0a* Debug variables are now resent during next debug session\x0d\x0a* Watched Variables not in correct context are now kept and updated when it is needed\x0d\x0a* Added new compiler/linker options: \x0d\x0a - Strip executable\x0d\x0a - Generate instructions for a specific machine (i386, i486, i586, i686, pentium, pentium-mmx, pentiumpro, pentium2, pentium3, pentium4, \x0d\x0a k6, k6-2, k6-3, athlon, athlon-tbird, athlon-4, athlon-xp, athlon-mp, winchip-c6, winchip2, k8, c3 and c3-2)\x0d\x0a - Enable use of processor specific built-in functions (mmmx, sse, sse2, pni, 3dnow)\x0d\x0a* "Default" button in Compiler Options is back\x0d\x0a* Error messages parsing improved\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.5\x0d\x0a* Added the possibility to modify the value of a variable during debugging (right click on a watch variable and select "Modify value")\x0d\x0a* During Dev-C++ First Time COnfiguration window, users can now choose between using or not class browser and code completion features.\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.4\x0d\x0a* Added the possibility to specify an include directory for the code completion cache to be created at Dev-C++ first startup\x0d\x0a* Improved code completion cache\x0d\x0a* WebUpdate will now backup downloaded DevPaks in Dev-C++\Packages directory, and Dev-C++ executable in devcpp.exe.BACKUP\x0d\x0a* Big speed up in function parameters listing while editing\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.3\x0d\x0a* On Dev-C++ first time configuration dialog, a code completion cache of all the standard \x0d\x0a include files can now be generated.\x0d\x0a* Improved WebUpdate module\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.2\x0d\x0a* New debug feature for DLLs: attach to a running process\x0d\x0a* New project option: Use custom Makefile. 
\x0d\x0a* New WebUpdater module.\x0d\x0a* Allow user to specify an alternate configuration file in Environment Options \x0d\x0a (still can be overriden by using "-c" command line parameter).\x0d\x0a* Lots of bug fixes.\x0d\x0a\x0d\x0aVersion 4.9.8.1\x0d\x0a* When creating a DLL, the created static lib respects now the project-defined output directory\x0d\x0a\x0d\x0aVersion 4.9.8.0\x0d\x0a* Changed position of compiler/linker parameters in Project Options.\x0d\x0a* Improved help file\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.9\x0d\x0a* Resource errors are now reported in the Resource sheet\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.8\x0d\x0a* Made whole bottom report control floating instead of only debug output.\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.7\x0d\x0a* Printing settings are now saved\x0d\x0a* New environment options : "watch variable under mouse" and "Report watch errors"\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.6\x0d\x0a* Debug variable browser\x0d\x0a* Added possibility to include in a Template the Project's directories (include, libs and ressources)\x0d\x0a* Changed tint of Class browser pictures colors to match the New Look style\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.5\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.4\x0d\x0a* When compiling with debugging symbols, an extra definition is passed to the\x0d\x0a compiler: -D__DEBUG__\x0d\x0a* Each project creates a _private.h file containing version\x0d\x0a information definitions\x0d\x0a* When compiling the current file only, no dependency checks are performed\x0d\x0a* ~300% Speed-up in class parser\x0d\x0a* Added "External programs" in Tools/Environment Options (for units "Open with")\x0d\x0a* Added "Open with" in project units context menu\x0d\x0a* Added "Classes" toolbar\x0d\x0a* Fixed pre-compilation dependency checks to work correctly\x0d\x0a* Added new file menu entry: Save Project As\x0d\x0a* Bug-fix for double quotes in devcpp.cfg file read by vUpdate\x0d\x0a* Other bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.3\x0d\x0a* When adding debugging symbols on request, remove "-s" option from linker\x0d\x0a* Compiling progress window\x0d\x0a* Environment options : "Show progress window" and "Auto-close progress , info=[ts=1254722770.692804, fuid=FL9Y0d45OI4LpS6fmh, tx_hosts={\x0a\x0910.10.1.4\x0a}, rx_hosts={\x0a\x0974.53.140.153\x0a}, conn_uids={\x0aClEkJM2Vm5giqnMf4h\x0a}, source=SMTP, depth=5, analyzers={\x0a\x0a}, mime_type=, filename=NEWS.txt, duration=0 secs, local_orig=, is_orig=T, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=FL9Y0d45OI4LpS6fmh, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=4530, state=4, num_pkts=11, num_bytes_ip=3518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.163758, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, 
ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=NEWS.txt], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=]\x0a}, last_active=1254722771.494181, seen_bytes=4027, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=Version 4.9.9.1\x0d\x0a* Many bug fixes\x0d\x0a* Improved editor\x0d\x0a\x0d\x0aVersion 4.9.9.0\x0d\x0a* Support for latest Mingw compiler system builds\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.9\x0d\x0a* New code tooltip display\x0d\x0a* Improved Indent/Unindent and Remove Comment\x0d\x0a* Improved automatic indent\x0d\x0a* Added support for the "interface" keyword\x0d\x0a* WebUpdate should now report installation problems from PackMan\x0d\x0a* New splash screen and association icons\x0d\x0a* Improved installer\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.7\x0d\x0a* Added support for GCC > 3.2\x0d\x0a* Debug variables are now resent during next debug session\x0d\x0a* Watched Variables not in correct context are now kept and updated when it is needed\x0d\x0a* Added new compiler/linker options: \x0d\x0a - Strip executable\x0d\x0a - Generate instructions for a specific machine (i386, i486, i586, i686, pentium, pentium-mmx, pentiumpro, pentium2, pentium3, pentium4, \x0d\x0a k6, k6-2, k6-3, athlon, athlon-tbird, athlon-4, athlon-xp, athlon-mp, winchip-c6, winchip2, k8, c3 and c3-2)\x0d\x0a - Enable use of processor specific built-in functions (mmmx, sse, sse2, pni, 3dnow)\x0d\x0a* "Default" button in Compiler Options is back\x0d\x0a* Error messages parsing improved\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.5\x0d\x0a* Added the possibility to modify the value of a variable during debugging (right click on a watch variable and select "Modify value")\x0d\x0a* During Dev-C++ First Time COnfiguration window, users can now choose between using or not class browser and code completion features.\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.4\x0d\x0a* Added the possibility to specify an include directory for the code completion cache to be created at Dev-C++ first startup\x0d\x0a* Improved code completion cache\x0d\x0a* WebUpdate will now backup downloaded DevPaks in Dev-C++\Packages directory, and Dev-C++ executable in devcpp.exe.BACKUP\x0d\x0a* Big speed up in function parameters listing while editing\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.3\x0d\x0a* On Dev-C++ first time configuration dialog, a code completion cache of all the standard \x0d\x0a include files can now be generated.\x0d\x0a* Improved WebUpdate module\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.2\x0d\x0a* New debug feature for DLLs: attach to a running process\x0d\x0a* New project option: Use custom Makefile. 
\x0d\x0a* New WebUpdater module.\x0d\x0a* Allow user to specify an alternate configuration file in Environment Options \x0d\x0a (still can be overriden by using "-c" command line parameter).\x0d\x0a* Lots of bug fixes.\x0d\x0a\x0d\x0aVersion 4.9.8.1\x0d\x0a* When creating a DLL, the created static lib respects now the project-defined output directory\x0d\x0a\x0d\x0aVersion 4.9.8.0\x0d\x0a* Changed position of compiler/linker parameters in Project Options.\x0d\x0a* Improved help file\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.9\x0d\x0a* Resource errors are now reported in the Resource sheet\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.8\x0d\x0a* Made whole bottom report control floating instead of only debug output.\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.7\x0d\x0a* Printing settings are now saved\x0d\x0a* New environment options : "watch variable under mouse" and "Report watch errors"\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.6\x0d\x0a* Debug variable browser\x0d\x0a* Added possibility to include in a Template the Project's directories (include, libs and ressources)\x0d\x0a* Changed tint of Class browser pictures colors to match the New Look style\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.5\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.4\x0d\x0a* When compiling with debugging symbols, an extra definition is passed to the\x0d\x0a compiler: -D__DEBUG__\x0d\x0a* Each project creates a _private.h file containing version\x0d\x0a information definitions\x0d\x0a* When compiling the current file only, no dependency checks are performed\x0d\x0a* ~300% Speed-up in class parser\x0d\x0a* Added "External programs" in Tools/Environment Options (for units "Open with")\x0d\x0a* Added "Open with" in project units context menu\x0d\x0a* Added "Classes" toolbar\x0d\x0a* Fixed pre-compilation dependency checks to work correctly\x0d\x0a* Added new file menu entry: Save Project As\x0d\x0a* Bug-fix for double quotes in devcpp.cfg file read by vUpdate\x0d\x0a* Other bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.3\x0d\x0a* When adding debugging symbols on request, remove "-s" option from linker\x0d\x0a* Compiling progress window\x0d\x0a* Environment options : "Show progress window" and "Auto-close progress , info=[ts=1254722770.692804, fuid=FL9Y0d45OI4LpS6fmh, tx_hosts={\x0a\x0910.10.1.4\x0a}, rx_hosts={\x0a\x0974.53.140.153\x0a}, conn_uids={\x0aClEkJM2Vm5giqnMf4h\x0a}, source=SMTP, depth=5, analyzers={\x0a\x0a}, mime_type=, filename=NEWS.txt, duration=0 secs, local_orig=, is_orig=T, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] meta: fa_metadata = [mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], inferred=T] 1254722771.858334 mime_end_entity - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, 
sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=NEWS.txt], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=NEWS.txt], fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] 1254722771.858334 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] [2] is_orig: bool = T 1254722771.858334 file_state_remove - [0] f: fa_file = [id=FL9Y0d45OI4LpS6fmh, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a\x09}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=]\x0a}, last_active=1254722771.858316, seen_bytes=10809, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=Version 4.9.9.1\x0d\x0a* Many bug fixes\x0d\x0a* Improved editor\x0d\x0a\x0d\x0aVersion 4.9.9.0\x0d\x0a* Support for latest Mingw compiler system builds\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.9\x0d\x0a* New code tooltip display\x0d\x0a* Improved Indent/Unindent and Remove Comment\x0d\x0a* Improved automatic indent\x0d\x0a* Added support for the "interface" keyword\x0d\x0a* WebUpdate should now report installation problems from PackMan\x0d\x0a* New splash screen and association icons\x0d\x0a* Improved installer\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.7\x0d\x0a* Added support for GCC > 3.2\x0d\x0a* Debug variables are now resent during next debug session\x0d\x0a* Watched Variables not in correct context are now kept and updated when it is needed\x0d\x0a* Added new compiler/linker options: \x0d\x0a - Strip executable\x0d\x0a - Generate instructions for a specific machine (i386, i486, i586, i686, pentium, pentium-mmx, pentiumpro, pentium2, pentium3, pentium4, \x0d\x0a k6, k6-2, k6-3, athlon, athlon-tbird, athlon-4, athlon-xp, athlon-mp, winchip-c6, winchip2, k8, c3 and c3-2)\x0d\x0a - Enable use of processor specific built-in functions (mmmx, sse, sse2, pni, 3dnow)\x0d\x0a* "Default" button in Compiler Options is back\x0d\x0a* Error messages parsing 
improved\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.5\x0d\x0a* Added the possibility to modify the value of a variable during debugging (right click on a watch variable and select "Modify value")\x0d\x0a* During Dev-C++ First Time COnfiguration window, users can now choose between using or not class browser and code completion features.\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.4\x0d\x0a* Added the possibility to specify an include directory for the code completion cache to be created at Dev-C++ first startup\x0d\x0a* Improved code completion cache\x0d\x0a* WebUpdate will now backup downloaded DevPaks in Dev-C++\Packages directory, and Dev-C++ executable in devcpp.exe.BACKUP\x0d\x0a* Big speed up in function parameters listing while editing\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.3\x0d\x0a* On Dev-C++ first time configuration dialog, a code completion cache of all the standard \x0d\x0a include files can now be generated.\x0d\x0a* Improved WebUpdate module\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.2\x0d\x0a* New debug feature for DLLs: attach to a running process\x0d\x0a* New project option: Use custom Makefile. \x0d\x0a* New WebUpdater module.\x0d\x0a* Allow user to specify an alternate configuration file in Environment Options \x0d\x0a (still can be overriden by using "-c" command line parameter).\x0d\x0a* Lots of bug fixes.\x0d\x0a\x0d\x0aVersion 4.9.8.1\x0d\x0a* When creating a DLL, the created static lib respects now the project-defined output directory\x0d\x0a\x0d\x0aVersion 4.9.8.0\x0d\x0a* Changed position of compiler/linker parameters in Project Options.\x0d\x0a* Improved help file\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.9\x0d\x0a* Resource errors are now reported in the Resource sheet\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.8\x0d\x0a* Made whole bottom report control floating instead of only debug output.\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.7\x0d\x0a* Printing settings are now saved\x0d\x0a* New environment options : "watch variable under mouse" and "Report watch errors"\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.6\x0d\x0a* Debug variable browser\x0d\x0a* Added possibility to include in a Template the Project's directories (include, libs and ressources)\x0d\x0a* Changed tint of Class browser pictures colors to match the New Look style\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.5\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.4\x0d\x0a* When compiling with debugging symbols, an extra definition is passed to the\x0d\x0a compiler: -D__DEBUG__\x0d\x0a* Each project creates a _private.h file containing version\x0d\x0a information definitions\x0d\x0a* When compiling the current file only, no dependency checks are performed\x0d\x0a* ~300% Speed-up in class parser\x0d\x0a* Added "External programs" in Tools/Environment Options (for units "Open with")\x0d\x0a* Added "Open with" in project units context menu\x0d\x0a* Added "Classes" toolbar\x0d\x0a* Fixed pre-compilation dependency checks to work correctly\x0d\x0a* Added new file menu entry: Save Project As\x0d\x0a* Bug-fix for double quotes in devcpp.cfg file read by vUpdate\x0d\x0a* Other bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.3\x0d\x0a* When adding debugging symbols on request, remove "-s" option from linker\x0d\x0a* Compiling progress window\x0d\x0a* Environment options : "Show progress window" and "Auto-close progress , info=[ts=1254722770.692804, fuid=FL9Y0d45OI4LpS6fmh, tx_hosts={\x0a\x0910.10.1.4\x0a}, rx_hosts={\x0a\x0974.53.140.153\x0a}, 
conn_uids={\x0aClEkJM2Vm5giqnMf4h\x0a}, source=SMTP, depth=5, analyzers={\x0a\x0a}, mime_type=text/plain, filename=NEWS.txt, duration=801.0 msecs 376.0 usecs, local_orig=, is_orig=T, seen_bytes=4027, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=FL9Y0d45OI4LpS6fmh, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a\x09}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a\x09}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a\x09}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=]\x0a}, last_active=1254722771.858316, seen_bytes=10809, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=Version 4.9.9.1\x0d\x0a* Many bug fixes\x0d\x0a* Improved editor\x0d\x0a\x0d\x0aVersion 4.9.9.0\x0d\x0a* Support for latest Mingw compiler system builds\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.9\x0d\x0a* New code tooltip display\x0d\x0a* Improved Indent/Unindent and Remove Comment\x0d\x0a* Improved automatic indent\x0d\x0a* Added support for the "interface" keyword\x0d\x0a* WebUpdate should now report installation problems from PackMan\x0d\x0a* New splash screen and association icons\x0d\x0a* Improved installer\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.7\x0d\x0a* Added support for GCC > 3.2\x0d\x0a* Debug variables are now resent during next debug session\x0d\x0a* Watched Variables not in correct context are now kept and updated when it is needed\x0d\x0a* Added new compiler/linker options: \x0d\x0a - Strip executable\x0d\x0a - Generate instructions for a specific machine (i386, i486, i586, i686, pentium, pentium-mmx, pentiumpro, pentium2, pentium3, pentium4, \x0d\x0a k6, k6-2, k6-3, athlon, athlon-tbird, athlon-4, athlon-xp, athlon-mp, winchip-c6, winchip2, k8, c3 and c3-2)\x0d\x0a - Enable use of processor specific built-in functions (mmmx, sse, sse2, pni, 3dnow)\x0d\x0a* "Default" button in Compiler Options is back\x0d\x0a* Error messages 
parsing improved\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.5\x0d\x0a* Added the possibility to modify the value of a variable during debugging (right click on a watch variable and select "Modify value")\x0d\x0a* During Dev-C++ First Time COnfiguration window, users can now choose between using or not class browser and code completion features.\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.4\x0d\x0a* Added the possibility to specify an include directory for the code completion cache to be created at Dev-C++ first startup\x0d\x0a* Improved code completion cache\x0d\x0a* WebUpdate will now backup downloaded DevPaks in Dev-C++\Packages directory, and Dev-C++ executable in devcpp.exe.BACKUP\x0d\x0a* Big speed up in function parameters listing while editing\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.3\x0d\x0a* On Dev-C++ first time configuration dialog, a code completion cache of all the standard \x0d\x0a include files can now be generated.\x0d\x0a* Improved WebUpdate module\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.8.2\x0d\x0a* New debug feature for DLLs: attach to a running process\x0d\x0a* New project option: Use custom Makefile. \x0d\x0a* New WebUpdater module.\x0d\x0a* Allow user to specify an alternate configuration file in Environment Options \x0d\x0a (still can be overriden by using "-c" command line parameter).\x0d\x0a* Lots of bug fixes.\x0d\x0a\x0d\x0aVersion 4.9.8.1\x0d\x0a* When creating a DLL, the created static lib respects now the project-defined output directory\x0d\x0a\x0d\x0aVersion 4.9.8.0\x0d\x0a* Changed position of compiler/linker parameters in Project Options.\x0d\x0a* Improved help file\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.9\x0d\x0a* Resource errors are now reported in the Resource sheet\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.8\x0d\x0a* Made whole bottom report control floating instead of only debug output.\x0d\x0a* Many bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.7\x0d\x0a* Printing settings are now saved\x0d\x0a* New environment options : "watch variable under mouse" and "Report watch errors"\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.6\x0d\x0a* Debug variable browser\x0d\x0a* Added possibility to include in a Template the Project's directories (include, libs and ressources)\x0d\x0a* Changed tint of Class browser pictures colors to match the New Look style\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.5\x0d\x0a* Bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.4\x0d\x0a* When compiling with debugging symbols, an extra definition is passed to the\x0d\x0a compiler: -D__DEBUG__\x0d\x0a* Each project creates a _private.h file containing version\x0d\x0a information definitions\x0d\x0a* When compiling the current file only, no dependency checks are performed\x0d\x0a* ~300% Speed-up in class parser\x0d\x0a* Added "External programs" in Tools/Environment Options (for units "Open with")\x0d\x0a* Added "Open with" in project units context menu\x0d\x0a* Added "Classes" toolbar\x0d\x0a* Fixed pre-compilation dependency checks to work correctly\x0d\x0a* Added new file menu entry: Save Project As\x0d\x0a* Bug-fix for double quotes in devcpp.cfg file read by vUpdate\x0d\x0a* Other bug fixes\x0d\x0a\x0d\x0aVersion 4.9.7.3\x0d\x0a* When adding debugging symbols on request, remove "-s" option from linker\x0d\x0a* Compiling progress window\x0d\x0a* Environment options : "Show progress window" and "Auto-close progress , info=[ts=1254722770.692804, fuid=FL9Y0d45OI4LpS6fmh, tx_hosts={\x0a\x0910.10.1.4\x0a}, rx_hosts={\x0a\x0974.53.140.153\x0a}, 
conn_uids={\x0aClEkJM2Vm5giqnMf4h\x0a}, source=SMTP, depth=5, analyzers={\x0a\x0a}, mime_type=text/plain, filename=NEWS.txt, duration=801.0 msecs 376.0 usecs, local_orig=, is_orig=T, seen_bytes=4027, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] 1254722771.858334 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] [2] is_orig: bool = F 1254722771.858334 mime_end_entity - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] 1254722771.858334 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] [2] is_orig: bool = T 1254722771.858334 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] [2] is_orig: bool = F 1254722771.858334 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] [2] is_orig: bool = T 1254722771.858334 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] [2] is_orig: bool = F 1254722771.858334 smtp_request - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = . [3] arg: string = . 
1254722772.248789 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=24, num_bytes_ip=21507, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=490, state=4, num_pkts=21, num_bytes_ip=1310, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.719743, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=24, num_bytes_ip=21507, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=490, state=4, num_pkts=21, num_bytes_ip=1310, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.719743, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = . 
@@ -471,13 +471,13 @@ [5] cont_resp: bool = F 1254722774.763825 smtp_request - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14705, state=4, num_pkts=25, num_bytes_ip=21547, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=490, state=4, num_pkts=22, num_bytes_ip=1378, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=7.234779, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722772.248789, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=2, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=1, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14705, state=4, num_pkts=25, num_bytes_ip=21547, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=490, state=4, num_pkts=22, num_bytes_ip=1378, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=7.234779, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722772.248789, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=2, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=1, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = QUIT [3] arg: string = 1254722775.105467 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14705, state=5, num_pkts=27, num_bytes_ip=21633, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=538, state=4, num_pkts=22, num_bytes_ip=1378, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=7.576421, service={\x0aSMTP\x0a}, history=ShAdDaTF, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722772.248789, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=2, 
helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=1, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14705, state=5, num_pkts=27, num_bytes_ip=21633, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=538, state=4, num_pkts=22, num_bytes_ip=1378, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=7.576421, service={\x0aSMTP\x0a}, history=ShAdDaTF, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722772.248789, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=2, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=1, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 221 [3] cmd: string = QUIT @@ -485,36 +485,33 @@ [5] cont_resp: bool = F 1254722776.690444 new_connection - [0] c: connection = [id=[orig_h=10.10.1.20, orig_p=138/udp, resp_h=10.10.1.255, resp_p=138/udp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:02:3f:ec:61:11], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=ff:ff:ff:ff:ff:ff], start_time=1254722776.690444, duration=0.0, service={\x0a\x0a}, history=, uid=CtPZjS20MLrsMUOJi2, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.20, orig_p=138/udp, resp_h=10.10.1.255, resp_p=138/udp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:02:3f:ec:61:11], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=ff:ff:ff:ff:ff:ff], start_time=1254722776.690444, duration=0.0, service={\x0a\x0a}, history=, uid=CtPZjS20MLrsMUOJi2, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] 1437831776.764391 ChecksumOffloading::check 1437831776.764391 connection_state_remove - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], orig=[size=34, state=1, num_pkts=1, num_bytes_ip=62, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], 
resp=[size=100, state=1, num_pkts=1, num_bytes_ip=128, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.49206, duration=0.034025, service={\x0aDNS\x0a}, history=Dd, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=[pending_query=, pending_queries=, pending_replies=], ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=56166/udp, resp_h=10.10.1.1, resp_p=53/udp], orig=[size=34, state=1, num_pkts=1, num_bytes_ip=62, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=100, state=1, num_pkts=1, num_bytes_ip=128, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.49206, duration=0.034025, service={\x0aDNS\x0a}, history=Dd, uid=CHhAvVGS1DHFjwGM9, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=[pending_query=, pending_queries=, pending_replies=], ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] 1437831776.764391 connection_state_remove - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14705, state=5, num_pkts=28, num_bytes_ip=21673, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=538, state=5, num_pkts=25, num_bytes_ip=1546, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=7.576953, service={\x0aSMTP\x0a}, history=ShAdDaTFf, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722772.248789, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=2, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=221 xc90.websitewelcome.com closing connection, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=1, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14705, state=5, num_pkts=28, num_bytes_ip=21673, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=538, state=5, num_pkts=25, num_bytes_ip=1546, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=7.576953, service={\x0aSMTP\x0a}, history=ShAdDaTFf, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722772.248789, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, 
orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=2, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=221 xc90.websitewelcome.com closing connection, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=1, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] 1437831776.764391 connection_state_remove - [0] c: connection = [id=[orig_h=192.168.1.1, orig_p=3/icmp, resp_h=10.10.1.4, resp_p=4/icmp], orig=[size=2192, state=1, num_pkts=4, num_bytes_ip=2304, flow_label=0, l2_addr=00:1f:33:d9:81:60], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], start_time=1254722770.695115, duration=0.001519, service={\x0a\x0a}, history=, uid=C4J4Th3PJpwUYZZ6gc, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.1.1, orig_p=3/icmp, resp_h=10.10.1.4, resp_p=4/icmp], orig=[size=2192, state=1, num_pkts=4, num_bytes_ip=2304, flow_label=0, l2_addr=00:1f:33:d9:81:60], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], start_time=1254722770.695115, duration=0.001519, service={\x0a\x0a}, history=, uid=C4J4Th3PJpwUYZZ6gc, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] 1437831776.764391 connection_state_remove - [0] c: connection = [id=[orig_h=10.10.1.20, orig_p=138/udp, resp_h=10.10.1.255, resp_p=138/udp], orig=[size=201, state=1, num_pkts=1, num_bytes_ip=229, flow_label=0, l2_addr=00:02:3f:ec:61:11], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=ff:ff:ff:ff:ff:ff], start_time=1254722776.690444, duration=0.0, service={\x0a\x0a}, history=D, uid=CtPZjS20MLrsMUOJi2, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.20, orig_p=138/udp, resp_h=10.10.1.255, resp_p=138/udp], orig=[size=201, state=1, num_pkts=1, num_bytes_ip=229, flow_label=0, l2_addr=00:02:3f:ec:61:11], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=ff:ff:ff:ff:ff:ff], start_time=1254722776.690444, duration=0.0, service={\x0a\x0a}, history=D, uid=CtPZjS20MLrsMUOJi2, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, 
syslog=] 1437831776.764391 filter_change_tracking 1437831776.764391 new_connection - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49285/tcp, resp_h=66.196.121.26, resp_p=5050/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831776.764391, duration=0.0, service={\x0a\x0a}, history=, uid=CUM0KZ3MLUfNB0cl11, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] - -1437831777.107399 partial_connection - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49285/tcp, resp_h=66.196.121.26, resp_p=5050/tcp], orig=[size=41, state=3, num_pkts=1, num_bytes_ip=93, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=3, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831776.764391, duration=0.343008, service={\x0a\x0a}, history=Da, uid=CUM0KZ3MLUfNB0cl11, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49285/tcp, resp_h=66.196.121.26, resp_p=5050/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831776.764391, duration=0.0, service={\x0a\x0a}, history=, uid=CUM0KZ3MLUfNB0cl11, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] 1437831787.856895 new_connection - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.0, service={\x0a\x0a}, history=, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.0, service={\x0a\x0a}, history=, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, 
inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] 1437831787.861602 connection_established - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.004707, service={\x0a\x0a}, history=Sh, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.004707, service={\x0a\x0a}, history=Sh, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] 1437831787.867142 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=35, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.010247, service={\x0a\x0a}, history=ShAd, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=35, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.010247, service={\x0a\x0a}, history=ShAd, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 220 [3] cmd: string = > @@ -522,18 +519,18 @@ [5] cont_resp: bool = F 
1437831787.883306 protocol_confirmation - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=3, num_bytes_ip=168, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=35, state=4, num_pkts=2, num_bytes_ip=147, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.026411, service={\x0a\x0a}, history=ShAdD, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 uprise ESMTP SubEthaSMTP null, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=3, num_bytes_ip=168, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=35, state=4, num_pkts=2, num_bytes_ip=147, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.026411, service={\x0a\x0a}, history=ShAdD, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 uprise ESMTP SubEthaSMTP null, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] atype: enum = Analyzer::ANALYZER_SMTP [2] aid: count = 21 1437831787.883306 smtp_request - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=3, num_bytes_ip=168, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=35, state=4, num_pkts=2, num_bytes_ip=147, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.026411, service={\x0aSMTP\x0a}, history=ShAdD, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, 
resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 uprise ESMTP SubEthaSMTP null, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=3, num_bytes_ip=168, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=35, state=4, num_pkts=2, num_bytes_ip=147, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.026411, service={\x0aSMTP\x0a}, history=ShAdD, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 uprise ESMTP SubEthaSMTP null, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = EHLO [3] arg: string = [192.168.133.100] 1437831787.886281 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=4, num_bytes_ip=244, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=3, num_bytes_ip=199, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.029386, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 uprise ESMTP SubEthaSMTP null, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=4, num_bytes_ip=244, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=3, num_bytes_ip=199, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.029386, 
service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 uprise ESMTP SubEthaSMTP null, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -541,7 +538,7 @@ [5] cont_resp: bool = T 1437831787.886281 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=4, num_bytes_ip=244, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=3, num_bytes_ip=199, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.029386, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 uprise, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=4, num_bytes_ip=244, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=3, num_bytes_ip=199, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.029386, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 uprise, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, 
process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -549,7 +546,7 @@ [5] cont_resp: bool = T 1437831787.886281 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=4, num_bytes_ip=244, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=3, num_bytes_ip=199, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.029386, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 8BITMIME, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=4, num_bytes_ip=244, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=3, num_bytes_ip=199, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.029386, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 8BITMIME, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -557,7 +554,7 @@ [5] cont_resp: bool = T 1437831787.886281 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=4, num_bytes_ip=244, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=3, num_bytes_ip=199, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.029386, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, 
extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 AUTH LOGIN, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=4, num_bytes_ip=244, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=3, num_bytes_ip=199, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.029386, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 AUTH LOGIN, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -565,13 +562,13 @@ [5] cont_resp: bool = F 1437831787.887031 smtp_request - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=56, state=4, num_pkts=5, num_bytes_ip=296, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=4, num_bytes_ip=301, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.030136, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, 
pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=56, state=4, num_pkts=5, num_bytes_ip=296, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=4, num_bytes_ip=301, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.030136, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = MAIL [3] arg: string = FROM: 1437831787.889785 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=56, state=4, num_pkts=6, num_bytes_ip=380, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=93, state=4, num_pkts=4, num_bytes_ip=301, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.03289, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=56, state=4, num_pkts=6, num_bytes_ip=380, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=93, state=4, num_pkts=4, num_bytes_ip=301, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.03289, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, 
uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = MAIL @@ -579,13 +576,13 @@ [5] cont_resp: bool = F 1437831787.890232 smtp_request - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=88, state=4, num_pkts=7, num_bytes_ip=432, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=93, state=4, num_pkts=5, num_bytes_ip=361, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.033337, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=88, state=4, num_pkts=7, num_bytes_ip=432, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=93, state=4, num_pkts=5, num_bytes_ip=361, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.033337, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = RCPT [3] arg: string = TO: 1437831787.892986 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, 
resp_p=25/tcp], orig=[size=88, state=4, num_pkts=8, num_bytes_ip=516, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=101, state=4, num_pkts=5, num_bytes_ip=361, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.036091, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=88, state=4, num_pkts=8, num_bytes_ip=516, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=101, state=4, num_pkts=5, num_bytes_ip=361, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.036091, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = RCPT @@ -593,13 +590,13 @@ [5] cont_resp: bool = F 1437831787.893587 smtp_request - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=121, state=4, num_pkts=9, num_bytes_ip=568, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=101, state=4, num_pkts=6, num_bytes_ip=421, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.036692, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, 
id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=121, state=4, num_pkts=9, num_bytes_ip=568, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=101, state=4, num_pkts=6, num_bytes_ip=421, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.036692, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = RCPT [3] arg: string = TO: 1437831787.897624 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=121, state=4, num_pkts=10, num_bytes_ip=653, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=109, state=4, num_pkts=6, num_bytes_ip=421, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.040729, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=121, state=4, 
num_pkts=10, num_bytes_ip=653, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=109, state=4, num_pkts=6, num_bytes_ip=421, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.040729, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = RCPT @@ -607,13 +604,13 @@ [5] cont_resp: bool = F 1437831787.898413 smtp_request - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=156, state=4, num_pkts=11, num_bytes_ip=705, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=109, state=4, num_pkts=7, num_bytes_ip=481, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.041518, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=156, state=4, num_pkts=11, num_bytes_ip=705, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=109, state=4, num_pkts=7, num_bytes_ip=481, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.041518, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, 
id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = RCPT [3] arg: string = TO: 1437831787.901069 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=156, state=4, num_pkts=12, num_bytes_ip=792, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=117, state=4, num_pkts=7, num_bytes_ip=481, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.044174, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=156, state=4, num_pkts=12, num_bytes_ip=792, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=117, state=4, num_pkts=7, num_bytes_ip=481, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.044174, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] 
code: count = 250 [3] cmd: string = RCPT @@ -621,16 +618,16 @@ [5] cont_resp: bool = F 1437831787.901697 smtp_request - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=162, state=4, num_pkts=13, num_bytes_ip=844, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=117, state=4, num_pkts=8, num_bytes_ip=541, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.044802, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=162, state=4, num_pkts=13, num_bytes_ip=844, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=117, state=4, num_pkts=8, num_bytes_ip=541, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.044802, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = DATA [3] arg: string = 1437831787.901697 mime_begin_entity - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=162, state=4, num_pkts=13, num_bytes_ip=844, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=117, state=4, num_pkts=8, num_bytes_ip=541, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.044802, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, 
dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=162, state=4, num_pkts=13, num_bytes_ip=844, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=117, state=4, num_pkts=8, num_bytes_ip=541, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.044802, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] 1437831787.904758 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=162, state=4, num_pkts=14, num_bytes_ip=902, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=8, num_bytes_ip=541, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.047863, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, 
tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=162, state=4, num_pkts=14, num_bytes_ip=902, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=8, num_bytes_ip=541, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.047863, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 354 [3] cmd: string = DATA @@ -638,104 +635,104 @@ [5] cont_resp: bool = F 1437831787.905375 mime_one_header - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, 
duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=CONTENT-TYPE, value=text/plain; charset=us-ascii] 1437831787.905375 mime_one_header - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], 
mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=MIME-VERSION, value=1.0 (Mac OS X Mail 8.2 \(2102\))] 1437831787.905375 mime_one_header - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = 
[name=SUBJECT, value=Re: Bro SMTP CC Header] 1437831787.905375 mime_one_header - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=FROM, value=Albert Zaharovits ] 1437831787.905375 mime_one_header - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, 
extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=Albert Zaharovits , to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=Albert Zaharovits , to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=IN-REPLY-TO, value=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>] 1437831787.905375 mime_one_header - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, 
rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=Albert Zaharovits , to=, cc=, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=Albert Zaharovits , to=, cc=, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=DATE, value=Sat, 25 Jul 2015 16:43:07 +0300] 1437831787.905375 mime_one_header - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to=, cc=, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 
192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to=, cc=, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=CC, value=felica4uu@hotmail.com, davis_mark1@outlook.com] 1437831787.905375 mime_one_header - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to=, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = 
[id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to=, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=CONTENT-TRANSFER-ENCODING, value=7bit] 1437831787.905375 mime_one_header - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to=, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, 
num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to=, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=MESSAGE-ID, value=] 1437831787.905375 mime_one_header - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to=, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, 
thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to=, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=REFERENCES, value= <9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>] 1437831787.905375 mime_one_header - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to=, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, 
rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to=, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=TO, value=ericlim220@yahoo.com] 1437831787.905375 mime_one_header - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, 
helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=X-MAILER, value=Apple Mail (2.2102)] 1437831787.905375 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, 
rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [2] is_orig: bool = T 1437831787.905375 file_new - [0] f: fa_file = [id=FKX8fw2lEHCTK8syM3, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a\x09}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a\x09}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a\x09}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=]\x0a}, last_active=1437831787.905375, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=FKX8fw2lEHCTK8syM3, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, 
dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a\x09}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a\x09}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a\x09}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=]\x0a}, last_active=1437831787.905375, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=] 1437831787.905375 file_over_new_connection - [0] f: fa_file = [id=FKX8fw2lEHCTK8syM3, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a\x09}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a\x09}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a\x09}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=]\x0a}, last_active=1437831787.905375, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831787.905375, fuid=FKX8fw2lEHCTK8syM3, tx_hosts={\x0a\x0a}, rx_hosts={\x0a\x0a}, conn_uids={\x0a\x0a}, source=SMTP, depth=0, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, 
is_orig=T, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] - [1] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] f: fa_file = [id=FKX8fw2lEHCTK8syM3, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a\x09}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a\x09}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a\x09}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, 
has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=]\x0a}, last_active=1437831787.905375, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831787.905375, fuid=FKX8fw2lEHCTK8syM3, tx_hosts={\x0a\x0a}, rx_hosts={\x0a\x0a}, conn_uids={\x0a\x0a}, source=SMTP, depth=0, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=T, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] + [1] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [2] is_orig: bool = T 1437831787.905375 mime_end_entity - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , 
to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] 1437831787.905375 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, 
in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [2] is_orig: bool = T 1437831787.905375 file_sniff - [0] f: fa_file = [id=FKX8fw2lEHCTK8syM3, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a\x09}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a\x09}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a\x09}, reply_to=, 
msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=]\x0a}, last_active=1437831787.905375, seen_bytes=204, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=\x0d\x0a> On 25 Jul 2015, at 16:38, Albert Zaharovits wrote:\x0d\x0a> \x0d\x0a> \x0d\x0a>> On 25 Jul 2015, at 16:21, Albert Zaharovits wrote:\x0d\x0a>> \x0d\x0a>> Bro SMTP CC Header\x0d\x0a>> TEST\x0d\x0a> \x0d\x0a\x0d\x0a, info=[ts=1437831787.905375, fuid=FKX8fw2lEHCTK8syM3, tx_hosts={\x0a\x09192.168.133.100\x0a}, rx_hosts={\x0a\x09192.168.133.102\x0a}, conn_uids={\x0aCmES5u32sYpV7JYN\x0a}, source=SMTP, depth=1, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=T, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=FKX8fw2lEHCTK8syM3, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a\x09}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a\x09}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a\x09}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=]\x0a}, last_active=1437831787.905375, seen_bytes=204, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=\x0d\x0a> On 25 Jul 2015, at 16:38, Albert Zaharovits wrote:\x0d\x0a> \x0d\x0a> \x0d\x0a>> On 25 Jul 2015, at 16:21, Albert Zaharovits wrote:\x0d\x0a>> \x0d\x0a>> Bro SMTP CC Header\x0d\x0a>> TEST\x0d\x0a> 
\x0d\x0a\x0d\x0a, info=[ts=1437831787.905375, fuid=FKX8fw2lEHCTK8syM3, tx_hosts={\x0a\x09192.168.133.100\x0a}, rx_hosts={\x0a\x09192.168.133.102\x0a}, conn_uids={\x0aCmES5u32sYpV7JYN\x0a}, source=SMTP, depth=1, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=T, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] meta: fa_metadata = [mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], inferred=T] 1437831787.905375 file_state_remove - [0] f: fa_file = [id=FKX8fw2lEHCTK8syM3, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a\x09}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a\x09}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a\x09}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=]\x0a}, last_active=1437831787.905375, seen_bytes=204, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=\x0d\x0a> On 25 Jul 2015, at 16:38, Albert Zaharovits wrote:\x0d\x0a> \x0d\x0a> \x0d\x0a>> On 25 Jul 2015, at 16:21, Albert Zaharovits wrote:\x0d\x0a>> \x0d\x0a>> Bro SMTP CC Header\x0d\x0a>> TEST\x0d\x0a> \x0d\x0a\x0d\x0a, info=[ts=1437831787.905375, fuid=FKX8fw2lEHCTK8syM3, tx_hosts={\x0a\x09192.168.133.100\x0a}, rx_hosts={\x0a\x09192.168.133.102\x0a}, conn_uids={\x0aCmES5u32sYpV7JYN\x0a}, source=SMTP, depth=1, analyzers={\x0a\x0a}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=T, seen_bytes=204, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=FKX8fw2lEHCTK8syM3, parent_id=, source=SMTP, is_orig=T, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp]] = 
[id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a\x09}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a\x09}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a\x09}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a\x09}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=]\x0a}, last_active=1437831787.905375, seen_bytes=204, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=\x0d\x0a> On 25 Jul 2015, at 16:38, Albert Zaharovits wrote:\x0d\x0a> \x0d\x0a> \x0d\x0a>> On 25 Jul 2015, at 16:21, Albert Zaharovits wrote:\x0d\x0a>> \x0d\x0a>> Bro SMTP CC Header\x0d\x0a>> TEST\x0d\x0a> \x0d\x0a\x0d\x0a, info=[ts=1437831787.905375, fuid=FKX8fw2lEHCTK8syM3, tx_hosts={\x0a\x09192.168.133.100\x0a}, rx_hosts={\x0a\x09192.168.133.102\x0a}, conn_uids={\x0aCmES5u32sYpV7JYN\x0a}, source=SMTP, depth=1, analyzers={\x0a\x0a}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=T, seen_bytes=204, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] 1437831787.905375 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, 
rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [2] is_orig: bool = F 1437831787.905375 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 
16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [2] is_orig: bool = T 1437831787.905375 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, 
cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [2] is_orig: bool = F 1437831787.905375 smtp_request - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro 
SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = . [3] arg: string = . 
1437831787.914113 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=16, num_bytes_ip=1813, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=162, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.057218, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=16, num_bytes_ip=1813, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=162, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.057218, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = . 
@@ -743,65 +740,59 @@ [5] cont_resp: bool = F 1437831798.533593 new_connection - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49336/tcp, resp_h=74.125.71.189, resp_p=443/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831798.533593, duration=0.0, service={\x0a\x0a}, history=^, uid=CP5puj4I8PtEU4qzYg, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] - -1437831798.533765 partial_connection - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49336/tcp, resp_h=74.125.71.189, resp_p=443/tcp], orig=[size=0, state=3, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=3, num_pkts=3, num_bytes_ip=411, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831798.533593, duration=0.000172, service={\x0a\x0a}, history=^dA, uid=CP5puj4I8PtEU4qzYg, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49336/tcp, resp_h=74.125.71.189, resp_p=443/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831798.533593, duration=0.0, service={\x0a\x0a}, history=^, uid=CP5puj4I8PtEU4qzYg, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] 1437831799.262632 new_connection - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49153/tcp, resp_h=17.172.238.21, resp_p=5223/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.262632, duration=0.0, service={\x0a\x0a}, history=, uid=C37jN32gN3y3AZzyf6, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] - -1437831799.410135 partial_connection - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49153/tcp, resp_h=17.172.238.21, resp_p=5223/tcp], orig=[size=714, state=3, num_pkts=1, num_bytes_ip=766, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=3, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.262632, duration=0.147503, service={\x0a\x0a}, 
history=Da, uid=C37jN32gN3y3AZzyf6, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49153/tcp, resp_h=17.172.238.21, resp_p=5223/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.262632, duration=0.0, service={\x0a\x0a}, history=, uid=C37jN32gN3y3AZzyf6, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] 1437831799.461152 new_connection - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.0, service={\x0a\x0a}, history=, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=0, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.0, service={\x0a\x0a}, history=, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] 1437831799.610433 connection_established - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.149281, service={\x0a\x0a}, history=Sh, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], 
orig=[size=0, state=4, num_pkts=1, num_bytes_ip=64, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=0, num_bytes_ip=0, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.149281, service={\x0a\x0a}, history=Sh, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] 1437831799.611764 ssl_extension_server_name - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0a\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0a\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = T [2] names: vector of string = [p31-keyvalueservice.icloud.com] 1437831799.611764 ssl_extension - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0a\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, 
ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0a\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = T [2] code: count = 0 [3] val: string = \x00!\x00\x00\x1ep31-keyvalueservice.icloud.com 1437831799.611764 ssl_extension - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0a\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0a\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, 
uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = T [2] code: count = 10 [3] val: string = \x00\x06\x00\x17\x00\x18\x00\x19 1437831799.611764 ssl_extension - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0a\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0a\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool 
= T [2] code: count = 11 [3] val: string = \x01\x00 1437831799.611764 ssl_extension - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0a\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0a\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = T [2] code: count = 13 [3] val: string = \x00\x0a\x05\x01\x04\x01\x02\x01\x04\x03\x02\x03 1437831799.611764 ssl_extension - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0a\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, 
id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0a\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = T [2] code: count = 13172 [3] val: string = 1437831799.611764 protocol_confirmation - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0a\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, 
resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0a\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] atype: enum = Analyzer::ANALYZER_SSL [2] aid: count = 35 1437831799.611764 ssl_client_hello - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0aSSL\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0aSSL\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, 
client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] version: count = 771 [2] record_version: count = 769 [3] possible_ts: time = 1437831799.0 @@ -811,266 +802,251 @@ [7] comp_methods: vector of count = [0] 1437831799.611764 ssl_handshake_message - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0aSSL\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0aSSL\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = T [2] msg_type: count = 1 [3] length: count = 192 1437831799.611764 ssl_plaintext_data - [0] c: connection = 
[id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0aSSL\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=2, num_bytes_ip=104, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=4, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.150612, service={\x0aSSL\x0a}, history=ShAD, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = T [2] record_version: count = 769 [3] content_type: count = 22 [4] length: count = 196 1437831799.764576 ssl_extension - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], 
version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 65281 [3] val: string = \x00 1437831799.764576 ssl_server_hello - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, 
state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=, version=, cipher=, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] version: count = 771 [2] record_version: count = 771 [3] possible_ts: time = 1437831799.0 - [4] server_random: string = \xe2RB\xdds\x11\xa9\xd4\x1d\xbc\x8e\xe2]\x09\xc5\xfc\xb1\xedl\xed\x17\xb2?a\xac\x81QM + [4] server_random: string = U\xb3\x92w\xe2RB\xdds\x11\xa9\xd4\x1d\xbc\x8e\xe2]\x09\xc5\xfc\xb1\xedl\xed\x17\xb2?a\xac\x81QM [5] session_id: string = \x17x\xe5j\x19T\x12vWY\xcf\xf3\xeai\\xdf\x09[]\xb7\xdf.[\x0e\x04\xa8\x89bJ\x94\xa7\x0c [6] cipher: count = 4 [7] comp_method: count = 0 1437831799.764576 ssl_handshake_message - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a}, history=ShADd, 
uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = F [2] msg_type: count = 2 [3] length: count = 77 1437831799.764576 file_new - [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, 
dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=] 1437831799.764576 file_over_new_connection - [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0a}, rx_hosts={\x0a\x0a}, conn_uids={\x0a\x0a}, source=SSL, depth=0, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] - [1] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, 
duration=0.303424, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0a}, rx_hosts={\x0a\x0a}, conn_uids={\x0a\x0a}, source=SSL, depth=0, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] + [1] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a}, 
history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [2] is_orig: bool = F 1437831799.764576 file_sniff - [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, 
num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=, cert_chain_fuids=, client_cert_chain=, client_cert_chain_fuids=, subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] meta: fa_metadata = [mime_type=application/x-x509-user-cert, mime_types=, inferred=F] 1437831799.764576 file_hash - [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, 
local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, 
tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] kind: string = sha1 [2] hash: string = f5ccb1a724133607548b00d8eb402efca3076d58 1437831799.764576 x509_certificate - [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, 
orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] cert_ref: opaque of x509 = [2] cert: X509::Certificate = [version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=] 1437831799.764576 x509_extension - [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, 
l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = 
[id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, 
not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] ext: X509::Extension = [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a] 1437831799.764576 x509_extension - [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, 
tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - 
URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] ext: X509::Extension = [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB] 1437831799.764576 x509_extension - [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, 
analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], 
start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, 
key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] ext: X509::Extension = [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE] 1437831799.764576 x509_ext_basic_constraints - [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, 
client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, 
sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] ext: X509::BasicConstraints = [ca=F, path_len=] 1437831799.764576 x509_extension - [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, 
flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple 
IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], 
cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] ext: X509::Extension = [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a] 1437831799.764576 x509_extension - [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, 
tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 
Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, 
syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] ext: X509::Extension = [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a] 1437831799.764576 x509_extension - [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, 
cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, 
extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key 
Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] ext: X509::Extension = [name=X509v3 CRL Distribution Points, 
short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a] 1437831799.764576 x509_extension - [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a 
URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, 
thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, 
analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] ext: X509::Extension = [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment] 1437831799.764576 x509_extension - [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, 
tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 
2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple 
Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic 
Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] ext: X509::Extension = [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication] 1437831799.764576 x509_extension - [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, 
oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, 
value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, 
value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this 
certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] ext: X509::Extension = [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com] 1437831799.764576 x509_ext_subject_alternative_name - [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, 
short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on 
this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], 
[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable 
terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=, basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] ext: X509::SubjectAlternativeName = [dns=[*.icloud.com], uri=, email=, ip=, other_fields=F] 1437831799.764576 file_hash - [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, 
oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a 
Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, 
short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 
1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] kind: string = md5 [2] hash: string = 1bf9696d9f337805383427e88781d001 1437831799.764576 file_state_remove - [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 
Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, 
critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=F1vce92FT1oRjKI328, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, 
critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, 
value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] 1437831799.764576 file_new - [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, 
curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, 
fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=, ftp=, http=, irc=, pe=] 1437831799.764576 file_over_new_connection - [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], 
orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, 
value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0a}, rx_hosts={\x0a\x0a}, conn_uids={\x0a\x0a}, source=SSL, depth=0, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] - [1] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, 
oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple 
Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0a}, rx_hosts={\x0a\x0a}, conn_uids={\x0a\x0a}, source=SSL, depth=0, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] + [1] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, 
dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [2] 
is_orig: bool = F 1437831799.764576 file_sniff - [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, 
value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, 
key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0a\x0a}, mime_type=, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] meta: fa_metadata = [mime_type=application/x-x509-ca-cert, mime_types=, inferred=F] 1437831799.764576 file_hash - [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, 
dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, 
source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, 
key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] kind: string = sha1 [2] hash: string = 8e8321ca08b08e3726fe1d82996884eeb5f0d655 1437831799.764576 x509_certificate - [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, 
conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, 
critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, 
is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, 
tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=, extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] cert_ref: opaque of x509 = [2] cert: X509::Certificate = [version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=] 1437831799.764576 x509_extension - [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic 
Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, 
issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification 
practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] ext: X509::Extension = [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a] 
1437831799.764576 x509_extension - [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital 
Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a\x09]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = 
[id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject 
Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a\x09]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] ext: X509::Extension = [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29] 1437831799.764576 x509_extension - [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = 
[id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject 
Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, 
is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, 
oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29]], san=, basic_constraints=], extracted=, extracted_cutoff=, 
extracted_size=], ftp=, http=, irc=, pe=] [1] ext: X509::Extension = [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0] 1437831799.764576 x509_ext_basic_constraints - [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, 
oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, 
extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, 
value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, 
serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0]], san=, basic_constraints=], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] ext: X509::BasicConstraints = [ca=T, path_len=0] 1437831799.764576 x509_extension - [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, 
value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, 
rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, 
sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, 
modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] ext: X509::Extension = [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign] 1437831799.764576 x509_extension - [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, 
total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a\x09], [name=X509v3 Subject Key Identifier, 
short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, 
server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, 
certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] ext: X509::Extension = [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a] 1437831799.764576 x509_extension - [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, 
resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client 
Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a\x09]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, 
short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, 
short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a\x09]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, 
info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] ext: X509::Extension = [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a] 1437831799.764576 x509_extension - [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, 
seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a\x09], [name=X509v3 Subject Key Identifier, 
short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a\x09], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a\x09]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, 
l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], 
basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a\x09], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a\x09]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, 
value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] ext: X509::Extension = [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a] 1437831799.764576 file_hash - [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - 
URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a\x09], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 
2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a\x09]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, 
ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, 
local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a\x09], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a\x09]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, 
value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] [1] kind: string = md5 [2] hash: string = 48f0e38385112eeca5fc9ffd402eaecd 1437831799.764576 file_state_remove - [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, 
value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a\x09], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a\x09]], san=, basic_constraints=[ca=T, path_len=0]], 
extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=, u2_events=] + [0] f: fa_file = [id=Fxp53s3wA5G3zdEJg8, parent_id=, source=SSL, is_orig=F, conns={\x0a\x09[[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp]] = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a\x09}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, 
orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a\x09], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a\x09], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x09\x0917.167.150.73\x0a\x09}, rx_hosts={\x0a\x09\x09192.168.133.100\x0a\x09}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a\x09}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a\x09}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, 
parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a\x09], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a\x09], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a\x09], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a\x09]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=]\x0a}, last_active=1437831799.764576, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=4096, bof_buffer=, info=[ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic 
Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=], ftp=, http=, irc=, pe=] 1437831799.764576 ssl_handshake_message - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 
Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: 
connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, 
short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = F [2] msg_type: count = 11 [3] length: count = 2507 1437831799.764576 ssl_handshake_message - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, 
version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, 
id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, 
id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, 
pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = F [2] msg_type: count = 14 [3] length: count = 0 1437831799.764576 ssl_plaintext_data - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, 
value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], 
cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=201, state=4, num_pkts=4, num_bytes_ip=385, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=2, num_bytes_ip=1532, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.303424, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a 
URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = F [2] record_version: count = 771 [3] content_type: count = 22 [4] length: count = 2596 1437831799.838196 ssl_handshake_message - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=468, state=4, num_pkts=5, num_bytes_ip=425, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=3, num_bytes_ip=2733, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.377044, 
service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, 
conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=468, state=4, num_pkts=5, num_bytes_ip=425, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=3, num_bytes_ip=2733, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.377044, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=F, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, 
conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, 
short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = T [2] msg_type: count = 16 [3] length: count = 258 1437831799.838196 ssl_plaintext_data - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=468, state=4, num_pkts=5, num_bytes_ip=425, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=3, num_bytes_ip=2733, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.377044, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=T, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, 
sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, 
short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=468, state=4, num_pkts=5, num_bytes_ip=425, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=3, num_bytes_ip=2733, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.377044, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=T, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 
1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = T [2] record_version: count = 771 [3] content_type: count = 22 [4] length: count = 262 
1437831799.838197 ssl_change_cipher_spec - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=474, state=4, num_pkts=6, num_bytes_ip=732, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=3, num_bytes_ip=2733, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.377045, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=T, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], 
[name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=474, state=4, num_pkts=6, num_bytes_ip=732, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=3, num_bytes_ip=2733, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.377045, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, 
server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=T, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, 
serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = T 1437831799.838197 ssl_plaintext_data - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=474, state=4, num_pkts=6, num_bytes_ip=732, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=3, num_bytes_ip=2733, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.377045, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=T, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, 
id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, 
pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=474, state=4, num_pkts=6, num_bytes_ip=732, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2601, state=4, num_pkts=3, num_bytes_ip=2733, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.377045, service={\x0aSSL\x0a}, history=ShADd, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=T, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, 
value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, 
client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = T [2] record_version: count = 771 [3] content_type: count = 20 [4] length: count = 1 1437831800.045701 ssl_change_cipher_spec - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=511, state=4, num_pkts=8, num_bytes_ip=855, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2644, state=4, num_pkts=6, num_bytes_ip=2853, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.584549, service={\x0aSSL\x0a}, history=ShADda, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=T, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a 
URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=511, state=4, num_pkts=8, num_bytes_ip=855, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2644, state=4, num_pkts=6, num_bytes_ip=2853, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.584549, service={\x0aSSL\x0a}, history=ShADda, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, 
dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=T, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, 
duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = F 1437831800.045701 ssl_plaintext_data - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=511, state=4, num_pkts=8, num_bytes_ip=855, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2644, state=4, num_pkts=6, num_bytes_ip=2853, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.584549, service={\x0aSSL\x0a}, history=ShADda, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=T, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, 
mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, 
value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=511, state=4, num_pkts=8, num_bytes_ip=855, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2644, state=4, num_pkts=6, num_bytes_ip=2853, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.584549, service={\x0aSSL\x0a}, history=ShADda, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=T, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, 
oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, 
critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = F [2] record_version: count = 771 [3] content_type: count = 20 [4] length: count = 1 1437831800.045701 ssl_established - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=511, state=4, num_pkts=8, num_bytes_ip=855, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=2644, state=4, num_pkts=6, num_bytes_ip=2853, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.584549, service={\x0aSSL\x0a}, history=ShADda, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=T, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance 
on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=511, state=4, num_pkts=8, num_bytes_ip=855, flow_label=0, 
l2_addr=58:b0:35:86:54:8d], resp=[size=2644, state=4, num_pkts=6, num_bytes_ip=2853, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.584549, service={\x0aSSL\x0a}, history=ShADda, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=T, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=35, established=F, logged=F, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], 
extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=, issuer=, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] 1437831800.217854 net_done [0] t: time = 1437831800.217854 1437831800.217854 filter_change_tracking -1437831800.217854 connection_pending - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49285/tcp, resp_h=66.196.121.26, resp_p=5050/tcp], orig=[size=41, state=3, num_pkts=1, num_bytes_ip=93, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=3, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831776.764391, duration=0.343008, service={\x0a\x0a}, history=Da, uid=CUM0KZ3MLUfNB0cl11, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] +1437831800.217854 connection_state_remove + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49285/tcp, resp_h=66.196.121.26, resp_p=5050/tcp], 
orig=[size=41, state=3, num_pkts=1, num_bytes_ip=93, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=3, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831776.764391, duration=0.343008, service={\x0a\x0a}, history=Da, uid=CUM0KZ3MLUfNB0cl11, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] 1437831800.217854 connection_state_remove - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49285/tcp, resp_h=66.196.121.26, resp_p=5050/tcp], orig=[size=41, state=3, num_pkts=1, num_bytes_ip=93, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=3, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831776.764391, duration=0.343008, service={\x0a\x0a}, history=Da, uid=CUM0KZ3MLUfNB0cl11, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] - -1437831800.217854 connection_pending - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49153/tcp, resp_h=17.172.238.21, resp_p=5223/tcp], orig=[size=714, state=3, num_pkts=1, num_bytes_ip=766, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=3, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.262632, duration=0.147503, service={\x0a\x0a}, history=Da, uid=C37jN32gN3y3AZzyf6, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49153/tcp, resp_h=17.172.238.21, resp_p=5223/tcp], orig=[size=714, state=3, num_pkts=1, num_bytes_ip=766, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=3, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.262632, duration=0.147503, service={\x0a\x0a}, history=Da, uid=C37jN32gN3y3AZzyf6, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] 1437831800.217854 connection_state_remove - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49153/tcp, resp_h=17.172.238.21, resp_p=5223/tcp], orig=[size=714, state=3, num_pkts=1, num_bytes_ip=766, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=0, state=3, num_pkts=1, num_bytes_ip=52, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.262632, duration=0.147503, service={\x0a\x0a}, history=Da, uid=C37jN32gN3y3AZzyf6, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, 
dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] - -1437831800.217854 connection_pending - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=17, num_bytes_ip=1865, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=162, state=4, num_pkts=10, num_bytes_ip=690, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.05732, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.914113, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=2, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=1, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=17, num_bytes_ip=1865, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=162, state=4, num_pkts=10, num_bytes_ip=690, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.05732, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.914113, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=2, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=1, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] 1437831800.217854 connection_state_remove - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=17, num_bytes_ip=1865, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=162, state=4, num_pkts=10, num_bytes_ip=690, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.05732, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, 
http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.914113, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=2, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=1, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] - -1437831800.217854 connection_pending - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49336/tcp, resp_h=74.125.71.189, resp_p=443/tcp], orig=[size=0, state=3, num_pkts=3, num_bytes_ip=156, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=3, num_pkts=3, num_bytes_ip=411, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831798.533593, duration=0.000221, service={\x0a\x0a}, history=^dA, uid=CP5puj4I8PtEU4qzYg, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49336/tcp, resp_h=74.125.71.189, resp_p=443/tcp], orig=[size=0, state=3, num_pkts=3, num_bytes_ip=156, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=3, num_pkts=3, num_bytes_ip=411, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831798.533593, duration=0.000221, service={\x0a\x0a}, history=^dA, uid=CP5puj4I8PtEU4qzYg, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] 1437831800.217854 connection_state_remove - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49336/tcp, resp_h=74.125.71.189, resp_p=443/tcp], orig=[size=0, state=3, num_pkts=3, num_bytes_ip=156, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=3, num_pkts=3, num_bytes_ip=411, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831798.533593, duration=0.000221, service={\x0a\x0a}, history=^dA, uid=CP5puj4I8PtEU4qzYg, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=2249, state=4, num_pkts=15, num_bytes_ip=2873, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=3653, state=4, num_pkts=13, num_bytes_ip=4185, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.756702, service={\x0aSSL\x0a}, history=ShADda, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, 
dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=T, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=, established=T, logged=T, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, 
local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] -1437831800.217854 connection_pending - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=2249, state=4, num_pkts=15, num_bytes_ip=2873, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=3653, state=4, num_pkts=13, num_bytes_ip=4185, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.756702, service={\x0aSSL\x0a}, history=ShADda, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=T, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=, established=T, logged=T, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, 
conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, 
short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] - -1437831800.217854 connection_state_remove - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], orig=[size=2249, state=4, num_pkts=15, num_bytes_ip=2873, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=3653, state=4, num_pkts=13, num_bytes_ip=4185, flow_label=0, l2_addr=cc:b2:55:f4:62:92], start_time=1437831799.461152, duration=0.756702, service={\x0aSSL\x0a}, history=ShADda, uid=C3eiCBGOLw3VtHfOj, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=[ts=1437831799.611764, uid=C3eiCBGOLw3VtHfOj, id=[orig_h=192.168.133.100, orig_p=49655/tcp, resp_h=17.167.150.73, resp_p=443/tcp], version_num=771, version=TLSv12, cipher=TLS_RSA_WITH_RC4_128_MD5, curve=, server_name=p31-keyvalueservice.icloud.com, session_id=, resumed=F, client_ticket_empty_session_seen=F, client_key_exchange_seen=T, server_appdata=0, client_appdata=F, last_alert=, next_protocol=, analyzer_id=, established=T, logged=T, delay_tokens=, cert_chain=[[ts=1437831799.764576, fuid=F1vce92FT1oRjKI328, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-user-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1406, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=1bf9696d9f337805383427e88781d001, sha1=f5ccb1a724133607548b00d8eb402efca3076d58, sha256=, x509=[ts=1437831799.764576, id=F1vce92FT1oRjKI328, certificate=[version=3, serial=053FCE9BA6805B00, subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, cn=*.icloud.com, 
not_valid_before=1424184331.0, not_valid_after=1489848331.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://ocsp.apple.com/ocsp04-appleistca2g101\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=8E:51:A1:0E:0A:9B:1C:04:F7:59:D3:69:2E:23:16:91:0E:AD:06:FB], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:FALSE], [name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 1.2.840.113635.100.5.11.4\x0a User Notice:\x0a Explicit Text: Reliance on this certificate by any party assumes acceptance of any applicable terms and conditions of use and/or certification practice statements.\x0a CPS: http://www.apple.com/certificateauthority/rpa\x0a], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a URI:http://crl.apple.com/appleistca2g1.crl\x0a], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Digital Signature, Key Encipherment], [name=X509v3 Extended Key Usage, short_name=extendedKeyUsage, oid=2.5.29.37, critical=F, value=TLS Web Server Authentication, TLS Web Client Authentication], [name=X509v3 Subject Alternative Name, short_name=subjectAltName, oid=2.5.29.17, critical=F, value=DNS:*.icloud.com]], san=[dns=[*.icloud.com], uri=, email=, ip=, other_fields=F], basic_constraints=[ca=F, path_len=]], extracted=, extracted_cutoff=, extracted_size=], [ts=1437831799.764576, fuid=Fxp53s3wA5G3zdEJg8, tx_hosts={\x0a\x0917.167.150.73\x0a}, rx_hosts={\x0a\x09192.168.133.100\x0a}, conn_uids={\x0aC3eiCBGOLw3VtHfOj\x0a}, source=SSL, depth=0, analyzers={\x0aMD5,\x0aSHA1,\x0aX509\x0a}, mime_type=application/x-x509-ca-cert, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=1092, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=48f0e38385112eeca5fc9ffd402eaecd, sha1=8e8321ca08b08e3726fe1d82996884eeb5f0d655, sha256=, x509=[ts=1437831799.764576, id=Fxp53s3wA5G3zdEJg8, certificate=[version=3, serial=023A74, subject=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, issuer=CN=GeoTrust Global CA,O=GeoTrust Inc.,C=US, cn=Apple IST CA 2 - G1, not_valid_before=1402933322.0, not_valid_after=1653061322.0, key_alg=rsaEncryption, sig_alg=sha256WithRSAEncryption, key_type=rsa, key_length=2048, exponent=65537, curve=], handle=, extensions=[[name=X509v3 Authority Key Identifier, short_name=authorityKeyIdentifier, oid=2.5.29.35, critical=F, value=keyid:C0:7A:98:68:8D:89:FB:AB:05:64:0C:11:7D:AA:7D:65:B8:CA:CC:4E\x0a], [name=X509v3 Subject Key Identifier, short_name=subjectKeyIdentifier, oid=2.5.29.14, critical=F, value=D8:7A:94:44:7C:90:70:90:16:9E:DD:17:9C:01:44:03:86:D6:2A:29], [name=X509v3 Basic Constraints, short_name=basicConstraints, oid=2.5.29.19, critical=T, value=CA:TRUE, pathlen:0], [name=X509v3 Key Usage, short_name=keyUsage, oid=2.5.29.15, critical=T, value=Certificate Sign, CRL Sign], [name=X509v3 CRL Distribution Points, short_name=crlDistributionPoints, oid=2.5.29.31, critical=F, value=\x0aFull Name:\x0a 
URI:http://g.symcb.com/crls/gtglobal.crl\x0a], [name=Authority Information Access, short_name=authorityInfoAccess, oid=1.3.6.1.5.5.7.1.1, critical=F, value=OCSP - URI:http://g.symcd.com\x0a], [name=X509v3 Certificate Policies, short_name=certificatePolicies, oid=2.5.29.32, critical=F, value=Policy: 2.16.840.1.113733.1.7.54\x0a CPS: http://www.geotrust.com/resources/cps\x0a]], san=, basic_constraints=[ca=T, path_len=0]], extracted=, extracted_cutoff=, extracted_size=]], cert_chain_fuids=[F1vce92FT1oRjKI328, Fxp53s3wA5G3zdEJg8], client_cert_chain=[], client_cert_chain_fuids=[], subject=C=US,ST=California,O=Apple Inc.,OU=management:idms.group.506364,CN=*.icloud.com, issuer=C=US,O=Apple Inc.,OU=Certification Authority,CN=Apple IST CA 2 - G1, client_subject=, client_issuer=, server_depth=0, client_depth=0], http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] - -1437831800.217854 bro_done +1437831800.217854 zeek_done 1437831800.217854 ChecksumOffloading::check diff --git a/testing/btest/Baseline/scripts.policy.misc.dump-events/smtp-events.log b/testing/btest/Baseline/scripts.policy.misc.dump-events/smtp-events.log index 1d02aac2d9..3fa7d8785c 100644 --- a/testing/btest/Baseline/scripts.policy.misc.dump-events/smtp-events.log +++ b/testing/btest/Baseline/scripts.policy.misc.dump-events/smtp-events.log @@ -1,5 +1,5 @@ 1254722768.219663 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=2, num_bytes_ip=88, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=181, state=4, num_pkts=1, num_bytes_ip=48, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.690617, service={\x0a\x0a}, history=ShAd, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=2, num_bytes_ip=88, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=181, state=4, num_pkts=1, num_bytes_ip=48, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.690617, service={\x0a\x0a}, history=ShAd, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 220 [3] cmd: string = > @@ -7,7 +7,7 @@ [5] cont_resp: bool = T 1254722768.219663 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=2, num_bytes_ip=88, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=181, state=4, num_pkts=1, num_bytes_ip=48, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.690617, service={\x0a\x0a}, history=ShAd, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, 
extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 xc90.websitewelcome.com ESMTP Exim 4.69 #1 Mon, 05 Oct 2009 01:05:54 -0500 , path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=2, num_bytes_ip=88, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=181, state=4, num_pkts=1, num_bytes_ip=48, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.690617, service={\x0a\x0a}, history=ShAd, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 xc90.websitewelcome.com ESMTP Exim 4.69 #1 Mon, 05 Oct 2009 01:05:54 -0500 , path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 220 [3] cmd: string = > @@ -15,7 +15,7 @@ [5] cont_resp: bool = T 1254722768.219663 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=2, num_bytes_ip=88, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=181, state=4, num_pkts=1, num_bytes_ip=48, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.690617, service={\x0a\x0a}, history=ShAd, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 We do not authorize the use of this system to transport unsolicited, , path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, 
pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=2, num_bytes_ip=88, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=181, state=4, num_pkts=1, num_bytes_ip=48, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.690617, service={\x0a\x0a}, history=ShAd, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 We do not authorize the use of this system to transport unsolicited, , path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 220 [3] cmd: string = > @@ -23,13 +23,13 @@ [5] cont_resp: bool = F 1254722768.224809 smtp_request - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=2, num_bytes_ip=88, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=181, state=4, num_pkts=2, num_bytes_ip=269, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.695763, service={\x0aSMTP\x0a}, history=ShAdD, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 and/or bulk e-mail., path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=2, num_bytes_ip=88, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=181, state=4, num_pkts=2, num_bytes_ip=269, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=0.695763, service={\x0aSMTP\x0a}, history=ShAdD, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, 
id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 and/or bulk e-mail., path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = EHLO [3] arg: string = GP 1254722768.566183 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 and/or bulk e-mail., path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 and/or bulk e-mail., path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -37,7 +37,7 @@ [5] cont_resp: bool = T 1254722768.566183 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, 
l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 xc90.websitewelcome.com Hello GP [122.162.143.157], path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 xc90.websitewelcome.com Hello GP [122.162.143.157], path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -45,7 +45,7 @@ [5] cont_resp: bool = T 1254722768.566183 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 SIZE 52428800, path=[74.53.140.153, 10.10.1.4], 
user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 SIZE 52428800, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -53,7 +53,7 @@ [5] cont_resp: bool = T 1254722768.566183 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 PIPELINING, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, 
snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 PIPELINING, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -61,7 +61,7 @@ [5] cont_resp: bool = T 1254722768.566183 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 AUTH PLAIN LOGIN, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 AUTH PLAIN LOGIN, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -69,7 +69,7 @@ [5] cont_resp: bool = T 1254722768.566183 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, 
l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 STARTTLS, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=9, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=3, num_bytes_ip=309, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.037137, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 STARTTLS, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -77,13 +77,13 @@ [5] cont_resp: bool = F 1254722768.568729 smtp_request - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=21, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=4, num_bytes_ip=486, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.039683, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 HELP, path=[74.53.140.153, 10.10.1.4], 
user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=21, state=4, num_pkts=3, num_bytes_ip=137, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=318, state=4, num_pkts=4, num_bytes_ip=486, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.039683, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 HELP, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = AUTH [3] arg: string = LOGIN 1254722768.911081 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=21, state=4, num_pkts=4, num_bytes_ip=189, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=336, state=4, num_pkts=4, num_bytes_ip=486, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.382035, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 HELP, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=21, state=4, num_pkts=4, num_bytes_ip=189, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=336, state=4, num_pkts=4, num_bytes_ip=486, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.382035, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, 
uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 HELP, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 334 [3] cmd: string = AUTH @@ -91,13 +91,13 @@ [5] cont_resp: bool = F 1254722768.911655 smtp_request - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=51, state=4, num_pkts=4, num_bytes_ip=189, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=336, state=4, num_pkts=5, num_bytes_ip=544, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.382609, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=334 VXNlcm5hbWU6, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=51, state=4, num_pkts=4, num_bytes_ip=189, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=336, state=4, num_pkts=5, num_bytes_ip=544, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.382609, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=334 VXNlcm5hbWU6, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = ** [3] arg: string = Z3VycGFydGFwQHBhdHJpb3RzLmlu 1254722769.253544 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=51, state=4, num_pkts=5, num_bytes_ip=259, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=354, state=4, num_pkts=5, num_bytes_ip=544, 
flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.724498, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=334 VXNlcm5hbWU6, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=51, state=4, num_pkts=5, num_bytes_ip=259, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=354, state=4, num_pkts=5, num_bytes_ip=544, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.724498, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=334 VXNlcm5hbWU6, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 334 [3] cmd: string = AUTH_ANSWER @@ -105,13 +105,13 @@ [5] cont_resp: bool = F 1254722769.254118 smtp_request - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=69, state=4, num_pkts=5, num_bytes_ip=259, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=354, state=4, num_pkts=6, num_bytes_ip=602, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.725072, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=334 UGFzc3dvcmQ6, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, 
has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=69, state=4, num_pkts=5, num_bytes_ip=259, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=354, state=4, num_pkts=6, num_bytes_ip=602, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=1.725072, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=334 UGFzc3dvcmQ6, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = ** [3] arg: string = cHVuamFiQDEyMw== 1254722769.613798 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=69, state=4, num_pkts=6, num_bytes_ip=317, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=384, state=4, num_pkts=6, num_bytes_ip=602, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.084752, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=334 UGFzc3dvcmQ6, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=69, state=4, num_pkts=6, num_bytes_ip=317, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=384, state=4, num_pkts=6, num_bytes_ip=602, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.084752, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, 
id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=334 UGFzc3dvcmQ6, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 235 [3] cmd: string = AUTH_ANSWER @@ -119,13 +119,13 @@ [5] cont_resp: bool = F 1254722769.614414 smtp_request - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=105, state=4, num_pkts=6, num_bytes_ip=317, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=384, state=4, num_pkts=7, num_bytes_ip=672, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.085368, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=235 Authentication succeeded, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=105, state=4, num_pkts=6, num_bytes_ip=317, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=384, state=4, num_pkts=7, num_bytes_ip=672, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.085368, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=235 Authentication succeeded, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = MAIL [3] arg: string = FROM: 1254722769.956765 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=105, state=4, num_pkts=7, num_bytes_ip=393, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=392, state=4, num_pkts=7, num_bytes_ip=672, 
flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.427719, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=235 Authentication succeeded, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=105, state=4, num_pkts=7, num_bytes_ip=393, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=392, state=4, num_pkts=7, num_bytes_ip=672, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.427719, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=235 Authentication succeeded, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = MAIL @@ -133,13 +133,13 @@ [5] cont_resp: bool = F 1254722769.957250 smtp_request - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=144, state=4, num_pkts=7, num_bytes_ip=393, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=392, state=4, num_pkts=8, num_bytes_ip=720, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.428204, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 OK, 
path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=144, state=4, num_pkts=7, num_bytes_ip=393, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=392, state=4, num_pkts=8, num_bytes_ip=720, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.428204, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 OK, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = RCPT [3] arg: string = TO: 1254722770.319708 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=144, state=4, num_pkts=8, num_bytes_ip=472, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=406, state=4, num_pkts=8, num_bytes_ip=720, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.790662, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 OK, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=144, state=4, num_pkts=8, num_bytes_ip=472, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=406, state=4, num_pkts=8, num_bytes_ip=720, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.790662, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, 
mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 OK, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = RCPT @@ -147,13 +147,13 @@ [5] cont_resp: bool = F 1254722770.320203 smtp_request - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=150, state=4, num_pkts=8, num_bytes_ip=472, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=406, state=4, num_pkts=9, num_bytes_ip=774, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.791157, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Accepted, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=150, state=4, num_pkts=8, num_bytes_ip=472, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=406, state=4, num_pkts=9, num_bytes_ip=774, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=2.791157, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Accepted, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = DATA [3] arg: string = 1254722770.661679 smtp_reply - [0] 
c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=150, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=9, num_bytes_ip=774, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.132633, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Accepted, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=150, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=9, num_bytes_ip=774, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=3.132633, service={\x0aSMTP\x0a}, history=ShAdDa, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Accepted, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 354 [3] cmd: string = DATA @@ -161,13 +161,13 @@ [5] cont_resp: bool = F 1254722771.858334 smtp_request - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, 
uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=23, num_bytes_ip=21438, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=462, state=4, num_pkts=15, num_bytes_ip=1070, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.329288, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = . [3] arg: string = . 
1254722772.248789 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=24, num_bytes_ip=21507, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=490, state=4, num_pkts=21, num_bytes_ip=1310, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.719743, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14699, state=4, num_pkts=24, num_bytes_ip=21507, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=490, state=4, num_pkts=21, num_bytes_ip=1310, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=4.719743, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722768.219663, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=gurpartap@patriots.in, rcptto={\x0araj_deol2002in@yahoo.co.in\x0a}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={\x0a\x0a}, cc=, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5, Ft4M3f2yMvLlmwtbq9, FL9Y0d45OI4LpS6fmh]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = . 
@@ -175,13 +175,13 @@ [5] cont_resp: bool = F 1254722774.763825 smtp_request - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14705, state=4, num_pkts=25, num_bytes_ip=21547, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=490, state=4, num_pkts=22, num_bytes_ip=1378, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=7.234779, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722772.248789, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=2, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=1, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14705, state=4, num_pkts=25, num_bytes_ip=21547, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=490, state=4, num_pkts=22, num_bytes_ip=1378, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=7.234779, service={\x0aSMTP\x0a}, history=ShAdDaT, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722772.248789, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=2, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=1, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = QUIT [3] arg: string = 1254722775.105467 smtp_reply - [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14705, state=5, num_pkts=27, num_bytes_ip=21633, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=538, state=4, num_pkts=22, num_bytes_ip=1378, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=7.576421, service={\x0aSMTP\x0a}, history=ShAdDaTF, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722772.248789, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=2, 
helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=1, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=14705, state=5, num_pkts=27, num_bytes_ip=21633, flow_label=0, l2_addr=00:e0:1c:3c:17:c2], resp=[size=538, state=4, num_pkts=22, num_bytes_ip=1378, flow_label=0, l2_addr=00:1f:33:d9:81:60], start_time=1254722767.529046, duration=7.576421, service={\x0aSMTP\x0a}, history=ShAdDaTF, uid=ClEkJM2Vm5giqnMf4h, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1254722772.248789, uid=ClEkJM2Vm5giqnMf4h, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=2, helo=GP, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=, path=[74.53.140.153, 10.10.1.4], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=1, pending_messages=, mime_depth=5], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 221 [3] cmd: string = QUIT @@ -189,7 +189,7 @@ [5] cont_resp: bool = F 1437831787.867142 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=35, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.010247, service={\x0a\x0a}, history=ShAd, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=0, state=4, num_pkts=2, num_bytes_ip=116, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=35, state=4, num_pkts=1, num_bytes_ip=60, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.010247, service={\x0a\x0a}, history=ShAd, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=, smtp_state=, socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 220 [3] cmd: string = > @@ -197,13 +197,13 @@ [5] cont_resp: bool = F 1437831787.883306 smtp_request - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], 
orig=[size=24, state=4, num_pkts=3, num_bytes_ip=168, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=35, state=4, num_pkts=2, num_bytes_ip=147, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.026411, service={\x0aSMTP\x0a}, history=ShAdD, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 uprise ESMTP SubEthaSMTP null, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=3, num_bytes_ip=168, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=35, state=4, num_pkts=2, num_bytes_ip=147, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.026411, service={\x0aSMTP\x0a}, history=ShAdD, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=, mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 uprise ESMTP SubEthaSMTP null, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=, messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = EHLO [3] arg: string = [192.168.133.100] 1437831787.886281 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=4, num_bytes_ip=244, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=3, num_bytes_ip=199, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.029386, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, 
in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 uprise ESMTP SubEthaSMTP null, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=4, num_bytes_ip=244, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=3, num_bytes_ip=199, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.029386, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=220 uprise ESMTP SubEthaSMTP null, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -211,7 +211,7 @@ [5] cont_resp: bool = T 1437831787.886281 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=4, num_bytes_ip=244, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=3, num_bytes_ip=199, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.029386, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 uprise, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=4, num_bytes_ip=244, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=3, num_bytes_ip=199, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.029386, service={\x0aSMTP\x0a}, history=ShAdDa, 
uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 uprise, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -219,7 +219,7 @@ [5] cont_resp: bool = T 1437831787.886281 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=4, num_bytes_ip=244, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=3, num_bytes_ip=199, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.029386, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 8BITMIME, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=4, num_bytes_ip=244, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=3, num_bytes_ip=199, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.029386, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 8BITMIME, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, 
fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -227,7 +227,7 @@ [5] cont_resp: bool = T 1437831787.886281 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=4, num_bytes_ip=244, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=3, num_bytes_ip=199, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.029386, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 AUTH LOGIN, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=24, state=4, num_pkts=4, num_bytes_ip=244, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=3, num_bytes_ip=199, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.029386, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 AUTH LOGIN, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = EHLO @@ -235,13 +235,13 @@ [5] cont_resp: bool = F 1437831787.887031 smtp_request - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=56, state=4, num_pkts=5, num_bytes_ip=296, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=4, num_bytes_ip=301, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.030136, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, 
dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=56, state=4, num_pkts=5, num_bytes_ip=296, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=85, state=4, num_pkts=4, num_bytes_ip=301, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.030136, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=F, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = MAIL [3] arg: string = FROM: 1437831787.889785 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=56, state=4, num_pkts=6, num_bytes_ip=380, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=93, state=4, num_pkts=4, num_bytes_ip=301, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.03289, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = 
[id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=56, state=4, num_pkts=6, num_bytes_ip=380, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=93, state=4, num_pkts=4, num_bytes_ip=301, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.03289, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = MAIL @@ -249,13 +249,13 @@ [5] cont_resp: bool = F 1437831787.890232 smtp_request - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=88, state=4, num_pkts=7, num_bytes_ip=432, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=93, state=4, num_pkts=5, num_bytes_ip=361, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.033337, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=88, state=4, num_pkts=7, num_bytes_ip=432, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=93, state=4, num_pkts=5, num_bytes_ip=361, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.033337, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, 
id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto=, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = RCPT [3] arg: string = TO: 1437831787.892986 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=88, state=4, num_pkts=8, num_bytes_ip=516, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=101, state=4, num_pkts=5, num_bytes_ip=361, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.036091, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=88, state=4, num_pkts=8, num_bytes_ip=516, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=101, state=4, num_pkts=5, num_bytes_ip=361, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.036091, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = RCPT @@ -263,13 +263,13 @@ [5] cont_resp: bool = F 1437831787.893587 smtp_request - [0] c: connection = [id=[orig_h=192.168.133.100, 
orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=121, state=4, num_pkts=9, num_bytes_ip=568, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=101, state=4, num_pkts=6, num_bytes_ip=421, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.036692, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=121, state=4, num_pkts=9, num_bytes_ip=568, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=101, state=4, num_pkts=6, num_bytes_ip=421, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.036692, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = RCPT [3] arg: string = TO: 1437831787.897624 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=121, state=4, num_pkts=10, num_bytes_ip=653, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=109, state=4, num_pkts=6, num_bytes_ip=421, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.040729, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, 
id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=121, state=4, num_pkts=10, num_bytes_ip=653, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=109, state=4, num_pkts=6, num_bytes_ip=421, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.040729, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = RCPT @@ -277,13 +277,13 @@ [5] cont_resp: bool = F 1437831787.898413 smtp_request - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=156, state=4, num_pkts=11, num_bytes_ip=705, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=109, state=4, num_pkts=7, num_bytes_ip=481, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.041518, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = 
[id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=156, state=4, num_pkts=11, num_bytes_ip=705, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=109, state=4, num_pkts=7, num_bytes_ip=481, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.041518, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = RCPT [3] arg: string = TO: 1437831787.901069 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=156, state=4, num_pkts=12, num_bytes_ip=792, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=117, state=4, num_pkts=7, num_bytes_ip=481, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.044174, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=156, state=4, num_pkts=12, num_bytes_ip=792, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=117, state=4, num_pkts=7, num_bytes_ip=481, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.044174, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, 
rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = RCPT @@ -291,13 +291,13 @@ [5] cont_resp: bool = F 1437831787.901697 smtp_request - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=162, state=4, num_pkts=13, num_bytes_ip=844, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=117, state=4, num_pkts=8, num_bytes_ip=541, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.044802, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=162, state=4, num_pkts=13, num_bytes_ip=844, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=117, state=4, num_pkts=8, num_bytes_ip=541, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.044802, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, 
entity=, fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=0], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = DATA [3] arg: string = 1437831787.904758 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=162, state=4, num_pkts=14, num_bytes_ip=902, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=8, num_bytes_ip=541, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.047863, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=162, state=4, num_pkts=14, num_bytes_ip=902, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=8, num_bytes_ip=541, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.047863, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=, from=, to=, cc=, reply_to=, msg_id=, in_reply_to=, subject=, x_originating_ip=, first_received=, second_received=, last_reply=250 Ok, path=[192.168.133.102, 192.168.133.100], user_agent=, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 354 [3] cmd: string = DATA @@ -305,13 +305,13 @@ [5] cont_resp: bool = F 1437831787.905375 smtp_request - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, 
duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=15, num_bytes_ip=954, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=154, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.04848, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] is_orig: bool = T [2] command: string = . [3] arg: string = . 
1437831787.914113 smtp_reply - [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=16, num_bytes_ip=1813, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=162, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.057218, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] + [0] c: connection = [id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], orig=[size=969, state=4, num_pkts=16, num_bytes_ip=1813, flow_label=0, l2_addr=58:b0:35:86:54:8d], resp=[size=162, state=4, num_pkts=9, num_bytes_ip=630, flow_label=0, l2_addr=00:08:ca:cc:ad:4c], start_time=1437831787.856895, duration=0.057218, service={\x0aSMTP\x0a}, history=ShAdDa, uid=CmES5u32sYpV7JYN, tunnel=, vlan=, inner_vlan=, dpd=, conn=, extract_orig=F, extract_resp=F, thresholds=, dce_rpc=, dce_rpc_state=, dce_rpc_backing=, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, krb=, modbus=, mysql=, ntlm=, ntp=, radius=, rdp=, rfb=, sip=, sip_state=, snmp=, smb_state=, smtp=[ts=1437831787.867142, uid=CmES5u32sYpV7JYN, id=[orig_h=192.168.133.100, orig_p=49648/tcp, resp_h=192.168.133.102, resp_p=25/tcp], trans_depth=1, helo=[192.168.133.100], mailfrom=albert@example.com, rcptto={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com,\x0aericlim220@yahoo.com\x0a}, date=Sat, 25 Jul 2015 16:43:07 +0300, from=Albert Zaharovits , to={\x0aericlim220@yahoo.com\x0a}, cc={\x0adavis_mark1@outlook.com,\x0afelica4uu@hotmail.com\x0a}, reply_to=, msg_id=, in_reply_to=<9ACEE03C-AB98-4046-AEC1-BF4910C61E96@example.com>, subject=Re: Bro SMTP CC Header, x_originating_ip=, first_received=, second_received=, last_reply=354 End data with ., path=[192.168.133.102, 192.168.133.100], user_agent=Apple Mail (2.2102), tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[FKX8fw2lEHCTK8syM3]], smtp_state=[helo=[192.168.133.100], messages_transferred=0, pending_messages=, mime_depth=1], socks=, ssh=, syslog=] [1] is_orig: bool = F [2] code: count = 250 [3] cmd: string = . 
diff --git a/testing/btest/Baseline/scripts.policy.misc.weird-stats/bro.weird_stats.log b/testing/btest/Baseline/scripts.policy.misc.weird-stats/zeek.weird_stats.log similarity index 100% rename from testing/btest/Baseline/scripts.policy.misc.weird-stats/bro.weird_stats.log rename to testing/btest/Baseline/scripts.policy.misc.weird-stats/zeek.weird_stats.log diff --git a/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-all.log b/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-all.log index 851d9ec7dd..8cf5b98527 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-all.log +++ b/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-all.log @@ -3,12 +3,13 @@ #empty_field (empty) #unset_field - #path known_services -#open 2014-04-01-23-16-24 +#open 2019-06-15-23-46-49 #fields ts host port_num port_proto service #types time addr port enum set[string] -1308930691.049431 172.16.238.131 22 tcp SSH +1308930691.089263 172.16.238.131 22 tcp SSH 1308930694.550308 172.16.238.131 80 tcp HTTP 1308930716.462556 74.125.225.81 80 tcp HTTP 1308930718.361665 172.16.238.131 21 tcp FTP -1308930726.872485 141.142.192.39 22 tcp SSH -#close 2014-04-01-23-16-24 +1308930726.889624 141.142.192.39 22 tcp SSH +1308930727.236071 69.50.219.51 123 udp NTP +#close 2019-06-15-23-46-49 diff --git a/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-remote.log b/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-remote.log index 54680af4cc..6441c5255e 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-remote.log +++ b/testing/btest/Baseline/scripts.policy.protocols.conn.known-services/knownservices-remote.log @@ -3,9 +3,10 @@ #empty_field (empty) #unset_field - #path known_services -#open 2014-04-01-23-16-22 +#open 2019-06-15-23-44-01 #fields ts host port_num port_proto service #types time addr port enum set[string] 1308930716.462556 74.125.225.81 80 tcp HTTP -1308930726.872485 141.142.192.39 22 tcp SSH -#close 2014-04-01-23-16-23 +1308930726.889624 141.142.192.39 22 tcp SSH +1308930727.236071 69.50.219.51 123 udp NTP +#close 2019-06-15-23-44-01 diff --git a/testing/btest/Baseline/scripts.policy.protocols.ssh.detect-bruteforcing/notice.log b/testing/btest/Baseline/scripts.policy.protocols.ssh.detect-bruteforcing/notice.log index 26aa4144c8..0cd8a83a94 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.ssh.detect-bruteforcing/notice.log +++ b/testing/btest/Baseline/scripts.policy.protocols.ssh.detect-bruteforcing/notice.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path notice -#open 2017-12-21-02-29-44 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude -#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval bool string string string double double -1427726759.303199 - - - - - - - - - SSH::Password_Guessing 192.168.56.1 appears to be guessing SSH passwords (seen in 10 connections). 
Sampled servers: 192.168.56.103, 192.168.56.103, 192.168.56.103, 192.168.56.103, 192.168.56.103 192.168.56.1 - - - - Notice::ACTION_LOG 3600.000000 F - - - - - -#close 2017-12-21-02-29-44 +#open 2019-06-05-19-32-18 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude +#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval string string string double double +1427726759.303199 - - - - - - - - - SSH::Password_Guessing 192.168.56.1 appears to be guessing SSH passwords (seen in 10 connections). Sampled servers: 192.168.56.103, 192.168.56.103, 192.168.56.103, 192.168.56.103, 192.168.56.103 192.168.56.1 - - - - Notice::ACTION_LOG 3600.000000 - - - - - +#close 2019-06-05-19-32-18 diff --git a/testing/btest/Baseline/scripts.policy.protocols.ssl.expiring-certs/notice.log b/testing/btest/Baseline/scripts.policy.protocols.ssl.expiring-certs/notice.log index cdfc85691a..a1c2670059 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.ssl.expiring-certs/notice.log +++ b/testing/btest/Baseline/scripts.policy.protocols.ssl.expiring-certs/notice.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path notice -#open 2017-12-21-02-30-08 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude -#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval bool string string string double double -1394745603.293028 CHhAvVGS1DHFjwGM9 192.168.4.149 60539 87.98.220.10 443 F1fX1R2cDOzbvg17ye - - tcp SSL::Certificate_Expired Certificate CN=www.spidh.org,OU=COMODO SSL,OU=Domain Control Validated expired at 2014-03-04-23:59:59.000000000 - 192.168.4.149 87.98.220.10 443 - - Notice::ACTION_LOG 86400.000000 F - - - - - -1394745619.197766 ClEkJM2Vm5giqnMf4h 192.168.4.149 60540 122.1.240.204 443 F6NAbK127LhNBaEe5c - - tcp SSL::Certificate_Expires_Soon Certificate CN=www.tobu-estate.com,OU=Terms of use at www.verisign.com/rpa (c)05,O=TOBU RAILWAY Co.\\,Ltd.,L=Sumida-ku,ST=Tokyo,C=JP is going to expire at 2014-03-14-23:59:59.000000000 - 192.168.4.149 122.1.240.204 443 - - Notice::ACTION_LOG 86400.000000 F - - - - - -#close 2017-12-21-02-30-08 +#open 2019-06-05-19-32-18 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude +#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval string string string double double +1394745603.293028 CHhAvVGS1DHFjwGM9 192.168.4.149 60539 87.98.220.10 443 F1fX1R2cDOzbvg17ye - - tcp SSL::Certificate_Expired Certificate CN=www.spidh.org,OU=COMODO SSL,OU=Domain Control Validated expired at 2014-03-04-23:59:59.000000000 - 192.168.4.149 87.98.220.10 443 - - Notice::ACTION_LOG 86400.000000 - - - - - +1394745619.197766 ClEkJM2Vm5giqnMf4h 192.168.4.149 60540 122.1.240.204 443 F6NAbK127LhNBaEe5c - - tcp 
SSL::Certificate_Expires_Soon Certificate CN=www.tobu-estate.com,OU=Terms of use at www.verisign.com/rpa (c)05,O=TOBU RAILWAY Co.\\,Ltd.,L=Sumida-ku,ST=Tokyo,C=JP is going to expire at 2014-03-14-23:59:59.000000000 - 192.168.4.149 122.1.240.204 443 - - Notice::ACTION_LOG 86400.000000 - - - - - +#close 2019-06-05-19-32-19 diff --git a/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-encrypted-short.log b/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-encrypted-short.log index dc1f0239e2..2d74de6e3d 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-encrypted-short.log +++ b/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-encrypted-short.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path notice -#open 2017-12-21-02-30-25 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude -#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval bool string string string double double -1398954957.074664 CHhAvVGS1DHFjwGM9 192.168.4.149 54233 162.219.2.166 4443 - - - tcp Heartbleed::SSL_Heartbeat_Attack Heartbeat before ciphertext. Probable attack or scan. Length: 32, is_orig: 1 - 192.168.4.149 162.219.2.166 4443 32 - Notice::ACTION_LOG 3600.000000 F - - - - - -1398954957.074664 CHhAvVGS1DHFjwGM9 192.168.4.149 54233 162.219.2.166 4443 - - - tcp Heartbleed::SSL_Heartbeat_Odd_Length Heartbeat message smaller than minimum required length. Probable attack. Message length: 32. Required length: 48. Cipher: TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA. Cipher match: /^?(_256_CBC_SHA$)$?/ - 192.168.4.149 162.219.2.166 4443 32 - Notice::ACTION_LOG 3600.000000 F - - - - - -1398954957.145535 CHhAvVGS1DHFjwGM9 192.168.4.149 54233 162.219.2.166 4443 - - - tcp Heartbleed::SSL_Heartbeat_Attack_Success An encrypted TLS heartbleed attack was probably detected! First packet client record length 32, first packet server record length 48. Time: 0.351035 - 192.168.4.149 162.219.2.166 4443 - - Notice::ACTION_LOG 3600.000000 F - - - - - -#close 2017-12-21-02-30-25 +#open 2019-06-05-19-59-24 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude +#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval string string string double double +1398954957.074664 CHhAvVGS1DHFjwGM9 192.168.4.149 54233 162.219.2.166 4443 - - - tcp Heartbleed::SSL_Heartbeat_Attack Heartbeat before ciphertext. Probable attack or scan. Length: 32, is_orig: 1 - 192.168.4.149 162.219.2.166 4443 32 - Notice::ACTION_LOG 3600.000000 - - - - - +1398954957.074664 CHhAvVGS1DHFjwGM9 192.168.4.149 54233 162.219.2.166 4443 - - - tcp Heartbleed::SSL_Heartbeat_Odd_Length Heartbeat message smaller than minimum required length. Probable attack. Message length: 32. Required length: 48. Cipher: TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA. 
Cipher match: /^?(_256_CBC_SHA$)$?/ - 192.168.4.149 162.219.2.166 4443 32 - Notice::ACTION_LOG 3600.000000 - - - - - +1398954957.145535 CHhAvVGS1DHFjwGM9 192.168.4.149 54233 162.219.2.166 4443 - - - tcp Heartbleed::SSL_Heartbeat_Attack_Success An encrypted TLS heartbleed attack was probably detected! First packet client record length 32, first packet server record length 48. Time: 0.351035 - 192.168.4.149 162.219.2.166 4443 - - Notice::ACTION_LOG 3600.000000 - - - - - +#close 2019-06-05-19-59-24 diff --git a/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-encrypted-success.log b/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-encrypted-success.log index 8a104974c4..06d326b5c2 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-encrypted-success.log +++ b/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-encrypted-success.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path notice -#open 2017-12-21-02-30-24 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude -#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval bool string string string double double -1397169549.882425 CHhAvVGS1DHFjwGM9 192.168.4.149 59676 107.170.241.107 443 - - - tcp Heartbleed::SSL_Heartbeat_Attack Heartbeat before ciphertext. Probable attack or scan. Length: 32, is_orig: 1 - 192.168.4.149 107.170.241.107 443 32 - Notice::ACTION_LOG 3600.000000 F - - - - - -1397169549.882425 CHhAvVGS1DHFjwGM9 192.168.4.149 59676 107.170.241.107 443 - - - tcp Heartbleed::SSL_Heartbeat_Odd_Length Heartbeat message smaller than minimum required length. Probable attack. Message length: 32. Required length: 48. Cipher: TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA. Cipher match: /^?(_256_CBC_SHA$)$?/ - 192.168.4.149 107.170.241.107 443 32 - Notice::ACTION_LOG 3600.000000 F - - - - - -1397169549.895057 CHhAvVGS1DHFjwGM9 192.168.4.149 59676 107.170.241.107 443 - - - tcp Heartbleed::SSL_Heartbeat_Attack_Success An encrypted TLS heartbleed attack was probably detected! First packet client record length 32, first packet server record length 16416. Time: 0.035413 - 192.168.4.149 107.170.241.107 443 - - Notice::ACTION_LOG 3600.000000 F - - - - - -#close 2017-12-21-02-30-24 +#open 2019-06-05-19-59-23 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude +#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval string string string double double +1397169549.882425 CHhAvVGS1DHFjwGM9 192.168.4.149 59676 107.170.241.107 443 - - - tcp Heartbleed::SSL_Heartbeat_Attack Heartbeat before ciphertext. Probable attack or scan. Length: 32, is_orig: 1 - 192.168.4.149 107.170.241.107 443 32 - Notice::ACTION_LOG 3600.000000 - - - - - +1397169549.882425 CHhAvVGS1DHFjwGM9 192.168.4.149 59676 107.170.241.107 443 - - - tcp Heartbleed::SSL_Heartbeat_Odd_Length Heartbeat message smaller than minimum required length. Probable attack. Message length: 32. Required length: 48. 
Cipher: TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA. Cipher match: /^?(_256_CBC_SHA$)$?/ - 192.168.4.149 107.170.241.107 443 32 - Notice::ACTION_LOG 3600.000000 - - - - - +1397169549.895057 CHhAvVGS1DHFjwGM9 192.168.4.149 59676 107.170.241.107 443 - - - tcp Heartbleed::SSL_Heartbeat_Attack_Success An encrypted TLS heartbleed attack was probably detected! First packet client record length 32, first packet server record length 16416. Time: 0.035413 - 192.168.4.149 107.170.241.107 443 - - Notice::ACTION_LOG 3600.000000 - - - - - +#close 2019-06-05-19-59-23 diff --git a/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-encrypted.log b/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-encrypted.log index 0d56fcba8d..8a2859f206 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-encrypted.log +++ b/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-encrypted.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path notice -#open 2017-12-21-02-30-23 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude -#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval bool string string string double double -1400106542.810248 CHhAvVGS1DHFjwGM9 54.221.166.250 56323 162.219.2.166 443 - - - tcp Heartbleed::SSL_Heartbeat_Attack Heartbeat before ciphertext. Probable attack or scan. Length: 86, is_orig: 1 - 54.221.166.250 162.219.2.166 443 86 - Notice::ACTION_LOG 3600.000000 F - - - - - -#close 2017-12-21-02-30-23 +#open 2019-06-05-19-59-22 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude +#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval string string string double double +1400106542.810248 CHhAvVGS1DHFjwGM9 54.221.166.250 56323 162.219.2.166 443 - - - tcp Heartbleed::SSL_Heartbeat_Attack Heartbeat before ciphertext. Probable attack or scan. 
Length: 86, is_orig: 1 - 54.221.166.250 162.219.2.166 443 86 - Notice::ACTION_LOG 3600.000000 - - - - - +#close 2019-06-05-19-59-23 diff --git a/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-heartbleed-success.log b/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-heartbleed-success.log index 4828b15af2..4b101157df 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-heartbleed-success.log +++ b/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-heartbleed-success.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path notice -#open 2017-12-21-02-30-22 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude -#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval bool string string string double double -1396976220.863714 CHhAvVGS1DHFjwGM9 173.203.79.216 41459 107.170.241.107 443 - - - tcp Heartbleed::SSL_Heartbeat_Attack An TLS heartbleed attack was detected! Record length 16368. Payload length 16365 - 173.203.79.216 107.170.241.107 443 - - Notice::ACTION_LOG 3600.000000 F - - - - - -1396976220.918017 CHhAvVGS1DHFjwGM9 173.203.79.216 41459 107.170.241.107 443 - - - tcp Heartbleed::SSL_Heartbeat_Attack_Success An TLS heartbleed attack detected before was probably exploited. Message length: 16384. Payload length: 16365 - 173.203.79.216 107.170.241.107 443 - - Notice::ACTION_LOG 3600.000000 F - - - - - -#close 2017-12-21-02-30-22 +#open 2019-06-05-19-59-22 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude +#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval string string string double double +1396976220.863714 CHhAvVGS1DHFjwGM9 173.203.79.216 41459 107.170.241.107 443 - - - tcp Heartbleed::SSL_Heartbeat_Attack An TLS heartbleed attack was detected! Record length 16368. Payload length 16365 - 173.203.79.216 107.170.241.107 443 - - Notice::ACTION_LOG 3600.000000 - - - - - +1396976220.918017 CHhAvVGS1DHFjwGM9 173.203.79.216 41459 107.170.241.107 443 - - - tcp Heartbleed::SSL_Heartbeat_Attack_Success An TLS heartbleed attack detected before was probably exploited. Message length: 16384. 
Payload length: 16365 - 173.203.79.216 107.170.241.107 443 - - Notice::ACTION_LOG 3600.000000 - - - - - +#close 2019-06-05-19-59-22 diff --git a/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-heartbleed.log b/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-heartbleed.log index da376c79a0..ee9959a59f 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-heartbleed.log +++ b/testing/btest/Baseline/scripts.policy.protocols.ssl.heartbleed/notice-heartbleed.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path notice -#open 2014-04-24-18-29-46 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude -#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval bool string string string double double -1396973486.753913 CXWv6p3arKYeMETxOg 173.203.79.216 46592 162.219.2.166 443 - - - tcp Heartbleed::SSL_Heartbeat_Attack An TLS heartbleed attack was detected! Record length 16368, payload length 16365 - 173.203.79.216 162.219.2.166 443 - bro Notice::ACTION_LOG 3600.000000 F - - - - - -#close 2014-04-24-18-29-46 +#open 2019-06-05-19-59-21 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude +#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval string string string double double +1396973486.753913 CHhAvVGS1DHFjwGM9 173.203.79.216 46592 162.219.2.166 443 - - - tcp Heartbleed::SSL_Heartbeat_Attack An TLS heartbleed attack was detected! Record length 16368. 
Payload length 16365 - 173.203.79.216 162.219.2.166 443 - - Notice::ACTION_LOG 3600.000000 - - - - - +#close 2019-06-05-19-59-21 diff --git a/testing/btest/Baseline/scripts.policy.protocols.ssl.weak-keys/notice-out.log b/testing/btest/Baseline/scripts.policy.protocols.ssl.weak-keys/notice-out.log index dddb66427d..7609b2a632 100644 --- a/testing/btest/Baseline/scripts.policy.protocols.ssl.weak-keys/notice-out.log +++ b/testing/btest/Baseline/scripts.policy.protocols.ssl.weak-keys/notice-out.log @@ -3,31 +3,31 @@ #empty_field (empty) #unset_field - #path notice -#open 2017-12-21-02-31-09 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude -#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval bool string string string double double -1398558136.430417 CHhAvVGS1DHFjwGM9 192.168.18.50 62277 162.219.2.166 443 - - - tcp SSL::Weak_Key Host uses weak DH parameters with 1024 key bits - 192.168.18.50 162.219.2.166 443 - - Notice::ACTION_LOG 86400.000000 F - - - - - -1398558136.430417 CHhAvVGS1DHFjwGM9 192.168.18.50 62277 162.219.2.166 443 - - - tcp SSL::Weak_Key DH key length of 1024 bits is smaller certificate key length of 2048 bits - 192.168.18.50 162.219.2.166 443 - - Notice::ACTION_LOG 86400.000000 F - - - - - -1398558136.542637 CHhAvVGS1DHFjwGM9 192.168.18.50 62277 162.219.2.166 443 - - - tcp SSL::Weak_Key Host uses weak certificate with 2048 bit key - 192.168.18.50 162.219.2.166 443 - - Notice::ACTION_LOG 86400.000000 F - - - - - -#close 2017-12-21-02-31-09 +#open 2019-06-05-19-32-22 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude +#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval string string string double double +1398558136.430417 CHhAvVGS1DHFjwGM9 192.168.18.50 62277 162.219.2.166 443 - - - tcp SSL::Weak_Key Host uses weak DH parameters with 1024 key bits - 192.168.18.50 162.219.2.166 443 - - Notice::ACTION_LOG 86400.000000 - - - - - +1398558136.430417 CHhAvVGS1DHFjwGM9 192.168.18.50 62277 162.219.2.166 443 - - - tcp SSL::Weak_Key DH key length of 1024 bits is smaller certificate key length of 2048 bits - 192.168.18.50 162.219.2.166 443 - - Notice::ACTION_LOG 86400.000000 - - - - - +1398558136.542637 CHhAvVGS1DHFjwGM9 192.168.18.50 62277 162.219.2.166 443 - - - tcp SSL::Weak_Key Host uses weak certificate with 2048 bit key - 192.168.18.50 162.219.2.166 443 - - Notice::ACTION_LOG 86400.000000 - - - - - +#close 2019-06-05-19-32-22 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path notice -#open 2017-12-21-02-31-10 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude -#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval bool string string string double double 
-1397165496.713940 CHhAvVGS1DHFjwGM9 192.168.4.149 59062 91.227.4.92 443 - - - tcp SSL::Old_Version Host uses protocol version SSLv2 which is lower than the safe minimum TLSv10 - 192.168.4.149 91.227.4.92 443 - - Notice::ACTION_LOG 86400.000000 F - - - - - -#close 2017-12-21-02-31-10 +#open 2019-06-05-19-32-23 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude +#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval string string string double double +1397165496.713940 CHhAvVGS1DHFjwGM9 192.168.4.149 59062 91.227.4.92 443 - - - tcp SSL::Old_Version Host uses protocol version SSLv2 which is lower than the safe minimum TLSv10 - 192.168.4.149 91.227.4.92 443 - - Notice::ACTION_LOG 86400.000000 - - - - - +#close 2019-06-05-19-32-24 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path notice -#open 2017-12-21-02-31-11 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude -#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval bool string string string double double -1170717505.734145 CHhAvVGS1DHFjwGM9 192.150.187.164 58868 194.127.84.106 443 - - - tcp SSL::Weak_Cipher Host established connection using unsafe ciper suite TLS_RSA_WITH_RC4_128_MD5 - 192.150.187.164 194.127.84.106 443 - - Notice::ACTION_LOG 86400.000000 F - - - - - -1170717505.934612 CHhAvVGS1DHFjwGM9 192.150.187.164 58868 194.127.84.106 443 - - - tcp SSL::Weak_Key Host uses weak certificate with 1024 bit key - 192.150.187.164 194.127.84.106 443 - - Notice::ACTION_LOG 86400.000000 F - - - - - -#close 2017-12-21-02-31-11 +#open 2019-06-05-19-32-25 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc proto note msg sub src dst p n peer_descr actions suppress_for remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude +#types time string addr port addr port string string string enum enum string string addr addr port count string set[enum] interval string string string double double +1170717505.734145 CHhAvVGS1DHFjwGM9 192.150.187.164 58868 194.127.84.106 443 - - - tcp SSL::Weak_Cipher Host established connection using unsafe ciper suite TLS_RSA_WITH_RC4_128_MD5 - 192.150.187.164 194.127.84.106 443 - - Notice::ACTION_LOG 86400.000000 - - - - - +1170717505.934612 CHhAvVGS1DHFjwGM9 192.150.187.164 58868 194.127.84.106 443 - - - tcp SSL::Weak_Key Host uses weak certificate with 1024 bit key - 192.150.187.164 194.127.84.106 443 - - Notice::ACTION_LOG 86400.000000 - - - - - +#close 2019-06-05-19-32-25 diff --git a/testing/btest/Baseline/signatures.src-port-header-condition/src-port-range.out b/testing/btest/Baseline/signatures.src-port-header-condition/src-port-range.out new file mode 100644 index 0000000000..e3df5bf1e7 --- /dev/null +++ b/testing/btest/Baseline/signatures.src-port-header-condition/src-port-range.out @@ -0,0 +1,3 @@ +signature_match [orig_h=127.0.0.1, orig_p=29998/udp, resp_h=127.0.0.1, resp_p=13000/udp] - 
src-port-range +signature_match [orig_h=127.0.0.1, orig_p=30001/udp, resp_h=127.0.0.1, resp_p=13000/udp] - src-port-range +signature_match [orig_h=127.0.0.1, orig_p=30003/udp, resp_h=127.0.0.1, resp_p=13000/udp] - src-port-range diff --git a/testing/btest/Baseline/signatures.udp-packetwise-insensitive/out b/testing/btest/Baseline/signatures.udp-packetwise-insensitive/out new file mode 100644 index 0000000000..5b5066d638 --- /dev/null +++ b/testing/btest/Baseline/signatures.udp-packetwise-insensitive/out @@ -0,0 +1,6 @@ +signature match, Found .*XXXX, XXXX +signature match, Found .*YYYY, YYYY +signature match, Found XXXX, XXXX +signature match, Found YYYY, YYYY +signature match, Found ^XXXX, XXXX +signature match, Found ^YYYY, YYYY diff --git a/testing/btest/README b/testing/btest/README index 200d1a3e0e..f20205c36b 100644 --- a/testing/btest/README +++ b/testing/btest/README @@ -1,4 +1,4 @@ -This a test suite of small "unit tests" that verify individual pieces of Bro +This a test suite of small "unit tests" that verify individual pieces of Zeek functionality. They all utilize BTest, a simple framework/driver for writing unit tests. More information about BTest can be found at https://github.com/zeek/btest @@ -20,14 +20,14 @@ Significant Subdirectories Packet captures utilized by the various BTest tests. * scripts/ - This hierarchy of tests emulates the hierarchy of the Bro scripts/ + This hierarchy of tests emulates the hierarchy of the Zeek scripts/ directory. * coverage/ This collection of tests relates to checking whether we're covering everything we want to in terms of tests, documentation, and which - scripts get loaded in different Bro configurations. These tests are - more prone to fail as new Bro scripts are developed and added to the + scripts get loaded in different Zeek configurations. These tests are + more prone to fail as new Zeek scripts are developed and added to the distribution -- checking the individual test's comments is the best place to check for more details on what exactly the test is checking and hints on how to fix it when it fails. @@ -48,7 +48,7 @@ run ``btest`` directly with desired options/arguments. Examples: You can specify a directory on the command line to run just the tests contained in that directory. This is useful if you wish to run all of a given type of test, without running all the tests - there are. For example, "btest scripts" will run all of the Bro + there are. For example, "btest scripts" will run all of the Zeek script unit tests. 
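Aside (not part of the diff): as a minimal sketch of the layout the README above describes, a test under testing/btest/ is just a Zeek script whose leading comments carry btest directives, following the same pattern used by the renamed tests later in this diff. The file and its contents below are illustrative only.

# @TEST-EXEC: zeek -b %INPUT >out
# @TEST-EXEC: btest-diff out

event zeek_init()
    {
    print "minimal btest unit test";
    }

Running "btest bifs" from within testing/btest then executes every test in that subdirectory, and a single test can typically be named directly (e.g. "btest bifs/cat.zeek"), assuming btest is installed and on the PATH.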
diff --git a/testing/btest/Traces/dhcp/dhcp_time_and_nameserver.trace b/testing/btest/Traces/dhcp/dhcp_time_and_nameserver.trace new file mode 100644 index 0000000000..3395e48d6e Binary files /dev/null and b/testing/btest/Traces/dhcp/dhcp_time_and_nameserver.trace differ diff --git a/testing/btest/Traces/dns-spf.pcap b/testing/btest/Traces/dns-spf.pcap new file mode 100644 index 0000000000..4781bcd416 Binary files /dev/null and b/testing/btest/Traces/dns-spf.pcap differ diff --git a/testing/btest/Traces/ntp/NTP-digest.pcap b/testing/btest/Traces/ntp/NTP-digest.pcap new file mode 100644 index 0000000000..0e8a262cab Binary files /dev/null and b/testing/btest/Traces/ntp/NTP-digest.pcap differ diff --git a/testing/btest/Traces/ntp/NTP_sync.pcap b/testing/btest/Traces/ntp/NTP_sync.pcap new file mode 100644 index 0000000000..997d9fbf5a Binary files /dev/null and b/testing/btest/Traces/ntp/NTP_sync.pcap differ diff --git a/testing/btest/Traces/ntp/ntp.pcap b/testing/btest/Traces/ntp/ntp.pcap new file mode 100644 index 0000000000..eaacb72d6b Binary files /dev/null and b/testing/btest/Traces/ntp/ntp.pcap differ diff --git a/testing/btest/Traces/ntp/ntp2.pcap b/testing/btest/Traces/ntp/ntp2.pcap new file mode 100644 index 0000000000..d242cc5c54 Binary files /dev/null and b/testing/btest/Traces/ntp/ntp2.pcap differ diff --git a/testing/btest/Traces/ntp/ntpmode67.pcap b/testing/btest/Traces/ntp/ntpmode67.pcap new file mode 100644 index 0000000000..ca0a8ca105 Binary files /dev/null and b/testing/btest/Traces/ntp/ntpmode67.pcap differ diff --git a/testing/btest/Traces/tls/hrr.pcap b/testing/btest/Traces/tls/hrr.pcap new file mode 100644 index 0000000000..d3d55ded24 Binary files /dev/null and b/testing/btest/Traces/tls/hrr.pcap differ diff --git a/testing/btest/Traces/tls/tls13_psk_succesfull.pcap b/testing/btest/Traces/tls/tls13_psk_succesfull.pcap new file mode 100644 index 0000000000..cdf7fcf132 Binary files /dev/null and b/testing/btest/Traces/tls/tls13_psk_succesfull.pcap differ diff --git a/testing/btest/Traces/udp-multiple-source-ports.pcap b/testing/btest/Traces/udp-multiple-source-ports.pcap new file mode 100644 index 0000000000..340fa19ce8 Binary files /dev/null and b/testing/btest/Traces/udp-multiple-source-ports.pcap differ diff --git a/testing/btest/bifs/addr_count_conversion.bro b/testing/btest/bifs/addr_count_conversion.bro deleted file mode 100644 index fb87a0c6a3..0000000000 --- a/testing/btest/bifs/addr_count_conversion.bro +++ /dev/null @@ -1,11 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -global v: index_vec; - -v = addr_to_counts([2001:0db8:85a3:0000:0000:8a2e:0370:7334]); -print v; -print counts_to_addr(v); -v = addr_to_counts(1.2.3.4); -print v; -print counts_to_addr(v); diff --git a/testing/btest/bifs/addr_count_conversion.zeek b/testing/btest/bifs/addr_count_conversion.zeek new file mode 100644 index 0000000000..c27d154932 --- /dev/null +++ b/testing/btest/bifs/addr_count_conversion.zeek @@ -0,0 +1,11 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +global v: index_vec; + +v = addr_to_counts([2001:0db8:85a3:0000:0000:8a2e:0370:7334]); +print v; +print counts_to_addr(v); +v = addr_to_counts(1.2.3.4); +print v; +print counts_to_addr(v); diff --git a/testing/btest/bifs/addr_to_ptr_name.bro b/testing/btest/bifs/addr_to_ptr_name.bro deleted file mode 100644 index ac2391cf9b..0000000000 --- a/testing/btest/bifs/addr_to_ptr_name.bro +++ /dev/null @@ -1,6 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff 
output - -print addr_to_ptr_name([2607:f8b0:4009:802::1012]); -print addr_to_ptr_name(74.125.225.52); - diff --git a/testing/btest/bifs/addr_to_ptr_name.zeek b/testing/btest/bifs/addr_to_ptr_name.zeek new file mode 100644 index 0000000000..113750cb4e --- /dev/null +++ b/testing/btest/bifs/addr_to_ptr_name.zeek @@ -0,0 +1,6 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +print addr_to_ptr_name([2607:f8b0:4009:802::1012]); +print addr_to_ptr_name(74.125.225.52); + diff --git a/testing/btest/bifs/addr_version.bro b/testing/btest/bifs/addr_version.bro deleted file mode 100644 index bf96c0d1f3..0000000000 --- a/testing/btest/bifs/addr_version.bro +++ /dev/null @@ -1,7 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -print is_v4_addr(1.2.3.4); -print is_v4_addr([::1]); -print is_v6_addr(1.2.3.4); -print is_v6_addr([::1]); diff --git a/testing/btest/bifs/addr_version.zeek b/testing/btest/bifs/addr_version.zeek new file mode 100644 index 0000000000..ca3e4a3100 --- /dev/null +++ b/testing/btest/bifs/addr_version.zeek @@ -0,0 +1,7 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +print is_v4_addr(1.2.3.4); +print is_v4_addr([::1]); +print is_v6_addr(1.2.3.4); +print is_v6_addr([::1]); diff --git a/testing/btest/bifs/all_set.bro b/testing/btest/bifs/all_set.bro deleted file mode 100644 index 56f7b6e7f2..0000000000 --- a/testing/btest/bifs/all_set.bro +++ /dev/null @@ -1,15 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = vector( T, F, T ); - print all_set(a); - - local b: vector of bool = vector(); - print all_set(b); - - local c = vector( T ); - print all_set(c); - } diff --git a/testing/btest/bifs/all_set.zeek b/testing/btest/bifs/all_set.zeek new file mode 100644 index 0000000000..70a5ea0ecd --- /dev/null +++ b/testing/btest/bifs/all_set.zeek @@ -0,0 +1,15 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = vector( T, F, T ); + print all_set(a); + + local b: vector of bool = vector(); + print all_set(b); + + local c = vector( T ); + print all_set(c); + } diff --git a/testing/btest/bifs/analyzer_name.bro b/testing/btest/bifs/analyzer_name.bro deleted file mode 100644 index 266d1c159f..0000000000 --- a/testing/btest/bifs/analyzer_name.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = Analyzer::ANALYZER_PIA_TCP; - print Analyzer::name(a); - } diff --git a/testing/btest/bifs/analyzer_name.zeek b/testing/btest/bifs/analyzer_name.zeek new file mode 100644 index 0000000000..fc896dc417 --- /dev/null +++ b/testing/btest/bifs/analyzer_name.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = Analyzer::ANALYZER_PIA_TCP; + print Analyzer::name(a); + } diff --git a/testing/btest/bifs/any_set.bro b/testing/btest/bifs/any_set.bro deleted file mode 100644 index b3e9e3c711..0000000000 --- a/testing/btest/bifs/any_set.bro +++ /dev/null @@ -1,15 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = vector( F, T, F ); - print any_set(a); - - local b: vector of bool = vector(); - print any_set(b); - - local c = vector( F ); - print any_set(c); - } diff --git a/testing/btest/bifs/any_set.zeek b/testing/btest/bifs/any_set.zeek new file mode 100644 index 0000000000..b64fbb461d --- /dev/null +++ 
b/testing/btest/bifs/any_set.zeek @@ -0,0 +1,15 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = vector( F, T, F ); + print any_set(a); + + local b: vector of bool = vector(); + print any_set(b); + + local c = vector( F ); + print any_set(c); + } diff --git a/testing/btest/bifs/bloomfilter-seed.bro b/testing/btest/bifs/bloomfilter-seed.bro deleted file mode 100644 index 436638e2af..0000000000 --- a/testing/btest/bifs/bloomfilter-seed.bro +++ /dev/null @@ -1,40 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT global_hash_seed="foo" >>output -# @TEST-EXEC: bro -b %INPUT global_hash_seed="my_seed" >>output -# @TEST-EXEC: btest-diff output - -type Foo: record - { - a: count; - b: string; - }; - -function test_bloom_filter() - { - local bf1 = bloomfilter_basic_init(0.9, 10); - bloomfilter_add(bf1, "foo"); - bloomfilter_add(bf1, "bar"); - - local bf2 = bloomfilter_basic_init(0.9, 10); - bloomfilter_add(bf2, Foo($a=1, $b="xx")); - bloomfilter_add(bf2, Foo($a=2, $b="yy")); - - local bf3 = bloomfilter_basic_init(0.9, 10, "my_seed"); - bloomfilter_add(bf3, "foo"); - bloomfilter_add(bf3, "bar"); - - local bf4 = bloomfilter_basic_init(0.9, 10, "my_seed"); - bloomfilter_add(bf4, Foo($a=1, $b="xx")); - bloomfilter_add(bf4, Foo($a=2, $b="yy")); - - print "bf1, global_seed", bloomfilter_internal_state(bf1); - print "bf2, global_seed", bloomfilter_internal_state(bf2); - print "bf3, my_seed", bloomfilter_internal_state(bf3); - print "bf4, my_seed", bloomfilter_internal_state(bf4); - - - } - -event bro_init() - { - test_bloom_filter(); - } diff --git a/testing/btest/bifs/bloomfilter-seed.zeek b/testing/btest/bifs/bloomfilter-seed.zeek new file mode 100644 index 0000000000..bfa0b0795f --- /dev/null +++ b/testing/btest/bifs/bloomfilter-seed.zeek @@ -0,0 +1,40 @@ +# @TEST-EXEC: zeek -b %INPUT global_hash_seed="foo" >>output +# @TEST-EXEC: zeek -b %INPUT global_hash_seed="my_seed" >>output +# @TEST-EXEC: btest-diff output + +type Foo: record + { + a: count; + b: string; + }; + +function test_bloom_filter() + { + local bf1 = bloomfilter_basic_init(0.9, 10); + bloomfilter_add(bf1, "foo"); + bloomfilter_add(bf1, "bar"); + + local bf2 = bloomfilter_basic_init(0.9, 10); + bloomfilter_add(bf2, Foo($a=1, $b="xx")); + bloomfilter_add(bf2, Foo($a=2, $b="yy")); + + local bf3 = bloomfilter_basic_init(0.9, 10, "my_seed"); + bloomfilter_add(bf3, "foo"); + bloomfilter_add(bf3, "bar"); + + local bf4 = bloomfilter_basic_init(0.9, 10, "my_seed"); + bloomfilter_add(bf4, Foo($a=1, $b="xx")); + bloomfilter_add(bf4, Foo($a=2, $b="yy")); + + print "bf1, global_seed", bloomfilter_internal_state(bf1); + print "bf2, global_seed", bloomfilter_internal_state(bf2); + print "bf3, my_seed", bloomfilter_internal_state(bf3); + print "bf4, my_seed", bloomfilter_internal_state(bf4); + + + } + +event zeek_init() + { + test_bloom_filter(); + } diff --git a/testing/btest/bifs/bloomfilter.bro b/testing/btest/bifs/bloomfilter.bro deleted file mode 100644 index c0ccc2a552..0000000000 --- a/testing/btest/bifs/bloomfilter.bro +++ /dev/null @@ -1,95 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: btest-diff output - -function test_basic_bloom_filter() - { - # Basic usage with counts. 
- local bf_cnt = bloomfilter_basic_init(0.1, 1000); - bloomfilter_add(bf_cnt, 42); - bloomfilter_add(bf_cnt, 84); - bloomfilter_add(bf_cnt, 168); - print bloomfilter_lookup(bf_cnt, 0); - print bloomfilter_lookup(bf_cnt, 42); - print bloomfilter_lookup(bf_cnt, 168); - print bloomfilter_lookup(bf_cnt, 336); - bloomfilter_add(bf_cnt, 0.5); # Type mismatch - bloomfilter_add(bf_cnt, "foo"); # Type mismatch - - # Alternative constructor. - local bf_dbl = bloomfilter_basic_init2(4, 10); - bloomfilter_add(bf_dbl, 4.2); - bloomfilter_add(bf_dbl, 3.14); - print bloomfilter_lookup(bf_dbl, 4.2); - print bloomfilter_lookup(bf_dbl, 3.14); - - # Basic usage with strings. - local bf_str = bloomfilter_basic_init(0.9, 10); - bloomfilter_add(bf_str, "foo"); - bloomfilter_add(bf_str, "bar"); - print bloomfilter_lookup(bf_str, "foo"); - print bloomfilter_lookup(bf_str, "bar"); - # print bloomfilter_lookup(bf_str, "bazzz"), "fp"; # FP false positive does no longer trigger after hash function change - print bloomfilter_lookup(bf_str, "quuux"), "fp"; # FP - bloomfilter_add(bf_str, 0.5); # Type mismatch - bloomfilter_add(bf_str, 100); # Type mismatch - - # Edge cases. - local bf_edge0 = bloomfilter_basic_init(0.000000000001, 1); - local bf_edge1 = bloomfilter_basic_init(0.00000001, 100000000); - local bf_edge2 = bloomfilter_basic_init(0.9999999, 1); - local bf_edge3 = bloomfilter_basic_init(0.9999999, 100000000000); - - # Invalid parameters. - local bf_bug0 = bloomfilter_basic_init(-0.5, 42); - local bf_bug1 = bloomfilter_basic_init(1.1, 42); - - # Merging - local bf_cnt2 = bloomfilter_basic_init(0.1, 1000); - bloomfilter_add(bf_cnt2, 42); - bloomfilter_add(bf_cnt, 100); - local bf_merged = bloomfilter_merge(bf_cnt, bf_cnt2); - print bloomfilter_lookup(bf_merged, 42); - print bloomfilter_lookup(bf_merged, 84); - print bloomfilter_lookup(bf_merged, 100); - print bloomfilter_lookup(bf_merged, 168); - - #empty filter tests - local bf_empty = bloomfilter_basic_init(0.1, 1000); - local bf_empty_merged = bloomfilter_merge(bf_merged, bf_empty); - print bloomfilter_lookup(bf_empty_merged, 42); - } - -function test_counting_bloom_filter() - { - local bf = bloomfilter_counting_init(3, 32, 3); - bloomfilter_add(bf, "foo"); - print bloomfilter_lookup(bf, "foo"); # 1 - bloomfilter_add(bf, "foo"); - print bloomfilter_lookup(bf, "foo"); # 2 - bloomfilter_add(bf, "foo"); - print bloomfilter_lookup(bf, "foo"); # 3 - bloomfilter_add(bf, "foo"); - print bloomfilter_lookup(bf, "foo"); # still 3 - - - bloomfilter_add(bf, "bar"); - bloomfilter_add(bf, "bar"); - print bloomfilter_lookup(bf, "bar"); # 2 - print bloomfilter_lookup(bf, "foo"); # still 3 - - # Merging - local bf2 = bloomfilter_counting_init(3, 32, 3); - bloomfilter_add(bf2, "baz"); - bloomfilter_add(bf2, "baz"); - bloomfilter_add(bf2, "bar"); - local bf_merged = bloomfilter_merge(bf, bf2); - print bloomfilter_lookup(bf_merged, "foo"); - print bloomfilter_lookup(bf_merged, "bar"); - print bloomfilter_lookup(bf_merged, "baz"); - } - -event bro_init() - { - test_basic_bloom_filter(); - test_counting_bloom_filter(); - } diff --git a/testing/btest/bifs/bloomfilter.zeek b/testing/btest/bifs/bloomfilter.zeek new file mode 100644 index 0000000000..6b7abf3a17 --- /dev/null +++ b/testing/btest/bifs/bloomfilter.zeek @@ -0,0 +1,95 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: btest-diff output + +function test_basic_bloom_filter() + { + # Basic usage with counts. 
+ local bf_cnt = bloomfilter_basic_init(0.1, 1000); + bloomfilter_add(bf_cnt, 42); + bloomfilter_add(bf_cnt, 84); + bloomfilter_add(bf_cnt, 168); + print bloomfilter_lookup(bf_cnt, 0); + print bloomfilter_lookup(bf_cnt, 42); + print bloomfilter_lookup(bf_cnt, 168); + print bloomfilter_lookup(bf_cnt, 336); + bloomfilter_add(bf_cnt, 0.5); # Type mismatch + bloomfilter_add(bf_cnt, "foo"); # Type mismatch + + # Alternative constructor. + local bf_dbl = bloomfilter_basic_init2(4, 10); + bloomfilter_add(bf_dbl, 4.2); + bloomfilter_add(bf_dbl, 3.14); + print bloomfilter_lookup(bf_dbl, 4.2); + print bloomfilter_lookup(bf_dbl, 3.14); + + # Basic usage with strings. + local bf_str = bloomfilter_basic_init(0.9, 10); + bloomfilter_add(bf_str, "foo"); + bloomfilter_add(bf_str, "bar"); + print bloomfilter_lookup(bf_str, "foo"); + print bloomfilter_lookup(bf_str, "bar"); + # print bloomfilter_lookup(bf_str, "bazzz"), "fp"; # FP false positive does no longer trigger after hash function change + print bloomfilter_lookup(bf_str, "quuux"), "fp"; # FP + bloomfilter_add(bf_str, 0.5); # Type mismatch + bloomfilter_add(bf_str, 100); # Type mismatch + + # Edge cases. + local bf_edge0 = bloomfilter_basic_init(0.000000000001, 1); + local bf_edge1 = bloomfilter_basic_init(0.00000001, 100000000); + local bf_edge2 = bloomfilter_basic_init(0.9999999, 1); + local bf_edge3 = bloomfilter_basic_init(0.9999999, 100000000000); + + # Invalid parameters. + local bf_bug0 = bloomfilter_basic_init(-0.5, 42); + local bf_bug1 = bloomfilter_basic_init(1.1, 42); + + # Merging + local bf_cnt2 = bloomfilter_basic_init(0.1, 1000); + bloomfilter_add(bf_cnt2, 42); + bloomfilter_add(bf_cnt, 100); + local bf_merged = bloomfilter_merge(bf_cnt, bf_cnt2); + print bloomfilter_lookup(bf_merged, 42); + print bloomfilter_lookup(bf_merged, 84); + print bloomfilter_lookup(bf_merged, 100); + print bloomfilter_lookup(bf_merged, 168); + + #empty filter tests + local bf_empty = bloomfilter_basic_init(0.1, 1000); + local bf_empty_merged = bloomfilter_merge(bf_merged, bf_empty); + print bloomfilter_lookup(bf_empty_merged, 42); + } + +function test_counting_bloom_filter() + { + local bf = bloomfilter_counting_init(3, 32, 3); + bloomfilter_add(bf, "foo"); + print bloomfilter_lookup(bf, "foo"); # 1 + bloomfilter_add(bf, "foo"); + print bloomfilter_lookup(bf, "foo"); # 2 + bloomfilter_add(bf, "foo"); + print bloomfilter_lookup(bf, "foo"); # 3 + bloomfilter_add(bf, "foo"); + print bloomfilter_lookup(bf, "foo"); # still 3 + + + bloomfilter_add(bf, "bar"); + bloomfilter_add(bf, "bar"); + print bloomfilter_lookup(bf, "bar"); # 2 + print bloomfilter_lookup(bf, "foo"); # still 3 + + # Merging + local bf2 = bloomfilter_counting_init(3, 32, 3); + bloomfilter_add(bf2, "baz"); + bloomfilter_add(bf2, "baz"); + bloomfilter_add(bf2, "bar"); + local bf_merged = bloomfilter_merge(bf, bf2); + print bloomfilter_lookup(bf_merged, "foo"); + print bloomfilter_lookup(bf_merged, "bar"); + print bloomfilter_lookup(bf_merged, "baz"); + } + +event zeek_init() + { + test_basic_bloom_filter(); + test_counting_bloom_filter(); + } diff --git a/testing/btest/bifs/bro_version.bro b/testing/btest/bifs/bro_version.bro deleted file mode 100644 index 35975559a5..0000000000 --- a/testing/btest/bifs/bro_version.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT - -event bro_init() - { - local a = bro_version(); - if ( |a| == 0 ) - exit(1); - } diff --git a/testing/btest/bifs/bytestring_to_count.bro b/testing/btest/bifs/bytestring_to_count.bro deleted file mode 100644 index 
db50929cb7..0000000000 --- a/testing/btest/bifs/bytestring_to_count.bro +++ /dev/null @@ -1,58 +0,0 @@ - # -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - - -event bro_init() - { - - # unsupported byte lengths - print bytestring_to_count("", T); # 0 - print bytestring_to_count("", F); # 0 - print bytestring_to_count("\xAA\xBB\xCC", T); # 0 - print bytestring_to_count("\xAA\xBB\xCC", F); # 0 - print bytestring_to_count("\xAA\xBB\xCC\xDD\xEE", T); # 0 - print bytestring_to_count("\xAA\xBB\xCC\xDD\xEE", F); # 0 - - # 8 bit - print bytestring_to_count("\xff", T); # 255 - print bytestring_to_count("\xff", F); # 255 - print bytestring_to_count("\x00", T); # 0 - print bytestring_to_count("\x00", F); # 0 - - # 16 bit - print bytestring_to_count("\x03\xe8", F); # 1000 - print bytestring_to_count("\xe8\x03", T); # 1000 - print bytestring_to_count("\x30\x39", F); # 12345 - print bytestring_to_count("\x39\x30", T); # 12345 - print bytestring_to_count("\x00\x00", F); # 0 - print bytestring_to_count("\x00\x00", T); # 0 - - # 32 bit - print bytestring_to_count("\x00\x00\xff\xff", F); # 65535 - print bytestring_to_count("\xff\xff\x00\x00", T); # 65535 - print bytestring_to_count("\xff\xff\xff\xff", F); # 4294967295 - print bytestring_to_count("\xff\xff\xff\xff", T); # 4294967295 - print bytestring_to_count("\x11\x22\x33\x44", F); # 287454020 - print bytestring_to_count("\x11\x22\x33\x44", T); # 1144201745 - print bytestring_to_count("\x00\x00\x00\xff", F); # 255 - print bytestring_to_count("\xff\x00\x00\x00", T); # 255 - print bytestring_to_count("\xAA\xBB\xBB\xAA", F); # 2864429994 - print bytestring_to_count("\xAA\xBB\xBB\xAA", T); # 2864429994 - print bytestring_to_count("\x00\x00\x00\x00", F); # 0 - print bytestring_to_count("\x00\x00\x00\x00", T); # 0 - - # 64 bit - print bytestring_to_count("\xff\xff\xff\xff\xff\xff\xff\xff", F); # 18446744073709551615 - print bytestring_to_count("\xff\xff\xff\xff\xff\xff\xff\xff", T); # 18446744073709551615 - print bytestring_to_count("\xff\xff\xff\x00\x00\xff\xff\xff", F); # 18446742974214701055 - print bytestring_to_count("\xff\xff\xff\x00\x00\xff\xff\xff", T); # 18446742974214701055 - print bytestring_to_count("\x00\x00\x00\x00\x00\x00\xff\xff", F); # 65535 - print bytestring_to_count("\xff\xff\x00\x00\x00\x00\x00\x00", T); # 65535 - print bytestring_to_count("\x00\x00\x00\x00\x00\x00\x00\x00", T); # 0 - print bytestring_to_count("\x00\x00\x00\x00\x00\x00\x00\x00", F); # 0 - - # test the default endianness parameter - print bytestring_to_count("\x00\x00\x00\x00\x00\x00\xff\xff"); # 65535 - - } diff --git a/testing/btest/bifs/bytestring_to_count.zeek b/testing/btest/bifs/bytestring_to_count.zeek new file mode 100644 index 0000000000..2368533432 --- /dev/null +++ b/testing/btest/bifs/bytestring_to_count.zeek @@ -0,0 +1,58 @@ + # +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + + +event zeek_init() + { + + # unsupported byte lengths + print bytestring_to_count("", T); # 0 + print bytestring_to_count("", F); # 0 + print bytestring_to_count("\xAA\xBB\xCC", T); # 0 + print bytestring_to_count("\xAA\xBB\xCC", F); # 0 + print bytestring_to_count("\xAA\xBB\xCC\xDD\xEE", T); # 0 + print bytestring_to_count("\xAA\xBB\xCC\xDD\xEE", F); # 0 + + # 8 bit + print bytestring_to_count("\xff", T); # 255 + print bytestring_to_count("\xff", F); # 255 + print bytestring_to_count("\x00", T); # 0 + print bytestring_to_count("\x00", F); # 0 + + # 16 bit + print bytestring_to_count("\x03\xe8", F); # 1000 + print bytestring_to_count("\xe8\x03", T); # 
1000 + print bytestring_to_count("\x30\x39", F); # 12345 + print bytestring_to_count("\x39\x30", T); # 12345 + print bytestring_to_count("\x00\x00", F); # 0 + print bytestring_to_count("\x00\x00", T); # 0 + + # 32 bit + print bytestring_to_count("\x00\x00\xff\xff", F); # 65535 + print bytestring_to_count("\xff\xff\x00\x00", T); # 65535 + print bytestring_to_count("\xff\xff\xff\xff", F); # 4294967295 + print bytestring_to_count("\xff\xff\xff\xff", T); # 4294967295 + print bytestring_to_count("\x11\x22\x33\x44", F); # 287454020 + print bytestring_to_count("\x11\x22\x33\x44", T); # 1144201745 + print bytestring_to_count("\x00\x00\x00\xff", F); # 255 + print bytestring_to_count("\xff\x00\x00\x00", T); # 255 + print bytestring_to_count("\xAA\xBB\xBB\xAA", F); # 2864429994 + print bytestring_to_count("\xAA\xBB\xBB\xAA", T); # 2864429994 + print bytestring_to_count("\x00\x00\x00\x00", F); # 0 + print bytestring_to_count("\x00\x00\x00\x00", T); # 0 + + # 64 bit + print bytestring_to_count("\xff\xff\xff\xff\xff\xff\xff\xff", F); # 18446744073709551615 + print bytestring_to_count("\xff\xff\xff\xff\xff\xff\xff\xff", T); # 18446744073709551615 + print bytestring_to_count("\xff\xff\xff\x00\x00\xff\xff\xff", F); # 18446742974214701055 + print bytestring_to_count("\xff\xff\xff\x00\x00\xff\xff\xff", T); # 18446742974214701055 + print bytestring_to_count("\x00\x00\x00\x00\x00\x00\xff\xff", F); # 65535 + print bytestring_to_count("\xff\xff\x00\x00\x00\x00\x00\x00", T); # 65535 + print bytestring_to_count("\x00\x00\x00\x00\x00\x00\x00\x00", T); # 0 + print bytestring_to_count("\x00\x00\x00\x00\x00\x00\x00\x00", F); # 0 + + # test the default endianness parameter + print bytestring_to_count("\x00\x00\x00\x00\x00\x00\xff\xff"); # 65535 + + } diff --git a/testing/btest/bifs/bytestring_to_double.bro b/testing/btest/bifs/bytestring_to_double.bro deleted file mode 100644 index 78820b207c..0000000000 --- a/testing/btest/bifs/bytestring_to_double.bro +++ /dev/null @@ -1,26 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local s1 = "\x43\x26\x4f\xa0\x71\x30\x80\x00"; # 3.14e15 - local s2 = "\xc3\x26\x4f\xa0\x71\x30\x80\x00"; #-3.14e15 - local s3 = "\x00\x1c\xc3\x59\xe0\x67\xa3\x49"; # 4e-308 - local s4 = "\x00\x00\x00\x00\x00\x00\x00\x00"; # 0.0 - local s5 = "\x80\x00\x00\x00\x00\x00\x00\x00"; #-0.0 - local s6 = "\x7f\xf0\x00\x00\x00\x00\x00\x00"; # Inf - local s7 = "\xff\xf0\x00\x00\x00\x00\x00\x00"; #-Inf - local s8 = "\x7f\xf8\x00\x00\x00\x00\x00\x00"; # NaN - local s9 = "\x00\x00\x00\x00\x00\x00\x00\x01"; # subnormal - - print bytestring_to_double(s1); - print bytestring_to_double(s2); - print fmt("%e", bytestring_to_double(s3)); - print fmt("%e", bytestring_to_double(s4)); - print fmt("%e", bytestring_to_double(s5)); - print bytestring_to_double(s6); - print bytestring_to_double(s7); - print bytestring_to_double(s8); - print fmt("%.2e", bytestring_to_double(s9)); - } diff --git a/testing/btest/bifs/bytestring_to_double.zeek b/testing/btest/bifs/bytestring_to_double.zeek new file mode 100644 index 0000000000..ef6890bd61 --- /dev/null +++ b/testing/btest/bifs/bytestring_to_double.zeek @@ -0,0 +1,26 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local s1 = "\x43\x26\x4f\xa0\x71\x30\x80\x00"; # 3.14e15 + local s2 = "\xc3\x26\x4f\xa0\x71\x30\x80\x00"; #-3.14e15 + local s3 = "\x00\x1c\xc3\x59\xe0\x67\xa3\x49"; # 4e-308 + local s4 = "\x00\x00\x00\x00\x00\x00\x00\x00"; # 0.0 + local s5 = 
"\x80\x00\x00\x00\x00\x00\x00\x00"; #-0.0 + local s6 = "\x7f\xf0\x00\x00\x00\x00\x00\x00"; # Inf + local s7 = "\xff\xf0\x00\x00\x00\x00\x00\x00"; #-Inf + local s8 = "\x7f\xf8\x00\x00\x00\x00\x00\x00"; # NaN + local s9 = "\x00\x00\x00\x00\x00\x00\x00\x01"; # subnormal + + print bytestring_to_double(s1); + print bytestring_to_double(s2); + print fmt("%e", bytestring_to_double(s3)); + print fmt("%e", bytestring_to_double(s4)); + print fmt("%e", bytestring_to_double(s5)); + print bytestring_to_double(s6); + print bytestring_to_double(s7); + print bytestring_to_double(s8); + print fmt("%.2e", bytestring_to_double(s9)); + } diff --git a/testing/btest/bifs/bytestring_to_hexstr.bro b/testing/btest/bifs/bytestring_to_hexstr.bro deleted file mode 100644 index 4087047f40..0000000000 --- a/testing/btest/bifs/bytestring_to_hexstr.bro +++ /dev/null @@ -1,10 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - print bytestring_to_hexstr("04"); - print bytestring_to_hexstr(""); - print bytestring_to_hexstr("\0"); - } diff --git a/testing/btest/bifs/bytestring_to_hexstr.zeek b/testing/btest/bifs/bytestring_to_hexstr.zeek new file mode 100644 index 0000000000..ec0e23005e --- /dev/null +++ b/testing/btest/bifs/bytestring_to_hexstr.zeek @@ -0,0 +1,10 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + print bytestring_to_hexstr("04"); + print bytestring_to_hexstr(""); + print bytestring_to_hexstr("\0"); + } diff --git a/testing/btest/bifs/capture_state_updates.bro b/testing/btest/bifs/capture_state_updates.bro deleted file mode 100644 index 6a44e0f86f..0000000000 --- a/testing/btest/bifs/capture_state_updates.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out -# @TEST-EXEC: test -f testfile - -event bro_init() - { - print capture_state_updates("testfile"); - } diff --git a/testing/btest/bifs/cat.bro b/testing/btest/bifs/cat.bro deleted file mode 100644 index e923d5d066..0000000000 --- a/testing/btest/bifs/cat.bro +++ /dev/null @@ -1,22 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "foo"; - local b = 3; - local c = T; - - print cat(a, b, c); - - print cat(); - - print cat("", 3, T); - - print cat_sep("|", "", a, b, c); - - print cat_sep("|", ""); - - print cat_sep("|", "", "", b, c); - } diff --git a/testing/btest/bifs/cat.zeek b/testing/btest/bifs/cat.zeek new file mode 100644 index 0000000000..5540ebf106 --- /dev/null +++ b/testing/btest/bifs/cat.zeek @@ -0,0 +1,22 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = "foo"; + local b = 3; + local c = T; + + print cat(a, b, c); + + print cat(); + + print cat("", 3, T); + + print cat_sep("|", "", a, b, c); + + print cat_sep("|", ""); + + print cat_sep("|", "", "", b, c); + } diff --git a/testing/btest/bifs/cat_string_array.bro b/testing/btest/bifs/cat_string_array.bro deleted file mode 100644 index e799f4b282..0000000000 --- a/testing/btest/bifs/cat_string_array.bro +++ /dev/null @@ -1,14 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a: string_array = { - [0] = "this", [1] = "is", [2] = "a", [3] = "test" - }; - - print cat_string_array(a); - print cat_string_array_n(a, 0, |a|-1); - print cat_string_array_n(a, 1, 2); - } diff --git a/testing/btest/bifs/check_subnet.bro b/testing/btest/bifs/check_subnet.bro deleted file mode 100644 index 
b725cae73c..0000000000 --- a/testing/btest/bifs/check_subnet.bro +++ /dev/null @@ -1,39 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -global testt: set[subnet] = { - 10.0.0.0/8, - 10.2.0.0/16, - 10.2.0.2/31, - 10.1.0.0/16, - 10.3.0.0/16, - 5.0.0.0/8, - 5.5.0.0/25, - 5.2.0.0/32, - 7.2.0.0/32, - [2607:f8b0:4008:807::200e]/64, - [2607:f8b0:4007:807::200e]/64, - [2607:f8b0:4007:807::200e]/128 -}; - -function check_member(s: subnet) - { - if ( s in testt ) - print fmt("in says: %s is member", s); - else - print fmt("in says: %s is no member", s); - - if ( check_subnet(s, testt) ) - print fmt("check_subnet says: %s is member", s); - else - print fmt("check_subnet says: %s is no member", s); - - } - -event bro_init() - { - check_member(10.2.0.2/32); - check_member(10.2.0.2/31); - check_member(10.6.0.0/9); - check_member(10.2.0.0/8); - } diff --git a/testing/btest/bifs/check_subnet.zeek b/testing/btest/bifs/check_subnet.zeek new file mode 100644 index 0000000000..5dfe2c1f72 --- /dev/null +++ b/testing/btest/bifs/check_subnet.zeek @@ -0,0 +1,39 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +global testt: set[subnet] = { + 10.0.0.0/8, + 10.2.0.0/16, + 10.2.0.2/31, + 10.1.0.0/16, + 10.3.0.0/16, + 5.0.0.0/8, + 5.5.0.0/25, + 5.2.0.0/32, + 7.2.0.0/32, + [2607:f8b0:4008:807::200e]/64, + [2607:f8b0:4007:807::200e]/64, + [2607:f8b0:4007:807::200e]/128 +}; + +function check_member(s: subnet) + { + if ( s in testt ) + print fmt("in says: %s is member", s); + else + print fmt("in says: %s is no member", s); + + if ( check_subnet(s, testt) ) + print fmt("check_subnet says: %s is member", s); + else + print fmt("check_subnet says: %s is no member", s); + + } + +event zeek_init() + { + check_member(10.2.0.2/32); + check_member(10.2.0.2/31); + check_member(10.6.0.0/9); + check_member(10.2.0.0/8); + } diff --git a/testing/btest/bifs/checkpoint_state.bro b/testing/btest/bifs/checkpoint_state.bro deleted file mode 100644 index 7a46516ba0..0000000000 --- a/testing/btest/bifs/checkpoint_state.bro +++ /dev/null @@ -1,10 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: test -f .state/state.bst - -event bro_init() - { - local a = checkpoint_state(); - if ( a != T ) - exit(1); - } diff --git a/testing/btest/bifs/clear_table.bro b/testing/btest/bifs/clear_table.bro deleted file mode 100644 index 9485eba1f5..0000000000 --- a/testing/btest/bifs/clear_table.bro +++ /dev/null @@ -1,14 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT > out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local mytable: table[string] of string = { ["key1"] = "val1" }; - - print |mytable|; - - clear_table(mytable); - - print |mytable|; - } diff --git a/testing/btest/bifs/clear_table.zeek b/testing/btest/bifs/clear_table.zeek new file mode 100644 index 0000000000..08c91e9908 --- /dev/null +++ b/testing/btest/bifs/clear_table.zeek @@ -0,0 +1,14 @@ +# +# @TEST-EXEC: zeek -b %INPUT > out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local mytable: table[string] of string = { ["key1"] = "val1" }; + + print |mytable|; + + clear_table(mytable); + + print |mytable|; + } diff --git a/testing/btest/bifs/convert_for_pattern.bro b/testing/btest/bifs/convert_for_pattern.bro deleted file mode 100644 index b99b010f97..0000000000 --- a/testing/btest/bifs/convert_for_pattern.bro +++ /dev/null @@ -1,10 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - print convert_for_pattern("foo"); - print convert_for_pattern(""); - print 
convert_for_pattern("b[a-z]+"); - } diff --git a/testing/btest/bifs/convert_for_pattern.zeek b/testing/btest/bifs/convert_for_pattern.zeek new file mode 100644 index 0000000000..0962abfe31 --- /dev/null +++ b/testing/btest/bifs/convert_for_pattern.zeek @@ -0,0 +1,10 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + print convert_for_pattern("foo"); + print convert_for_pattern(""); + print convert_for_pattern("b[a-z]+"); + } diff --git a/testing/btest/bifs/count_to_addr.bro b/testing/btest/bifs/count_to_addr.bro deleted file mode 100644 index 993a701bc8..0000000000 --- a/testing/btest/bifs/count_to_addr.bro +++ /dev/null @@ -1,20 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "1"; - print count_to_v4_addr(to_count(a)); - - a = "806716794"; - print count_to_v4_addr(to_count(a)); - - a = "4294967295"; - print count_to_v4_addr(to_count(a)); - - # This *should* fail and return 0.0.0.0 since it's 255.255.255.255 + 1 - # Note: How do I check for runtime errors? - a = "4294967296"; - print count_to_v4_addr(to_count(a)); - } diff --git a/testing/btest/bifs/count_to_addr.zeek b/testing/btest/bifs/count_to_addr.zeek new file mode 100644 index 0000000000..8229f9a4a9 --- /dev/null +++ b/testing/btest/bifs/count_to_addr.zeek @@ -0,0 +1,20 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = "1"; + print count_to_v4_addr(to_count(a)); + + a = "806716794"; + print count_to_v4_addr(to_count(a)); + + a = "4294967295"; + print count_to_v4_addr(to_count(a)); + + # This *should* fail and return 0.0.0.0 since it's 255.255.255.255 + 1 + # Note: How do I check for runtime errors? + a = "4294967296"; + print count_to_v4_addr(to_count(a)); + } diff --git a/testing/btest/bifs/create_file.bro b/testing/btest/bifs/create_file.bro deleted file mode 100644 index af2cfb4979..0000000000 --- a/testing/btest/bifs/create_file.bro +++ /dev/null @@ -1,65 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out -# @TEST-EXEC: btest-diff testfile -# @TEST-EXEC: btest-diff testfile2 -# @TEST-EXEC: test -f testdir/testfile4 - -event bro_init() - { - # Test that creating a file works as expected - local a = open("testfile"); - print active_file(a); - print get_file_name(a); - write_file(a, "This is a test\n"); - close(a); - - print active_file(a); - print file_size("testfile"); - - # Test that "open_for_append" doesn't overwrite an existing file - a = open_for_append("testfile"); - print active_file(a); - write_file(a, "another test\n"); - close(a); - - print active_file(a); - print file_size("testfile"); - - # This should fail - print file_size("doesnotexist"); - - # Test that "open" overwrites existing file - a = open("testfile2"); - write_file(a, "this will be overwritten\n"); - close(a); - a = open("testfile2"); - write_file(a, "new text\n"); - close(a); - - # Test that set_buf and flush_all work correctly - a = open("testfile3"); - set_buf(a, F); - write_file(a, "This is a test\n"); - print file_size("testfile3"); - close(a); - a = open("testfile3"); - set_buf(a, T); - write_file(a, "This is a test\n"); - print file_size("testfile3"); - print flush_all(); - print file_size("testfile3"); - close(a); - - # Create a new directory - print mkdir("testdir"); - - # Create a file in the new directory - a = open("testdir/testfile4"); - print get_file_name(a); - write_file(a, "This is a test\n"); - close(a); - - # This should fail - print 
mkdir("/thisdoesnotexist/dir"); - } diff --git a/testing/btest/bifs/create_file.zeek b/testing/btest/bifs/create_file.zeek new file mode 100644 index 0000000000..0336f9ab33 --- /dev/null +++ b/testing/btest/bifs/create_file.zeek @@ -0,0 +1,65 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff testfile +# @TEST-EXEC: btest-diff testfile2 +# @TEST-EXEC: test -f testdir/testfile4 + +event zeek_init() + { + # Test that creating a file works as expected + local a = open("testfile"); + print active_file(a); + print get_file_name(a); + write_file(a, "This is a test\n"); + close(a); + + print active_file(a); + print file_size("testfile"); + + # Test that "open_for_append" doesn't overwrite an existing file + a = open_for_append("testfile"); + print active_file(a); + write_file(a, "another test\n"); + close(a); + + print active_file(a); + print file_size("testfile"); + + # This should fail + print file_size("doesnotexist"); + + # Test that "open" overwrites existing file + a = open("testfile2"); + write_file(a, "this will be overwritten\n"); + close(a); + a = open("testfile2"); + write_file(a, "new text\n"); + close(a); + + # Test that set_buf and flush_all work correctly + a = open("testfile3"); + set_buf(a, F); + write_file(a, "This is a test\n"); + print file_size("testfile3"); + close(a); + a = open("testfile3"); + set_buf(a, T); + write_file(a, "This is a test\n"); + print file_size("testfile3"); + print flush_all(); + print file_size("testfile3"); + close(a); + + # Create a new directory + print mkdir("testdir"); + + # Create a file in the new directory + a = open("testdir/testfile4"); + print get_file_name(a); + write_file(a, "This is a test\n"); + close(a); + + # This should fail + print mkdir("/thisdoesnotexist/dir"); + } diff --git a/testing/btest/bifs/current_analyzer.bro b/testing/btest/bifs/current_analyzer.bro deleted file mode 100644 index e221d7aed0..0000000000 --- a/testing/btest/bifs/current_analyzer.bro +++ /dev/null @@ -1,11 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT - -event bro_init() - { - local a = current_analyzer(); - if ( a != 0 ) - exit(1); - - # TODO: add a test for non-zero return value - } diff --git a/testing/btest/bifs/current_analyzer.zeek b/testing/btest/bifs/current_analyzer.zeek new file mode 100644 index 0000000000..14acc0d55c --- /dev/null +++ b/testing/btest/bifs/current_analyzer.zeek @@ -0,0 +1,11 @@ +# +# @TEST-EXEC: zeek -b %INPUT + +event zeek_init() + { + local a = current_analyzer(); + if ( a != 0 ) + exit(1); + + # TODO: add a test for non-zero return value + } diff --git a/testing/btest/bifs/current_time.bro b/testing/btest/bifs/current_time.bro deleted file mode 100644 index 9d4899aa06..0000000000 --- a/testing/btest/bifs/current_time.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT - -event bro_init() - { - local a = current_time(); - if ( a <= double_to_time(0) ) - exit(1); - } diff --git a/testing/btest/bifs/current_time.zeek b/testing/btest/bifs/current_time.zeek new file mode 100644 index 0000000000..c29ae969f8 --- /dev/null +++ b/testing/btest/bifs/current_time.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -b %INPUT + +event zeek_init() + { + local a = current_time(); + if ( a <= double_to_time(0) ) + exit(1); + } diff --git a/testing/btest/bifs/decode_base64.bro b/testing/btest/bifs/decode_base64.bro deleted file mode 100644 index 2d552a2523..0000000000 --- a/testing/btest/bifs/decode_base64.bro +++ /dev/null @@ -1,22 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff 
out - -global default_alphabet: string = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; - -global my_alphabet: string = "!#$%&/(),-.:;<>@[]^ `_{|}~abcdefghijklmnopqrstuvwxyz0123456789+?"; - -print decode_base64("YnJv"); -print decode_base64("YnJv", default_alphabet); -print decode_base64("YnJv", ""); # should use default alpabet -print decode_base64("}n-v", my_alphabet); -print decode_base64_custom("YnJv", default_alphabet); -print decode_base64_custom("YnJv", ""); # should use default alpabet -print decode_base64_custom("}n-v", my_alphabet); - -print decode_base64("YnJv"); -print decode_base64("YnJv", default_alphabet); -print decode_base64("YnJv", ""); # should use default alpabet -print decode_base64("}n-v", my_alphabet); -print decode_base64_custom("YnJv", default_alphabet); -print decode_base64_custom("YnJv", ""); # should use default alpabet -print decode_base64_custom("}n-v", my_alphabet); diff --git a/testing/btest/bifs/decode_base64.zeek b/testing/btest/bifs/decode_base64.zeek new file mode 100644 index 0000000000..f88203a89a --- /dev/null +++ b/testing/btest/bifs/decode_base64.zeek @@ -0,0 +1,16 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +global default_alphabet: string = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +global my_alphabet: string = "!#$%&/(),-.:;<>@[]^ `_{|}~abcdefghijklmnopqrstuvwxyz0123456789+?"; + +print decode_base64("YnJv"); +print decode_base64("YnJv", default_alphabet); +print decode_base64("YnJv", ""); # should use default alpabet +print decode_base64("}n-v", my_alphabet); + +print decode_base64("YnJv"); +print decode_base64("YnJv", default_alphabet); +print decode_base64("YnJv", ""); # should use default alpabet +print decode_base64("}n-v", my_alphabet); diff --git a/testing/btest/bifs/decode_base64_conn.bro b/testing/btest/bifs/decode_base64_conn.bro deleted file mode 100644 index e515ed68ac..0000000000 --- a/testing/btest/bifs/decode_base64_conn.bro +++ /dev/null @@ -1,8 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/smtp.trace %INPUT >out -# @TEST-EXEC: btest-diff weird.log - -event connection_established(c: connection) - { - # This should be logged into weird. - print decode_base64_conn(c$id, "kaputt"); - } diff --git a/testing/btest/bifs/decode_base64_conn.zeek b/testing/btest/bifs/decode_base64_conn.zeek new file mode 100644 index 0000000000..57d9af69c9 --- /dev/null +++ b/testing/btest/bifs/decode_base64_conn.zeek @@ -0,0 +1,8 @@ +# @TEST-EXEC: zeek -r $TRACES/smtp.trace %INPUT >out +# @TEST-EXEC: btest-diff weird.log + +event connection_established(c: connection) + { + # This should be logged into weird. + print decode_base64_conn(c$id, "kaputt"); + } diff --git a/testing/btest/bifs/directory_operations.bro b/testing/btest/bifs/directory_operations.bro deleted file mode 100644 index 9db34511b2..0000000000 --- a/testing/btest/bifs/directory_operations.bro +++ /dev/null @@ -1,24 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - # Test succesful operations... 
- print mkdir("testdir"); - print mkdir("testdir"); - local a = open("testdir/testfile"); - close(a); - print rename("testdir/testfile", "testdir/testfile2"); - print rename("testdir", "testdir2"); - print unlink("testdir2/testfile2"); - print rmdir("testdir2"); - - - print unlink("nonexisting"); - print rename("a", "b"); - print rmdir("nonexisting"); - a = open("testfile"); - close(a); - print mkdir("testfile"); - } diff --git a/testing/btest/bifs/directory_operations.zeek b/testing/btest/bifs/directory_operations.zeek new file mode 100644 index 0000000000..e5282eb47b --- /dev/null +++ b/testing/btest/bifs/directory_operations.zeek @@ -0,0 +1,24 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + # Test succesful operations... + print mkdir("testdir"); + print mkdir("testdir"); + local a = open("testdir/testfile"); + close(a); + print rename("testdir/testfile", "testdir/testfile2"); + print rename("testdir", "testdir2"); + print unlink("testdir2/testfile2"); + print rmdir("testdir2"); + + + print unlink("nonexisting"); + print rename("a", "b"); + print rmdir("nonexisting"); + a = open("testfile"); + close(a); + print mkdir("testfile"); + } diff --git a/testing/btest/bifs/dump_current_packet.bro b/testing/btest/bifs/dump_current_packet.bro deleted file mode 100644 index e61c9585cd..0000000000 --- a/testing/btest/bifs/dump_current_packet.bro +++ /dev/null @@ -1,20 +0,0 @@ -# @TEST-REQUIRES: which hexdump -# @TEST-EXEC: bro -b -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: hexdump -C 1.pcap >1.hex -# @TEST-EXEC: hexdump -C 2.pcap >2.hex -# @TEST-EXEC: btest-diff 1.hex -# @TEST-EXEC: btest-diff 2.hex - -# Note that the hex output will contain global pcap header information, -# including Bro's snaplen setting (so maybe check that out in the case -# you are reading this message due to this test failing in the future). - -global i: count = 0; - -event new_packet(c: connection, p: pkt_hdr) - { - ++i; - dump_current_packet(cat(i, ".pcap")); - if ( i >= 3 ) - terminate(); - } diff --git a/testing/btest/bifs/dump_current_packet.zeek b/testing/btest/bifs/dump_current_packet.zeek new file mode 100644 index 0000000000..ce177a1daf --- /dev/null +++ b/testing/btest/bifs/dump_current_packet.zeek @@ -0,0 +1,20 @@ +# @TEST-REQUIRES: which hexdump +# @TEST-EXEC: zeek -b -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: hexdump -C 1.pcap >1.hex +# @TEST-EXEC: hexdump -C 2.pcap >2.hex +# @TEST-EXEC: btest-diff 1.hex +# @TEST-EXEC: btest-diff 2.hex + +# Note that the hex output will contain global pcap header information, +# including Zeek's snaplen setting (so maybe check that out in the case +# you are reading this message due to this test failing in the future). 
+ +global i: count = 0; + +event new_packet(c: connection, p: pkt_hdr) + { + ++i; + dump_current_packet(cat(i, ".pcap")); + if ( i >= 3 ) + terminate(); + } diff --git a/testing/btest/bifs/edit.bro b/testing/btest/bifs/edit.bro deleted file mode 100644 index 346c0bdbf7..0000000000 --- a/testing/btest/bifs/edit.bro +++ /dev/null @@ -1,10 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "hello there"; - - print edit(a, "e"); - } diff --git a/testing/btest/bifs/edit.zeek b/testing/btest/bifs/edit.zeek new file mode 100644 index 0000000000..c33289f0e5 --- /dev/null +++ b/testing/btest/bifs/edit.zeek @@ -0,0 +1,10 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = "hello there"; + + print edit(a, "e"); + } diff --git a/testing/btest/bifs/enable_raw_output.test b/testing/btest/bifs/enable_raw_output.test index ebaff36c8f..c46b6e317f 100644 --- a/testing/btest/bifs/enable_raw_output.test +++ b/testing/btest/bifs/enable_raw_output.test @@ -1,12 +1,12 @@ # Files which enable raw output via the BiF shouldn't interpret NUL characters # in strings that are `print`ed to it. -# @TEST-EXEC: bro -b %INPUT +# @TEST-EXEC: zeek -b %INPUT # @TEST-EXEC: tr '\000' 'X' output # @TEST-EXEC: btest-diff output # @TEST-EXEC: cmp myfile hookfile -event bro_init() +event zeek_init() { local myfile: file; myfile = open("myfile"); diff --git a/testing/btest/bifs/encode_base64.bro b/testing/btest/bifs/encode_base64.bro deleted file mode 100644 index bbad715ecc..0000000000 --- a/testing/btest/bifs/encode_base64.bro +++ /dev/null @@ -1,19 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -global default_alphabet: string = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; - -global my_alphabet: string = "!#$%&/(),-.:;<>@[]^ `_{|}~abcdefghijklmnopqrstuvwxyz0123456789+?"; - -print encode_base64("bro"); -print encode_base64("bro", default_alphabet); -print encode_base64("bro", ""); # should use default alpabet -print encode_base64("bro", my_alphabet); - -print encode_base64_custom("bro", default_alphabet); -print encode_base64_custom("bro", ""); # should use default alpabet -print encode_base64_custom("bro", my_alphabet); - -print encode_base64("padding"); -print encode_base64("padding1"); -print encode_base64("padding12"); diff --git a/testing/btest/bifs/encode_base64.zeek b/testing/btest/bifs/encode_base64.zeek new file mode 100644 index 0000000000..351a8e3b0c --- /dev/null +++ b/testing/btest/bifs/encode_base64.zeek @@ -0,0 +1,15 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +global default_alphabet: string = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +global my_alphabet: string = "!#$%&/(),-.:;<>@[]^ `_{|}~abcdefghijklmnopqrstuvwxyz0123456789+?"; + +print encode_base64("bro"); +print encode_base64("bro", default_alphabet); +print encode_base64("bro", ""); # should use default alpabet +print encode_base64("bro", my_alphabet); + +print encode_base64("padding"); +print encode_base64("padding1"); +print encode_base64("padding12"); diff --git a/testing/btest/bifs/entropy_test.bro b/testing/btest/bifs/entropy_test.bro deleted file mode 100644 index 2a2dd422d1..0000000000 --- a/testing/btest/bifs/entropy_test.bro +++ /dev/null @@ -1,18 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "dh3Hie02uh^s#Sdf9L3frd243h$d78r2G4cM6*Q05d(7rh46f!0|4-f"; - local handle = 
entropy_test_init(); - if ( ! entropy_test_add(handle, a) ) - exit(1); - print entropy_test_finish(handle); - - local b = "0011000aaabbbbcccc000011111000000000aaaabbbbcccc0000000"; - handle = entropy_test_init(); - if ( ! entropy_test_add(handle, b) ) - exit(1); - print entropy_test_finish(handle); - } diff --git a/testing/btest/bifs/entropy_test.zeek b/testing/btest/bifs/entropy_test.zeek new file mode 100644 index 0000000000..fe1d80cc21 --- /dev/null +++ b/testing/btest/bifs/entropy_test.zeek @@ -0,0 +1,18 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = "dh3Hie02uh^s#Sdf9L3frd243h$d78r2G4cM6*Q05d(7rh46f!0|4-f"; + local handle = entropy_test_init(); + if ( ! entropy_test_add(handle, a) ) + exit(1); + print entropy_test_finish(handle); + + local b = "0011000aaabbbbcccc000011111000000000aaaabbbbcccc0000000"; + handle = entropy_test_init(); + if ( ! entropy_test_add(handle, b) ) + exit(1); + print entropy_test_finish(handle); + } diff --git a/testing/btest/bifs/enum_to_int.bro b/testing/btest/bifs/enum_to_int.bro deleted file mode 100644 index 3d577d2920..0000000000 --- a/testing/btest/bifs/enum_to_int.bro +++ /dev/null @@ -1,29 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -export { - type test_enum: enum { - A, - B, - C, - }; - - type test_enum_with_val: enum { - AV = 0xA, - BV = 0xB, - CV = 0xC, - }; -} - -event bro_init() - { - - - print A, enum_to_int(A); - print B, enum_to_int(B); - print C, enum_to_int(C); - print AV, enum_to_int(AV); - print BV, enum_to_int(BV); - print CV, enum_to_int(CV); - } diff --git a/testing/btest/bifs/enum_to_int.zeek b/testing/btest/bifs/enum_to_int.zeek new file mode 100644 index 0000000000..17fd1ff8a9 --- /dev/null +++ b/testing/btest/bifs/enum_to_int.zeek @@ -0,0 +1,29 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +export { + type test_enum: enum { + A, + B, + C, + }; + + type test_enum_with_val: enum { + AV = 0xA, + BV = 0xB, + CV = 0xC, + }; +} + +event zeek_init() + { + + + print A, enum_to_int(A); + print B, enum_to_int(B); + print C, enum_to_int(C); + print AV, enum_to_int(AV); + print BV, enum_to_int(BV); + print CV, enum_to_int(CV); + } diff --git a/testing/btest/bifs/escape_string.bro b/testing/btest/bifs/escape_string.bro deleted file mode 100644 index fd796497be..0000000000 --- a/testing/btest/bifs/escape_string.bro +++ /dev/null @@ -1,27 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "Test \0string"; - - print |a|; - print a; - - local b = clean(a); - print |b|; - print b; - - local c = to_string_literal(a); - print |c|; - print c; - - local d = escape_string(a); - print |d|; - print d; - - local e = string_to_ascii_hex(a); - print |e|; - print e; - } diff --git a/testing/btest/bifs/escape_string.zeek b/testing/btest/bifs/escape_string.zeek new file mode 100644 index 0000000000..93c593d833 --- /dev/null +++ b/testing/btest/bifs/escape_string.zeek @@ -0,0 +1,27 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = "Test \0string"; + + print |a|; + print a; + + local b = clean(a); + print |b|; + print b; + + local c = to_string_literal(a); + print |c|; + print c; + + local d = escape_string(a); + print |d|; + print d; + + local e = string_to_ascii_hex(a); + print |e|; + print e; + } diff --git a/testing/btest/bifs/exit.bro b/testing/btest/bifs/exit.bro deleted file mode 100644 index b942a5e81c..0000000000 --- 
a/testing/btest/bifs/exit.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out || test $? -eq 7 -# @TEST-EXEC: btest-diff out - -event bro_init() - { - print "hello"; - exit(7); - } diff --git a/testing/btest/bifs/exit.zeek b/testing/btest/bifs/exit.zeek new file mode 100644 index 0000000000..e9a27f6379 --- /dev/null +++ b/testing/btest/bifs/exit.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out || test $? -eq 7 +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + print "hello"; + exit(7); + } diff --git a/testing/btest/bifs/file_mode.bro b/testing/btest/bifs/file_mode.bro deleted file mode 100644 index 62bee05c6c..0000000000 --- a/testing/btest/bifs/file_mode.bro +++ /dev/null @@ -1,36 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = 420; # octal: 0644 - print file_mode(a); - - a = 511; # octal: 0777 - print file_mode(a); - - a = 1023; # octal: 01777 - print file_mode(a); - - a = 1000; # octal: 01750 - print file_mode(a); - - a = 2541; # octal: 04755 - print file_mode(a); - - a = 2304; # octal: 04400 - print file_mode(a); - - a = 1517; # octal: 02755 - print file_mode(a); - - a = 1312; # octal: 02440 - print file_mode(a); - - a = 111; # octal: 0157 - print file_mode(a); - - a = 0; - print file_mode(a); - } diff --git a/testing/btest/bifs/file_mode.zeek b/testing/btest/bifs/file_mode.zeek new file mode 100644 index 0000000000..8fe39b6404 --- /dev/null +++ b/testing/btest/bifs/file_mode.zeek @@ -0,0 +1,36 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = 420; # octal: 0644 + print file_mode(a); + + a = 511; # octal: 0777 + print file_mode(a); + + a = 1023; # octal: 01777 + print file_mode(a); + + a = 1000; # octal: 01750 + print file_mode(a); + + a = 2541; # octal: 04755 + print file_mode(a); + + a = 2304; # octal: 04400 + print file_mode(a); + + a = 1517; # octal: 02755 + print file_mode(a); + + a = 1312; # octal: 02440 + print file_mode(a); + + a = 111; # octal: 0157 + print file_mode(a); + + a = 0; + print file_mode(a); + } diff --git a/testing/btest/bifs/filter_subnet_table.bro b/testing/btest/bifs/filter_subnet_table.bro deleted file mode 100644 index 7659096a71..0000000000 --- a/testing/btest/bifs/filter_subnet_table.bro +++ /dev/null @@ -1,49 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -global testa: set[subnet] = { - 10.0.0.0/8, - 10.2.0.0/16, - 10.2.0.2/31, - 10.1.0.0/16, - 10.3.0.0/16, - 5.0.0.0/8, - 5.5.0.0/25, - 5.2.0.0/32, - 7.2.0.0/32, - [2607:f8b0:4008:807::200e]/64, - [2607:f8b0:4007:807::200e]/64, - [2607:f8b0:4007:807::200e]/128 -}; - -global testb: table[subnet] of string = { - [10.0.0.0/8] = "a", - [10.2.0.0/16] = "b", - [10.2.0.2/31] = "c", - [10.1.0.0/16] = "d", - [10.3.0.0/16] = "e", - [5.0.0.0/8] = "f", - [5.5.0.0/25] = "g", - [5.2.0.0/32] = "h", - [7.2.0.0/32] = "i", - [[2607:f8b0:4008:807::200e]/64] = "j", - [[2607:f8b0:4007:807::200e]/64] = "k", - [[2607:f8b0:4007:807::200e]/128] = "l" -}; - - -event bro_init() - { - local c = filter_subnet_table(10.2.0.2/32, testa); - print c; - c = filter_subnet_table(10.2.0.2/32, testb); - print c; - c = filter_subnet_table(10.3.0.2/32, testb); - print c; - c = filter_subnet_table(1.0.0.0/8, testb); - print c; - - local unspecified: table[subnet] of string = table(); - c = filter_subnet_table(10.2.0.2/32, unspecified); - print c; - } diff --git a/testing/btest/bifs/filter_subnet_table.zeek b/testing/btest/bifs/filter_subnet_table.zeek new file 
mode 100644 index 0000000000..b11cbf0a8f --- /dev/null +++ b/testing/btest/bifs/filter_subnet_table.zeek @@ -0,0 +1,49 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +global testa: set[subnet] = { + 10.0.0.0/8, + 10.2.0.0/16, + 10.2.0.2/31, + 10.1.0.0/16, + 10.3.0.0/16, + 5.0.0.0/8, + 5.5.0.0/25, + 5.2.0.0/32, + 7.2.0.0/32, + [2607:f8b0:4008:807::200e]/64, + [2607:f8b0:4007:807::200e]/64, + [2607:f8b0:4007:807::200e]/128 +}; + +global testb: table[subnet] of string = { + [10.0.0.0/8] = "a", + [10.2.0.0/16] = "b", + [10.2.0.2/31] = "c", + [10.1.0.0/16] = "d", + [10.3.0.0/16] = "e", + [5.0.0.0/8] = "f", + [5.5.0.0/25] = "g", + [5.2.0.0/32] = "h", + [7.2.0.0/32] = "i", + [[2607:f8b0:4008:807::200e]/64] = "j", + [[2607:f8b0:4007:807::200e]/64] = "k", + [[2607:f8b0:4007:807::200e]/128] = "l" +}; + + +event zeek_init() + { + local c = filter_subnet_table(10.2.0.2/32, testa); + print c; + c = filter_subnet_table(10.2.0.2/32, testb); + print c; + c = filter_subnet_table(10.3.0.2/32, testb); + print c; + c = filter_subnet_table(1.0.0.0/8, testb); + print c; + + local unspecified: table[subnet] of string = table(); + c = filter_subnet_table(10.2.0.2/32, unspecified); + print c; + } diff --git a/testing/btest/bifs/find_all.bro b/testing/btest/bifs/find_all.bro deleted file mode 100644 index 4fe451a9d4..0000000000 --- a/testing/btest/bifs/find_all.bro +++ /dev/null @@ -1,18 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "this is a test"; - local pat = /hi|es/; - local pat2 = /aa|bb/; - - local b = find_all(a, pat); - local b2 = find_all(a, pat2); - - for (i in b) - print i; - print "-------------------"; - print |b2|; - } diff --git a/testing/btest/bifs/find_all.zeek b/testing/btest/bifs/find_all.zeek new file mode 100644 index 0000000000..c51086ade0 --- /dev/null +++ b/testing/btest/bifs/find_all.zeek @@ -0,0 +1,18 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = "this is a test"; + local pat = /hi|es/; + local pat2 = /aa|bb/; + + local b = find_all(a, pat); + local b2 = find_all(a, pat2); + + for (i in b) + print i; + print "-------------------"; + print |b2|; + } diff --git a/testing/btest/bifs/find_entropy.bro b/testing/btest/bifs/find_entropy.bro deleted file mode 100644 index 2eb24fe118..0000000000 --- a/testing/btest/bifs/find_entropy.bro +++ /dev/null @@ -1,13 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "dh3Hie02uh^s#Sdf9L3frd243h$d78r2G4cM6*Q05d(7rh46f!0|4-f"; - local b = "0011000aaabbbbcccc000011111000000000aaaabbbbcccc0000000"; - - print find_entropy(a); - - print find_entropy(b); - } diff --git a/testing/btest/bifs/find_entropy.zeek b/testing/btest/bifs/find_entropy.zeek new file mode 100644 index 0000000000..d8be9c08a6 --- /dev/null +++ b/testing/btest/bifs/find_entropy.zeek @@ -0,0 +1,13 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = "dh3Hie02uh^s#Sdf9L3frd243h$d78r2G4cM6*Q05d(7rh46f!0|4-f"; + local b = "0011000aaabbbbcccc000011111000000000aaaabbbbcccc0000000"; + + print find_entropy(a); + + print find_entropy(b); + } diff --git a/testing/btest/bifs/find_last.bro b/testing/btest/bifs/find_last.bro deleted file mode 100644 index 00ae2a874d..0000000000 --- a/testing/btest/bifs/find_last.bro +++ /dev/null @@ -1,17 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - 
local a = "this is a test"; - local pat = /hi|es/; - local pat2 = /aa|bb/; - - local b = find_last(a, pat); - local b2 = find_last(a, pat2); - - print b; - print "-------------------"; - print |b2|; - } diff --git a/testing/btest/bifs/find_last.zeek b/testing/btest/bifs/find_last.zeek new file mode 100644 index 0000000000..1f986cc6cd --- /dev/null +++ b/testing/btest/bifs/find_last.zeek @@ -0,0 +1,17 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = "this is a test"; + local pat = /hi|es/; + local pat2 = /aa|bb/; + + local b = find_last(a, pat); + local b2 = find_last(a, pat2); + + print b; + print "-------------------"; + print |b2|; + } diff --git a/testing/btest/bifs/fmt.bro b/testing/btest/bifs/fmt.bro deleted file mode 100644 index 7fc4dc38d7..0000000000 --- a/testing/btest/bifs/fmt.bro +++ /dev/null @@ -1,80 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -type color: enum { Red, Blue }; - -event bro_init() - { - local a = Blue; - local b = vector( 1, 2, 3); - local c = set( 1, 2, 3); - local d: table[count] of string = { [1] = "test", [2] = "bro" }; - - # tests with only a format string (no additional args) - print fmt("test"); - print fmt("%%"); - - # no arguments - print fmt(); - - # tests of various data types with field width specified - print fmt("*%-10s*", "test"); - print fmt("*%10s*", "test"); - print fmt("*%10s*", T); - print fmt("*%-10s*", T); - print fmt("*%10.2e*", 3.14159265); - print fmt("*%-10.2e*", 3.14159265); - print fmt("*%10.2f*", 3.14159265); - print fmt("*%10.2g*", 3.14159265); - print fmt("*%10.2e*", -3.14159265); - print fmt("*%10.2f*", -3.14159265); - print fmt("*%10.2g*", -3.14159265); - print fmt("*%-10.2e*", -3.14159265); - print fmt("*%-10.2f*", -3.14159265); - print fmt("*%-10.2g*", -3.14159265); - print fmt("*%10d*", -128); - print fmt("*%-10d*", -128); - print fmt("*%10d*", 128); - print fmt("*%010d*", 128); - print fmt("*%-10d*", 128); - print fmt("*%10x*", 160); - print fmt("*%010x*", 160); - print fmt("*%10x*", 160/tcp); - print fmt("*%10s*", 160/tcp); - print fmt("*%10s*", 127.0.0.1); - print fmt("*%10x*", 127.0.0.1); - print fmt("*%10s*", 192.168.0.0/16); - print fmt("*%10s*", [::1]); - print fmt("*%10x*", [fe00::1]); - print fmt("*%10s*", [fe80:1234::1]); - print fmt("*%10s*", [fe80:1234::]/32); - print fmt("*%10s*", 3hr); - print fmt("*%10s*", /^foo|bar/); - print fmt("*%10s*", a); - print fmt("*%10s*", b); - print fmt("*%10s*", c); - print fmt("*%10s*", d); - - # tests of various data types without field width - print fmt("%e", 3.1e+2); - print fmt("%f", 3.1e+2); - print fmt("%g", 3.1e+2); - print fmt("%.3e", 3.1e+2); - print fmt("%.3f", 3.1e+2); - print fmt("%.3g", 3.1e+2); - print fmt("%.7g", 3.1e+2); - - # Tests of "%s" with non-printable characters (the string length is printed - # instead of the string itself because the print command does its own - # escaping) - local s0 = "\x00\x1f"; - local s1 = fmt("%s", s0); - print |s0|; - print |s1|; - - s0 = "\x7f\xff"; - s1 = fmt("%s", s0); - print |s0|; - print |s1|; - } diff --git a/testing/btest/bifs/fmt.zeek b/testing/btest/bifs/fmt.zeek new file mode 100644 index 0000000000..3f3b58073d --- /dev/null +++ b/testing/btest/bifs/fmt.zeek @@ -0,0 +1,80 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +type color: enum { Red, Blue }; + +event zeek_init() + { + local a = Blue; + local b = vector( 1, 2, 3); + local c = set( 1, 2, 3); + local d: table[count] of string = { [1] = "test", [2] = 
"bro" }; + + # tests with only a format string (no additional args) + print fmt("test"); + print fmt("%%"); + + # no arguments + print fmt(); + + # tests of various data types with field width specified + print fmt("*%-10s*", "test"); + print fmt("*%10s*", "test"); + print fmt("*%10s*", T); + print fmt("*%-10s*", T); + print fmt("*%10.2e*", 3.14159265); + print fmt("*%-10.2e*", 3.14159265); + print fmt("*%10.2f*", 3.14159265); + print fmt("*%10.2g*", 3.14159265); + print fmt("*%10.2e*", -3.14159265); + print fmt("*%10.2f*", -3.14159265); + print fmt("*%10.2g*", -3.14159265); + print fmt("*%-10.2e*", -3.14159265); + print fmt("*%-10.2f*", -3.14159265); + print fmt("*%-10.2g*", -3.14159265); + print fmt("*%10d*", -128); + print fmt("*%-10d*", -128); + print fmt("*%10d*", 128); + print fmt("*%010d*", 128); + print fmt("*%-10d*", 128); + print fmt("*%10x*", 160); + print fmt("*%010x*", 160); + print fmt("*%10x*", 160/tcp); + print fmt("*%10s*", 160/tcp); + print fmt("*%10s*", 127.0.0.1); + print fmt("*%10x*", 127.0.0.1); + print fmt("*%10s*", 192.168.0.0/16); + print fmt("*%10s*", [::1]); + print fmt("*%10x*", [fe00::1]); + print fmt("*%10s*", [fe80:1234::1]); + print fmt("*%10s*", [fe80:1234::]/32); + print fmt("*%10s*", 3hr); + print fmt("*%10s*", /^foo|bar/); + print fmt("*%10s*", a); + print fmt("*%10s*", b); + print fmt("*%10s*", c); + print fmt("*%10s*", d); + + # tests of various data types without field width + print fmt("%e", 3.1e+2); + print fmt("%f", 3.1e+2); + print fmt("%g", 3.1e+2); + print fmt("%.3e", 3.1e+2); + print fmt("%.3f", 3.1e+2); + print fmt("%.3g", 3.1e+2); + print fmt("%.7g", 3.1e+2); + + # Tests of "%s" with non-printable characters (the string length is printed + # instead of the string itself because the print command does its own + # escaping) + local s0 = "\x00\x1f"; + local s1 = fmt("%s", s0); + print |s0|; + print |s1|; + + s0 = "\x7f\xff"; + s1 = fmt("%s", s0); + print |s0|; + print |s1|; + } diff --git a/testing/btest/bifs/fmt_ftp_port.bro b/testing/btest/bifs/fmt_ftp_port.bro deleted file mode 100644 index 6a7b4d20c7..0000000000 --- a/testing/btest/bifs/fmt_ftp_port.bro +++ /dev/null @@ -1,13 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = 192.168.0.2; - local b = 257/tcp; - print fmt_ftp_port(a, b); - - a = [fe80::1234]; - print fmt_ftp_port(a, b); - } diff --git a/testing/btest/bifs/fmt_ftp_port.zeek b/testing/btest/bifs/fmt_ftp_port.zeek new file mode 100644 index 0000000000..956b223cf0 --- /dev/null +++ b/testing/btest/bifs/fmt_ftp_port.zeek @@ -0,0 +1,13 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = 192.168.0.2; + local b = 257/tcp; + print fmt_ftp_port(a, b); + + a = [fe80::1234]; + print fmt_ftp_port(a, b); + } diff --git a/testing/btest/bifs/get_current_packet_header.bro b/testing/btest/bifs/get_current_packet_header.bro deleted file mode 100644 index 24144545ef..0000000000 --- a/testing/btest/bifs/get_current_packet_header.bro +++ /dev/null @@ -1,8 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/icmp/icmp6-neighbor-solicit.pcap %INPUT > output -# @TEST-EXEC: btest-diff output - -event icmp_neighbor_solicitation(c: connection, icmp: icmp_conn, tgt: addr, options: icmp6_nd_options) - { - local hdr: raw_pkt_hdr = get_current_packet_header(); - print fmt("%s", hdr); - } \ No newline at end of file diff --git a/testing/btest/bifs/get_current_packet_header.zeek b/testing/btest/bifs/get_current_packet_header.zeek new file mode 100644 index 
0000000000..8efa727e11 --- /dev/null +++ b/testing/btest/bifs/get_current_packet_header.zeek @@ -0,0 +1,8 @@ +# @TEST-EXEC: zeek -C -r $TRACES/icmp/icmp6-neighbor-solicit.pcap %INPUT > output +# @TEST-EXEC: btest-diff output + +event icmp_neighbor_solicitation(c: connection, icmp: icmp_conn, tgt: addr, options: icmp6_nd_options) + { + local hdr: raw_pkt_hdr = get_current_packet_header(); + print fmt("%s", hdr); + } \ No newline at end of file diff --git a/testing/btest/bifs/get_matcher_stats.bro b/testing/btest/bifs/get_matcher_stats.bro deleted file mode 100644 index eeaa8cb86a..0000000000 --- a/testing/btest/bifs/get_matcher_stats.bro +++ /dev/null @@ -1,18 +0,0 @@ -# -# @TEST-EXEC: bro -b -s mysig %INPUT - -@TEST-START-FILE mysig.sig -signature my_ftp_client { - ip-proto == tcp - payload /(|.*[\n\r]) *[uU][sS][eE][rR] / - tcp-state originator - event "matched my_ftp_client" -} -@TEST-END-FILE - -event bro_init() - { - local a = get_matcher_stats(); - if ( a$matchers == 0 ) - exit(1); - } diff --git a/testing/btest/bifs/get_matcher_stats.zeek b/testing/btest/bifs/get_matcher_stats.zeek new file mode 100644 index 0000000000..5126f614dd --- /dev/null +++ b/testing/btest/bifs/get_matcher_stats.zeek @@ -0,0 +1,18 @@ +# +# @TEST-EXEC: zeek -b -s mysig %INPUT + +@TEST-START-FILE mysig.sig +signature my_ftp_client { + ip-proto == tcp + payload /(|.*[\n\r]) *[uU][sS][eE][rR] / + tcp-state originator + event "matched my_ftp_client" +} +@TEST-END-FILE + +event zeek_init() + { + local a = get_matcher_stats(); + if ( a$matchers == 0 ) + exit(1); + } diff --git a/testing/btest/bifs/get_port_transport_proto.bro b/testing/btest/bifs/get_port_transport_proto.bro deleted file mode 100644 index ae3c496d88..0000000000 --- a/testing/btest/bifs/get_port_transport_proto.bro +++ /dev/null @@ -1,13 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = 123/tcp; - local b = 123/udp; - local c = 123/icmp; - print get_port_transport_proto(a); - print get_port_transport_proto(b); - print get_port_transport_proto(c); - } diff --git a/testing/btest/bifs/get_port_transport_proto.zeek b/testing/btest/bifs/get_port_transport_proto.zeek new file mode 100644 index 0000000000..8ebbc3adaa --- /dev/null +++ b/testing/btest/bifs/get_port_transport_proto.zeek @@ -0,0 +1,13 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = 123/tcp; + local b = 123/udp; + local c = 123/icmp; + print get_port_transport_proto(a); + print get_port_transport_proto(b); + print get_port_transport_proto(c); + } diff --git a/testing/btest/bifs/gethostname.bro b/testing/btest/bifs/gethostname.bro deleted file mode 100644 index 1d760525cb..0000000000 --- a/testing/btest/bifs/gethostname.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT - -event bro_init() - { - local a = gethostname(); - if ( |a| == 0 ) - exit(1); - } diff --git a/testing/btest/bifs/gethostname.zeek b/testing/btest/bifs/gethostname.zeek new file mode 100644 index 0000000000..dd94b446c6 --- /dev/null +++ b/testing/btest/bifs/gethostname.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -b %INPUT + +event zeek_init() + { + local a = gethostname(); + if ( |a| == 0 ) + exit(1); + } diff --git a/testing/btest/bifs/getpid.bro b/testing/btest/bifs/getpid.bro deleted file mode 100644 index 1852b1287e..0000000000 --- a/testing/btest/bifs/getpid.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT - -event bro_init() - { - local a = getpid(); - if ( a == 0 ) - 
exit(1); - } diff --git a/testing/btest/bifs/getpid.zeek b/testing/btest/bifs/getpid.zeek new file mode 100644 index 0000000000..a1fbcde8bf --- /dev/null +++ b/testing/btest/bifs/getpid.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -b %INPUT + +event zeek_init() + { + local a = getpid(); + if ( a == 0 ) + exit(1); + } diff --git a/testing/btest/bifs/getsetenv.bro b/testing/btest/bifs/getsetenv.bro deleted file mode 100644 index d217a14ea9..0000000000 --- a/testing/btest/bifs/getsetenv.bro +++ /dev/null @@ -1,20 +0,0 @@ -# -# @TEST-EXEC: TESTBRO=testvalue bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = getenv("NOTDEFINED"); - local b = getenv("TESTBRO"); - if ( |a| == 0 ) - print "OK"; - if ( b == "testvalue" ) - print "OK"; - - if ( setenv("NOTDEFINED", "now defined" ) == T ) - { - if ( getenv("NOTDEFINED") == "now defined" ) - print "OK"; - } - - } diff --git a/testing/btest/bifs/getsetenv.zeek b/testing/btest/bifs/getsetenv.zeek new file mode 100644 index 0000000000..63f973e36d --- /dev/null +++ b/testing/btest/bifs/getsetenv.zeek @@ -0,0 +1,20 @@ +# +# @TEST-EXEC: TESTBRO=testvalue zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = getenv("NOTDEFINED"); + local b = getenv("TESTBRO"); + if ( |a| == 0 ) + print "OK"; + if ( b == "testvalue" ) + print "OK"; + + if ( setenv("NOTDEFINED", "now defined" ) == T ) + { + if ( getenv("NOTDEFINED") == "now defined" ) + print "OK"; + } + + } diff --git a/testing/btest/bifs/global_ids.bro b/testing/btest/bifs/global_ids.bro deleted file mode 100644 index 2dcb6e844d..0000000000 --- a/testing/btest/bifs/global_ids.bro +++ /dev/null @@ -1,16 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = global_ids(); - for ( i in a ) - { - # the table is quite large, so just print one item we expect - if ( i == "bro_init" ) - print a[i]$type_name; - - } - - } diff --git a/testing/btest/bifs/global_ids.zeek b/testing/btest/bifs/global_ids.zeek new file mode 100644 index 0000000000..b3cf1d3645 --- /dev/null +++ b/testing/btest/bifs/global_ids.zeek @@ -0,0 +1,16 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = global_ids(); + for ( i in a ) + { + # the table is quite large, so just print one item we expect + if ( i == "zeek_init" ) + print a[i]$type_name; + + } + + } diff --git a/testing/btest/bifs/global_sizes.bro b/testing/btest/bifs/global_sizes.bro deleted file mode 100644 index 4b0805172c..0000000000 --- a/testing/btest/bifs/global_sizes.bro +++ /dev/null @@ -1,16 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = global_sizes(); - for ( i in a ) - { - # the table is quite large, so just look for one item we expect - if ( i == "bro_init" ) - print "found bro_init"; - - } - - } diff --git a/testing/btest/bifs/global_sizes.zeek b/testing/btest/bifs/global_sizes.zeek new file mode 100644 index 0000000000..373cf74425 --- /dev/null +++ b/testing/btest/bifs/global_sizes.zeek @@ -0,0 +1,16 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = global_sizes(); + for ( i in a ) + { + # the table is quite large, so just look for one item we expect + if ( i == "zeek_init" ) + print "found zeek_init"; + + } + + } diff --git a/testing/btest/bifs/haversine_distance.bro b/testing/btest/bifs/haversine_distance.bro deleted file mode 100644 index b0a87a2c2d..0000000000 --- 
a/testing/btest/bifs/haversine_distance.bro +++ /dev/null @@ -1,30 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test(la1: double, lo1: double, la2: double, lo2: double) - { - print fmt("%.4e", haversine_distance(la1, lo1, la2, lo2)); - } - -event bro_init() - { - # Test two arbitrary locations. - test(37.866798, -122.253601, 48.25, 11.65); - # Swap the order of locations to verify the distance doesn't change. - test(48.25, 11.65, 37.866798, -122.253601); - - # Distance of one second of latitude (crossing the equator). - test(.0001388889, 0, -.0001388889, 0); - - # Distance of one second of longitude (crossing the prime meridian). - test(38, 0.000138999, 38, -0.000138999); - - # Distance of one minute of longitude (test extreme longitude values). - test(38, 180, 38, -179.98333); - - # Two locations on opposite ends of the Earth. - test(45, -90, -45, 90); - # Same, but verify that extreme latitude values work. - test(90, 0, -90, 0); - } diff --git a/testing/btest/bifs/haversine_distance.zeek b/testing/btest/bifs/haversine_distance.zeek new file mode 100644 index 0000000000..b1429b13c1 --- /dev/null +++ b/testing/btest/bifs/haversine_distance.zeek @@ -0,0 +1,30 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test(la1: double, lo1: double, la2: double, lo2: double) + { + print fmt("%.4e", haversine_distance(la1, lo1, la2, lo2)); + } + +event zeek_init() + { + # Test two arbitrary locations. + test(37.866798, -122.253601, 48.25, 11.65); + # Swap the order of locations to verify the distance doesn't change. + test(48.25, 11.65, 37.866798, -122.253601); + + # Distance of one second of latitude (crossing the equator). + test(.0001388889, 0, -.0001388889, 0); + + # Distance of one second of longitude (crossing the prime meridian). + test(38, 0.000138999, 38, -0.000138999); + + # Distance of one minute of longitude (test extreme longitude values). + test(38, 180, 38, -179.98333); + + # Two locations on opposite ends of the Earth. + test(45, -90, -45, 90); + # Same, but verify that extreme latitude values work. 
+ test(90, 0, -90, 0); + } diff --git a/testing/btest/bifs/hexdump.bro b/testing/btest/bifs/hexdump.bro deleted file mode 100644 index 1c86ce0db8..0000000000 --- a/testing/btest/bifs/hexdump.bro +++ /dev/null @@ -1,10 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "abc\xffdefghijklmnopqrstuvwxyz"; - - print hexdump(a); - } diff --git a/testing/btest/bifs/hexdump.zeek b/testing/btest/bifs/hexdump.zeek new file mode 100644 index 0000000000..eae0f58409 --- /dev/null +++ b/testing/btest/bifs/hexdump.zeek @@ -0,0 +1,10 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = "abc\xffdefghijklmnopqrstuvwxyz"; + + print hexdump(a); + } diff --git a/testing/btest/bifs/hexstr_to_bytestring.bro b/testing/btest/bifs/hexstr_to_bytestring.bro deleted file mode 100644 index f0815a6269..0000000000 --- a/testing/btest/bifs/hexstr_to_bytestring.bro +++ /dev/null @@ -1,12 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out -# @TEST-EXEC: btest-diff .stderr - -event bro_init() - { - print hexstr_to_bytestring("3034"); - print hexstr_to_bytestring(""); - print hexstr_to_bytestring("00"); - print hexstr_to_bytestring("a"); - } diff --git a/testing/btest/bifs/hexstr_to_bytestring.zeek b/testing/btest/bifs/hexstr_to_bytestring.zeek new file mode 100644 index 0000000000..41ca6a4823 --- /dev/null +++ b/testing/btest/bifs/hexstr_to_bytestring.zeek @@ -0,0 +1,12 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff .stderr + +event zeek_init() + { + print hexstr_to_bytestring("3034"); + print hexstr_to_bytestring(""); + print hexstr_to_bytestring("00"); + print hexstr_to_bytestring("a"); + } diff --git a/testing/btest/bifs/hll_cardinality.bro b/testing/btest/bifs/hll_cardinality.bro deleted file mode 100644 index d1b0807416..0000000000 --- a/testing/btest/bifs/hll_cardinality.bro +++ /dev/null @@ -1,83 +0,0 @@ -# -# @TEST-EXEC: bro %INPUT>out -# @TEST-EXEC: btest-diff out -# @TEST-EXEC: btest-diff .stderr - -event bro_init() - { - local c1 = hll_cardinality_init(0.01, 0.95); - local c2 = hll_cardinality_init(0.01, 0.95); - - local add1 = 2001; - local add2 = 2002; - local add3 = 2003; - - hll_cardinality_add(c1, add1); - hll_cardinality_add(c1, add2); - hll_cardinality_add(c1, add3); - hll_cardinality_add(c1, 1000); - hll_cardinality_add(c1, 1001); - hll_cardinality_add(c1, 101); - hll_cardinality_add(c1, 1003); - hll_cardinality_add(c1, 1004); - hll_cardinality_add(c1, 1005); - hll_cardinality_add(c1, 1006); - hll_cardinality_add(c1, 1007); - hll_cardinality_add(c1, 1008); - hll_cardinality_add(c1, 1009); - - hll_cardinality_add(c2, add1); - hll_cardinality_add(c2, add2); - hll_cardinality_add(c2, add3); - hll_cardinality_add(c2, 1); - hll_cardinality_add(c2, "b"); - hll_cardinality_add(c2, 101); - hll_cardinality_add(c2, 2); - hll_cardinality_add(c2, 3); - hll_cardinality_add(c2, 4); - hll_cardinality_add(c2, 5); - hll_cardinality_add(c2, 6); - hll_cardinality_add(c2, 7); - hll_cardinality_add(c2, 8); - - print "This value should be around 13:"; - print hll_cardinality_estimate(c1); - - print "This value should be about 12:"; - print hll_cardinality_estimate(c2); - - local m2 = hll_cardinality_init(0.02, 0.95); - - print "This value should be around 0:"; - print hll_cardinality_estimate(m2); - - local c3 = hll_cardinality_copy(c1); - - print "This value should be around 13:"; - print hll_cardinality_estimate(c3); - - c3 = 
hll_cardinality_init(0.01, 0.95); - print "This value should be 0:"; - print hll_cardinality_estimate(c3); - - print "This value should be true:"; - print hll_cardinality_merge_into(c3, c2); - - print "This value should be about 12:"; - print hll_cardinality_estimate(c2); - print hll_cardinality_estimate(c3); - - print "This value should be true:"; - print hll_cardinality_merge_into(c2, c1); - - print "This value should be about 21:"; - print hll_cardinality_estimate(c2); - - print "This value should be about 13:"; - print hll_cardinality_estimate(c1); - - print "This value should be about 12:"; - print hll_cardinality_estimate(c3); - - } - diff --git a/testing/btest/bifs/hll_cardinality.zeek b/testing/btest/bifs/hll_cardinality.zeek new file mode 100644 index 0000000000..5a919a9f2f --- /dev/null +++ b/testing/btest/bifs/hll_cardinality.zeek @@ -0,0 +1,83 @@ +# +# @TEST-EXEC: zeek %INPUT>out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff .stderr + +event zeek_init() + { + local c1 = hll_cardinality_init(0.01, 0.95); + local c2 = hll_cardinality_init(0.01, 0.95); + + local add1 = 2001; + local add2 = 2002; + local add3 = 2003; + + hll_cardinality_add(c1, add1); + hll_cardinality_add(c1, add2); + hll_cardinality_add(c1, add3); + hll_cardinality_add(c1, 1000); + hll_cardinality_add(c1, 1001); + hll_cardinality_add(c1, 101); + hll_cardinality_add(c1, 1003); + hll_cardinality_add(c1, 1004); + hll_cardinality_add(c1, 1005); + hll_cardinality_add(c1, 1006); + hll_cardinality_add(c1, 1007); + hll_cardinality_add(c1, 1008); + hll_cardinality_add(c1, 1009); + + hll_cardinality_add(c2, add1); + hll_cardinality_add(c2, add2); + hll_cardinality_add(c2, add3); + hll_cardinality_add(c2, 1); + hll_cardinality_add(c2, "b"); + hll_cardinality_add(c2, 101); + hll_cardinality_add(c2, 2); + hll_cardinality_add(c2, 3); + hll_cardinality_add(c2, 4); + hll_cardinality_add(c2, 5); + hll_cardinality_add(c2, 6); + hll_cardinality_add(c2, 7); + hll_cardinality_add(c2, 8); + + print "This value should be around 13:"; + print hll_cardinality_estimate(c1); + + print "This value should be about 12:"; + print hll_cardinality_estimate(c2); + + local m2 = hll_cardinality_init(0.02, 0.95); + + print "This value should be around 0:"; + print hll_cardinality_estimate(m2); + + local c3 = hll_cardinality_copy(c1); + + print "This value should be around 13:"; + print hll_cardinality_estimate(c3); + + c3 = hll_cardinality_init(0.01, 0.95); + print "This value should be 0:"; + print hll_cardinality_estimate(c3); + + print "This value should be true:"; + print hll_cardinality_merge_into(c3, c2); + + print "This value should be about 12:"; + print hll_cardinality_estimate(c2); + print hll_cardinality_estimate(c3); + + print "This value should be true:"; + print hll_cardinality_merge_into(c2, c1); + + print "This value should be about 21:"; + print hll_cardinality_estimate(c2); + + print "This value should be about 13:"; + print hll_cardinality_estimate(c1); + + print "This value should be about 12:"; + print hll_cardinality_estimate(c3); + + } + diff --git a/testing/btest/bifs/hll_large_estimate.bro b/testing/btest/bifs/hll_large_estimate.bro deleted file mode 100644 index b17b50678d..0000000000 --- a/testing/btest/bifs/hll_large_estimate.bro +++ /dev/null @@ -1,26 +0,0 @@ -# -# Test the quality of HLL once by checking adding a large number of IP entries. 
-# -# @TEST-EXEC: bro -b %INPUT > out -# @TEST-EXEC: BRO_SEED_FILE="" bro -b %INPUT > out2 -# @TEST-EXEC: head -n1 out2 >> out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local cp: opaque of cardinality = hll_cardinality_init(0.1, 1.0); - local base: count = 2130706432; # 127.0.0.0 - local i: count = 0; - while ( ++i < 170000 ) - { - hll_cardinality_add(cp, count_to_v4_addr(base+i)); - } - - local res: int = double_to_count(hll_cardinality_estimate(cp)); - if ( |res - 170000| > 17000 ) - print "Big error"; - else - print "Ok error"; - - print hll_cardinality_estimate(cp); - } diff --git a/testing/btest/bifs/hll_large_estimate.zeek b/testing/btest/bifs/hll_large_estimate.zeek new file mode 100644 index 0000000000..5016c6adf8 --- /dev/null +++ b/testing/btest/bifs/hll_large_estimate.zeek @@ -0,0 +1,26 @@ +# +# Test the quality of HLL once by checking adding a large number of IP entries. +# +# @TEST-EXEC: zeek -b %INPUT > out +# @TEST-EXEC: ZEEK_SEED_FILE="" zeek -b %INPUT > out2 +# @TEST-EXEC: head -n1 out2 >> out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local cp: opaque of cardinality = hll_cardinality_init(0.1, 1.0); + local base: count = 2130706432; # 127.0.0.0 + local i: count = 0; + while ( ++i < 170000 ) + { + hll_cardinality_add(cp, count_to_v4_addr(base+i)); + } + + local res: int = double_to_count(hll_cardinality_estimate(cp)); + if ( |res - 170000| > 17000 ) + print "Big error"; + else + print "Ok error"; + + print hll_cardinality_estimate(cp); + } diff --git a/testing/btest/bifs/identify_data.bro b/testing/btest/bifs/identify_data.bro deleted file mode 100644 index 048c409553..0000000000 --- a/testing/btest/bifs/identify_data.bro +++ /dev/null @@ -1,14 +0,0 @@ -# Text encodings may vary with libmagic version so don't test that part. -# @TEST-EXEC: bro -b %INPUT | sed 's/; charset=.*//g' >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - # plain text - local a = "This is a test"; - print identify_data(a, T); - - # PNG image - local b = "\x89\x50\x4e\x47\x0d\x0a\x1a\x0a\x00"; - print identify_data(b, T); - } diff --git a/testing/btest/bifs/identify_data.zeek b/testing/btest/bifs/identify_data.zeek new file mode 100644 index 0000000000..8ea6e267a1 --- /dev/null +++ b/testing/btest/bifs/identify_data.zeek @@ -0,0 +1,14 @@ +# Text encodings may vary with libmagic version so don't test that part. 
+# @TEST-EXEC: zeek -b %INPUT | sed 's/; charset=.*//g' >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + # plain text + local a = "This is a test"; + print identify_data(a, T); + + # PNG image + local b = "\x89\x50\x4e\x47\x0d\x0a\x1a\x0a\x00"; + print identify_data(b, T); + } diff --git a/testing/btest/bifs/install_src_addr_filter.test b/testing/btest/bifs/install_src_addr_filter.test index 5b387832de..95d1f51d54 100644 --- a/testing/btest/bifs/install_src_addr_filter.test +++ b/testing/btest/bifs/install_src_addr_filter.test @@ -1,7 +1,7 @@ -# @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace %INPUT >output +# @TEST-EXEC: zeek -C -r $TRACES/wikipedia.trace %INPUT >output # @TEST-EXEC: btest-diff output -event bro_init() +event zeek_init() { install_src_addr_filter(141.142.220.118, TH_SYN, 100.0); } diff --git a/testing/btest/bifs/is_ascii.bro b/testing/btest/bifs/is_ascii.bro deleted file mode 100644 index fa2d39d2d8..0000000000 --- a/testing/btest/bifs/is_ascii.bro +++ /dev/null @@ -1,12 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "this is a test\xfe"; - local b = "this is a test\x7f"; - - print is_ascii(a); - print is_ascii(b); - } diff --git a/testing/btest/bifs/is_ascii.zeek b/testing/btest/bifs/is_ascii.zeek new file mode 100644 index 0000000000..505e21e715 --- /dev/null +++ b/testing/btest/bifs/is_ascii.zeek @@ -0,0 +1,12 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = "this is a test\xfe"; + local b = "this is a test\x7f"; + + print is_ascii(a); + print is_ascii(b); + } diff --git a/testing/btest/bifs/is_local_interface.bro b/testing/btest/bifs/is_local_interface.bro deleted file mode 100644 index ac21b04bd3..0000000000 --- a/testing/btest/bifs/is_local_interface.bro +++ /dev/null @@ -1,11 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - print is_local_interface(127.0.0.1); - print is_local_interface(1.2.3.4); - print is_local_interface([2607::a:b:c:d]); - print is_local_interface([::1]); - } diff --git a/testing/btest/bifs/is_local_interface.zeek b/testing/btest/bifs/is_local_interface.zeek new file mode 100644 index 0000000000..f1ee1e9990 --- /dev/null +++ b/testing/btest/bifs/is_local_interface.zeek @@ -0,0 +1,11 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + print is_local_interface(127.0.0.1); + print is_local_interface(1.2.3.4); + print is_local_interface([2607::a:b:c:d]); + print is_local_interface([::1]); + } diff --git a/testing/btest/bifs/is_port.bro b/testing/btest/bifs/is_port.bro deleted file mode 100644 index 2fe4964913..0000000000 --- a/testing/btest/bifs/is_port.bro +++ /dev/null @@ -1,22 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = 123/tcp; - local b = 123/udp; - local c = 123/icmp; - - print is_tcp_port(a); - print is_tcp_port(b); - print is_tcp_port(c); - - print is_udp_port(a); - print is_udp_port(b); - print is_udp_port(c); - - print is_icmp_port(a); - print is_icmp_port(b); - print is_icmp_port(c); - } diff --git a/testing/btest/bifs/is_port.zeek b/testing/btest/bifs/is_port.zeek new file mode 100644 index 0000000000..28f63f63b6 --- /dev/null +++ b/testing/btest/bifs/is_port.zeek @@ -0,0 +1,22 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = 123/tcp; + local b = 123/udp; + local c = 123/icmp; + + print 
is_tcp_port(a); + print is_tcp_port(b); + print is_tcp_port(c); + + print is_udp_port(a); + print is_udp_port(b); + print is_udp_port(c); + + print is_icmp_port(a); + print is_icmp_port(b); + print is_icmp_port(c); + } diff --git a/testing/btest/bifs/join_string.bro b/testing/btest/bifs/join_string.bro deleted file mode 100644 index 0b2d94029a..0000000000 --- a/testing/btest/bifs/join_string.bro +++ /dev/null @@ -1,25 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a: string_array = { - [1] = "this", [2] = "is", [3] = "a", [4] = "test" - }; - local b: string_array = { [1] = "mytest" }; - local c: string_vec = vector( "this", "is", "another", "test" ); - local d: string_vec = vector( "Test" ); - local e: string_vec = vector(); - e[3] = "hi"; - e[5] = "there"; - - print join_string_array(" * ", a); - print join_string_array("", a); - print join_string_array("x", b); - - print join_string_vec(c, "__"); - print join_string_vec(c, ""); - print join_string_vec(d, "-"); - print join_string_vec(e, "."); - } diff --git a/testing/btest/bifs/join_string.zeek b/testing/btest/bifs/join_string.zeek new file mode 100644 index 0000000000..9cac2faa0d --- /dev/null +++ b/testing/btest/bifs/join_string.zeek @@ -0,0 +1,21 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a: string_array = { + [1] = "this", [2] = "is", [3] = "a", [4] = "test" + }; + local b: string_array = { [1] = "mytest" }; + local c: string_vec = vector( "this", "is", "another", "test" ); + local d: string_vec = vector( "Test" ); + local e: string_vec = vector(); + e[3] = "hi"; + e[5] = "there"; + + print join_string_vec(c, "__"); + print join_string_vec(c, ""); + print join_string_vec(d, "-"); + print join_string_vec(e, "."); + } diff --git a/testing/btest/bifs/levenshtein_distance.bro b/testing/btest/bifs/levenshtein_distance.bro deleted file mode 100644 index 86d5e386f4..0000000000 --- a/testing/btest/bifs/levenshtein_distance.bro +++ /dev/null @@ -1,27 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() -{ - local a = "this is a string"; - local b = "this is a tring"; - local c = "this is a strings"; - local d = "this is a strink"; - - print levenshtein_distance(a, b); - print levenshtein_distance(b, a); - - print levenshtein_distance(a, c); - print levenshtein_distance(c, a); - - print levenshtein_distance(a, d); - print levenshtein_distance(d, a); - - print levenshtein_distance(d, ""); - print levenshtein_distance("", d); - print levenshtein_distance("", ""); - print levenshtein_distance(d, d); - - print levenshtein_distance("kitten", "sitting"); -} diff --git a/testing/btest/bifs/levenshtein_distance.zeek b/testing/btest/bifs/levenshtein_distance.zeek new file mode 100644 index 0000000000..14aaa78264 --- /dev/null +++ b/testing/btest/bifs/levenshtein_distance.zeek @@ -0,0 +1,27 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() +{ + local a = "this is a string"; + local b = "this is a tring"; + local c = "this is a strings"; + local d = "this is a strink"; + + print levenshtein_distance(a, b); + print levenshtein_distance(b, a); + + print levenshtein_distance(a, c); + print levenshtein_distance(c, a); + + print levenshtein_distance(a, d); + print levenshtein_distance(d, a); + + print levenshtein_distance(d, ""); + print levenshtein_distance("", d); + print levenshtein_distance("", ""); + print levenshtein_distance(d, d); + + print 
levenshtein_distance("kitten", "sitting"); +} diff --git a/testing/btest/bifs/lookup_ID.bro b/testing/btest/bifs/lookup_ID.bro deleted file mode 100644 index e263c192da..0000000000 --- a/testing/btest/bifs/lookup_ID.bro +++ /dev/null @@ -1,16 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -global a = "bro test"; - -event bro_init() - { - local b = "local value"; - - print lookup_ID("a"); - print lookup_ID(""); - print lookup_ID("xyz"); - print lookup_ID("b"); - print type_name( lookup_ID("bro_init") ); - } diff --git a/testing/btest/bifs/lookup_ID.zeek b/testing/btest/bifs/lookup_ID.zeek new file mode 100644 index 0000000000..534e678729 --- /dev/null +++ b/testing/btest/bifs/lookup_ID.zeek @@ -0,0 +1,16 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +global a = "zeek test"; + +event zeek_init() + { + local b = "local value"; + + print lookup_ID("a"); + print lookup_ID(""); + print lookup_ID("xyz"); + print lookup_ID("b"); + print type_name( lookup_ID("zeek_init") ); + } diff --git a/testing/btest/bifs/lowerupper.bro b/testing/btest/bifs/lowerupper.bro deleted file mode 100644 index 77e6b1c9d1..0000000000 --- a/testing/btest/bifs/lowerupper.bro +++ /dev/null @@ -1,11 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "this is a Test"; - - print to_lower(a); - print to_upper(a); - } diff --git a/testing/btest/bifs/lowerupper.zeek b/testing/btest/bifs/lowerupper.zeek new file mode 100644 index 0000000000..dfda21d39e --- /dev/null +++ b/testing/btest/bifs/lowerupper.zeek @@ -0,0 +1,11 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = "this is a Test"; + + print to_lower(a); + print to_upper(a); + } diff --git a/testing/btest/bifs/lstrip.bro b/testing/btest/bifs/lstrip.bro deleted file mode 100644 index f382b06e23..0000000000 --- a/testing/btest/bifs/lstrip.bro +++ /dev/null @@ -1,18 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local link_test = "https://www.zeek.org"; - local one_side = "abcdcab"; - local strange_chars = "ådog"; - - print lstrip(link_test, "htps:/"); - print lstrip(one_side, "abc"); - print lstrip("", "å"); - print lstrip(link_test, ""); - print lstrip(strange_chars, "å"); - print fmt("*%s*", lstrip("aaa", "a")); - print fmt("*%s*", lstrip("\n testing ")); - } diff --git a/testing/btest/bifs/lstrip.zeek b/testing/btest/bifs/lstrip.zeek new file mode 100644 index 0000000000..6674b2a49c --- /dev/null +++ b/testing/btest/bifs/lstrip.zeek @@ -0,0 +1,18 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local link_test = "https://www.zeek.org"; + local one_side = "abcdcab"; + local strange_chars = "ådog"; + + print lstrip(link_test, "htps:/"); + print lstrip(one_side, "abc"); + print lstrip("", "å"); + print lstrip(link_test, ""); + print lstrip(strange_chars, "å"); + print fmt("*%s*", lstrip("aaa", "a")); + print fmt("*%s*", lstrip("\n testing ")); + } diff --git a/testing/btest/bifs/mask_addr.bro b/testing/btest/bifs/mask_addr.bro deleted file mode 100644 index e69a55f590..0000000000 --- a/testing/btest/bifs/mask_addr.bro +++ /dev/null @@ -1,9 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -const one_to_32: vector of count = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}; - -for ( i in one_to_32 ) - { - print 
mask_addr(255.255.255.255, one_to_32[i]); - } diff --git a/testing/btest/bifs/mask_addr.zeek b/testing/btest/bifs/mask_addr.zeek new file mode 100644 index 0000000000..36ac6d91dd --- /dev/null +++ b/testing/btest/bifs/mask_addr.zeek @@ -0,0 +1,9 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +const one_to_32: vector of count = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}; + +for ( i in one_to_32 ) + { + print mask_addr(255.255.255.255, one_to_32[i]); + } diff --git a/testing/btest/bifs/matching_subnets.bro b/testing/btest/bifs/matching_subnets.bro deleted file mode 100644 index 87effed19f..0000000000 --- a/testing/btest/bifs/matching_subnets.bro +++ /dev/null @@ -1,30 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -global testt: set[subnet] = { - 10.0.0.0/8, - 10.2.0.0/16, - 10.2.0.2/31, - 10.1.0.0/16, - 10.3.0.0/16, - 5.0.0.0/8, - 5.5.0.0/25, - 5.2.0.0/32, - 7.2.0.0/32, - [2607:f8b0:4008:807::200e]/64, - [2607:f8b0:4007:807::200e]/64, - [2607:f8b0:4007:807::200e]/128 -}; - -event bro_init() - { - print testt; - local c = matching_subnets(10.2.0.2/32, testt); - print c; - c = matching_subnets([2607:f8b0:4007:807::200e]/128, testt); - print c; - c = matching_subnets(128.0.0.1/32, testt); - print c; - c = matching_subnets(10.0.0.2/8, testt); - print c; - } diff --git a/testing/btest/bifs/matching_subnets.zeek b/testing/btest/bifs/matching_subnets.zeek new file mode 100644 index 0000000000..c51915ec0d --- /dev/null +++ b/testing/btest/bifs/matching_subnets.zeek @@ -0,0 +1,30 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +global testt: set[subnet] = { + 10.0.0.0/8, + 10.2.0.0/16, + 10.2.0.2/31, + 10.1.0.0/16, + 10.3.0.0/16, + 5.0.0.0/8, + 5.5.0.0/25, + 5.2.0.0/32, + 7.2.0.0/32, + [2607:f8b0:4008:807::200e]/64, + [2607:f8b0:4007:807::200e]/64, + [2607:f8b0:4007:807::200e]/128 +}; + +event zeek_init() + { + print testt; + local c = matching_subnets(10.2.0.2/32, testt); + print c; + c = matching_subnets([2607:f8b0:4007:807::200e]/128, testt); + print c; + c = matching_subnets(128.0.0.1/32, testt); + print c; + c = matching_subnets(10.0.0.2/8, testt); + print c; + } diff --git a/testing/btest/bifs/math.bro b/testing/btest/bifs/math.bro deleted file mode 100644 index 84ace8620c..0000000000 --- a/testing/btest/bifs/math.bro +++ /dev/null @@ -1,24 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = 3.14; - local b = 2.71; - local c = -3.14; - local d = -2.71; - - print floor(a); - print floor(b); - print floor(c); - print floor(d); - - print sqrt(a); - - print exp(a); - - print ln(a); - - print log10(a); - } diff --git a/testing/btest/bifs/math.zeek b/testing/btest/bifs/math.zeek new file mode 100644 index 0000000000..353704f0f9 --- /dev/null +++ b/testing/btest/bifs/math.zeek @@ -0,0 +1,24 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = 3.14; + local b = 2.71; + local c = -3.14; + local d = -2.71; + + print floor(a); + print floor(b); + print floor(c); + print floor(d); + + print sqrt(a); + + print exp(a); + + print ln(a); + + print log10(a); + } diff --git a/testing/btest/bifs/md5.test b/testing/btest/bifs/md5.test index b022302c59..1d00d3f173 100644 --- a/testing/btest/bifs/md5.test +++ b/testing/btest/bifs/md5.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b %INPUT >output +# @TEST-EXEC: zeek -b %INPUT >output # @TEST-EXEC: btest-diff output print 
md5_hash("one"); diff --git a/testing/btest/bifs/merge_pattern.bro b/testing/btest/bifs/merge_pattern.bro deleted file mode 100644 index de4a3afd6a..0000000000 --- a/testing/btest/bifs/merge_pattern.bro +++ /dev/null @@ -1,17 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = /foo/; - local b = /b[a-z]+/; - local c = merge_pattern(a, b); - - if ( "bar" == c ) - print "match"; - - if ( "foo" == c ) - print "match"; - - } diff --git a/testing/btest/bifs/net_stats_trace.test b/testing/btest/bifs/net_stats_trace.test index cd9ee52a27..0b593c11e4 100644 --- a/testing/btest/bifs/net_stats_trace.test +++ b/testing/btest/bifs/net_stats_trace.test @@ -1,8 +1,8 @@ # Checks that accurate stats are returned when reading from a trace file. -# @TEST-EXEC: bro -r $TRACES/wikipedia.trace >output %INPUT +# @TEST-EXEC: zeek -r $TRACES/wikipedia.trace >output %INPUT # @TEST-EXEC: btest-diff output -event bro_done() +event zeek_done() { print get_net_stats(); } diff --git a/testing/btest/bifs/netbios-functions.bro b/testing/btest/bifs/netbios-functions.bro deleted file mode 100644 index 9b075e8729..0000000000 --- a/testing/btest/bifs/netbios-functions.bro +++ /dev/null @@ -1,18 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local names_to_decode = set( - "ejfdebfeebfacacacacacacacacacaaa", # ISATAP - "fhepfcelehfcepfffacacacacacacabl", # WORKGROUP - "abacfpfpenfdecfcepfhfdeffpfpacab", # \001\002__MSBROWSE__\002 - "enebfcfeejeocacacacacacacacacaad"); # MARTIN - - for ( name in names_to_decode ) - { - print decode_netbios_name(name); - print decode_netbios_name_type(name); - } - } diff --git a/testing/btest/bifs/netbios-functions.zeek b/testing/btest/bifs/netbios-functions.zeek new file mode 100644 index 0000000000..c3e951ffa8 --- /dev/null +++ b/testing/btest/bifs/netbios-functions.zeek @@ -0,0 +1,18 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local names_to_decode = set( + "ejfdebfeebfacacacacacacacacacaaa", # ISATAP + "fhepfcelehfcepfffacacacacacacabl", # WORKGROUP + "abacfpfpenfdecfcepfhfdeffpfpacab", # \001\002__MSBROWSE__\002 + "enebfcfeejeocacacacacacacacacaad"); # MARTIN + + for ( name in names_to_decode ) + { + print decode_netbios_name(name); + print decode_netbios_name_type(name); + } + } diff --git a/testing/btest/bifs/order.bro b/testing/btest/bifs/order.bro deleted file mode 100644 index cb4b050686..0000000000 --- a/testing/btest/bifs/order.bro +++ /dev/null @@ -1,55 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function myfunc1(a: addr, b: addr): int - { - local x = addr_to_counts(a); - local y = addr_to_counts(b); - if (x[0] < y[0]) - return -1; - else - return 1; - } - -function myfunc2(a: double, b: double): int - { - if (a < b) - return -1; - else - return 1; - } - -event bro_init() - { - - # Tests without supplying a comparison function - - local a1 = vector( 5, 2, 8, 3 ); - local b1 = order(a1); - print a1; - print b1; - - local a2: vector of interval = vector( 5hr, 2days, 1sec, -7min ); - local b2 = order(a2); - print a2; - print b2; - - # Tests with a comparison function - - local c1: vector of addr = vector( 192.168.123.200, 10.0.0.157, 192.168.0.3 ); - local d1 = order(c1, myfunc1); - print c1; - print d1; - - local c2: vector of double = vector( 3.03, 3.01, 3.02, 3.015 ); - local d2 = order(c2, myfunc2); - print c2; - print d2; - - # Tests with large numbers - - local l1 = vector(2304, 
1156, 13, 42, 4294967296); - print l1; - print order(l1); - } diff --git a/testing/btest/bifs/order.zeek b/testing/btest/bifs/order.zeek new file mode 100644 index 0000000000..b989bb6095 --- /dev/null +++ b/testing/btest/bifs/order.zeek @@ -0,0 +1,55 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function myfunc1(a: addr, b: addr): int + { + local x = addr_to_counts(a); + local y = addr_to_counts(b); + if (x[0] < y[0]) + return -1; + else + return 1; + } + +function myfunc2(a: double, b: double): int + { + if (a < b) + return -1; + else + return 1; + } + +event zeek_init() + { + + # Tests without supplying a comparison function + + local a1 = vector( 5, 2, 8, 3 ); + local b1 = order(a1); + print a1; + print b1; + + local a2: vector of interval = vector( 5hr, 2days, 1sec, -7min ); + local b2 = order(a2); + print a2; + print b2; + + # Tests with a comparison function + + local c1: vector of addr = vector( 192.168.123.200, 10.0.0.157, 192.168.0.3 ); + local d1 = order(c1, myfunc1); + print c1; + print d1; + + local c2: vector of double = vector( 3.03, 3.01, 3.02, 3.015 ); + local d2 = order(c2, myfunc2); + print c2; + print d2; + + # Tests with large numbers + + local l1 = vector(2304, 1156, 13, 42, 4294967296); + print l1; + print order(l1); + } diff --git a/testing/btest/bifs/parse_ftp.bro b/testing/btest/bifs/parse_ftp.bro deleted file mode 100644 index a8993fa6e0..0000000000 --- a/testing/btest/bifs/parse_ftp.bro +++ /dev/null @@ -1,15 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - print parse_ftp_port("192,168,0,2,1,1"); - - print parse_eftp_port("|1|192.168.0.2|257|"); - print parse_eftp_port("|2|fe80::12|1234|"); - - print parse_ftp_pasv("227 Entering Passive Mode (192,168,0,2,1,1)"); - - print parse_ftp_epsv("229 Entering Extended Passive Mode (|||1234|)"); - } diff --git a/testing/btest/bifs/parse_ftp.zeek b/testing/btest/bifs/parse_ftp.zeek new file mode 100644 index 0000000000..47b53284e6 --- /dev/null +++ b/testing/btest/bifs/parse_ftp.zeek @@ -0,0 +1,15 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + print parse_ftp_port("192,168,0,2,1,1"); + + print parse_eftp_port("|1|192.168.0.2|257|"); + print parse_eftp_port("|2|fe80::12|1234|"); + + print parse_ftp_pasv("227 Entering Passive Mode (192,168,0,2,1,1)"); + + print parse_ftp_epsv("229 Entering Extended Passive Mode (|||1234|)"); + } diff --git a/testing/btest/bifs/piped_exec.bro b/testing/btest/bifs/piped_exec.bro deleted file mode 100644 index 70f8d70523..0000000000 --- a/testing/btest/bifs/piped_exec.bro +++ /dev/null @@ -1,14 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: btest-diff test.txt - - -global cmds = "print \"hello world\";"; -cmds = string_cat(cmds, "\nprint \"foobar\";"); -if ( piped_exec("bro", cmds) != T ) - exit(1); - -# Test null output. -if ( piped_exec("cat > test.txt", "\x00\x00hello\x00\x00") != T ) - exit(1); - diff --git a/testing/btest/bifs/piped_exec.zeek b/testing/btest/bifs/piped_exec.zeek new file mode 100644 index 0000000000..469803735e --- /dev/null +++ b/testing/btest/bifs/piped_exec.zeek @@ -0,0 +1,14 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: btest-diff test.txt + + +global cmds = "print \"hello world\";"; +cmds = string_cat(cmds, "\nprint \"foobar\";"); +if ( piped_exec("zeek", cmds) != T ) + exit(1); + +# Test null output. 
+if ( piped_exec("cat > test.txt", "\x00\x00hello\x00\x00") != T ) + exit(1); + diff --git a/testing/btest/bifs/ptr_name_to_addr.bro b/testing/btest/bifs/ptr_name_to_addr.bro deleted file mode 100644 index d1a7878e3d..0000000000 --- a/testing/btest/bifs/ptr_name_to_addr.bro +++ /dev/null @@ -1,8 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -global v6 = ptr_name_to_addr("2.1.0.1.0.0.0.0.0.0.0.0.0.0.0.0.2.0.8.0.9.0.0.4.0.b.8.f.7.0.6.2.ip6.arpa"); -global v4 = ptr_name_to_addr("52.225.125.74.in-addr.arpa"); - -print v6; -print v4; \ No newline at end of file diff --git a/testing/btest/bifs/ptr_name_to_addr.zeek b/testing/btest/bifs/ptr_name_to_addr.zeek new file mode 100644 index 0000000000..7779ec7772 --- /dev/null +++ b/testing/btest/bifs/ptr_name_to_addr.zeek @@ -0,0 +1,8 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +global v6 = ptr_name_to_addr("2.1.0.1.0.0.0.0.0.0.0.0.0.0.0.0.2.0.8.0.9.0.0.4.0.b.8.f.7.0.6.2.ip6.arpa"); +global v4 = ptr_name_to_addr("52.225.125.74.in-addr.arpa"); + +print v6; +print v4; \ No newline at end of file diff --git a/testing/btest/bifs/rand.bro b/testing/btest/bifs/rand.bro deleted file mode 100644 index caf3f16031..0000000000 --- a/testing/btest/bifs/rand.bro +++ /dev/null @@ -1,29 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: bro -b %INPUT do_seed=F >out.2 -# @TEST-EXEC: btest-diff out -# @TEST-EXEC: btest-diff out.2 - -const do_seed = T &redef; - -event bro_init() - { - local a = rand(1000); - local b = rand(1000); - local c = rand(1000); - - print a; - print b; - print c; - - if ( do_seed ) - srand(575); - - local d = rand(1000); - local e = rand(1000); - local f = rand(1000); - - print d; - print e; - print f; - } diff --git a/testing/btest/bifs/rand.zeek b/testing/btest/bifs/rand.zeek new file mode 100644 index 0000000000..b4b0facabc --- /dev/null +++ b/testing/btest/bifs/rand.zeek @@ -0,0 +1,29 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: zeek -b %INPUT do_seed=F >out.2 +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff out.2 + +const do_seed = T &redef; + +event zeek_init() + { + local a = rand(1000); + local b = rand(1000); + local c = rand(1000); + + print a; + print b; + print c; + + if ( do_seed ) + srand(575); + + local d = rand(1000); + local e = rand(1000); + local f = rand(1000); + + print d; + print e; + print f; + } diff --git a/testing/btest/bifs/raw_bytes_to_v4_addr.bro b/testing/btest/bifs/raw_bytes_to_v4_addr.bro deleted file mode 100644 index bd685216ef..0000000000 --- a/testing/btest/bifs/raw_bytes_to_v4_addr.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - print raw_bytes_to_v4_addr("ABCD"); - print raw_bytes_to_v4_addr("ABC"); - } diff --git a/testing/btest/bifs/raw_bytes_to_v4_addr.zeek b/testing/btest/bifs/raw_bytes_to_v4_addr.zeek new file mode 100644 index 0000000000..1229ac6135 --- /dev/null +++ b/testing/btest/bifs/raw_bytes_to_v4_addr.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + print raw_bytes_to_v4_addr("ABCD"); + print raw_bytes_to_v4_addr("ABC"); + } diff --git a/testing/btest/bifs/reading_traces.bro b/testing/btest/bifs/reading_traces.bro deleted file mode 100644 index 46ad04c25f..0000000000 --- a/testing/btest/bifs/reading_traces.bro +++ /dev/null @@ -1,10 +0,0 @@ - -# @TEST-EXEC: bro -b %INPUT >out1 -# @TEST-EXEC: btest-diff out1 -# @TEST-EXEC: bro -r $TRACES/web.trace %INPUT >out2 
-# @TEST-EXEC: btest-diff out2 - -event bro_init() - { - print reading_traces(); - } diff --git a/testing/btest/bifs/reading_traces.zeek b/testing/btest/bifs/reading_traces.zeek new file mode 100644 index 0000000000..11d1e2a3f7 --- /dev/null +++ b/testing/btest/bifs/reading_traces.zeek @@ -0,0 +1,10 @@ + +# @TEST-EXEC: zeek -b %INPUT >out1 +# @TEST-EXEC: btest-diff out1 +# @TEST-EXEC: zeek -r $TRACES/web.trace %INPUT >out2 +# @TEST-EXEC: btest-diff out2 + +event zeek_init() + { + print reading_traces(); + } diff --git a/testing/btest/bifs/record_type_to_vector.bro b/testing/btest/bifs/record_type_to_vector.bro deleted file mode 100644 index 9795ce886b..0000000000 --- a/testing/btest/bifs/record_type_to_vector.bro +++ /dev/null @@ -1,13 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -type myrecord: record { - ct: count; - str1: string; -}; - -event bro_init() - { - print record_type_to_vector("myrecord"); - } diff --git a/testing/btest/bifs/record_type_to_vector.zeek b/testing/btest/bifs/record_type_to_vector.zeek new file mode 100644 index 0000000000..3b45af835b --- /dev/null +++ b/testing/btest/bifs/record_type_to_vector.zeek @@ -0,0 +1,13 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +type myrecord: record { + ct: count; + str1: string; +}; + +event zeek_init() + { + print record_type_to_vector("myrecord"); + } diff --git a/testing/btest/bifs/records_fields.bro b/testing/btest/bifs/records_fields.bro deleted file mode 100644 index 88df239b57..0000000000 --- a/testing/btest/bifs/records_fields.bro +++ /dev/null @@ -1,46 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -type myrec: record { - myfield: bool; -}; - -type tt: record { - a: bool; - b: string &default="Bar"; - c: double &optional; - d: string &log; - m: myrec; -}; - -type r: record { - a: count; - b: string &default="Foo"; - c: double &optional; - d: string &log; - e: any; -}; - -type mystring: string; - -event bro_init() -{ - local x: r = [$a=42, $d="Bar", $e=tt]; - print x; - local t: record_field_table; - t = record_fields(x); - print t; - print t["c"]?$value; - - t = record_fields(x$e); - print t; - t = record_fields(tt); - print t; - - x = [$a=42, $d="Bar", $e=mystring]; - t = record_fields(x); - print t; - t = record_fields(x$e); - print t; -} diff --git a/testing/btest/bifs/records_fields.zeek b/testing/btest/bifs/records_fields.zeek new file mode 100644 index 0000000000..632bcb2fcf --- /dev/null +++ b/testing/btest/bifs/records_fields.zeek @@ -0,0 +1,46 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +type myrec: record { + myfield: bool; +}; + +type tt: record { + a: bool; + b: string &default="Bar"; + c: double &optional; + d: string &log; + m: myrec; +}; + +type r: record { + a: count; + b: string &default="Foo"; + c: double &optional; + d: string &log; + e: any; +}; + +type mystring: string; + +event zeek_init() +{ + local x: r = [$a=42, $d="Bar", $e=tt]; + print x; + local t: record_field_table; + t = record_fields(x); + print t; + print t["c"]?$value; + + t = record_fields(x$e); + print t; + t = record_fields(tt); + print t; + + x = [$a=42, $d="Bar", $e=mystring]; + t = record_fields(x); + print t; + t = record_fields(x$e); + print t; +} diff --git a/testing/btest/bifs/remask_addr.bro b/testing/btest/bifs/remask_addr.bro deleted file mode 100644 index 7b7e89c018..0000000000 --- a/testing/btest/bifs/remask_addr.bro +++ /dev/null @@ -1,10 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff 
output - -const one_to_32: vector of count = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}; - -for ( i in one_to_32 ) - { - print fmt("%s: %s", one_to_32[i], - remask_addr(0.0.255.255, 255.255.0.0, 96+one_to_32[i])); - } diff --git a/testing/btest/bifs/remask_addr.zeek b/testing/btest/bifs/remask_addr.zeek new file mode 100644 index 0000000000..1014b22550 --- /dev/null +++ b/testing/btest/bifs/remask_addr.zeek @@ -0,0 +1,10 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +const one_to_32: vector of count = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}; + +for ( i in one_to_32 ) + { + print fmt("%s: %s", one_to_32[i], + remask_addr(0.0.255.255, 255.255.0.0, 96+one_to_32[i])); + } diff --git a/testing/btest/bifs/resize.bro b/testing/btest/bifs/resize.bro deleted file mode 100644 index f4067f31c7..0000000000 --- a/testing/btest/bifs/resize.bro +++ /dev/null @@ -1,26 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = vector( 5, 3, 8 ); - - print |a|; - - if ( resize(a, 5) != 3 ) - exit(1); - - print |a|; - - if ( resize(a, 0) != 5 ) - exit(1); - - print |a|; - - if ( resize(a, 7) != 0 ) - exit(1); - - print |a|; - - } diff --git a/testing/btest/bifs/resize.zeek b/testing/btest/bifs/resize.zeek new file mode 100644 index 0000000000..483564ef1f --- /dev/null +++ b/testing/btest/bifs/resize.zeek @@ -0,0 +1,26 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = vector( 5, 3, 8 ); + + print |a|; + + if ( resize(a, 5) != 3 ) + exit(1); + + print |a|; + + if ( resize(a, 0) != 5 ) + exit(1); + + print |a|; + + if ( resize(a, 7) != 0 ) + exit(1); + + print |a|; + + } diff --git a/testing/btest/bifs/reverse.bro b/testing/btest/bifs/reverse.bro deleted file mode 100644 index bbb386bb80..0000000000 --- a/testing/btest/bifs/reverse.bro +++ /dev/null @@ -1,19 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local s1 = "hello world!"; - local s2 = "rise to vote sir"; - local s3 = "\xff\x00"; - local s4 = "\xff\x39\x30\xff"; - - print reverse(s1); - print reverse(reverse(s1)); - print subst_string(reverse(s2), " ", ""); - print bytestring_to_hexstr(s3); - print bytestring_to_hexstr(reverse(s3)); - print bytestring_to_hexstr(reverse(sub_bytes(s4, 2, 2))); - print reverse("A"); - } diff --git a/testing/btest/bifs/reverse.zeek b/testing/btest/bifs/reverse.zeek new file mode 100644 index 0000000000..9a87704cc0 --- /dev/null +++ b/testing/btest/bifs/reverse.zeek @@ -0,0 +1,19 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local s1 = "hello world!"; + local s2 = "rise to vote sir"; + local s3 = "\xff\x00"; + local s4 = "\xff\x39\x30\xff"; + + print reverse(s1); + print reverse(reverse(s1)); + print subst_string(reverse(s2), " ", ""); + print bytestring_to_hexstr(s3); + print bytestring_to_hexstr(reverse(s3)); + print bytestring_to_hexstr(reverse(sub_bytes(s4, 2, 2))); + print reverse("A"); + } diff --git a/testing/btest/bifs/rotate_file.bro b/testing/btest/bifs/rotate_file.bro deleted file mode 100644 index a6109ff677..0000000000 --- a/testing/btest/bifs/rotate_file.bro +++ /dev/null @@ -1,15 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = open("testfile"); - write_file(a, "this is a test\n"); - - local b = rotate_file(a); - 
if ( b$new_name != "testfile" ) - print "file rotated"; - print file_size(b$new_name); - print file_size("testfile"); - } diff --git a/testing/btest/bifs/rotate_file.zeek b/testing/btest/bifs/rotate_file.zeek new file mode 100644 index 0000000000..028b374653 --- /dev/null +++ b/testing/btest/bifs/rotate_file.zeek @@ -0,0 +1,15 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = open("testfile"); + write_file(a, "this is a test\n"); + + local b = rotate_file(a); + if ( b$new_name != "testfile" ) + print "file rotated"; + print file_size(b$new_name); + print file_size("testfile"); + } diff --git a/testing/btest/bifs/rotate_file_by_name.bro b/testing/btest/bifs/rotate_file_by_name.bro deleted file mode 100644 index f647edefe2..0000000000 --- a/testing/btest/bifs/rotate_file_by_name.bro +++ /dev/null @@ -1,16 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = open("testfile"); - write_file(a, "this is a test\n"); - close(a); - - local b = rotate_file_by_name("testfile"); - if ( b$new_name != "testfile" ) - print "file rotated"; - print file_size(b$new_name); - print file_size("testfile"); - } diff --git a/testing/btest/bifs/rotate_file_by_name.zeek b/testing/btest/bifs/rotate_file_by_name.zeek new file mode 100644 index 0000000000..985084e6ed --- /dev/null +++ b/testing/btest/bifs/rotate_file_by_name.zeek @@ -0,0 +1,16 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = open("testfile"); + write_file(a, "this is a test\n"); + close(a); + + local b = rotate_file_by_name("testfile"); + if ( b$new_name != "testfile" ) + print "file rotated"; + print file_size(b$new_name); + print file_size("testfile"); + } diff --git a/testing/btest/bifs/routing0_data_to_addrs.test b/testing/btest/bifs/routing0_data_to_addrs.test index a20bb3bf59..1c81eb0cd1 100644 --- a/testing/btest/bifs/routing0_data_to_addrs.test +++ b/testing/btest/bifs/routing0_data_to_addrs.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b -r $TRACES/ipv6-hbh-routing0.trace %INPUT >output +# @TEST-EXEC: zeek -b -r $TRACES/ipv6-hbh-routing0.trace %INPUT >output # @TEST-EXEC: btest-diff output event ipv6_ext_headers(c: connection, p: pkt_hdr) diff --git a/testing/btest/bifs/rstrip.bro b/testing/btest/bifs/rstrip.bro deleted file mode 100644 index a0695b8107..0000000000 --- a/testing/btest/bifs/rstrip.bro +++ /dev/null @@ -1,18 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local link_test = "https://www.zeek.org"; - local one_side = "abcdcab"; - local strange_chars = "dogå"; - - print fmt("%s", rstrip(link_test, "org.")); - print fmt("%s", rstrip(one_side, "abc")); - print fmt("%s", rstrip("", "å")); - print fmt("%s", rstrip(link_test, "")); - print fmt("%s", rstrip(strange_chars, "å")); - print fmt("*%s*", rstrip("aaa", "a")); - print fmt("*%s*", rstrip(" testing \n")); - } diff --git a/testing/btest/bifs/rstrip.zeek b/testing/btest/bifs/rstrip.zeek new file mode 100644 index 0000000000..2f19af4207 --- /dev/null +++ b/testing/btest/bifs/rstrip.zeek @@ -0,0 +1,18 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local link_test = "https://www.zeek.org"; + local one_side = "abcdcab"; + local strange_chars = "dogå"; + + print fmt("%s", rstrip(link_test, "org.")); + print fmt("%s", rstrip(one_side, "abc")); + print fmt("%s", rstrip("", "å")); + print fmt("%s", rstrip(link_test, 
"")); + print fmt("%s", rstrip(strange_chars, "å")); + print fmt("*%s*", rstrip("aaa", "a")); + print fmt("*%s*", rstrip(" testing \n")); + } diff --git a/testing/btest/bifs/safe_shell_quote.bro b/testing/btest/bifs/safe_shell_quote.bro deleted file mode 100644 index 490952c79b..0000000000 --- a/testing/btest/bifs/safe_shell_quote.bro +++ /dev/null @@ -1,12 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "echo `pwd` ${TEST} > \"my file\"; echo -e \"\\n\""; - print a; - - local b = safe_shell_quote(a); - print b; - } diff --git a/testing/btest/bifs/safe_shell_quote.zeek b/testing/btest/bifs/safe_shell_quote.zeek new file mode 100644 index 0000000000..46940a0976 --- /dev/null +++ b/testing/btest/bifs/safe_shell_quote.zeek @@ -0,0 +1,12 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = "echo `pwd` ${TEST} > \"my file\"; echo -e \"\\n\""; + print a; + + local b = safe_shell_quote(a); + print b; + } diff --git a/testing/btest/bifs/same_object.bro b/testing/btest/bifs/same_object.bro deleted file mode 100644 index dddfd80d3d..0000000000 --- a/testing/btest/bifs/same_object.bro +++ /dev/null @@ -1,16 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "This is a test"; - local b: string; - local c = "This is a test"; - b = a; - print same_object(a, b); - print same_object(a, c); - - local d = vector(1, 2, 3); - print same_object(a, d); - } diff --git a/testing/btest/bifs/same_object.zeek b/testing/btest/bifs/same_object.zeek new file mode 100644 index 0000000000..0afc362f04 --- /dev/null +++ b/testing/btest/bifs/same_object.zeek @@ -0,0 +1,16 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = "This is a test"; + local b: string; + local c = "This is a test"; + b = a; + print same_object(a, b); + print same_object(a, c); + + local d = vector(1, 2, 3); + print same_object(a, d); + } diff --git a/testing/btest/bifs/sha1.test b/testing/btest/bifs/sha1.test index 7bbd8b002e..1e9396b602 100644 --- a/testing/btest/bifs/sha1.test +++ b/testing/btest/bifs/sha1.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b %INPUT >output +# @TEST-EXEC: zeek -b %INPUT >output # @TEST-EXEC: btest-diff output print sha1_hash("one"); diff --git a/testing/btest/bifs/sha256.test b/testing/btest/bifs/sha256.test index a1c17f7113..83c937029a 100644 --- a/testing/btest/bifs/sha256.test +++ b/testing/btest/bifs/sha256.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b %INPUT >output +# @TEST-EXEC: zeek -b %INPUT >output # @TEST-EXEC: btest-diff output print sha256_hash("one"); diff --git a/testing/btest/bifs/sort.bro b/testing/btest/bifs/sort.bro deleted file mode 100644 index 2ddb44b8be..0000000000 --- a/testing/btest/bifs/sort.bro +++ /dev/null @@ -1,75 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function myfunc1(a: addr, b: addr): int - { - local x = addr_to_counts(a); - local y = addr_to_counts(b); - if (x[0] < y[0]) - return -1; - else - return 1; - } - -function myfunc2(a: double, b: double): int - { - if (a < b) - return -1; - else - return 1; - } - -event bro_init() - { - # Tests without supplying a comparison function - - local a1 = vector( 5, 2, 8, 3 ); - local b1 = sort(a1); - print a1; - print b1; - - local a2: vector of interval = vector( 5hr, 2days, 1sec, -7min ); - local b2 = sort(a2); - print a2; - print b2; - - local a3: vector of bool = vector( T, F, F, T ); - 
local b3 = sort(a3); - print a3; - print b3; - - local a4: vector of port = vector( 12/icmp, 123/tcp, 500/udp, 7/udp, 57/tcp ); - local b4 = sort(a4); - print a4; - print b4; - - # this one is expected to fail (i.e., "sort" doesn't sort the vector) - local a5: vector of double = vector( 3.03, 3.01, 3.02, 3.015 ); - local b5 = sort(a5); - print a5; - print b5; - - # this one is expected to fail (i.e., "sort" doesn't sort the vector) - local a6: vector of addr = vector( 192.168.123.200, 10.0.0.157, 192.168.0.3 ); - local b6 = sort(a6); - print a6; - print b6; - - # Tests with a comparison function - - local c1: vector of addr = vector( 192.168.123.200, 10.0.0.157, 192.168.0.3 ); - local d1 = sort(c1, myfunc1); - print c1; - print d1; - - local c2: vector of double = vector( 3.03, 3.01, 3.02, 3.015 ); - local d2 = sort(c2, myfunc2); - print c2; - print d2; - - # Testing large numbers - local l1 = vector(2304, 1156, 11, 42, 4294967296); - print l1; - print sort(l1); - } diff --git a/testing/btest/bifs/sort.zeek b/testing/btest/bifs/sort.zeek new file mode 100644 index 0000000000..8bfd1c5f5d --- /dev/null +++ b/testing/btest/bifs/sort.zeek @@ -0,0 +1,75 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function myfunc1(a: addr, b: addr): int + { + local x = addr_to_counts(a); + local y = addr_to_counts(b); + if (x[0] < y[0]) + return -1; + else + return 1; + } + +function myfunc2(a: double, b: double): int + { + if (a < b) + return -1; + else + return 1; + } + +event zeek_init() + { + # Tests without supplying a comparison function + + local a1 = vector( 5, 2, 8, 3 ); + local b1 = sort(a1); + print a1; + print b1; + + local a2: vector of interval = vector( 5hr, 2days, 1sec, -7min ); + local b2 = sort(a2); + print a2; + print b2; + + local a3: vector of bool = vector( T, F, F, T ); + local b3 = sort(a3); + print a3; + print b3; + + local a4: vector of port = vector( 12/icmp, 123/tcp, 500/udp, 7/udp, 57/tcp ); + local b4 = sort(a4); + print a4; + print b4; + + # this one is expected to fail (i.e., "sort" doesn't sort the vector) + local a5: vector of double = vector( 3.03, 3.01, 3.02, 3.015 ); + local b5 = sort(a5); + print a5; + print b5; + + # this one is expected to fail (i.e., "sort" doesn't sort the vector) + local a6: vector of addr = vector( 192.168.123.200, 10.0.0.157, 192.168.0.3 ); + local b6 = sort(a6); + print a6; + print b6; + + # Tests with a comparison function + + local c1: vector of addr = vector( 192.168.123.200, 10.0.0.157, 192.168.0.3 ); + local d1 = sort(c1, myfunc1); + print c1; + print d1; + + local c2: vector of double = vector( 3.03, 3.01, 3.02, 3.015 ); + local d2 = sort(c2, myfunc2); + print c2; + print d2; + + # Testing large numbers + local l1 = vector(2304, 1156, 11, 42, 4294967296); + print l1; + print sort(l1); + } diff --git a/testing/btest/bifs/sort_string_array.bro b/testing/btest/bifs/sort_string_array.bro deleted file mode 100644 index 1916f93d0c..0000000000 --- a/testing/btest/bifs/sort_string_array.bro +++ /dev/null @@ -1,17 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a: string_array = { - [1] = "this", [2] = "is", [3] = "a", [4] = "test" - }; - - local b = sort_string_array(a); - - print b[1]; - print b[2]; - print b[3]; - print b[4]; - } diff --git a/testing/btest/bifs/split.bro b/testing/btest/bifs/split.bro deleted file mode 100644 index b117844645..0000000000 --- a/testing/btest/bifs/split.bro +++ /dev/null @@ -1,58 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# 
@TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "this is a test"; - local pat = /hi|es/; - local idx = vector( 3, 6, 13); - - local b = split(a, pat); - local c = split1(a, pat); - local d = split_all(a, pat); - local e1 = split_n(a, pat, F, 1); - local e2 = split_n(a, pat, T, 1); - - print b[1]; - print b[2]; - print b[3]; - print "---------------------"; - print c[1]; - print c[2]; - print "---------------------"; - print d[1]; - print d[2]; - print d[3]; - print d[4]; - print d[5]; - print "---------------------"; - print e1[1]; - print e1[2]; - print "---------------------"; - print e2[1]; - print e2[2]; - print e2[3]; - print "---------------------"; - print str_split(a, idx); - print "---------------------"; - - a = "X-Mailer: Testing Test (http://www.example.com)"; - pat = /:[[:blank:]]*/; - local f = split1(a, pat); - - print f[1]; - print f[2]; - print "---------------------"; - - a = "A = B = C = D"; - pat = /=/; - local g = split_all(a, pat); - print g[1]; - print g[2]; - print g[3]; - print g[4]; - print g[5]; - print g[6]; - print g[7]; - } diff --git a/testing/btest/bifs/split_string.bro b/testing/btest/bifs/split_string.bro deleted file mode 100644 index e4d32b7f73..0000000000 --- a/testing/btest/bifs/split_string.bro +++ /dev/null @@ -1,36 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function print_string_vector(v: string_vec) - { - for ( i in v ) - print v[i]; - } - -event bro_init() - { - local a = "this is a test"; - local pat = /hi|es/; - local idx = vector( 3, 6, 13); - - print_string_vector(split_string(a, pat)); - print "---------------------"; - print_string_vector(split_string1(a, pat)); - print "---------------------"; - print_string_vector(split_string_all(a, pat)); - print "---------------------"; - print_string_vector(split_string_n(a, pat, F, 1)); - print "---------------------"; - print_string_vector(split_string_n(a, pat, T, 1)); - print "---------------------"; - print str_split(a, idx); - print "---------------------"; - a = "X-Mailer: Testing Test (http://www.example.com)"; - pat = /:[[:blank:]]*/; - print_string_vector(split_string1(a, pat)); - print "---------------------"; - a = "A = B = C = D"; - pat = /=/; - print_string_vector(split_string_all(a, pat)); - } diff --git a/testing/btest/bifs/split_string.zeek b/testing/btest/bifs/split_string.zeek new file mode 100644 index 0000000000..9692f32da5 --- /dev/null +++ b/testing/btest/bifs/split_string.zeek @@ -0,0 +1,36 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function print_string_vector(v: string_vec) + { + for ( i in v ) + print v[i]; + } + +event zeek_init() + { + local a = "this is a test"; + local pat = /hi|es/; + local idx = vector( 3, 6, 13); + + print_string_vector(split_string(a, pat)); + print "---------------------"; + print_string_vector(split_string1(a, pat)); + print "---------------------"; + print_string_vector(split_string_all(a, pat)); + print "---------------------"; + print_string_vector(split_string_n(a, pat, F, 1)); + print "---------------------"; + print_string_vector(split_string_n(a, pat, T, 1)); + print "---------------------"; + print str_split(a, idx); + print "---------------------"; + a = "X-Mailer: Testing Test (http://www.example.com)"; + pat = /:[[:blank:]]*/; + print_string_vector(split_string1(a, pat)); + print "---------------------"; + a = "A = B = C = D"; + pat = /=/; + print_string_vector(split_string_all(a, pat)); + } diff --git a/testing/btest/bifs/str_shell_escape.bro 
b/testing/btest/bifs/str_shell_escape.bro deleted file mode 100644 index e631458bc1..0000000000 --- a/testing/btest/bifs/str_shell_escape.bro +++ /dev/null @@ -1,15 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "echo ${TEST} > \"my file\""; - - print |a|; - print a; - - local b = str_shell_escape(a); - print |b|; - print b; - } diff --git a/testing/btest/bifs/str_shell_escape.zeek b/testing/btest/bifs/str_shell_escape.zeek new file mode 100644 index 0000000000..f3f08b0072 --- /dev/null +++ b/testing/btest/bifs/str_shell_escape.zeek @@ -0,0 +1,15 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = "echo ${TEST} > \"my file\""; + + print |a|; + print a; + + local b = str_shell_escape(a); + print |b|; + print b; + } diff --git a/testing/btest/bifs/strcmp.bro b/testing/btest/bifs/strcmp.bro deleted file mode 100644 index 92d0430f1d..0000000000 --- a/testing/btest/bifs/strcmp.bro +++ /dev/null @@ -1,13 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "this"; - local b = "testing"; - - print strcmp(a, b) > 0; - print strcmp(b, a) < 0; - print strcmp(a, a) == 0; - } diff --git a/testing/btest/bifs/strcmp.zeek b/testing/btest/bifs/strcmp.zeek new file mode 100644 index 0000000000..93528ed8f1 --- /dev/null +++ b/testing/btest/bifs/strcmp.zeek @@ -0,0 +1,13 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = "this"; + local b = "testing"; + + print strcmp(a, b) > 0; + print strcmp(b, a) < 0; + print strcmp(a, a) == 0; + } diff --git a/testing/btest/bifs/strftime.bro b/testing/btest/bifs/strftime.bro deleted file mode 100644 index 3d9e388c90..0000000000 --- a/testing/btest/bifs/strftime.bro +++ /dev/null @@ -1,17 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local f1 = "%Y-%m-%d %H:%M:%S"; - local f2 = "%H%M%S %Y%m%d"; - - local a = double_to_time(0); - print strftime(f1, a); - print strftime(f2, a); - - a = double_to_time(123456789); - print strftime(f1, a); - print strftime(f2, a); - } diff --git a/testing/btest/bifs/strftime.zeek b/testing/btest/bifs/strftime.zeek new file mode 100644 index 0000000000..5a68892a22 --- /dev/null +++ b/testing/btest/bifs/strftime.zeek @@ -0,0 +1,17 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local f1 = "%Y-%m-%d %H:%M:%S"; + local f2 = "%H%M%S %Y%m%d"; + + local a = double_to_time(0); + print strftime(f1, a); + print strftime(f2, a); + + a = double_to_time(123456789); + print strftime(f1, a); + print strftime(f2, a); + } diff --git a/testing/btest/bifs/string_fill.bro b/testing/btest/bifs/string_fill.bro deleted file mode 100644 index 0968215cc0..0000000000 --- a/testing/btest/bifs/string_fill.bro +++ /dev/null @@ -1,16 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "test "; - - local b = string_fill(1, a); - local c = string_fill(2, a); - local d = string_fill(10, a); - - print fmt("*%s* %d", b, |b|); - print fmt("*%s* %d", c, |c|); - print fmt("*%s* %d", d, |d|); - } diff --git a/testing/btest/bifs/string_fill.zeek b/testing/btest/bifs/string_fill.zeek new file mode 100644 index 0000000000..9398588b2a --- /dev/null +++ b/testing/btest/bifs/string_fill.zeek @@ -0,0 +1,16 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() 
+ { + local a = "test "; + + local b = string_fill(1, a); + local c = string_fill(2, a); + local d = string_fill(10, a); + + print fmt("*%s* %d", b, |b|); + print fmt("*%s* %d", c, |c|); + print fmt("*%s* %d", d, |d|); + } diff --git a/testing/btest/bifs/string_to_pattern.bro b/testing/btest/bifs/string_to_pattern.bro deleted file mode 100644 index 4bd04bbcea..0000000000 --- a/testing/btest/bifs/string_to_pattern.bro +++ /dev/null @@ -1,14 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - print string_to_pattern("foo", F); - print string_to_pattern("", F); - print string_to_pattern("b[a-z]+", F); - - print string_to_pattern("foo", T); - print string_to_pattern("", T); - print string_to_pattern("b[a-z]+", T); - } diff --git a/testing/btest/bifs/string_to_pattern.zeek b/testing/btest/bifs/string_to_pattern.zeek new file mode 100644 index 0000000000..d7e36f7fa8 --- /dev/null +++ b/testing/btest/bifs/string_to_pattern.zeek @@ -0,0 +1,14 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + print string_to_pattern("foo", F); + print string_to_pattern("", F); + print string_to_pattern("b[a-z]+", F); + + print string_to_pattern("foo", T); + print string_to_pattern("", T); + print string_to_pattern("b[a-z]+", T); + } diff --git a/testing/btest/bifs/strip.bro b/testing/btest/bifs/strip.bro deleted file mode 100644 index e508f20e3d..0000000000 --- a/testing/btest/bifs/strip.bro +++ /dev/null @@ -1,17 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = " this is a test "; - local b = ""; - local c = " "; - - print fmt("*%s*", a); - print fmt("*%s*", strip(a)); - print fmt("*%s*", b); - print fmt("*%s*", strip(b)); - print fmt("*%s*", c); - print fmt("*%s*", strip(c)); - } diff --git a/testing/btest/bifs/strip.zeek b/testing/btest/bifs/strip.zeek new file mode 100644 index 0000000000..caed076f2c --- /dev/null +++ b/testing/btest/bifs/strip.zeek @@ -0,0 +1,17 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = " this is a test "; + local b = ""; + local c = " "; + + print fmt("*%s*", a); + print fmt("*%s*", strip(a)); + print fmt("*%s*", b); + print fmt("*%s*", strip(b)); + print fmt("*%s*", c); + print fmt("*%s*", strip(c)); + } diff --git a/testing/btest/bifs/strptime.bro b/testing/btest/bifs/strptime.bro deleted file mode 100644 index 215299b300..0000000000 --- a/testing/btest/bifs/strptime.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: btest-diff out - -event bro_init() - { - print strptime("%Y-%m-%d", "2012-10-19"); - print strptime("%m", "1980-10-24"); - } diff --git a/testing/btest/bifs/strptime.zeek b/testing/btest/bifs/strptime.zeek new file mode 100644 index 0000000000..3923ced4c0 --- /dev/null +++ b/testing/btest/bifs/strptime.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + print strptime("%Y-%m-%d", "2012-10-19"); + print strptime("%m", "1980-10-24"); + } diff --git a/testing/btest/bifs/strstr.bro b/testing/btest/bifs/strstr.bro deleted file mode 100644 index 40cd8aa5fd..0000000000 --- a/testing/btest/bifs/strstr.bro +++ /dev/null @@ -1,13 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "this is a test"; - local b = "his"; - local c = "are"; - - print strstr(a, b); - print strstr(a, c); - } diff --git 
a/testing/btest/bifs/strstr.zeek b/testing/btest/bifs/strstr.zeek new file mode 100644 index 0000000000..23f8c871ed --- /dev/null +++ b/testing/btest/bifs/strstr.zeek @@ -0,0 +1,13 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = "this is a test"; + local b = "his"; + local c = "are"; + + print strstr(a, b); + print strstr(a, c); + } diff --git a/testing/btest/bifs/sub.bro b/testing/btest/bifs/sub.bro deleted file mode 100644 index 773530ac74..0000000000 --- a/testing/btest/bifs/sub.bro +++ /dev/null @@ -1,12 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "this is a test"; - local pat = /is|ss/; - - print sub(a, pat, "at"); - print gsub(a, pat, "at"); - } diff --git a/testing/btest/bifs/sub.zeek b/testing/btest/bifs/sub.zeek new file mode 100644 index 0000000000..1ad4e60137 --- /dev/null +++ b/testing/btest/bifs/sub.zeek @@ -0,0 +1,12 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = "this is a test"; + local pat = /is|ss/; + + print sub(a, pat, "at"); + print gsub(a, pat, "at"); + } diff --git a/testing/btest/bifs/subnet_to_addr.bro b/testing/btest/bifs/subnet_to_addr.bro deleted file mode 100644 index 02bb6254e0..0000000000 --- a/testing/btest/bifs/subnet_to_addr.bro +++ /dev/null @@ -1,14 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>error -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: btest-diff error - -function test_to_addr(sn: subnet, expect: addr) - { - local result = subnet_to_addr(sn); - print fmt("subnet_to_addr(%s) = %s (%s)", sn, result, - result == expect ? "SUCCESS" : "FAILURE"); - } - -test_to_addr(0.0.0.0/32, 0.0.0.0); -test_to_addr(1.2.3.4/16, 1.2.0.0); -test_to_addr([2607:f8b0:4005:803::200e]/128, [2607:f8b0:4005:803::200e]); diff --git a/testing/btest/bifs/subnet_to_addr.zeek b/testing/btest/bifs/subnet_to_addr.zeek new file mode 100644 index 0000000000..45cac551d2 --- /dev/null +++ b/testing/btest/bifs/subnet_to_addr.zeek @@ -0,0 +1,14 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>error +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: btest-diff error + +function test_to_addr(sn: subnet, expect: addr) + { + local result = subnet_to_addr(sn); + print fmt("subnet_to_addr(%s) = %s (%s)", sn, result, + result == expect ? 
"SUCCESS" : "FAILURE"); + } + +test_to_addr(0.0.0.0/32, 0.0.0.0); +test_to_addr(1.2.3.4/16, 1.2.0.0); +test_to_addr([2607:f8b0:4005:803::200e]/128, [2607:f8b0:4005:803::200e]); diff --git a/testing/btest/bifs/subnet_version.bro b/testing/btest/bifs/subnet_version.bro deleted file mode 100644 index 1efd633f68..0000000000 --- a/testing/btest/bifs/subnet_version.bro +++ /dev/null @@ -1,7 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -print is_v4_subnet(1.2.3.4/16); -print is_v4_subnet([2607:f8b0:4005:801::200e]/64); -print is_v6_subnet(1.2.3.4/24); -print is_v6_subnet([2607:f8b0:4005:801::200e]/12); diff --git a/testing/btest/bifs/subnet_version.zeek b/testing/btest/bifs/subnet_version.zeek new file mode 100644 index 0000000000..a01bc77dd3 --- /dev/null +++ b/testing/btest/bifs/subnet_version.zeek @@ -0,0 +1,7 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +print is_v4_subnet(1.2.3.4/16); +print is_v4_subnet([2607:f8b0:4005:801::200e]/64); +print is_v6_subnet(1.2.3.4/24); +print is_v6_subnet([2607:f8b0:4005:801::200e]/12); diff --git a/testing/btest/bifs/subst_string.bro b/testing/btest/bifs/subst_string.bro deleted file mode 100644 index 6ebed72321..0000000000 --- a/testing/btest/bifs/subst_string.bro +++ /dev/null @@ -1,12 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "this is another test"; - local b = "is"; - local c = "at"; - - print subst_string(a, b, c); - } diff --git a/testing/btest/bifs/subst_string.zeek b/testing/btest/bifs/subst_string.zeek new file mode 100644 index 0000000000..7ceb8040a2 --- /dev/null +++ b/testing/btest/bifs/subst_string.zeek @@ -0,0 +1,12 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = "this is another test"; + local b = "is"; + local c = "at"; + + print subst_string(a, b, c); + } diff --git a/testing/btest/bifs/system.bro b/testing/btest/bifs/system.bro deleted file mode 100644 index bd27fc3db5..0000000000 --- a/testing/btest/bifs/system.bro +++ /dev/null @@ -1,10 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = system("echo thistest > out"); - if ( a != 0 ) - exit(1); - } diff --git a/testing/btest/bifs/system.zeek b/testing/btest/bifs/system.zeek new file mode 100644 index 0000000000..7dab420ed0 --- /dev/null +++ b/testing/btest/bifs/system.zeek @@ -0,0 +1,10 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = system("echo thistest > out"); + if ( a != 0 ) + exit(1); + } diff --git a/testing/btest/bifs/system_env.bro b/testing/btest/bifs/system_env.bro deleted file mode 100644 index cfe4e7dd2a..0000000000 --- a/testing/btest/bifs/system_env.bro +++ /dev/null @@ -1,23 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff testfile - -event bro_init() - { - local vars: table[string] of string = { ["TESTBRO"] = "helloworld" }; - - # make sure the env. variable is not set - local myvar = getenv("BRO_ARG_TESTBRO"); - if ( |myvar| != 0 ) - exit(1); - - # check if command runs with the env. variable defined - local a = system_env("echo $BRO_ARG_TESTBRO > testfile", vars); - if ( a != 0 ) - exit(1); - - # make sure the env. 
variable is still not set - myvar = getenv("BRO_ARG_TESTBRO"); - if ( |myvar| != 0 ) - exit(1); - } diff --git a/testing/btest/bifs/system_env.zeek b/testing/btest/bifs/system_env.zeek new file mode 100644 index 0000000000..b209e6622f --- /dev/null +++ b/testing/btest/bifs/system_env.zeek @@ -0,0 +1,23 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff testfile + +event zeek_init() + { + local vars: table[string] of string = { ["TESTBRO"] = "helloworld" }; + + # make sure the env. variable is not set + local myvar = getenv("ZEEK_ARG_TESTBRO"); + if ( |myvar| != 0 ) + exit(1); + + # check if command runs with the env. variable defined + local a = system_env("echo $ZEEK_ARG_TESTBRO > testfile", vars); + if ( a != 0 ) + exit(1); + + # make sure the env. variable is still not set + myvar = getenv("ZEEK_ARG_TESTBRO"); + if ( |myvar| != 0 ) + exit(1); + } diff --git a/testing/btest/bifs/to_addr.bro b/testing/btest/bifs/to_addr.bro deleted file mode 100644 index 3a43438bb7..0000000000 --- a/testing/btest/bifs/to_addr.bro +++ /dev/null @@ -1,20 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>error -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: btest-diff error - -function test_to_addr(ip: string, expect: addr) - { - local result = to_addr(ip); - print fmt("to_addr(%s) = %s (%s)", ip, result, - result == expect ? "SUCCESS" : "FAILURE"); - } - -test_to_addr("0.0.0.0", 0.0.0.0); -test_to_addr("1.2.3.4", 1.2.3.4); -test_to_addr("01.02.03.04", 1.2.3.4); -test_to_addr("001.002.003.004", 1.2.3.4); -test_to_addr("10.20.30.40", 10.20.30.40); -test_to_addr("100.200.30.40", 100.200.30.40); -test_to_addr("10.0.0.0", 10.0.0.0); -test_to_addr("10.00.00.000", 10.0.0.0); -test_to_addr("not an IP", [::]); diff --git a/testing/btest/bifs/to_addr.zeek b/testing/btest/bifs/to_addr.zeek new file mode 100644 index 0000000000..bbef484f72 --- /dev/null +++ b/testing/btest/bifs/to_addr.zeek @@ -0,0 +1,20 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>error +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: btest-diff error + +function test_to_addr(ip: string, expect: addr) + { + local result = to_addr(ip); + print fmt("to_addr(%s) = %s (%s)", ip, result, + result == expect ? 
"SUCCESS" : "FAILURE"); + } + +test_to_addr("0.0.0.0", 0.0.0.0); +test_to_addr("1.2.3.4", 1.2.3.4); +test_to_addr("01.02.03.04", 1.2.3.4); +test_to_addr("001.002.003.004", 1.2.3.4); +test_to_addr("10.20.30.40", 10.20.30.40); +test_to_addr("100.200.30.40", 100.200.30.40); +test_to_addr("10.0.0.0", 10.0.0.0); +test_to_addr("10.00.00.000", 10.0.0.0); +test_to_addr("not an IP", [::]); diff --git a/testing/btest/bifs/to_count.bro b/testing/btest/bifs/to_count.bro deleted file mode 100644 index 8de8c5c674..0000000000 --- a/testing/btest/bifs/to_count.bro +++ /dev/null @@ -1,35 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a: int = -2; - print int_to_count(a); - - local b: int = 2; - print int_to_count(b); - - local c: double = 3.14; - print double_to_count(c); - - local d: double = 3.9; - print double_to_count(d); - - print to_count("7"); - print to_count(""); - print to_count("-5"); - print to_count("not a count"); - - local e: port = 123/tcp; - print port_to_count(e); - - local origString = "9223372036854775808"; - local directCount: count = 9223372036854775808; - local fromStringCount: count = to_count(origString); - - if ( directCount == fromStringCount ) - print fmt("%s and %s are the same", directCount, fromStringCount); - else - print fmt("%s and %s are not the same", directCount, fromStringCount); - } diff --git a/testing/btest/bifs/to_count.zeek b/testing/btest/bifs/to_count.zeek new file mode 100644 index 0000000000..7489ca8b79 --- /dev/null +++ b/testing/btest/bifs/to_count.zeek @@ -0,0 +1,35 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a: int = -2; + print int_to_count(a); + + local b: int = 2; + print int_to_count(b); + + local c: double = 3.14; + print double_to_count(c); + + local d: double = 3.9; + print double_to_count(d); + + print to_count("7"); + print to_count(""); + print to_count("-5"); + print to_count("not a count"); + + local e: port = 123/tcp; + print port_to_count(e); + + local origString = "9223372036854775808"; + local directCount: count = 9223372036854775808; + local fromStringCount: count = to_count(origString); + + if ( directCount == fromStringCount ) + print fmt("%s and %s are the same", directCount, fromStringCount); + else + print fmt("%s and %s are not the same", directCount, fromStringCount); + } diff --git a/testing/btest/bifs/to_double.bro b/testing/btest/bifs/to_double.bro deleted file mode 100644 index b6fb9917a7..0000000000 --- a/testing/btest/bifs/to_double.bro +++ /dev/null @@ -1,20 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = 1 usec; - print interval_to_double(a); - local b = 1sec; - print interval_to_double(b); - local c = -1min; - print interval_to_double(c); - local d = 1hrs; - print interval_to_double(d); - local e = 1 day; - print interval_to_double(e); - - local f = current_time(); - print time_to_double(f); - } diff --git a/testing/btest/bifs/to_double.zeek b/testing/btest/bifs/to_double.zeek new file mode 100644 index 0000000000..d62d30d5af --- /dev/null +++ b/testing/btest/bifs/to_double.zeek @@ -0,0 +1,20 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = 1 usec; + print interval_to_double(a); + local b = 1sec; + print interval_to_double(b); + local c = -1min; + print interval_to_double(c); + local d = 1hrs; + print interval_to_double(d); + local e = 1 day; + print interval_to_double(e); + + local f = 
current_time(); + print time_to_double(f); + } diff --git a/testing/btest/bifs/to_double_from_string.bro b/testing/btest/bifs/to_double_from_string.bro deleted file mode 100644 index 781261084f..0000000000 --- a/testing/btest/bifs/to_double_from_string.bro +++ /dev/null @@ -1,16 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>error -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff error - -function test_to_double(d: string, expect: double) - { - local result = to_double(d); - print fmt("to_double(%s) = %s (%s)", d, result, - result == expect ? "SUCCESS" : "FAILURE"); - } - -test_to_double("3.14", 3.14); -test_to_double("-3.14", -3.14); -test_to_double("0", 0); -test_to_double("NotADouble", 0); -test_to_double("", 0); diff --git a/testing/btest/bifs/to_double_from_string.zeek b/testing/btest/bifs/to_double_from_string.zeek new file mode 100644 index 0000000000..106a987eb4 --- /dev/null +++ b/testing/btest/bifs/to_double_from_string.zeek @@ -0,0 +1,16 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>error +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff error + +function test_to_double(d: string, expect: double) + { + local result = to_double(d); + print fmt("to_double(%s) = %s (%s)", d, result, + result == expect ? "SUCCESS" : "FAILURE"); + } + +test_to_double("3.14", 3.14); +test_to_double("-3.14", -3.14); +test_to_double("0", 0); +test_to_double("NotADouble", 0); +test_to_double("", 0); diff --git a/testing/btest/bifs/to_int.bro b/testing/btest/bifs/to_int.bro deleted file mode 100644 index e65a555cc4..0000000000 --- a/testing/btest/bifs/to_int.bro +++ /dev/null @@ -1,11 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - print to_int("1"); - print to_int("-1"); - print to_int("4294967296"); - print to_int("not an int"); - } diff --git a/testing/btest/bifs/to_int.zeek b/testing/btest/bifs/to_int.zeek new file mode 100644 index 0000000000..23e74030ba --- /dev/null +++ b/testing/btest/bifs/to_int.zeek @@ -0,0 +1,11 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + print to_int("1"); + print to_int("-1"); + print to_int("4294967296"); + print to_int("not an int"); + } diff --git a/testing/btest/bifs/to_interval.bro b/testing/btest/bifs/to_interval.bro deleted file mode 100644 index 71d73fed62..0000000000 --- a/testing/btest/bifs/to_interval.bro +++ /dev/null @@ -1,11 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = 1234563.14; - print double_to_interval(a); - local b = -1234563.14; - print double_to_interval(b); - } diff --git a/testing/btest/bifs/to_interval.zeek b/testing/btest/bifs/to_interval.zeek new file mode 100644 index 0000000000..a9bab7b675 --- /dev/null +++ b/testing/btest/bifs/to_interval.zeek @@ -0,0 +1,11 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = 1234563.14; + print double_to_interval(a); + local b = -1234563.14; + print double_to_interval(b); + } diff --git a/testing/btest/bifs/to_port.bro b/testing/btest/bifs/to_port.bro deleted file mode 100644 index b2289b8a21..0000000000 --- a/testing/btest/bifs/to_port.bro +++ /dev/null @@ -1,21 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - print to_port("123/tcp"); - print to_port("123/udp"); - print to_port("123/icmp"); - print to_port("0/tcp"); - 
print to_port("0/udp"); - print to_port("0/icmp"); - print to_port("not a port"); - - local a: transport_proto = tcp; - local b: transport_proto = udp; - local c: transport_proto = icmp; - print count_to_port(256, a); - print count_to_port(256, b); - print count_to_port(256, c); - } diff --git a/testing/btest/bifs/to_port.zeek b/testing/btest/bifs/to_port.zeek new file mode 100644 index 0000000000..b1e220f982 --- /dev/null +++ b/testing/btest/bifs/to_port.zeek @@ -0,0 +1,21 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + print to_port("123/tcp"); + print to_port("123/udp"); + print to_port("123/icmp"); + print to_port("0/tcp"); + print to_port("0/udp"); + print to_port("0/icmp"); + print to_port("not a port"); + + local a: transport_proto = tcp; + local b: transport_proto = udp; + local c: transport_proto = icmp; + print count_to_port(256, a); + print count_to_port(256, b); + print count_to_port(256, c); + } diff --git a/testing/btest/bifs/to_subnet.bro b/testing/btest/bifs/to_subnet.bro deleted file mode 100644 index 59064893e1..0000000000 --- a/testing/btest/bifs/to_subnet.bro +++ /dev/null @@ -1,11 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>error -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: btest-diff error - -global sn: subnet; -sn = to_subnet("10.0.0.0/8"); -print sn, sn == 10.0.0.0/8; -sn = to_subnet("2607:f8b0::/32"); -print sn, sn == [2607:f8b0::]/32; -sn = to_subnet("10.0.0.0"); -print sn, sn == [::]/0; diff --git a/testing/btest/bifs/to_subnet.zeek b/testing/btest/bifs/to_subnet.zeek new file mode 100644 index 0000000000..ebce392c98 --- /dev/null +++ b/testing/btest/bifs/to_subnet.zeek @@ -0,0 +1,11 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>error +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: btest-diff error + +global sn: subnet; +sn = to_subnet("10.0.0.0/8"); +print sn, sn == 10.0.0.0/8; +sn = to_subnet("2607:f8b0::/32"); +print sn, sn == [2607:f8b0::]/32; +sn = to_subnet("10.0.0.0"); +print sn, sn == [::]/0; diff --git a/testing/btest/bifs/to_time.bro b/testing/btest/bifs/to_time.bro deleted file mode 100644 index d5a81b0934..0000000000 --- a/testing/btest/bifs/to_time.bro +++ /dev/null @@ -1,11 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = 1234563.14; - print double_to_time(a); - local b = -1234563.14; - print double_to_time(b); - } diff --git a/testing/btest/bifs/to_time.zeek b/testing/btest/bifs/to_time.zeek new file mode 100644 index 0000000000..f2e9032176 --- /dev/null +++ b/testing/btest/bifs/to_time.zeek @@ -0,0 +1,11 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = 1234563.14; + print double_to_time(a); + local b = -1234563.14; + print double_to_time(b); + } diff --git a/testing/btest/bifs/topk.bro b/testing/btest/bifs/topk.bro deleted file mode 100644 index 1e650335a7..0000000000 --- a/testing/btest/bifs/topk.bro +++ /dev/null @@ -1,164 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT > out -# @TEST-EXEC: btest-diff out -# @TEST-EXEC: btest-diff .stderr - -event bro_init() - { - local k1 = topk_init(2); - - # first - peculiarity check... 
- topk_add(k1, "a"); - topk_add(k1, "b"); - topk_add(k1, "b"); - topk_add(k1, "c"); - - local s = topk_get_top(k1, 5); - print s; - print topk_sum(k1); - print topk_count(k1, "a"); - print topk_epsilon(k1, "a"); - print topk_count(k1, "b"); - print topk_epsilon(k1, "b"); - print topk_count(k1, "c"); - print topk_epsilon(k1, "c"); - - topk_add(k1, "d"); - s = topk_get_top(k1, 5); - print s; - print topk_sum(k1); - print topk_count(k1, "b"); - print topk_epsilon(k1, "b"); - print topk_count(k1, "c"); - print topk_epsilon(k1, "c"); - print topk_count(k1, "d"); - print topk_epsilon(k1, "d"); - - topk_add(k1, "e"); - s = topk_get_top(k1, 5); - print s; - print topk_sum(k1); - print topk_count(k1, "d"); - print topk_epsilon(k1, "d"); - print topk_count(k1, "e"); - print topk_epsilon(k1, "e"); - - topk_add(k1, "f"); - s = topk_get_top(k1, 5); - print s; - print topk_sum(k1); - print topk_count(k1, "f"); - print topk_epsilon(k1, "f"); - print topk_count(k1, "e"); - print topk_epsilon(k1, "e"); - - topk_add(k1, "e"); - s = topk_get_top(k1, 5); - print s; - print topk_sum(k1); - print topk_count(k1, "f"); - print topk_epsilon(k1, "f"); - print topk_count(k1, "e"); - print topk_epsilon(k1, "e"); - - topk_add(k1, "g"); - s = topk_get_top(k1, 5); - print s; - print topk_sum(k1); - print topk_count(k1, "f"); - print topk_epsilon(k1, "f"); - print topk_count(k1, "e"); - print topk_epsilon(k1, "e"); - print topk_count(k1, "g"); - print topk_epsilon(k1, "g"); - - k1 = topk_init(100); - topk_add(k1, "a"); - topk_add(k1, "b"); - topk_add(k1, "b"); - topk_add(k1, "c"); - topk_add(k1, "c"); - topk_add(k1, "c"); - topk_add(k1, "c"); - topk_add(k1, "c"); - topk_add(k1, "c"); - topk_add(k1, "d"); - topk_add(k1, "d"); - topk_add(k1, "d"); - topk_add(k1, "d"); - topk_add(k1, "e"); - topk_add(k1, "e"); - topk_add(k1, "e"); - topk_add(k1, "e"); - topk_add(k1, "e"); - topk_add(k1, "f"); - s = topk_get_top(k1, 3); - print s; - print topk_sum(k1); - print topk_count(k1, "c"); - print topk_epsilon(k1, "c"); - print topk_count(k1, "e"); - print topk_epsilon(k1, "d"); - print topk_count(k1, "d"); - print topk_epsilon(k1, "d"); - - local k3 = topk_init(2); - topk_merge_prune(k3, k1); - - s = topk_get_top(k3, 3); - print s; - print topk_count(k3, "c"); - print topk_epsilon(k3, "c"); - print topk_count(k3, "e"); - print topk_epsilon(k3, "e"); - print topk_count(k3, "d"); - print topk_epsilon(k3, "d"); - - topk_merge_prune(k3, k1); - - s = topk_get_top(k3, 3); - print s; - print topk_sum(k3); # this gives a warning and a wrong result. 
- print topk_count(k3, "c"); - print topk_epsilon(k3, "c"); - print topk_count(k3, "e"); - print topk_epsilon(k3, "e"); - print topk_count(k3, "d"); - print topk_epsilon(k3, "d"); - - k3 = topk_init(2); - topk_merge(k3, k1); - print s; - print topk_sum(k3); - print topk_count(k3, "c"); - print topk_epsilon(k3, "c"); - print topk_count(k3, "e"); - print topk_epsilon(k3, "e"); - print topk_count(k3, "d"); - print topk_epsilon(k3, "d"); - - topk_merge(k3, k1); - - s = topk_get_top(k3, 3); - print s; - print topk_sum(k3); - print topk_count(k3, "c"); - print topk_epsilon(k3, "c"); - print topk_count(k3, "e"); - print topk_epsilon(k3, "e"); - print topk_count(k3, "d"); - print topk_epsilon(k3, "d"); - - local styped: vector of count; - styped = topk_get_top(k3, 3); - for ( i in styped ) - print i, styped[i]; - - local anytyped: vector of any; - anytyped = topk_get_top(k3, 3); - for ( i in anytyped ) - print i, anytyped[i]; - - local suntyped = topk_get_top(k3, 3); - for ( i in suntyped ) - print i, suntyped[i]; -} diff --git a/testing/btest/bifs/topk.zeek b/testing/btest/bifs/topk.zeek new file mode 100644 index 0000000000..667107cbc0 --- /dev/null +++ b/testing/btest/bifs/topk.zeek @@ -0,0 +1,164 @@ +# @TEST-EXEC: zeek -b %INPUT > out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff .stderr + +event zeek_init() + { + local k1 = topk_init(2); + + # first - peculiarity check... + topk_add(k1, "a"); + topk_add(k1, "b"); + topk_add(k1, "b"); + topk_add(k1, "c"); + + local s = topk_get_top(k1, 5); + print s; + print topk_sum(k1); + print topk_count(k1, "a"); + print topk_epsilon(k1, "a"); + print topk_count(k1, "b"); + print topk_epsilon(k1, "b"); + print topk_count(k1, "c"); + print topk_epsilon(k1, "c"); + + topk_add(k1, "d"); + s = topk_get_top(k1, 5); + print s; + print topk_sum(k1); + print topk_count(k1, "b"); + print topk_epsilon(k1, "b"); + print topk_count(k1, "c"); + print topk_epsilon(k1, "c"); + print topk_count(k1, "d"); + print topk_epsilon(k1, "d"); + + topk_add(k1, "e"); + s = topk_get_top(k1, 5); + print s; + print topk_sum(k1); + print topk_count(k1, "d"); + print topk_epsilon(k1, "d"); + print topk_count(k1, "e"); + print topk_epsilon(k1, "e"); + + topk_add(k1, "f"); + s = topk_get_top(k1, 5); + print s; + print topk_sum(k1); + print topk_count(k1, "f"); + print topk_epsilon(k1, "f"); + print topk_count(k1, "e"); + print topk_epsilon(k1, "e"); + + topk_add(k1, "e"); + s = topk_get_top(k1, 5); + print s; + print topk_sum(k1); + print topk_count(k1, "f"); + print topk_epsilon(k1, "f"); + print topk_count(k1, "e"); + print topk_epsilon(k1, "e"); + + topk_add(k1, "g"); + s = topk_get_top(k1, 5); + print s; + print topk_sum(k1); + print topk_count(k1, "f"); + print topk_epsilon(k1, "f"); + print topk_count(k1, "e"); + print topk_epsilon(k1, "e"); + print topk_count(k1, "g"); + print topk_epsilon(k1, "g"); + + k1 = topk_init(100); + topk_add(k1, "a"); + topk_add(k1, "b"); + topk_add(k1, "b"); + topk_add(k1, "c"); + topk_add(k1, "c"); + topk_add(k1, "c"); + topk_add(k1, "c"); + topk_add(k1, "c"); + topk_add(k1, "c"); + topk_add(k1, "d"); + topk_add(k1, "d"); + topk_add(k1, "d"); + topk_add(k1, "d"); + topk_add(k1, "e"); + topk_add(k1, "e"); + topk_add(k1, "e"); + topk_add(k1, "e"); + topk_add(k1, "e"); + topk_add(k1, "f"); + s = topk_get_top(k1, 3); + print s; + print topk_sum(k1); + print topk_count(k1, "c"); + print topk_epsilon(k1, "c"); + print topk_count(k1, "e"); + print topk_epsilon(k1, "d"); + print topk_count(k1, "d"); + print topk_epsilon(k1, "d"); + + local k3 = 
topk_init(2); + topk_merge_prune(k3, k1); + + s = topk_get_top(k3, 3); + print s; + print topk_count(k3, "c"); + print topk_epsilon(k3, "c"); + print topk_count(k3, "e"); + print topk_epsilon(k3, "e"); + print topk_count(k3, "d"); + print topk_epsilon(k3, "d"); + + topk_merge_prune(k3, k1); + + s = topk_get_top(k3, 3); + print s; + print topk_sum(k3); # this gives a warning and a wrong result. + print topk_count(k3, "c"); + print topk_epsilon(k3, "c"); + print topk_count(k3, "e"); + print topk_epsilon(k3, "e"); + print topk_count(k3, "d"); + print topk_epsilon(k3, "d"); + + k3 = topk_init(2); + topk_merge(k3, k1); + print s; + print topk_sum(k3); + print topk_count(k3, "c"); + print topk_epsilon(k3, "c"); + print topk_count(k3, "e"); + print topk_epsilon(k3, "e"); + print topk_count(k3, "d"); + print topk_epsilon(k3, "d"); + + topk_merge(k3, k1); + + s = topk_get_top(k3, 3); + print s; + print topk_sum(k3); + print topk_count(k3, "c"); + print topk_epsilon(k3, "c"); + print topk_count(k3, "e"); + print topk_epsilon(k3, "e"); + print topk_count(k3, "d"); + print topk_epsilon(k3, "d"); + + local styped: vector of count; + styped = topk_get_top(k3, 3); + for ( i in styped ) + print i, styped[i]; + + local anytyped: vector of any; + anytyped = topk_get_top(k3, 3); + for ( i in anytyped ) + print i, anytyped[i]; + + local suntyped = topk_get_top(k3, 3); + for ( i in suntyped ) + print i, suntyped[i]; +} diff --git a/testing/btest/bifs/type_name.bro b/testing/btest/bifs/type_name.bro deleted file mode 100644 index f331fe6aa9..0000000000 --- a/testing/btest/bifs/type_name.bro +++ /dev/null @@ -1,73 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -type color: enum { Red, Blue }; - -type myrecord: record { - c: count; - s: string; -}; - -event bro_init() - { - local a = "foo"; - local b = 3; - local c = -3; - local d = 3.14; - local e = T; - local f = current_time(); - local g = 5hr; - local h = /^foo|bar/; - local i = Blue; - local j = 123/tcp; - local k = 192.168.0.2; - local l = [fe80::1]; - local m = 192.168.0.0/16; - local n = [fe80:1234::]/32; - local o = vector( 1, 2, 3); - local p: vector of table[count] of string = vector( - table( [1] = "test", [2] = "bro" ), - table( [1] = "another", [2] = "test" ) ); - local q = set( 1, 2, 3); - local r: set[port, string] = set( [21/tcp, "ftp"], [23/tcp, "telnet"] ); - local s: table[count] of string = { [1] = "test", [2] = "bro" }; - local t: table[string] of table[addr, port] of string = { - ["a"] = table( [192.168.0.2, 21/tcp] = "ftp", - [192.168.0.3, 80/tcp] = "http" ), - ["b"] = table( [192.168.0.2, 22/tcp] = "ssh" ) }; - local u: myrecord = [ $c = 2, $s = "another test" ]; - local v = function(aa: int, bb: int): bool { return aa < bb; }; - local w = function(): any { }; - local x = function() { }; - local y = open("deleteme"); - - print type_name(a); - print type_name(b); - print type_name(c); - print type_name(d); - print type_name(e); - print type_name(f); - print type_name(g); - print type_name(h); - print type_name(i); - print type_name(j); - print type_name(k); - print type_name(l); - print type_name(m); - print type_name(n); - print type_name(o); - print type_name(p); - print type_name(q); - print type_name(r); - print type_name(s); - print type_name(t); - print type_name(u); - print type_name(v); - print type_name(w); - print type_name(x); - print type_name(y); # result is "file of string" which is a bit odd; - # we should remove the (apparently unused) type argument - # from files. 
- print type_name(bro_init); - } diff --git a/testing/btest/bifs/type_name.zeek b/testing/btest/bifs/type_name.zeek new file mode 100644 index 0000000000..e78f52af3c --- /dev/null +++ b/testing/btest/bifs/type_name.zeek @@ -0,0 +1,73 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +type color: enum { Red, Blue }; + +type myrecord: record { + c: count; + s: string; +}; + +event zeek_init() + { + local a = "foo"; + local b = 3; + local c = -3; + local d = 3.14; + local e = T; + local f = current_time(); + local g = 5hr; + local h = /^foo|bar/; + local i = Blue; + local j = 123/tcp; + local k = 192.168.0.2; + local l = [fe80::1]; + local m = 192.168.0.0/16; + local n = [fe80:1234::]/32; + local o = vector( 1, 2, 3); + local p: vector of table[count] of string = vector( + table( [1] = "test", [2] = "bro" ), + table( [1] = "another", [2] = "test" ) ); + local q = set( 1, 2, 3); + local r: set[port, string] = set( [21/tcp, "ftp"], [23/tcp, "telnet"] ); + local s: table[count] of string = { [1] = "test", [2] = "bro" }; + local t: table[string] of table[addr, port] of string = { + ["a"] = table( [192.168.0.2, 21/tcp] = "ftp", + [192.168.0.3, 80/tcp] = "http" ), + ["b"] = table( [192.168.0.2, 22/tcp] = "ssh" ) }; + local u: myrecord = [ $c = 2, $s = "another test" ]; + local v = function(aa: int, bb: int): bool { return aa < bb; }; + local w = function(): any { }; + local x = function() { }; + local y = open("deleteme"); + + print type_name(a); + print type_name(b); + print type_name(c); + print type_name(d); + print type_name(e); + print type_name(f); + print type_name(g); + print type_name(h); + print type_name(i); + print type_name(j); + print type_name(k); + print type_name(l); + print type_name(m); + print type_name(n); + print type_name(o); + print type_name(p); + print type_name(q); + print type_name(r); + print type_name(s); + print type_name(t); + print type_name(u); + print type_name(v); + print type_name(w); + print type_name(x); + print type_name(y); # result is "file of string" which is a bit odd; + # we should remove the (apparently unused) type argument + # from files. 
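# type_name() also accepts an event handler, so the final print below reports
# the declared type of zeek_init itself rather than the type of a value.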
+ print type_name(zeek_init); + } diff --git a/testing/btest/bifs/unique_id-pools.bro b/testing/btest/bifs/unique_id-pools.bro deleted file mode 100644 index abdc4b22ba..0000000000 --- a/testing/btest/bifs/unique_id-pools.bro +++ /dev/null @@ -1,27 +0,0 @@ -# -# @TEST-EXEC: bro order_rand | sort >out.1 -# @TEST-EXEC: bro order_base | sort >out.2 -# @TEST-EXEC: cmp out.1 out.2 - -@TEST-START-FILE order_rand.bro - -print unique_id("A-"); -print unique_id_from(5, "E-"); -print unique_id("B-"); -print unique_id_from(4, "D-"); -print unique_id("C-"); -print unique_id_from(5, "F-"); - -@TEST-END-FILE - -@TEST-START-FILE order_base.bro - -print unique_id("A-"); -print unique_id("B-"); -print unique_id("C-"); -print unique_id_from(4, "D-"); -print unique_id_from(5, "E-"); -print unique_id_from(5, "F-"); - -@TEST-END-FILE - diff --git a/testing/btest/bifs/unique_id-pools.zeek b/testing/btest/bifs/unique_id-pools.zeek new file mode 100644 index 0000000000..7e615d6625 --- /dev/null +++ b/testing/btest/bifs/unique_id-pools.zeek @@ -0,0 +1,27 @@ +# +# @TEST-EXEC: zeek order_rand | sort >out.1 +# @TEST-EXEC: zeek order_base | sort >out.2 +# @TEST-EXEC: cmp out.1 out.2 + +@TEST-START-FILE order_rand.zeek + +print unique_id("A-"); +print unique_id_from(5, "E-"); +print unique_id("B-"); +print unique_id_from(4, "D-"); +print unique_id("C-"); +print unique_id_from(5, "F-"); + +@TEST-END-FILE + +@TEST-START-FILE order_base.zeek + +print unique_id("A-"); +print unique_id("B-"); +print unique_id("C-"); +print unique_id_from(4, "D-"); +print unique_id_from(5, "E-"); +print unique_id_from(5, "F-"); + +@TEST-END-FILE + diff --git a/testing/btest/bifs/unique_id-rnd.bro b/testing/btest/bifs/unique_id-rnd.bro deleted file mode 100644 index 02be9fcb92..0000000000 --- a/testing/btest/bifs/unique_id-rnd.bro +++ /dev/null @@ -1,12 +0,0 @@ -# -# @TEST-EXEC: BRO_SEED_FILE= bro -b %INPUT >out -# @TEST-EXEC: BRO_SEED_FILE= bro -b %INPUT >>out -# @TEST-EXEC: cat out | sort | uniq | wc -l | sed 's/ //g' >count -# @TEST-EXEC: btest-diff count - -print unique_id("A-"); -print unique_id("B-"); -print unique_id("C-"); -print unique_id_from(4, "D-"); -print unique_id_from(5, "E-"); -print unique_id_from(5, "F-"); diff --git a/testing/btest/bifs/unique_id-rnd.zeek b/testing/btest/bifs/unique_id-rnd.zeek new file mode 100644 index 0000000000..d6db89a2ce --- /dev/null +++ b/testing/btest/bifs/unique_id-rnd.zeek @@ -0,0 +1,12 @@ +# +# @TEST-EXEC: ZEEK_SEED_FILE= zeek -b %INPUT >out +# @TEST-EXEC: ZEEK_SEED_FILE= zeek -b %INPUT >>out +# @TEST-EXEC: cat out | sort | uniq | wc -l | sed 's/ //g' >count +# @TEST-EXEC: btest-diff count + +print unique_id("A-"); +print unique_id("B-"); +print unique_id("C-"); +print unique_id_from(4, "D-"); +print unique_id_from(5, "E-"); +print unique_id_from(5, "F-"); diff --git a/testing/btest/bifs/unique_id.bro b/testing/btest/bifs/unique_id.bro deleted file mode 100644 index d87c757f3f..0000000000 --- a/testing/btest/bifs/unique_id.bro +++ /dev/null @@ -1,10 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -print unique_id("A-"); -print unique_id("B-"); -print unique_id("C-"); -print unique_id_from(4, "D-"); -print unique_id_from(5, "E-"); -print unique_id_from(5, "F-"); diff --git a/testing/btest/bifs/unique_id.zeek b/testing/btest/bifs/unique_id.zeek new file mode 100644 index 0000000000..db640a6081 --- /dev/null +++ b/testing/btest/bifs/unique_id.zeek @@ -0,0 +1,10 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +print unique_id("A-"); +print 
unique_id("B-"); +print unique_id("C-"); +print unique_id_from(4, "D-"); +print unique_id_from(5, "E-"); +print unique_id_from(5, "F-"); diff --git a/testing/btest/bifs/uuid_to_string.bro b/testing/btest/bifs/uuid_to_string.bro deleted file mode 100644 index dc84f349fa..0000000000 --- a/testing/btest/bifs/uuid_to_string.bro +++ /dev/null @@ -1,10 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -event bro_init() - { - local a = "\xfe\x80abcdefg0123456"; - print uuid_to_string(a); - print uuid_to_string(""); - } diff --git a/testing/btest/bifs/uuid_to_string.zeek b/testing/btest/bifs/uuid_to_string.zeek new file mode 100644 index 0000000000..21c29eb3e6 --- /dev/null +++ b/testing/btest/bifs/uuid_to_string.zeek @@ -0,0 +1,10 @@ +# +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init() + { + local a = "\xfe\x80abcdefg0123456"; + print uuid_to_string(a); + print uuid_to_string(""); + } diff --git a/testing/btest/bifs/val_size.bro b/testing/btest/bifs/val_size.bro deleted file mode 100644 index 57b512b776..0000000000 --- a/testing/btest/bifs/val_size.bro +++ /dev/null @@ -1,16 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT - -event bro_init() - { - local a = T; - local b = 12; - local c: table[string] of addr = { ["a"] = 192.168.0.2, ["b"] = 10.0.0.2 }; - - if ( val_size(a) > val_size(b) ) - exit(1); - - if ( val_size(b) > val_size(c) ) - exit(1); - - } diff --git a/testing/btest/bifs/val_size.zeek b/testing/btest/bifs/val_size.zeek new file mode 100644 index 0000000000..b375c94551 --- /dev/null +++ b/testing/btest/bifs/val_size.zeek @@ -0,0 +1,16 @@ +# +# @TEST-EXEC: zeek -b %INPUT + +event zeek_init() + { + local a = T; + local b = 12; + local c: table[string] of addr = { ["a"] = 192.168.0.2, ["b"] = 10.0.0.2 }; + + if ( val_size(a) > val_size(b) ) + exit(1); + + if ( val_size(b) > val_size(c) ) + exit(1); + + } diff --git a/testing/btest/bifs/x509_verify.bro b/testing/btest/bifs/x509_verify.bro deleted file mode 100644 index 2afc735172..0000000000 --- a/testing/btest/bifs/x509_verify.bro +++ /dev/null @@ -1,38 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/tls/tls-expired-cert.trace %INPUT - -# This is a hack: the results of OpenSSL 1.1's vs 1.0's -# X509_verify_cert() -> X509_STORE_CTX_get1_chain() calls -# differ. Word seems to be that OpenSSL 1.1's cert-chain-building -# code is significantly different/rewritten so may be the reason... 
- -# @TEST-EXEC: cp .stdout stdout-openssl-1.0 -# @TEST-EXEC: cp .stdout stdout-openssl-1.1 - -# @TEST-EXEC: grep -q "BRO_HAVE_OPENSSL_1_1" $BUILD/CMakeCache.txt && btest-diff stdout-openssl-1.1 || btest-diff stdout-openssl-1.0 - -redef SSL::root_certs += { - ["OU=Class 3 Public Primary Certification Authority,O=VeriSign\, Inc.,C=US"] = "\x30\x82\x02\x3C\x30\x82\x01\xA5\x02\x10\x70\xBA\xE4\x1D\x10\xD9\x29\x34\xB6\x38\xCA\x7B\x03\xCC\xBA\xBF\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x02\x05\x00\x30\x5F\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x17\x30\x15\x06\x03\x55\x04\x0A\x13\x0E\x56\x65\x72\x69\x53\x69\x67\x6E\x2C\x20\x49\x6E\x63\x2E\x31\x37\x30\x35\x06\x03\x55\x04\x0B\x13\x2E\x43\x6C\x61\x73\x73\x20\x33\x20\x50\x75\x62\x6C\x69\x63\x20\x50\x72\x69\x6D\x61\x72\x79\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x30\x1E\x17\x0D\x39\x36\x30\x31\x32\x39\x30\x30\x30\x30\x30\x30\x5A\x17\x0D\x32\x38\x30\x38\x30\x31\x32\x33\x35\x39\x35\x39\x5A\x30\x5F\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x17\x30\x15\x06\x03\x55\x04\x0A\x13\x0E\x56\x65\x72\x69\x53\x69\x67\x6E\x2C\x20\x49\x6E\x63\x2E\x31\x37\x30\x35\x06\x03\x55\x04\x0B\x13\x2E\x43\x6C\x61\x73\x73\x20\x33\x20\x50\x75\x62\x6C\x69\x63\x20\x50\x72\x69\x6D\x61\x72\x79\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x30\x81\x9F\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x81\x8D\x00\x30\x81\x89\x02\x81\x81\x00\xC9\x5C\x59\x9E\xF2\x1B\x8A\x01\x14\xB4\x10\xDF\x04\x40\xDB\xE3\x57\xAF\x6A\x45\x40\x8F\x84\x0C\x0B\xD1\x33\xD9\xD9\x11\xCF\xEE\x02\x58\x1F\x25\xF7\x2A\xA8\x44\x05\xAA\xEC\x03\x1F\x78\x7F\x9E\x93\xB9\x9A\x00\xAA\x23\x7D\xD6\xAC\x85\xA2\x63\x45\xC7\x72\x27\xCC\xF4\x4C\xC6\x75\x71\xD2\x39\xEF\x4F\x42\xF0\x75\xDF\x0A\x90\xC6\x8E\x20\x6F\x98\x0F\xF8\xAC\x23\x5F\x70\x29\x36\xA4\xC9\x86\xE7\xB1\x9A\x20\xCB\x53\xA5\x85\xE7\x3D\xBE\x7D\x9A\xFE\x24\x45\x33\xDC\x76\x15\xED\x0F\xA2\x71\x64\x4C\x65\x2E\x81\x68\x45\xA7\x02\x03\x01\x00\x01\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x02\x05\x00\x03\x81\x81\x00\xBB\x4C\x12\x2B\xCF\x2C\x26\x00\x4F\x14\x13\xDD\xA6\xFB\xFC\x0A\x11\x84\x8C\xF3\x28\x1C\x67\x92\x2F\x7C\xB6\xC5\xFA\xDF\xF0\xE8\x95\xBC\x1D\x8F\x6C\x2C\xA8\x51\xCC\x73\xD8\xA4\xC0\x53\xF0\x4E\xD6\x26\xC0\x76\x01\x57\x81\x92\x5E\x21\xF1\xD1\xB1\xFF\xE7\xD0\x21\x58\xCD\x69\x17\xE3\x44\x1C\x9C\x19\x44\x39\x89\x5C\xDC\x9C\x00\x0F\x56\x8D\x02\x99\xED\xA2\x90\x45\x4C\xE4\xBB\x10\xA4\x3D\xF0\x32\x03\x0E\xF1\xCE\xF8\xE8\xC9\x51\x8C\xE6\x62\x9F\xE6\x9F\xC0\x7D\xB7\x72\x9C\xC9\x36\x3A\x6B\x9F\x4E\xA8\xFF\x64\x0D\x64" -}; - -event ssl_established(c: connection) &priority=3 - { - local chain: vector of opaque of x509 = vector(); - for ( i in c$ssl$cert_chain ) - { - chain[i] = c$ssl$cert_chain[i]$x509$handle; - } - - local result = x509_verify(chain, SSL::root_certs); - print fmt("Validation result: %s", result$result_string); - if ( result$result != 0 ) # not ok - return; - - print "Resulting chain:"; - for ( i in result$chain_certs ) - { - local cert = result$chain_certs[i]; - local certinfo = x509_parse(cert); - local sha1 = sha1_hash(x509_get_certificate_string(cert)); - print fmt("Fingerprint: %s, Subject: %s", sha1, certinfo$subject); - } - } diff --git a/testing/btest/bifs/x509_verify.zeek b/testing/btest/bifs/x509_verify.zeek new file mode 100644 index 0000000000..35d61a03e6 --- /dev/null +++ b/testing/btest/bifs/x509_verify.zeek @@ -0,0 +1,38 @@ +# @TEST-EXEC: zeek -r 
$TRACES/tls/tls-expired-cert.trace %INPUT + +# This is a hack: the results of OpenSSL 1.1's vs 1.0's +# X509_verify_cert() -> X509_STORE_CTX_get1_chain() calls +# differ. Word seems to be that OpenSSL 1.1's cert-chain-building +# code is significantly different/rewritten so may be the reason... + +# @TEST-EXEC: cp .stdout stdout-openssl-1.0 +# @TEST-EXEC: cp .stdout stdout-openssl-1.1 + +# @TEST-EXEC: grep -q "ZEEK_HAVE_OPENSSL_1_1" $BUILD/CMakeCache.txt && btest-diff stdout-openssl-1.1 || btest-diff stdout-openssl-1.0 + +redef SSL::root_certs += { + ["OU=Class 3 Public Primary Certification Authority,O=VeriSign\, Inc.,C=US"] = "\x30\x82\x02\x3C\x30\x82\x01\xA5\x02\x10\x70\xBA\xE4\x1D\x10\xD9\x29\x34\xB6\x38\xCA\x7B\x03\xCC\xBA\xBF\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x02\x05\x00\x30\x5F\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x17\x30\x15\x06\x03\x55\x04\x0A\x13\x0E\x56\x65\x72\x69\x53\x69\x67\x6E\x2C\x20\x49\x6E\x63\x2E\x31\x37\x30\x35\x06\x03\x55\x04\x0B\x13\x2E\x43\x6C\x61\x73\x73\x20\x33\x20\x50\x75\x62\x6C\x69\x63\x20\x50\x72\x69\x6D\x61\x72\x79\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x30\x1E\x17\x0D\x39\x36\x30\x31\x32\x39\x30\x30\x30\x30\x30\x30\x5A\x17\x0D\x32\x38\x30\x38\x30\x31\x32\x33\x35\x39\x35\x39\x5A\x30\x5F\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x17\x30\x15\x06\x03\x55\x04\x0A\x13\x0E\x56\x65\x72\x69\x53\x69\x67\x6E\x2C\x20\x49\x6E\x63\x2E\x31\x37\x30\x35\x06\x03\x55\x04\x0B\x13\x2E\x43\x6C\x61\x73\x73\x20\x33\x20\x50\x75\x62\x6C\x69\x63\x20\x50\x72\x69\x6D\x61\x72\x79\x20\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x69\x6F\x6E\x20\x41\x75\x74\x68\x6F\x72\x69\x74\x79\x30\x81\x9F\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x81\x8D\x00\x30\x81\x89\x02\x81\x81\x00\xC9\x5C\x59\x9E\xF2\x1B\x8A\x01\x14\xB4\x10\xDF\x04\x40\xDB\xE3\x57\xAF\x6A\x45\x40\x8F\x84\x0C\x0B\xD1\x33\xD9\xD9\x11\xCF\xEE\x02\x58\x1F\x25\xF7\x2A\xA8\x44\x05\xAA\xEC\x03\x1F\x78\x7F\x9E\x93\xB9\x9A\x00\xAA\x23\x7D\xD6\xAC\x85\xA2\x63\x45\xC7\x72\x27\xCC\xF4\x4C\xC6\x75\x71\xD2\x39\xEF\x4F\x42\xF0\x75\xDF\x0A\x90\xC6\x8E\x20\x6F\x98\x0F\xF8\xAC\x23\x5F\x70\x29\x36\xA4\xC9\x86\xE7\xB1\x9A\x20\xCB\x53\xA5\x85\xE7\x3D\xBE\x7D\x9A\xFE\x24\x45\x33\xDC\x76\x15\xED\x0F\xA2\x71\x64\x4C\x65\x2E\x81\x68\x45\xA7\x02\x03\x01\x00\x01\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x02\x05\x00\x03\x81\x81\x00\xBB\x4C\x12\x2B\xCF\x2C\x26\x00\x4F\x14\x13\xDD\xA6\xFB\xFC\x0A\x11\x84\x8C\xF3\x28\x1C\x67\x92\x2F\x7C\xB6\xC5\xFA\xDF\xF0\xE8\x95\xBC\x1D\x8F\x6C\x2C\xA8\x51\xCC\x73\xD8\xA4\xC0\x53\xF0\x4E\xD6\x26\xC0\x76\x01\x57\x81\x92\x5E\x21\xF1\xD1\xB1\xFF\xE7\xD0\x21\x58\xCD\x69\x17\xE3\x44\x1C\x9C\x19\x44\x39\x89\x5C\xDC\x9C\x00\x0F\x56\x8D\x02\x99\xED\xA2\x90\x45\x4C\xE4\xBB\x10\xA4\x3D\xF0\x32\x03\x0E\xF1\xCE\xF8\xE8\xC9\x51\x8C\xE6\x62\x9F\xE6\x9F\xC0\x7D\xB7\x72\x9C\xC9\x36\x3A\x6B\x9F\x4E\xA8\xFF\x64\x0D\x64" +}; + +event ssl_established(c: connection) &priority=3 + { + local chain: vector of opaque of x509 = vector(); + for ( i in c$ssl$cert_chain ) + { + chain[i] = c$ssl$cert_chain[i]$x509$handle; + } + + local result = x509_verify(chain, SSL::root_certs); + print fmt("Validation result: %s", result$result_string); + if ( result$result != 0 ) # not ok + return; + + print "Resulting chain:"; + for ( i in result$chain_certs ) + { + local cert = result$chain_certs[i]; + local certinfo = x509_parse(cert); + local sha1 = sha1_hash(x509_get_certificate_string(cert)); + print fmt("Fingerprint: %s, Subject: %s", 
sha1, certinfo$subject); + } + } diff --git a/testing/btest/bifs/zeek_version.zeek b/testing/btest/bifs/zeek_version.zeek new file mode 100644 index 0000000000..fd96d31676 --- /dev/null +++ b/testing/btest/bifs/zeek_version.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -b %INPUT + +event zeek_init() + { + local a = zeek_version(); + if ( |a| == 0 ) + exit(1); + } diff --git a/testing/btest/broker/connect-on-retry.bro b/testing/btest/broker/connect-on-retry.bro deleted file mode 100644 index 56e479b7ea..0000000000 --- a/testing/btest/broker/connect-on-retry.bro +++ /dev/null @@ -1,100 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# -# @TEST-EXEC: btest-bg-run recv "bro -B broker -b ../recv.bro >recv.out" -# @TEST-EXEC: btest-bg-run send "bro -B broker -b ../send.bro >send.out" -# -# @TEST-EXEC: btest-bg-wait 45 -# @TEST-EXEC: btest-diff recv/recv.out -# @TEST-EXEC: btest-diff send/send.out - -@TEST-START-FILE send.bro - -# Using btest's environment settings for connect/listen retry of 1sec. -redef exit_only_after_terminate = T; - -global event_count = 0; - -global ping: event(msg: string, c: count); - -event bro_init() - { - Broker::subscribe("bro/event/my_topic"); - Broker::auto_publish("bro/event/my_topic", ping); - Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -function send_event() - { - event ping("my-message", ++event_count); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("sender added peer: endpoint=%s msg=%s", endpoint$network$address, msg); - send_event(); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("sender lost peer: endpoint=%s msg=%s", endpoint$network$address, msg); - terminate(); - } - -event pong(msg: string, n: count) - { - print fmt("sender got pong: %s, %s", msg, n); - send_event(); - } - -@TEST-END-FILE - - -@TEST-START-FILE recv.bro - -redef exit_only_after_terminate = T; - -const events_to_recv = 5; - -global handler: event(msg: string, c: count); -global auto_handler: event(msg: string, c: count); - -global pong: event(msg: string, c: count); - -event delayed_listen() - { - Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event bro_init() - { - Broker::subscribe("bro/event/my_topic"); - Broker::auto_publish("bro/event/my_topic", pong); - schedule 5secs { delayed_listen() }; - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("receiver added peer: endpoint=%s msg=%s", - endpoint$network$address, msg); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("receiver lost peer: endpoint=%s msg=%s", - endpoint$network$address, msg); - } - -event ping(msg: string, n: count) - { - print fmt("receiver got ping: %s, %s", msg, n); - - if ( n == events_to_recv ) - { - terminate(); - return; - } - - event pong(msg, n); - } - -@TEST-END-FILE diff --git a/testing/btest/broker/connect-on-retry.zeek b/testing/btest/broker/connect-on-retry.zeek new file mode 100644 index 0000000000..c8fc7b26e5 --- /dev/null +++ b/testing/btest/broker/connect-on-retry.zeek @@ -0,0 +1,100 @@ +# @TEST-PORT: BROKER_PORT +# +# @TEST-EXEC: btest-bg-run recv "zeek -B broker -b ../recv.zeek >recv.out" +# @TEST-EXEC: btest-bg-run send "zeek -B broker -b ../send.zeek >send.out" +# +# @TEST-EXEC: btest-bg-wait 45 +# @TEST-EXEC: btest-diff recv/recv.out +# @TEST-EXEC: btest-diff send/send.out + +@TEST-START-FILE send.zeek + +# Using btest's environment settings for connect/listen retry of 1sec. 
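# A hedged aside (assuming Broker::peer accepts an optional retry-interval
# argument): rather than relying on btest's environment, the 1sec retry could
# also be requested explicitly at the call site, e.g.
#   Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT")), 1sec);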
+redef exit_only_after_terminate = T; + +global event_count = 0; + +global ping: event(msg: string, c: count); + +event zeek_init() + { + Broker::subscribe("zeek/event/my_topic"); + Broker::auto_publish("zeek/event/my_topic", ping); + Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +function send_event() + { + event ping("my-message", ++event_count); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("sender added peer: endpoint=%s msg=%s", endpoint$network$address, msg); + send_event(); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("sender lost peer: endpoint=%s msg=%s", endpoint$network$address, msg); + terminate(); + } + +event pong(msg: string, n: count) + { + print fmt("sender got pong: %s, %s", msg, n); + send_event(); + } + +@TEST-END-FILE + + +@TEST-START-FILE recv.zeek + +redef exit_only_after_terminate = T; + +const events_to_recv = 5; + +global handler: event(msg: string, c: count); +global auto_handler: event(msg: string, c: count); + +global pong: event(msg: string, c: count); + +event delayed_listen() + { + Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event zeek_init() + { + Broker::subscribe("zeek/event/my_topic"); + Broker::auto_publish("zeek/event/my_topic", pong); + schedule 5secs { delayed_listen() }; + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("receiver added peer: endpoint=%s msg=%s", + endpoint$network$address, msg); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("receiver lost peer: endpoint=%s msg=%s", + endpoint$network$address, msg); + } + +event ping(msg: string, n: count) + { + print fmt("receiver got ping: %s, %s", msg, n); + + if ( n == events_to_recv ) + { + terminate(); + return; + } + + event pong(msg, n); + } + +@TEST-END-FILE diff --git a/testing/btest/broker/disconnect.bro b/testing/btest/broker/disconnect.bro deleted file mode 100644 index 08d80f0441..0000000000 --- a/testing/btest/broker/disconnect.bro +++ /dev/null @@ -1,79 +0,0 @@ -# @TEST-PORT: BROKER_PORT - -# @TEST-EXEC: btest-bg-run recv "bro -B broker -b ../recv.bro >recv.out" -# @TEST-EXEC: btest-bg-run send "bro -B broker -b ../send.bro >send.out" - -# @TEST-EXEC: $SCRIPTS/wait-for-pid $(cat recv/.pid) 45 || (btest-bg-wait -k 1 && false) - -# @TEST-EXEC: btest-bg-run recv2 "bro -B broker -b ../recv.bro >recv2.out" -# @TEST-EXEC: btest-bg-wait 45 - -# @TEST-EXEC: btest-diff send/send.out -# @TEST-EXEC: btest-diff recv/recv.out -# @TEST-EXEC: btest-diff recv2/recv2.out - -@TEST-START-FILE send.bro - -redef exit_only_after_terminate = T; - -global peers = 0; -const test_topic = "bro/test/my_topic"; - -event my_event(i: count) - { - print "sender got event", i; - } - -event bro_init() - { - Broker::subscribe(test_topic); - Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - print "peer lost", msg; - - if ( peers == 2 ) - terminate(); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - ++peers; - print "peer added", msg; - Broker::publish(test_topic, my_event, peers); - } - -@TEST-END-FILE - - -@TEST-START-FILE recv.bro - -redef exit_only_after_terminate = T; - -const test_topic = "bro/test/my_topic"; - -event my_event(i: count) - { - print "receiver got event", i; - terminate(); - } - -event bro_init() - { - Broker::subscribe(test_topic); - 
Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print "peer added", msg; - } - -@TEST-END-FILE diff --git a/testing/btest/broker/disconnect.zeek b/testing/btest/broker/disconnect.zeek new file mode 100644 index 0000000000..500a737ee2 --- /dev/null +++ b/testing/btest/broker/disconnect.zeek @@ -0,0 +1,79 @@ +# @TEST-PORT: BROKER_PORT + +# @TEST-EXEC: btest-bg-run recv "zeek -B broker -b ../recv.zeek >recv.out" +# @TEST-EXEC: btest-bg-run send "zeek -B broker -b ../send.zeek >send.out" + +# @TEST-EXEC: $SCRIPTS/wait-for-pid $(cat recv/.pid) 45 || (btest-bg-wait -k 1 && false) + +# @TEST-EXEC: btest-bg-run recv2 "zeek -B broker -b ../recv.zeek >recv2.out" +# @TEST-EXEC: btest-bg-wait 45 + +# @TEST-EXEC: btest-diff send/send.out +# @TEST-EXEC: btest-diff recv/recv.out +# @TEST-EXEC: btest-diff recv2/recv2.out + +@TEST-START-FILE send.zeek + +redef exit_only_after_terminate = T; + +global peers = 0; +const test_topic = "zeek/test/my_topic"; + +event my_event(i: count) + { + print "sender got event", i; + } + +event zeek_init() + { + Broker::subscribe(test_topic); + Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + print "peer lost", msg; + + if ( peers == 2 ) + terminate(); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + ++peers; + print "peer added", msg; + Broker::publish(test_topic, my_event, peers); + } + +@TEST-END-FILE + + +@TEST-START-FILE recv.zeek + +redef exit_only_after_terminate = T; + +const test_topic = "zeek/test/my_topic"; + +event my_event(i: count) + { + print "receiver got event", i; + terminate(); + } + +event zeek_init() + { + Broker::subscribe(test_topic); + Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print "peer added", msg; + } + +@TEST-END-FILE diff --git a/testing/btest/broker/error.bro b/testing/btest/broker/error.bro deleted file mode 100644 index aa413ea2ac..0000000000 --- a/testing/btest/broker/error.bro +++ /dev/null @@ -1,40 +0,0 @@ -# @TEST-EXEC: bro -B main-loop,broker -b send.bro >send.out -# @TEST-EXEC: btest-diff send.out -# - -@TEST-START-FILE send.bro - -redef exit_only_after_terminate = T; - -event do_terminate() - { - terminate(); - } - -event do_something() - { - # Will fail and generate an error. 
- Broker::unpeer("1.2.3.4", 1947/tcp); - } - -event Broker::status(endpoint: Broker::EndpointInfo, msg: string) - { - print "status", endpoint, endpoint$network, msg; - } - -event Broker::error(code: Broker::ErrorCode, msg: string) - { - print "error", code, msg; - } - -event bro_init() - { - Broker::subscribe("bro/event/my_topic"); - - schedule 2secs { do_something() }; - schedule 4secs { do_terminate() }; - } - - -@TEST-END-FILE - diff --git a/testing/btest/broker/error.zeek b/testing/btest/broker/error.zeek new file mode 100644 index 0000000000..88c72f3f4d --- /dev/null +++ b/testing/btest/broker/error.zeek @@ -0,0 +1,40 @@ +# @TEST-EXEC: zeek -B main-loop,broker -b send.zeek >send.out +# @TEST-EXEC: btest-diff send.out +# + +@TEST-START-FILE send.zeek + +redef exit_only_after_terminate = T; + +event do_terminate() + { + terminate(); + } + +event do_something() + { + # Will fail and generate an error. + Broker::unpeer("1.2.3.4", 1947/tcp); + } + +event Broker::status(endpoint: Broker::EndpointInfo, msg: string) + { + print "status", endpoint, endpoint$network, msg; + } + +event Broker::error(code: Broker::ErrorCode, msg: string) + { + print "error", code, msg; + } + +event zeek_init() + { + Broker::subscribe("zeek/event/my_topic"); + + schedule 2secs { do_something() }; + schedule 4secs { do_terminate() }; + } + + +@TEST-END-FILE + diff --git a/testing/btest/broker/opaque.zeek b/testing/btest/broker/opaque.zeek new file mode 100644 index 0000000000..e0a3bef6c7 --- /dev/null +++ b/testing/btest/broker/opaque.zeek @@ -0,0 +1,162 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff .stderr + +event zeek_init() + { + print "============ Topk"; + local k1: opaque of topk = topk_init(4); + topk_add(k1, "a"); + topk_add(k1, "b"); + topk_add(k1, "b"); + topk_add(k1, "c"); + local k2 = Broker::__opaque_clone_through_serialization(k1); + print topk_get_top(k1, 5); + topk_add(k1, "shoulnotshowup"); + print topk_get_top(k2, 5); + + print "============ HLL"; + local c1 = hll_cardinality_init(0.01, 0.95); + hll_cardinality_add(c1, 2001); + hll_cardinality_add(c1, 2002); + hll_cardinality_add(c1, 2003); + + print hll_cardinality_estimate(c1); + local c2 = Broker::__opaque_clone_through_serialization(c1); + hll_cardinality_add(c1, 2004); + print hll_cardinality_estimate(c2); + + local c3 = hll_cardinality_init(0.01, 0.95); + hll_cardinality_merge_into(c3, c2); + print hll_cardinality_estimate(c3); + + print "============ Bloom"; + local bf_cnt = bloomfilter_basic_init(0.1, 1000); + bloomfilter_add(bf_cnt, 42); + bloomfilter_add(bf_cnt, 84); + bloomfilter_add(bf_cnt, 168); + print bloomfilter_lookup(bf_cnt, 0); + print bloomfilter_lookup(bf_cnt, 42); + local bf_copy = Broker::__opaque_clone_through_serialization(bf_cnt); + bloomfilter_add(bf_cnt, 0); + print bloomfilter_lookup(bf_copy, 0); + print bloomfilter_lookup(bf_copy, 42); + # check that typefication transfered. 
+ bloomfilter_add(bf_copy, 0.5); # causes stderr output "error: incompatible Bloom filter types" + + print "============ Hashes"; + local md5a = md5_hash_init(); + md5_hash_update(md5a, "one"); + local md5b = Broker::__opaque_clone_through_serialization(md5a); + md5_hash_update(md5a, "two"); + md5_hash_update(md5b, "two"); + print md5_hash_finish(md5a); + print md5_hash_finish(md5b); + + local sha1a = sha1_hash_init(); + sha1_hash_update(sha1a, "one"); + local sha1b = Broker::__opaque_clone_through_serialization(sha1a); + sha1_hash_update(sha1a, "two"); + sha1_hash_update(sha1b, "two"); + print sha1_hash_finish(sha1a); + print sha1_hash_finish(sha1b); + + local sha256a = sha256_hash_init(); + sha256_hash_update(sha256a, "one"); + local sha256b = Broker::__opaque_clone_through_serialization(sha256a); + sha256_hash_update(sha256a, "two"); + sha256_hash_update(sha256b, "two"); + print sha256_hash_finish(sha256a); + print sha256_hash_finish(sha256b); + + print "============ X509"; + local x509 = x509_from_der("\x30\x82\x03\x75\x30\x82\x02\x5D\xA0\x03\x02\x01\x02\x02\x0B\x04\x00\x00\x00\x00\x01\x15\x4B\x5A\xC3\x94\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x57\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x42\x45\x31\x19\x30\x17\x06\x03\x55\x04\x0A\x13\x10\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x20\x6E\x76\x2D\x73\x61\x31\x10\x30\x0E\x06\x03\x55\x04\x0B\x13\x07\x52\x6F\x6F\x74\x20\x43\x41\x31\x1B\x30\x19\x06\x03\x55\x04\x03\x13\x12\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x1E\x17\x0D\x39\x38\x30\x39\x30\x31\x31\x32\x30\x30\x30\x30\x5A\x17\x0D\x32\x38\x30\x31\x32\x38\x31\x32\x30\x30\x30\x30\x5A\x30\x57\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x42\x45\x31\x19\x30\x17\x06\x03\x55\x04\x0A\x13\x10\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x20\x6E\x76\x2D\x73\x61\x31\x10\x30\x0E\x06\x03\x55\x04\x0B\x13\x07\x52\x6F\x6F\x74\x20\x43\x41\x31\x1B\x30\x19\x06\x03\x55\x04\x03\x13\x12\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xDA\x0E\xE6\x99\x8D\xCE\xA3\xE3\x4F\x8A\x7E\xFB\xF1\x8B\x83\x25\x6B\xEA\x48\x1F\xF1\x2A\xB0\xB9\x95\x11\x04\xBD\xF0\x63\xD1\xE2\x67\x66\xCF\x1C\xDD\xCF\x1B\x48\x2B\xEE\x8D\x89\x8E\x9A\xAF\x29\x80\x65\xAB\xE9\xC7\x2D\x12\xCB\xAB\x1C\x4C\x70\x07\xA1\x3D\x0A\x30\xCD\x15\x8D\x4F\xF8\xDD\xD4\x8C\x50\x15\x1C\xEF\x50\xEE\xC4\x2E\xF7\xFC\xE9\x52\xF2\x91\x7D\xE0\x6D\xD5\x35\x30\x8E\x5E\x43\x73\xF2\x41\xE9\xD5\x6A\xE3\xB2\x89\x3A\x56\x39\x38\x6F\x06\x3C\x88\x69\x5B\x2A\x4D\xC5\xA7\x54\xB8\x6C\x89\xCC\x9B\xF9\x3C\xCA\xE5\xFD\x89\xF5\x12\x3C\x92\x78\x96\xD6\xDC\x74\x6E\x93\x44\x61\xD1\x8D\xC7\x46\xB2\x75\x0E\x86\xE8\x19\x8A\xD5\x6D\x6C\xD5\x78\x16\x95\xA2\xE9\xC8\x0A\x38\xEB\xF2\x24\x13\x4F\x73\x54\x93\x13\x85\x3A\x1B\xBC\x1E\x34\xB5\x8B\x05\x8C\xB9\x77\x8B\xB1\xDB\x1F\x20\x91\xAB\x09\x53\x6E\x90\xCE\x7B\x37\x74\xB9\x70\x47\x91\x22\x51\x63\x16\x79\xAE\xB1\xAE\x41\x26\x08\xC8\x19\x2B\xD1\x46\xAA\x48\xD6\x64\x2A\xD7\x83\x34\xFF\x2C\x2A\xC1\x6C\x19\x43\x4A\x07\x85\xE7\xD3\x7C\xF6\x21\x68\xEF\xEA\xF2\x52\x9F\x7F\x93\x90\xCF\x02\x03\x01\x00\x01\xA3\x42\x30\x40\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x60\x7B\x66\x1A\x45\x0D\x97\xCA\x89\x50\x2F\x7D\x04\xCD\x34\xA8\xFF\xFC\xFD\x4B\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\
xD6\x73\xE7\x7C\x4F\x76\xD0\x8D\xBF\xEC\xBA\xA2\xBE\x34\xC5\x28\x32\xB5\x7C\xFC\x6C\x9C\x2C\x2B\xBD\x09\x9E\x53\xBF\x6B\x5E\xAA\x11\x48\xB6\xE5\x08\xA3\xB3\xCA\x3D\x61\x4D\xD3\x46\x09\xB3\x3E\xC3\xA0\xE3\x63\x55\x1B\xF2\xBA\xEF\xAD\x39\xE1\x43\xB9\x38\xA3\xE6\x2F\x8A\x26\x3B\xEF\xA0\x50\x56\xF9\xC6\x0A\xFD\x38\xCD\xC4\x0B\x70\x51\x94\x97\x98\x04\xDF\xC3\x5F\x94\xD5\x15\xC9\x14\x41\x9C\xC4\x5D\x75\x64\x15\x0D\xFF\x55\x30\xEC\x86\x8F\xFF\x0D\xEF\x2C\xB9\x63\x46\xF6\xAA\xFC\xDF\xBC\x69\xFD\x2E\x12\x48\x64\x9A\xE0\x95\xF0\xA6\xEF\x29\x8F\x01\xB1\x15\xB5\x0C\x1D\xA5\xFE\x69\x2C\x69\x24\x78\x1E\xB3\xA7\x1C\x71\x62\xEE\xCA\xC8\x97\xAC\x17\x5D\x8A\xC2\xF8\x47\x86\x6E\x2A\xC4\x56\x31\x95\xD0\x67\x89\x85\x2B\xF9\x6C\xA6\x5D\x46\x9D\x0C\xAA\x82\xE4\x99\x51\xDD\x70\xB7\xDB\x56\x3D\x61\xE4\x6A\xE1\x5C\xD6\xF6\xFE\x3D\xDE\x41\xCC\x07\xAE\x63\x52\xBF\x53\x53\xF4\x2B\xE9\xC7\xFD\xB6\xF7\x82\x5F\x85\xD2\x41\x18\xDB\x81\xB3\x04\x1C\xC5\x1F\xA4\x80\x6F\x15\x20\xC9\xDE\x0C\x88\x0A\x1D\xD6\x66\x55\xE2\xFC\x48\xC9\x29\x26\x69\xE0"); + local x5092 = Broker::__opaque_clone_through_serialization(x509); + print x509_parse(x509); + print x509_parse(x5092); + + print "============ Entropy"; + local handle = entropy_test_init(); + entropy_test_add(handle, "dh3Hie02uh^s#Sdf9L3frd243h$d78r2G4cM6*Q05d(7rh46f!0|4-f"); + local handle2 = Broker::__opaque_clone_through_serialization(handle); + print entropy_test_finish(handle); + print entropy_test_finish(handle2); + + print "============ broker::Data"; + local s1: Broker::Data = Broker::set_create(); + Broker::set_insert(s1, "hi"); + Broker::set_insert(s1, "there"); + local d2 = Broker::__opaque_clone_through_serialization(s1$data); + print s1$data; + print d2; + print same_object(s1$data, d2) == F; + + print "============ broker::Set"; + local cs = Broker::set_create(); + Broker::set_insert(cs, "hi"); + Broker::set_insert(cs, "there"); + Broker::set_insert(cs, "!"); + + local i = Broker::set_iterator(cs); + while ( ! Broker::set_iterator_last(i) ) + { + local ci = Broker::__opaque_clone_through_serialization(i); + print fmt("| %s | %s", Broker::set_iterator_value(i), Broker::set_iterator_value(ci)); + Broker::set_iterator_next(i); + Broker::set_iterator_next(ci); + if ( ! Broker::set_iterator_last(i) ) + print fmt(" > %s | %s", Broker::set_iterator_value(i), Broker::set_iterator_value(ci)); + } + + print "============ broker::Table"; + local ct = Broker::table_create(); + Broker::table_insert(ct, "hi", 10); + Broker::table_insert(ct, "there", 20); + Broker::table_insert(ct, "!", 30); + + local j = Broker::table_iterator(ct); + while ( ! Broker::table_iterator_last(j) ) + { + local cj = Broker::__opaque_clone_through_serialization(j); + print fmt("| %s | %s", Broker::table_iterator_value(j), Broker::table_iterator_value(cj)); + Broker::table_iterator_next(j); + Broker::table_iterator_next(cj); + if ( ! Broker::table_iterator_last(j) ) + print fmt(" > %s | %s", Broker::table_iterator_value(j), Broker::table_iterator_value(cj)); + } + + print "============ broker::Vector"; + local cv = Broker::vector_create(); + Broker::vector_insert(cv, 0, "hi"); + Broker::vector_insert(cv, 1, "there"); + Broker::vector_insert(cv, 2, "!"); + + local k = Broker::vector_iterator(cv); + while ( ! Broker::vector_iterator_last(k) ) + { + local ck = Broker::__opaque_clone_through_serialization(k); + print fmt("| %s | %s", Broker::vector_iterator_value(k), Broker::vector_iterator_value(ck)); + Broker::vector_iterator_next(k); + Broker::vector_iterator_next(ck); + if ( ! 
Broker::vector_iterator_last(k) ) + print fmt(" > %s | %s", Broker::vector_iterator_value(k), Broker::vector_iterator_value(ck)); + } + + print "============ broker::Record"; + local cr = Broker::record_create(3); + Broker::record_assign(cr, 0, "hi"); + Broker::record_assign(cr, 1, "there"); + Broker::record_assign(cr, 2, "!"); + + local l = Broker::record_iterator(cr); + while ( ! Broker::record_iterator_last(l) ) + { + local cl = Broker::__opaque_clone_through_serialization(l); + print fmt("| %s | %s", Broker::record_iterator_value(l), Broker::record_iterator_value(cl)); + Broker::record_iterator_next(l); + Broker::record_iterator_next(cl); + if ( ! Broker::record_iterator_last(l) ) + print fmt(" > %s | %s", Broker::record_iterator_value(l), Broker::record_iterator_value(cl)); + } + + } diff --git a/testing/btest/broker/remote_event.bro b/testing/btest/broker/remote_event.bro deleted file mode 100644 index a9e22ec25f..0000000000 --- a/testing/btest/broker/remote_event.bro +++ /dev/null @@ -1,99 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# -# @TEST-EXEC: btest-bg-run recv "bro -B broker -b ../recv.bro >recv.out" -# @TEST-EXEC: btest-bg-run send "bro -B broker -b ../send.bro >send.out" -# -# @TEST-EXEC: btest-bg-wait 45 -# @TEST-EXEC: btest-diff recv/recv.out -# @TEST-EXEC: btest-diff send/send.out - -@TEST-START-FILE send.bro - -redef exit_only_after_terminate = T; - -global event_count = 0; - -global ping: event(msg: string, c: count); - -event bro_init() - { - Broker::subscribe("bro/event/my_topic"); - Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); - print "is_remote should be F, and is", is_remote_event(); - } - -function send_event() - { - ++event_count; - local e = Broker::make_event(ping, "my-message", event_count); - Broker::publish("bro/event/my_topic", e); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("sender added peer: endpoint=%s msg=%s", - endpoint$network$address, msg); - send_event(); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("sender lost peer: endpoint=%s msg=%s", - endpoint$network$address, msg); - terminate(); - } - -event pong(msg: string, n: count) - { - print "is_remote should be T, and is", is_remote_event(); - print fmt("sender got pong: %s, %s", msg, n); - send_event(); - } - -@TEST-END-FILE - - -@TEST-START-FILE recv.bro - -redef exit_only_after_terminate = T; - -const events_to_recv = 5; - -global handler: event(msg: string, c: count); -global auto_handler: event(msg: string, c: count); - -global pong: event(msg: string, c: count); - -event bro_init() - { - Broker::subscribe("bro/event/my_topic"); - Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("receiver added peer: endpoint=%s msg=%s", endpoint$network$address, msg); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("receiver lost peer: endpoint=%s msg=%s", endpoint$network$address, msg); - } - -event ping(msg: string, n: count) - { - print "is_remote should be T, and is", is_remote_event(); - print fmt("receiver got ping: %s, %s", msg, n); - - if ( n == events_to_recv ) - { - print get_broker_stats(); - terminate(); - return; - } - - local e = Broker::make_event(pong, msg, n); - Broker::publish("bro/event/my_topic", e); - } - -@TEST-END-FILE diff --git a/testing/btest/broker/remote_event.zeek b/testing/btest/broker/remote_event.zeek new file mode 100644 index 
0000000000..cdf74e15f3 --- /dev/null +++ b/testing/btest/broker/remote_event.zeek @@ -0,0 +1,99 @@ +# @TEST-PORT: BROKER_PORT +# +# @TEST-EXEC: btest-bg-run recv "zeek -B broker -b ../recv.zeek >recv.out" +# @TEST-EXEC: btest-bg-run send "zeek -B broker -b ../send.zeek >send.out" +# +# @TEST-EXEC: btest-bg-wait 45 +# @TEST-EXEC: btest-diff recv/recv.out +# @TEST-EXEC: btest-diff send/send.out + +@TEST-START-FILE send.zeek + +redef exit_only_after_terminate = T; + +global event_count = 0; + +global ping: event(msg: string, c: count); + +event zeek_init() + { + Broker::subscribe("zeek/event/my_topic"); + Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); + print "is_remote should be F, and is", is_remote_event(); + } + +function send_event() + { + ++event_count; + local e = Broker::make_event(ping, "my-message", event_count); + Broker::publish("zeek/event/my_topic", e); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("sender added peer: endpoint=%s msg=%s", + endpoint$network$address, msg); + send_event(); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("sender lost peer: endpoint=%s msg=%s", + endpoint$network$address, msg); + terminate(); + } + +event pong(msg: string, n: count) + { + print "is_remote should be T, and is", is_remote_event(); + print fmt("sender got pong: %s, %s", msg, n); + send_event(); + } + +@TEST-END-FILE + + +@TEST-START-FILE recv.zeek + +redef exit_only_after_terminate = T; + +const events_to_recv = 5; + +global handler: event(msg: string, c: count); +global auto_handler: event(msg: string, c: count); + +global pong: event(msg: string, c: count); + +event zeek_init() + { + Broker::subscribe("zeek/event/my_topic"); + Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("receiver added peer: endpoint=%s msg=%s", endpoint$network$address, msg); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("receiver lost peer: endpoint=%s msg=%s", endpoint$network$address, msg); + } + +event ping(msg: string, n: count) + { + print "is_remote should be T, and is", is_remote_event(); + print fmt("receiver got ping: %s, %s", msg, n); + + if ( n == events_to_recv ) + { + print get_broker_stats(); + terminate(); + return; + } + + local e = Broker::make_event(pong, msg, n); + Broker::publish("zeek/event/my_topic", e); + } + +@TEST-END-FILE diff --git a/testing/btest/broker/remote_event_any.bro b/testing/btest/broker/remote_event_any.bro deleted file mode 100644 index b45e5017ef..0000000000 --- a/testing/btest/broker/remote_event_any.bro +++ /dev/null @@ -1,107 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# -# @TEST-EXEC: btest-bg-run recv "bro -B broker -b ../recv.bro >recv.out" -# @TEST-EXEC: btest-bg-run send "bro -B broker -b ../send.bro >send.out" -# -# @TEST-EXEC: btest-bg-wait 45 -# @TEST-EXEC: btest-diff recv/recv.out -# @TEST-EXEC: btest-diff send/send.out - -@TEST-START-FILE send.bro - -redef exit_only_after_terminate = T; - -global event_count = 0; - -global ping: event(msg: string, c: any); - -event bro_init() - { - Broker::subscribe("bro/event/my_topic"); - Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); - print "is_remote should be F, and is", is_remote_event(); - } - -function send_event() - { - ++event_count; - local e = Broker::make_event(ping, "my-message", event_count); - Broker::publish("bro/event/my_topic", e); - } - -event 
Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("sender added peer: endpoint=%s msg=%s", - endpoint$network$address, msg); - send_event(); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("sender lost peer: endpoint=%s msg=%s", - endpoint$network$address, msg); - terminate(); - } - -event pong(msg: string, n: any) - { - print "is_remote should be T, and is", is_remote_event(); - - if ( n is count ) - print fmt("sender got pong: %s, %s", msg, n as count); - - send_event(); - } - -@TEST-END-FILE - - -@TEST-START-FILE recv.bro - -redef exit_only_after_terminate = T; - -const events_to_recv = 5; - -global handler: event(msg: string, c: count); -global auto_handler: event(msg: string, c: count); - -global pong: event(msg: string, c: any); - -event bro_init() - { - Broker::subscribe("bro/event/my_topic"); - Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("receiver added peer: endpoint=%s msg=%s", endpoint$network$address, msg); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("receiver lost peer: endpoint=%s msg=%s", endpoint$network$address, msg); - } - -event ping(msg: string, n: any) - { - print "is_remote should be T, and is", is_remote_event(); - - if ( n is count ) - print fmt("receiver got ping: %s, %s", msg, n as count); - - if ( (n as count) == events_to_recv ) - { - print get_broker_stats(); - terminate(); - return; - } - - if ( (n as count) % 2 == 0 ) - Broker::publish("bro/event/my_topic", pong, msg, n as count); - else - # internals should not wrap n into another Broker::Data record - Broker::publish("bro/event/my_topic", pong, msg, n); - } - -@TEST-END-FILE diff --git a/testing/btest/broker/remote_event_any.zeek b/testing/btest/broker/remote_event_any.zeek new file mode 100644 index 0000000000..ac6721335c --- /dev/null +++ b/testing/btest/broker/remote_event_any.zeek @@ -0,0 +1,107 @@ +# @TEST-PORT: BROKER_PORT +# +# @TEST-EXEC: btest-bg-run recv "zeek -B broker -b ../recv.zeek >recv.out" +# @TEST-EXEC: btest-bg-run send "zeek -B broker -b ../send.zeek >send.out" +# +# @TEST-EXEC: btest-bg-wait 45 +# @TEST-EXEC: btest-diff recv/recv.out +# @TEST-EXEC: btest-diff send/send.out + +@TEST-START-FILE send.zeek + +redef exit_only_after_terminate = T; + +global event_count = 0; + +global ping: event(msg: string, c: any); + +event zeek_init() + { + Broker::subscribe("zeek/event/my_topic"); + Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); + print "is_remote should be F, and is", is_remote_event(); + } + +function send_event() + { + ++event_count; + local e = Broker::make_event(ping, "my-message", event_count); + Broker::publish("zeek/event/my_topic", e); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("sender added peer: endpoint=%s msg=%s", + endpoint$network$address, msg); + send_event(); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("sender lost peer: endpoint=%s msg=%s", + endpoint$network$address, msg); + terminate(); + } + +event pong(msg: string, n: any) + { + print "is_remote should be T, and is", is_remote_event(); + + if ( n is count ) + print fmt("sender got pong: %s, %s", msg, n as count); + + send_event(); + } + +@TEST-END-FILE + + +@TEST-START-FILE recv.zeek + +redef exit_only_after_terminate = T; + +const events_to_recv = 5; + +global handler: event(msg: 
string, c: count); +global auto_handler: event(msg: string, c: count); + +global pong: event(msg: string, c: any); + +event zeek_init() + { + Broker::subscribe("zeek/event/my_topic"); + Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("receiver added peer: endpoint=%s msg=%s", endpoint$network$address, msg); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("receiver lost peer: endpoint=%s msg=%s", endpoint$network$address, msg); + } + +event ping(msg: string, n: any) + { + print "is_remote should be T, and is", is_remote_event(); + + if ( n is count ) + print fmt("receiver got ping: %s, %s", msg, n as count); + + if ( (n as count) == events_to_recv ) + { + print get_broker_stats(); + terminate(); + return; + } + + if ( (n as count) % 2 == 0 ) + Broker::publish("zeek/event/my_topic", pong, msg, n as count); + else + # internals should not wrap n into another Broker::Data record + Broker::publish("zeek/event/my_topic", pong, msg, n); + } + +@TEST-END-FILE diff --git a/testing/btest/broker/remote_event_auto.bro b/testing/btest/broker/remote_event_auto.bro deleted file mode 100644 index 04570b9e6d..0000000000 --- a/testing/btest/broker/remote_event_auto.bro +++ /dev/null @@ -1,94 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# -# @TEST-EXEC: btest-bg-run recv "bro -b ../recv.bro >recv.out" -# @TEST-EXEC: btest-bg-run send "bro -b ../send.bro >send.out" -# -# @TEST-EXEC: btest-bg-wait 45 -# @TEST-EXEC: btest-diff recv/recv.out -# @TEST-EXEC: btest-diff send/send.out - -@TEST-START-FILE send.bro - -redef exit_only_after_terminate = T; - -global event_count = 0; - -global ping: event(msg: string, c: count); - -event bro_init() - { - Broker::subscribe("bro/event/my_topic"); - Broker::auto_publish("bro/event/my_topic", ping); - Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -function send_event() - { - event ping("my-message", ++event_count); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("sender added peer: endpoint=%s msg=%s", endpoint$network$address, msg); - send_event(); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("sender lost peer: endpoint=%s msg=%s", endpoint$network$address, msg); - terminate(); - } - -event pong(msg: string, n: count) - { - print fmt("sender got pong: %s, %s", msg, n); - send_event(); - } - -@TEST-END-FILE - - -@TEST-START-FILE recv.bro - -redef exit_only_after_terminate = T; - -const events_to_recv = 5; - -global handler: event(msg: string, c: count); -global auto_handler: event(msg: string, c: count); - -global pong: event(msg: string, c: count); - -event bro_init() - { - Broker::subscribe("bro/event/my_topic"); - Broker::auto_publish("bro/event/my_topic", pong); - Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("receiver added peer: endpoint=%s msg=%s", - endpoint$network$address, msg); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("receiver lost peer: endpoint=%s msg=%s", - endpoint$network$address, msg); - } - -event ping(msg: string, n: count) - { - print fmt("receiver got ping: %s, %s", msg, n); - - if ( n == events_to_recv ) - { - terminate(); - return; - } - - event pong(msg, n); - } - -@TEST-END-FILE diff --git a/testing/btest/broker/remote_event_auto.zeek 
b/testing/btest/broker/remote_event_auto.zeek new file mode 100644 index 0000000000..c5497997ac --- /dev/null +++ b/testing/btest/broker/remote_event_auto.zeek @@ -0,0 +1,94 @@ +# @TEST-PORT: BROKER_PORT +# +# @TEST-EXEC: btest-bg-run recv "zeek -b ../recv.zeek >recv.out" +# @TEST-EXEC: btest-bg-run send "zeek -b ../send.zeek >send.out" +# +# @TEST-EXEC: btest-bg-wait 45 +# @TEST-EXEC: btest-diff recv/recv.out +# @TEST-EXEC: btest-diff send/send.out + +@TEST-START-FILE send.zeek + +redef exit_only_after_terminate = T; + +global event_count = 0; + +global ping: event(msg: string, c: count); + +event zeek_init() + { + Broker::subscribe("zeek/event/my_topic"); + Broker::auto_publish("zeek/event/my_topic", ping); + Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +function send_event() + { + event ping("my-message", ++event_count); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("sender added peer: endpoint=%s msg=%s", endpoint$network$address, msg); + send_event(); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("sender lost peer: endpoint=%s msg=%s", endpoint$network$address, msg); + terminate(); + } + +event pong(msg: string, n: count) + { + print fmt("sender got pong: %s, %s", msg, n); + send_event(); + } + +@TEST-END-FILE + + +@TEST-START-FILE recv.zeek + +redef exit_only_after_terminate = T; + +const events_to_recv = 5; + +global handler: event(msg: string, c: count); +global auto_handler: event(msg: string, c: count); + +global pong: event(msg: string, c: count); + +event zeek_init() + { + Broker::subscribe("zeek/event/my_topic"); + Broker::auto_publish("zeek/event/my_topic", pong); + Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("receiver added peer: endpoint=%s msg=%s", + endpoint$network$address, msg); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("receiver lost peer: endpoint=%s msg=%s", + endpoint$network$address, msg); + } + +event ping(msg: string, n: count) + { + print fmt("receiver got ping: %s, %s", msg, n); + + if ( n == events_to_recv ) + { + terminate(); + return; + } + + event pong(msg, n); + } + +@TEST-END-FILE diff --git a/testing/btest/broker/remote_event_ssl_auth.bro b/testing/btest/broker/remote_event_ssl_auth.bro deleted file mode 100644 index 2422638416..0000000000 --- a/testing/btest/broker/remote_event_ssl_auth.bro +++ /dev/null @@ -1,259 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# -# @TEST-EXEC: btest-bg-run recv "bro -B broker -b ../recv.bro >recv.out" -# @TEST-EXEC: btest-bg-run send "bro -B broker -b ../send.bro >send.out" -# -# @TEST-EXEC: btest-bg-wait 45 -# @TEST-EXEC: btest-diff recv/recv.out -# @TEST-EXEC: btest-diff send/send.out - - -@TEST-START-FILE cert.1.pem ------BEGIN CERTIFICATE----- -MIIDOjCCAiICCQDz7oMOR7Wm7jANBgkqhkiG9w0BAQsFADBkMQswCQYDVQQGEwJV -UzELMAkGA1UECAwCQ0ExETAPBgNVBAcMCEJlcmtlbGV5MSMwIQYDVQQKDBpBQ01F -IFNpZ25pbmcgQXV0aG9yaXR5IEluYzEQMA4GA1UEAwwHZm9vLmJhcjAgFw0xNzA0 -MjEyMzI2MzhaGA80NzU1MDMxOTIzMjYzOFowWDELMAkGA1UEBhMCVVMxCzAJBgNV -BAgMAkNBMREwDwYDVQQHDAhCZXJrZWxleTEVMBMGA1UECgwMQUNNRSBTZXJ2aWNl -MRIwEAYDVQQDDAkxLmZvby5iYXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQDHobccAQQqbZANdOdx852W/nUzGcwpurOi8zbh9yCxMwnFMogW9AsqKEnd -sypV6Ah/cIz45PAgCdEg+1pc2DG7+E0+QlV4ChNwCDuk+FSWB6pqMTCdZcLeIwlA -GPp6Ow9v40dW7IFpDetFKXEo6kqEzR5P58Q0a6KpCtpsSMqhk57Py83wB9gPA1vp 
-s77kN7D5CI3oay86TA5j5nfFMT1X/77Hs24csW6CLnW/OD4f1RK79UgPd/kpPKQ1 -jNq+hsR7NZTcfrAF1hcfScxnKaznO7WopSt1k75NqLdnSN1GIci2GpiXYKtXZ9l5 -TErv2Oucpw/u+a/wjKlXjrgLL9lfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAKuW -yKA2uuiNc9MKU+yVbNaP8kPaMb/wMvVaFG8FFFpCTZ0MFMLsqRpeqtj7gMK/gaJC -CQm4EyadjzfWFYDLkHzm6b7gI8digvvhjr/C2RJ5Qxr2P0iFP1buGq0CqnF20XgQ -Q+ecS43CZ77CfKfS6ZLPmAZMAwgFLImVyo5mkaTECo3+9oCnjDYBapvXLJqCJRhk -NosoTmGCV0HecWN4l38ojnXd44aSktQIND9iCLus3S6++nFnX5DHGZiv6/SnSO/6 -+Op7nV0A6zKVcMOYQ0SGZPD8UQs5wDJgrR9LY29Ox5QBwu/5NqyvNSrMQaTop5vb -wkMInaq5lLxEYQDSLBc= ------END CERTIFICATE----- -@TEST-END-FILE - -@TEST-START-FILE cert.2.pem ------BEGIN CERTIFICATE----- -MIIDOjCCAiICCQDz7oMOR7Wm7TANBgkqhkiG9w0BAQsFADBkMQswCQYDVQQGEwJV -UzELMAkGA1UECAwCQ0ExETAPBgNVBAcMCEJlcmtlbGV5MSMwIQYDVQQKDBpBQ01F -IFNpZ25pbmcgQXV0aG9yaXR5IEluYzEQMA4GA1UEAwwHZm9vLmJhcjAgFw0xNzA0 -MjEyMzI2MzNaGA80NzU1MDMxOTIzMjYzM1owWDELMAkGA1UEBhMCVVMxCzAJBgNV -BAgMAkNBMREwDwYDVQQHDAhCZXJrZWxleTEVMBMGA1UECgwMQUNNRSBTZXJ2aWNl -MRIwEAYDVQQDDAkyLmZvby5iYXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQDG9fAvW9qnhjGRmLpA++RvOHaesu7NiUQvxf2F6gF2rLJV0/+DSA/PztEv -1WJaGhgJSaEqUjaHk3HY2EKlbGXEPh1mxqgPZD5plGlu4ddTwutxCxxQiFIBH+3N -MYRjJvDN7ozJoi4uRiK0QQdDWAqWJs5hMOJqeWd6MCgmVXSP6pj5/omGROktbHzD -9jJhAW9fnYFg6k+7cGN5kLmjqqnGhJkNtgom6uW9j73S9OpU/9Er2aZme6/PrujI -qYFBV81TJK2vmonWUITxfQjk9JVJYhBdHamGTxUqVBbuRcbAqdImV9yx4LoGh55u -L6xnsW4i0n1o1k+bh03NgwPz12O3AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAJmN -yCdInFIeEwomE6m+Su82BWBzkztOfMG9iRE+1aGuC8EQ8kju5NNMmWQcuKetNh0s -hJVdY6LXh27O0ZUllhQ/ig9c+dYFh6AHoZU7WjiNKIyWuyl4IAOkQ4IEdsBvst+l -0rafcdJjUpqNOMWeyg6x1s+gUD5o+ZLCZGCdkCW3fZbKgF52L+vmsSRiJg2JkYZW -8BPNNsroHZw2UXnLvRqUXCMf1hnOrlx/B0a0Q46hD4NQvl+OzlKaxfR2L2USmJ8M -XZvT6+i8fWvkGv18iunm23Yu+8Zf08wTXnbqXvmMda5upAYLmwD0YKIVYC3ycihh -mkYCYI6PVeH63a2/zxw= ------END CERTIFICATE----- -@TEST-END-FILE - -@TEST-START-FILE key.1.pem ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEAx6G3HAEEKm2QDXTncfOdlv51MxnMKbqzovM24fcgsTMJxTKI -FvQLKihJ3bMqVegIf3CM+OTwIAnRIPtaXNgxu/hNPkJVeAoTcAg7pPhUlgeqajEw -nWXC3iMJQBj6ejsPb+NHVuyBaQ3rRSlxKOpKhM0eT+fENGuiqQrabEjKoZOez8vN -8AfYDwNb6bO+5Dew+QiN6GsvOkwOY+Z3xTE9V/++x7NuHLFugi51vzg+H9USu/VI -D3f5KTykNYzavobEezWU3H6wBdYXH0nMZyms5zu1qKUrdZO+Tai3Z0jdRiHIthqY -l2CrV2fZeUxK79jrnKcP7vmv8IypV464Cy/ZXwIDAQABAoIBAC0Y7jmoTR2clJ9F -modWhnI215kMqd9/atdT5EEVx8/f/MQMj0vII8GJSm6H6/duLIVFksMjTM+gCBtQ -TPCOcmXJSQHYkGBGvm9fnMG+y7T81FWa+SWFeIkgFxXgzqzQLMOU72fGk9F8sHp2 -Szb3/o+TmtZoQB2rdxqC9ibiJsxrG5IBVKkzlSPv3POkPXwSb1HcETqrTwefuioj -WMuMrqtm5Y3HddJ5l4JEF5VA3KrsfXWl3JLHH0UViemVahiNjXQAVTKAXIL1PHAV -J2MCEvlpA7sIgXREbmvPvZUTkt3pIqhVjZVJ7tHiSnSecqNTbuxcocnhKhZrHNtC -v2zYKHkCgYEA6cAIhz0qOGDycZ1lf9RSWw0RO1hO8frATMQNVoFVuJJCVL22u96u -0FvJ0JGyYbjthULnlOKyRe7DUL5HRLVS4D7vvKCrgwDmsJp1VFxMdASUdaBfq6aX -oKLUW4q7kC2lQcmK/PVRYwp2GQSx8bodWe+DtXUY/GcN03znY8mhSB0CgYEA2qJK -1GSZsm6kFbDek3BiMMHfO+X819owB2FmXiH+GQckyIZu9xA3HWrkOWTqwglEvzfO -qzFF96E9iEEtseAxhcM8gPvfFuXiUj9t2nH/7SzMnVGikhtYi0p6jrgHmscc4NBx -AOUA15kYEFOGqpZfl2uuKqgHidrHdGkJzzSUBqsCgYAVCjb6TVQejQNlnKBFOExN -a8iwScuZVlO21TLKJYwct/WGgSkQkgO0N37b6jFfQHEIvLPxn9IiH1KvUuFBWvzh -uGiF1wR5HzykitKizEgJbVwbllrmLXGagO2Sa9NkL+efG1AKYt53hrqIl/aYZoM7 -1CZL0AV2uqPw9F4zijOdNQKBgH1WmvWGMsKjQTgaLI9z1ybCjjqlj70jHXOtt+Tx -Md2hRcobn5PN3PrlY68vlpHkhF/nG3jzB3x+GGt7ijm2IE3h7la3jl5vLb8fE9gu -kJykmSz7Nurx+GHqMbaN8/Ycfga4GIB9yGzRHIWHjOVQzb5eAfv8Vk4GeV/YM8Jx -Dwd/AoGAILn8pVC9dIFac2BDOFU5y9ZvMmZAvwRxh9vEWewNvkzg27vdYc+rCHNm -I7H0S/RqfqVeo0ApE5PQ8Sll6RvxN/mbSQo9YeCDGQ1r1rNe4Vs12GAYXAbE4ipf -BTdqMbieumB/zL97iK5baHUFEJ4VRtLQhh/SOXgew/BF8ccpilI= ------END RSA 
PRIVATE KEY----- -@TEST-END-FILE - -@TEST-START-FILE key.2.pem ------BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEAxvXwL1vap4YxkZi6QPvkbzh2nrLuzYlEL8X9heoBdqyyVdP/ -g0gPz87RL9ViWhoYCUmhKlI2h5Nx2NhCpWxlxD4dZsaoD2Q+aZRpbuHXU8LrcQsc -UIhSAR/tzTGEYybwze6MyaIuLkYitEEHQ1gKlibOYTDianlnejAoJlV0j+qY+f6J -hkTpLWx8w/YyYQFvX52BYOpPu3BjeZC5o6qpxoSZDbYKJurlvY+90vTqVP/RK9mm -Znuvz67oyKmBQVfNUyStr5qJ1lCE8X0I5PSVSWIQXR2phk8VKlQW7kXGwKnSJlfc -seC6Boeebi+sZ7FuItJ9aNZPm4dNzYMD89djtwIDAQABAoIBAQDDaWquGRl40GR/ -C/JjQQPr+RkIZdYGKXu/MEcA8ATf+l5tzfp3hp+BCzCKOpqOxHI3LQoN9xF3t2lq -AX3z27NYO2nFN/h4pYxnRk0Hiulia1+zd6YnsrxYPnPhxXCxsd1xZYsBvzh8WoZb -ZEMt8Zr0PskUzF6VFQh9Ci9k9ym07ooo/KqP4wjXsm/JK1ueOCTpRtabrBI1icrV -iTaw1JEGqlTAQ92vg3pXqSG5yy69Krt7miZZtiOA5mJ90VrHtlNSgp31AOcVv/Ve -/LMIwJp9EzTN+4ipT7AKPeJAoeVqpFjQk+2cW44zJ7xyzw73pTs5ErxkEIhQOp4M -ak2iMg4BAoGBAOivDZSaOcTxEB3oKxYvN/jL9eU2Io9wdZwAZdYQpkgc8lkM9elW -2rbHIwifkDxQnZbl3rXM8xmjA4c5PSCUYdPnLvx6nsUJrWTG0RjakHRliSLthNEC -LpL9MR1aQblyz1D/ulWTFOCNvHU7m3XI3RVJEQWu3qQ5pCndzT56wXjnAoGBANrl -zKvR9o2SONU8SDIcMzXrO2647Z8yXn4Kz1WhWojhRQQ1V3VOLm8gBwv8bPtc7LmE -MSX5MIcxRoHu7D98d53hd+K/ZGYV2h/638qaIEgZDf2oa8QylBgvoGljoy1DH8nN -KKOgksqWK0AAEkP0+S4IFugTxHVanw8JUkV0gVSxAoGBANIRUGJrxmHt/M3zUArs -QE0G3o28DQGQ1y0rEsVrLKQINid9UvoBpt3C9PcRD2fUpCGakDFzwbnQeRv46h3i -uFtV6Q6aKYLcFMXZ1ObqU+Yx0NhOtUz4+lFL8q58UL/7Tf3jkjc13XBJpe31DYoN -+MMBvzNxR6HeRD5j96tDqi3bAoGAT57SqZS/l5MeNQGuSPvU7MHZZlbBp+xMTpBk -BgOgyLUXw4Ybf8GmRiliJsv0YCHWwUwCDIvtSN91hAGB0T3WzIiccM+pFzDPnF5G -VI1nPJJQcnl2aXD0SS/ZqzvguK/3uhFzvMDFZAbnSGo+OpW6pTGwE05NYVpLDM8Z -K8ZK3KECgYEApNoI5Mr5tmtjq4sbZrgQq6cMlfkIj9gUubOzFCryUb6NaB38Xqkp -2N3/jqdkR+5ZiKOYhsYj+Iy6U3jyqiEl9VySYTfEIfP/ky1CD0a8/EVC9HR4iG8J -im6G7/osaSBYAZctryLqVJXObTelgEy/EFwW9jW8HVph/G+ljmHOmuQ= ------END RSA PRIVATE KEY----- -@TEST-END-FILE - -@TEST-START-FILE cert.2.pem ------BEGIN CERTIFICATE----- -MIIDOjCCAiICCQDz7oMOR7Wm7TANBgkqhkiG9w0BAQsFADBkMQswCQYDVQQGEwJV -UzELMAkGA1UECAwCQ0ExETAPBgNVBAcMCEJlcmtlbGV5MSMwIQYDVQQKDBpBQ01F -IFNpZ25pbmcgQXV0aG9yaXR5IEluYzEQMA4GA1UEAwwHZm9vLmJhcjAgFw0xNzA0 -MjEyMzI2MzNaGA80NzU1MDMxOTIzMjYzM1owWDELMAkGA1UEBhMCVVMxCzAJBgNV -BAgMAkNBMREwDwYDVQQHDAhCZXJrZWxleTEVMBMGA1UECgwMQUNNRSBTZXJ2aWNl -MRIwEAYDVQQDDAkyLmZvby5iYXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQDG9fAvW9qnhjGRmLpA++RvOHaesu7NiUQvxf2F6gF2rLJV0/+DSA/PztEv -1WJaGhgJSaEqUjaHk3HY2EKlbGXEPh1mxqgPZD5plGlu4ddTwutxCxxQiFIBH+3N -MYRjJvDN7ozJoi4uRiK0QQdDWAqWJs5hMOJqeWd6MCgmVXSP6pj5/omGROktbHzD -9jJhAW9fnYFg6k+7cGN5kLmjqqnGhJkNtgom6uW9j73S9OpU/9Er2aZme6/PrujI -qYFBV81TJK2vmonWUITxfQjk9JVJYhBdHamGTxUqVBbuRcbAqdImV9yx4LoGh55u -L6xnsW4i0n1o1k+bh03NgwPz12O3AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAJmN -yCdInFIeEwomE6m+Su82BWBzkztOfMG9iRE+1aGuC8EQ8kju5NNMmWQcuKetNh0s -hJVdY6LXh27O0ZUllhQ/ig9c+dYFh6AHoZU7WjiNKIyWuyl4IAOkQ4IEdsBvst+l -0rafcdJjUpqNOMWeyg6x1s+gUD5o+ZLCZGCdkCW3fZbKgF52L+vmsSRiJg2JkYZW -8BPNNsroHZw2UXnLvRqUXCMf1hnOrlx/B0a0Q46hD4NQvl+OzlKaxfR2L2USmJ8M -XZvT6+i8fWvkGv18iunm23Yu+8Zf08wTXnbqXvmMda5upAYLmwD0YKIVYC3ycihh -mkYCYI6PVeH63a2/zxw= ------END CERTIFICATE----- -@TEST-END-FILE - -@TEST-START-FILE ca.pem ------BEGIN CERTIFICATE----- -MIIDmzCCAoOgAwIBAgIJAPLZ3e3WR0LLMA0GCSqGSIb3DQEBCwUAMGQxCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJDQTERMA8GA1UEBwwIQmVya2VsZXkxIzAhBgNVBAoM -GkFDTUUgU2lnbmluZyBBdXRob3JpdHkgSW5jMRAwDgYDVQQDDAdmb28uYmFyMB4X -DTE3MDQyMTIzMjM0OFoXDTQyMDQyMTIzMjM0OFowZDELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMREwDwYDVQQHDAhCZXJrZWxleTEjMCEGA1UECgwaQUNNRSBTaWdu -aW5nIEF1dGhvcml0eSBJbmMxEDAOBgNVBAMMB2Zvby5iYXIwggEiMA0GCSqGSIb3 -DQEBAQUAA4IBDwAwggEKAoIBAQC6ah79JvrN3LtcPzc9bX5THdzfidWncSmowotG 
-SZA3gcIhlsYD3P3RCaUR9g+f2Z/l0l7ciKgWetpNtN9hRBbg5/9tFzSpCb/Y0SSG -mwtHHovEqN2MWV+Od/MUcYSlL6MmPjSDc8Ls5NSniTr9OBE9J1jm72AsuzHasjPQ -D84TlWeTSs0HW3H5VxDb15xWYFnmgBo0JylDWj0+VWI+G41Xr7Ubu9699lWSFYF9 -FCtdjzM5e1CGZOMvqUbUBus38BhUAdQ4fE7Dwnn8seKh+7HpJ70omIgqG87e4DBo -HbnMAkZaekk8+LBl0Hfu8c66Utw9mNoMIlFf/AMlJyLDIpNxAgMBAAGjUDBOMB0G -A1UdDgQWBBRc6Cbyshtny6jFWZtd/cEUUfMQ3DAfBgNVHSMEGDAWgBRc6Cbyshtn -y6jFWZtd/cEUUfMQ3DAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCY -numHau9XYH5h4R2CoMdnKPMGk6V7UZZdbidLLcE4roQrYhnBdyhT69b/ySJK2Ee4 -mt8T+E0wcg3k8Pr3aJEJA8eYYaJTqZvvv+TwuMBPjmE2rYSIpgMZv2tRD3XWMaQu -duLbwkclfejQHDD26xNXsxuU+WNB5kuvtNAg0oKFyFdNKElLQEcjyYzfxmCF4YX5 -WmElijr1Tzuzd59rWPqC/tVIsh42vQ+P6g8Y1PDmo8eTUFveZ+wcr/eEPW6IOMrg -OW7tATcrgzNuXZ1umiuGgAPuIVqPfr9ssZHBqi9UOK9L/8MQrnOxecNUpPohcTFR -vq+Zqu15QV9T4BVWKHv0 ------END CERTIFICATE----- -@TEST-END-FILE - -@TEST-START-FILE send.bro - -redef exit_only_after_terminate = T; - -redef Broker::ssl_cafile = "../ca.pem"; -redef Broker::ssl_keyfile = "../key.1.pem"; -redef Broker::ssl_certificate = "../cert.1.pem"; - -global event_count = 0; - -global ping: event(msg: string, c: count); - -event bro_init() - { - Broker::subscribe("bro/event/my_topic"); - Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -function send_event() - { - ++event_count; - local e = Broker::make_event(ping, "my-message", event_count); - Broker::publish("bro/event/my_topic", e); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("sender added peer: endpoint=%s msg=%s", - endpoint$network$address, msg); - send_event(); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("sender lost peer: endpoint=%s msg=%s", - endpoint$network$address, msg); - terminate(); - } - -event pong(msg: string, n: count) - { - print fmt("sender got pong: %s, %s", msg, n); - send_event(); - } - -@TEST-END-FILE - - -@TEST-START-FILE recv.bro - -redef exit_only_after_terminate = T; - -redef Broker::ssl_cafile = "../ca.pem"; -redef Broker::ssl_keyfile = "../key.2.pem"; -redef Broker::ssl_certificate = "../cert.2.pem"; - -const events_to_recv = 5; - -global handler: event(msg: string, c: count); -global auto_handler: event(msg: string, c: count); - -global pong: event(msg: string, c: count); - -event bro_init() - { - Broker::subscribe("bro/event/my_topic"); - Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("receiver added peer: endpoint=%s msg=%s", endpoint$network$address, msg); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("receiver lost peer: endpoint=%s msg=%s", endpoint$network$address, msg); - } - -event ping(msg: string, n: count) - { - print fmt("receiver got ping: %s, %s", msg, n); - - if ( n == events_to_recv ) - { - print get_broker_stats(); - terminate(); - return; - } - - local e = Broker::make_event(pong, msg, n); - Broker::publish("bro/event/my_topic", e); - } - -@TEST-END-FILE diff --git a/testing/btest/broker/remote_event_ssl_auth.zeek b/testing/btest/broker/remote_event_ssl_auth.zeek new file mode 100644 index 0000000000..7ffdae0bda --- /dev/null +++ b/testing/btest/broker/remote_event_ssl_auth.zeek @@ -0,0 +1,259 @@ +# @TEST-PORT: BROKER_PORT +# +# @TEST-EXEC: btest-bg-run recv "zeek -B broker -b ../recv.zeek >recv.out" +# @TEST-EXEC: btest-bg-run send "zeek -B broker -b ../send.zeek >send.out" +# +# @TEST-EXEC: btest-bg-wait 45 +# @TEST-EXEC: 
btest-diff recv/recv.out +# @TEST-EXEC: btest-diff send/send.out + + +@TEST-START-FILE cert.1.pem +-----BEGIN CERTIFICATE----- +MIIDOjCCAiICCQDz7oMOR7Wm7jANBgkqhkiG9w0BAQsFADBkMQswCQYDVQQGEwJV +UzELMAkGA1UECAwCQ0ExETAPBgNVBAcMCEJlcmtlbGV5MSMwIQYDVQQKDBpBQ01F +IFNpZ25pbmcgQXV0aG9yaXR5IEluYzEQMA4GA1UEAwwHZm9vLmJhcjAgFw0xNzA0 +MjEyMzI2MzhaGA80NzU1MDMxOTIzMjYzOFowWDELMAkGA1UEBhMCVVMxCzAJBgNV +BAgMAkNBMREwDwYDVQQHDAhCZXJrZWxleTEVMBMGA1UECgwMQUNNRSBTZXJ2aWNl +MRIwEAYDVQQDDAkxLmZvby5iYXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQDHobccAQQqbZANdOdx852W/nUzGcwpurOi8zbh9yCxMwnFMogW9AsqKEnd +sypV6Ah/cIz45PAgCdEg+1pc2DG7+E0+QlV4ChNwCDuk+FSWB6pqMTCdZcLeIwlA +GPp6Ow9v40dW7IFpDetFKXEo6kqEzR5P58Q0a6KpCtpsSMqhk57Py83wB9gPA1vp +s77kN7D5CI3oay86TA5j5nfFMT1X/77Hs24csW6CLnW/OD4f1RK79UgPd/kpPKQ1 +jNq+hsR7NZTcfrAF1hcfScxnKaznO7WopSt1k75NqLdnSN1GIci2GpiXYKtXZ9l5 +TErv2Oucpw/u+a/wjKlXjrgLL9lfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAKuW +yKA2uuiNc9MKU+yVbNaP8kPaMb/wMvVaFG8FFFpCTZ0MFMLsqRpeqtj7gMK/gaJC +CQm4EyadjzfWFYDLkHzm6b7gI8digvvhjr/C2RJ5Qxr2P0iFP1buGq0CqnF20XgQ +Q+ecS43CZ77CfKfS6ZLPmAZMAwgFLImVyo5mkaTECo3+9oCnjDYBapvXLJqCJRhk +NosoTmGCV0HecWN4l38ojnXd44aSktQIND9iCLus3S6++nFnX5DHGZiv6/SnSO/6 ++Op7nV0A6zKVcMOYQ0SGZPD8UQs5wDJgrR9LY29Ox5QBwu/5NqyvNSrMQaTop5vb +wkMInaq5lLxEYQDSLBc= +-----END CERTIFICATE----- +@TEST-END-FILE + +@TEST-START-FILE cert.2.pem +-----BEGIN CERTIFICATE----- +MIIDOjCCAiICCQDz7oMOR7Wm7TANBgkqhkiG9w0BAQsFADBkMQswCQYDVQQGEwJV +UzELMAkGA1UECAwCQ0ExETAPBgNVBAcMCEJlcmtlbGV5MSMwIQYDVQQKDBpBQ01F +IFNpZ25pbmcgQXV0aG9yaXR5IEluYzEQMA4GA1UEAwwHZm9vLmJhcjAgFw0xNzA0 +MjEyMzI2MzNaGA80NzU1MDMxOTIzMjYzM1owWDELMAkGA1UEBhMCVVMxCzAJBgNV +BAgMAkNBMREwDwYDVQQHDAhCZXJrZWxleTEVMBMGA1UECgwMQUNNRSBTZXJ2aWNl +MRIwEAYDVQQDDAkyLmZvby5iYXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQDG9fAvW9qnhjGRmLpA++RvOHaesu7NiUQvxf2F6gF2rLJV0/+DSA/PztEv +1WJaGhgJSaEqUjaHk3HY2EKlbGXEPh1mxqgPZD5plGlu4ddTwutxCxxQiFIBH+3N +MYRjJvDN7ozJoi4uRiK0QQdDWAqWJs5hMOJqeWd6MCgmVXSP6pj5/omGROktbHzD +9jJhAW9fnYFg6k+7cGN5kLmjqqnGhJkNtgom6uW9j73S9OpU/9Er2aZme6/PrujI +qYFBV81TJK2vmonWUITxfQjk9JVJYhBdHamGTxUqVBbuRcbAqdImV9yx4LoGh55u +L6xnsW4i0n1o1k+bh03NgwPz12O3AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAJmN +yCdInFIeEwomE6m+Su82BWBzkztOfMG9iRE+1aGuC8EQ8kju5NNMmWQcuKetNh0s +hJVdY6LXh27O0ZUllhQ/ig9c+dYFh6AHoZU7WjiNKIyWuyl4IAOkQ4IEdsBvst+l +0rafcdJjUpqNOMWeyg6x1s+gUD5o+ZLCZGCdkCW3fZbKgF52L+vmsSRiJg2JkYZW +8BPNNsroHZw2UXnLvRqUXCMf1hnOrlx/B0a0Q46hD4NQvl+OzlKaxfR2L2USmJ8M +XZvT6+i8fWvkGv18iunm23Yu+8Zf08wTXnbqXvmMda5upAYLmwD0YKIVYC3ycihh +mkYCYI6PVeH63a2/zxw= +-----END CERTIFICATE----- +@TEST-END-FILE + +@TEST-START-FILE key.1.pem +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAx6G3HAEEKm2QDXTncfOdlv51MxnMKbqzovM24fcgsTMJxTKI +FvQLKihJ3bMqVegIf3CM+OTwIAnRIPtaXNgxu/hNPkJVeAoTcAg7pPhUlgeqajEw +nWXC3iMJQBj6ejsPb+NHVuyBaQ3rRSlxKOpKhM0eT+fENGuiqQrabEjKoZOez8vN +8AfYDwNb6bO+5Dew+QiN6GsvOkwOY+Z3xTE9V/++x7NuHLFugi51vzg+H9USu/VI +D3f5KTykNYzavobEezWU3H6wBdYXH0nMZyms5zu1qKUrdZO+Tai3Z0jdRiHIthqY +l2CrV2fZeUxK79jrnKcP7vmv8IypV464Cy/ZXwIDAQABAoIBAC0Y7jmoTR2clJ9F +modWhnI215kMqd9/atdT5EEVx8/f/MQMj0vII8GJSm6H6/duLIVFksMjTM+gCBtQ +TPCOcmXJSQHYkGBGvm9fnMG+y7T81FWa+SWFeIkgFxXgzqzQLMOU72fGk9F8sHp2 +Szb3/o+TmtZoQB2rdxqC9ibiJsxrG5IBVKkzlSPv3POkPXwSb1HcETqrTwefuioj +WMuMrqtm5Y3HddJ5l4JEF5VA3KrsfXWl3JLHH0UViemVahiNjXQAVTKAXIL1PHAV +J2MCEvlpA7sIgXREbmvPvZUTkt3pIqhVjZVJ7tHiSnSecqNTbuxcocnhKhZrHNtC +v2zYKHkCgYEA6cAIhz0qOGDycZ1lf9RSWw0RO1hO8frATMQNVoFVuJJCVL22u96u +0FvJ0JGyYbjthULnlOKyRe7DUL5HRLVS4D7vvKCrgwDmsJp1VFxMdASUdaBfq6aX +oKLUW4q7kC2lQcmK/PVRYwp2GQSx8bodWe+DtXUY/GcN03znY8mhSB0CgYEA2qJK 
+1GSZsm6kFbDek3BiMMHfO+X819owB2FmXiH+GQckyIZu9xA3HWrkOWTqwglEvzfO +qzFF96E9iEEtseAxhcM8gPvfFuXiUj9t2nH/7SzMnVGikhtYi0p6jrgHmscc4NBx +AOUA15kYEFOGqpZfl2uuKqgHidrHdGkJzzSUBqsCgYAVCjb6TVQejQNlnKBFOExN +a8iwScuZVlO21TLKJYwct/WGgSkQkgO0N37b6jFfQHEIvLPxn9IiH1KvUuFBWvzh +uGiF1wR5HzykitKizEgJbVwbllrmLXGagO2Sa9NkL+efG1AKYt53hrqIl/aYZoM7 +1CZL0AV2uqPw9F4zijOdNQKBgH1WmvWGMsKjQTgaLI9z1ybCjjqlj70jHXOtt+Tx +Md2hRcobn5PN3PrlY68vlpHkhF/nG3jzB3x+GGt7ijm2IE3h7la3jl5vLb8fE9gu +kJykmSz7Nurx+GHqMbaN8/Ycfga4GIB9yGzRHIWHjOVQzb5eAfv8Vk4GeV/YM8Jx +Dwd/AoGAILn8pVC9dIFac2BDOFU5y9ZvMmZAvwRxh9vEWewNvkzg27vdYc+rCHNm +I7H0S/RqfqVeo0ApE5PQ8Sll6RvxN/mbSQo9YeCDGQ1r1rNe4Vs12GAYXAbE4ipf +BTdqMbieumB/zL97iK5baHUFEJ4VRtLQhh/SOXgew/BF8ccpilI= +-----END RSA PRIVATE KEY----- +@TEST-END-FILE + +@TEST-START-FILE key.2.pem +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAxvXwL1vap4YxkZi6QPvkbzh2nrLuzYlEL8X9heoBdqyyVdP/ +g0gPz87RL9ViWhoYCUmhKlI2h5Nx2NhCpWxlxD4dZsaoD2Q+aZRpbuHXU8LrcQsc +UIhSAR/tzTGEYybwze6MyaIuLkYitEEHQ1gKlibOYTDianlnejAoJlV0j+qY+f6J +hkTpLWx8w/YyYQFvX52BYOpPu3BjeZC5o6qpxoSZDbYKJurlvY+90vTqVP/RK9mm +Znuvz67oyKmBQVfNUyStr5qJ1lCE8X0I5PSVSWIQXR2phk8VKlQW7kXGwKnSJlfc +seC6Boeebi+sZ7FuItJ9aNZPm4dNzYMD89djtwIDAQABAoIBAQDDaWquGRl40GR/ +C/JjQQPr+RkIZdYGKXu/MEcA8ATf+l5tzfp3hp+BCzCKOpqOxHI3LQoN9xF3t2lq +AX3z27NYO2nFN/h4pYxnRk0Hiulia1+zd6YnsrxYPnPhxXCxsd1xZYsBvzh8WoZb +ZEMt8Zr0PskUzF6VFQh9Ci9k9ym07ooo/KqP4wjXsm/JK1ueOCTpRtabrBI1icrV +iTaw1JEGqlTAQ92vg3pXqSG5yy69Krt7miZZtiOA5mJ90VrHtlNSgp31AOcVv/Ve +/LMIwJp9EzTN+4ipT7AKPeJAoeVqpFjQk+2cW44zJ7xyzw73pTs5ErxkEIhQOp4M +ak2iMg4BAoGBAOivDZSaOcTxEB3oKxYvN/jL9eU2Io9wdZwAZdYQpkgc8lkM9elW +2rbHIwifkDxQnZbl3rXM8xmjA4c5PSCUYdPnLvx6nsUJrWTG0RjakHRliSLthNEC +LpL9MR1aQblyz1D/ulWTFOCNvHU7m3XI3RVJEQWu3qQ5pCndzT56wXjnAoGBANrl +zKvR9o2SONU8SDIcMzXrO2647Z8yXn4Kz1WhWojhRQQ1V3VOLm8gBwv8bPtc7LmE +MSX5MIcxRoHu7D98d53hd+K/ZGYV2h/638qaIEgZDf2oa8QylBgvoGljoy1DH8nN +KKOgksqWK0AAEkP0+S4IFugTxHVanw8JUkV0gVSxAoGBANIRUGJrxmHt/M3zUArs +QE0G3o28DQGQ1y0rEsVrLKQINid9UvoBpt3C9PcRD2fUpCGakDFzwbnQeRv46h3i +uFtV6Q6aKYLcFMXZ1ObqU+Yx0NhOtUz4+lFL8q58UL/7Tf3jkjc13XBJpe31DYoN ++MMBvzNxR6HeRD5j96tDqi3bAoGAT57SqZS/l5MeNQGuSPvU7MHZZlbBp+xMTpBk +BgOgyLUXw4Ybf8GmRiliJsv0YCHWwUwCDIvtSN91hAGB0T3WzIiccM+pFzDPnF5G +VI1nPJJQcnl2aXD0SS/ZqzvguK/3uhFzvMDFZAbnSGo+OpW6pTGwE05NYVpLDM8Z +K8ZK3KECgYEApNoI5Mr5tmtjq4sbZrgQq6cMlfkIj9gUubOzFCryUb6NaB38Xqkp +2N3/jqdkR+5ZiKOYhsYj+Iy6U3jyqiEl9VySYTfEIfP/ky1CD0a8/EVC9HR4iG8J +im6G7/osaSBYAZctryLqVJXObTelgEy/EFwW9jW8HVph/G+ljmHOmuQ= +-----END RSA PRIVATE KEY----- +@TEST-END-FILE + +@TEST-START-FILE cert.2.pem +-----BEGIN CERTIFICATE----- +MIIDOjCCAiICCQDz7oMOR7Wm7TANBgkqhkiG9w0BAQsFADBkMQswCQYDVQQGEwJV +UzELMAkGA1UECAwCQ0ExETAPBgNVBAcMCEJlcmtlbGV5MSMwIQYDVQQKDBpBQ01F +IFNpZ25pbmcgQXV0aG9yaXR5IEluYzEQMA4GA1UEAwwHZm9vLmJhcjAgFw0xNzA0 +MjEyMzI2MzNaGA80NzU1MDMxOTIzMjYzM1owWDELMAkGA1UEBhMCVVMxCzAJBgNV +BAgMAkNBMREwDwYDVQQHDAhCZXJrZWxleTEVMBMGA1UECgwMQUNNRSBTZXJ2aWNl +MRIwEAYDVQQDDAkyLmZvby5iYXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQDG9fAvW9qnhjGRmLpA++RvOHaesu7NiUQvxf2F6gF2rLJV0/+DSA/PztEv +1WJaGhgJSaEqUjaHk3HY2EKlbGXEPh1mxqgPZD5plGlu4ddTwutxCxxQiFIBH+3N +MYRjJvDN7ozJoi4uRiK0QQdDWAqWJs5hMOJqeWd6MCgmVXSP6pj5/omGROktbHzD +9jJhAW9fnYFg6k+7cGN5kLmjqqnGhJkNtgom6uW9j73S9OpU/9Er2aZme6/PrujI +qYFBV81TJK2vmonWUITxfQjk9JVJYhBdHamGTxUqVBbuRcbAqdImV9yx4LoGh55u +L6xnsW4i0n1o1k+bh03NgwPz12O3AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAJmN +yCdInFIeEwomE6m+Su82BWBzkztOfMG9iRE+1aGuC8EQ8kju5NNMmWQcuKetNh0s +hJVdY6LXh27O0ZUllhQ/ig9c+dYFh6AHoZU7WjiNKIyWuyl4IAOkQ4IEdsBvst+l 
+0rafcdJjUpqNOMWeyg6x1s+gUD5o+ZLCZGCdkCW3fZbKgF52L+vmsSRiJg2JkYZW +8BPNNsroHZw2UXnLvRqUXCMf1hnOrlx/B0a0Q46hD4NQvl+OzlKaxfR2L2USmJ8M +XZvT6+i8fWvkGv18iunm23Yu+8Zf08wTXnbqXvmMda5upAYLmwD0YKIVYC3ycihh +mkYCYI6PVeH63a2/zxw= +-----END CERTIFICATE----- +@TEST-END-FILE + +@TEST-START-FILE ca.pem +-----BEGIN CERTIFICATE----- +MIIDmzCCAoOgAwIBAgIJAPLZ3e3WR0LLMA0GCSqGSIb3DQEBCwUAMGQxCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJDQTERMA8GA1UEBwwIQmVya2VsZXkxIzAhBgNVBAoM +GkFDTUUgU2lnbmluZyBBdXRob3JpdHkgSW5jMRAwDgYDVQQDDAdmb28uYmFyMB4X +DTE3MDQyMTIzMjM0OFoXDTQyMDQyMTIzMjM0OFowZDELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMREwDwYDVQQHDAhCZXJrZWxleTEjMCEGA1UECgwaQUNNRSBTaWdu +aW5nIEF1dGhvcml0eSBJbmMxEDAOBgNVBAMMB2Zvby5iYXIwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC6ah79JvrN3LtcPzc9bX5THdzfidWncSmowotG +SZA3gcIhlsYD3P3RCaUR9g+f2Z/l0l7ciKgWetpNtN9hRBbg5/9tFzSpCb/Y0SSG +mwtHHovEqN2MWV+Od/MUcYSlL6MmPjSDc8Ls5NSniTr9OBE9J1jm72AsuzHasjPQ +D84TlWeTSs0HW3H5VxDb15xWYFnmgBo0JylDWj0+VWI+G41Xr7Ubu9699lWSFYF9 +FCtdjzM5e1CGZOMvqUbUBus38BhUAdQ4fE7Dwnn8seKh+7HpJ70omIgqG87e4DBo +HbnMAkZaekk8+LBl0Hfu8c66Utw9mNoMIlFf/AMlJyLDIpNxAgMBAAGjUDBOMB0G +A1UdDgQWBBRc6Cbyshtny6jFWZtd/cEUUfMQ3DAfBgNVHSMEGDAWgBRc6Cbyshtn +y6jFWZtd/cEUUfMQ3DAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCY +numHau9XYH5h4R2CoMdnKPMGk6V7UZZdbidLLcE4roQrYhnBdyhT69b/ySJK2Ee4 +mt8T+E0wcg3k8Pr3aJEJA8eYYaJTqZvvv+TwuMBPjmE2rYSIpgMZv2tRD3XWMaQu +duLbwkclfejQHDD26xNXsxuU+WNB5kuvtNAg0oKFyFdNKElLQEcjyYzfxmCF4YX5 +WmElijr1Tzuzd59rWPqC/tVIsh42vQ+P6g8Y1PDmo8eTUFveZ+wcr/eEPW6IOMrg +OW7tATcrgzNuXZ1umiuGgAPuIVqPfr9ssZHBqi9UOK9L/8MQrnOxecNUpPohcTFR +vq+Zqu15QV9T4BVWKHv0 +-----END CERTIFICATE----- +@TEST-END-FILE + +@TEST-START-FILE send.zeek + +redef exit_only_after_terminate = T; + +redef Broker::ssl_cafile = "../ca.pem"; +redef Broker::ssl_keyfile = "../key.1.pem"; +redef Broker::ssl_certificate = "../cert.1.pem"; + +global event_count = 0; + +global ping: event(msg: string, c: count); + +event zeek_init() + { + Broker::subscribe("zeek/event/my_topic"); + Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +function send_event() + { + ++event_count; + local e = Broker::make_event(ping, "my-message", event_count); + Broker::publish("zeek/event/my_topic", e); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("sender added peer: endpoint=%s msg=%s", + endpoint$network$address, msg); + send_event(); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("sender lost peer: endpoint=%s msg=%s", + endpoint$network$address, msg); + terminate(); + } + +event pong(msg: string, n: count) + { + print fmt("sender got pong: %s, %s", msg, n); + send_event(); + } + +@TEST-END-FILE + + +@TEST-START-FILE recv.zeek + +redef exit_only_after_terminate = T; + +redef Broker::ssl_cafile = "../ca.pem"; +redef Broker::ssl_keyfile = "../key.2.pem"; +redef Broker::ssl_certificate = "../cert.2.pem"; + +const events_to_recv = 5; + +global handler: event(msg: string, c: count); +global auto_handler: event(msg: string, c: count); + +global pong: event(msg: string, c: count); + +event zeek_init() + { + Broker::subscribe("zeek/event/my_topic"); + Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("receiver added peer: endpoint=%s msg=%s", endpoint$network$address, msg); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("receiver lost peer: endpoint=%s msg=%s", endpoint$network$address, 
msg); + } + +event ping(msg: string, n: count) + { + print fmt("receiver got ping: %s, %s", msg, n); + + if ( n == events_to_recv ) + { + print get_broker_stats(); + terminate(); + return; + } + + local e = Broker::make_event(pong, msg, n); + Broker::publish("zeek/event/my_topic", e); + } + +@TEST-END-FILE diff --git a/testing/btest/broker/remote_event_vector_any.bro b/testing/btest/broker/remote_event_vector_any.bro deleted file mode 100644 index 6f03d97c56..0000000000 --- a/testing/btest/broker/remote_event_vector_any.bro +++ /dev/null @@ -1,105 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# -# @TEST-EXEC: btest-bg-run recv "bro -B broker -b ../recv.bro >recv.out" -# @TEST-EXEC: btest-bg-run send "bro -B broker -b ../send.bro >send.out" -# -# @TEST-EXEC: btest-bg-wait 45 -# @TEST-EXEC: btest-diff recv/recv.out - -@TEST-START-FILE send.bro - -redef exit_only_after_terminate = T; - -type myvec: vector of any; - -type myrec: record { - a: string &optional; - b: count &optional; - c: int &optional; -}; - -global bar: event(x: any); - -event bro_init() - { - Broker::subscribe("test"); - Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - Broker::publish("test", bar, 1); - Broker::publish("test", bar, "two"); - Broker::publish("test", bar, myvec("one", "two", 3)); - Broker::publish("test", bar, myrec($a = "bye")); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -@TEST-END-FILE - -@TEST-START-FILE recv.bro - -redef exit_only_after_terminate = T; - -type myvec: vector of any; - -type myrec: record { - a: string &optional; - b: count &optional; - c: int &optional; -}; - -function process(x: any) - { - switch ( x ) { - case type myrec as r: - print "record", r; - - if ( r$a == "bye" ) - terminate(); - - break; - case type string as s: - print "string", s; - break; - case type int as i: - print "int", i; - break; - case type count as c: - print "count", c; - break; - case type myvec as v: - { - print "vector", v; - - for ( i in v ) - process(v[i]); - } - break; - default: - print "got unknown type", x; - break; - } - } - -event bar(x: any) - { - process(x); - } - -event bro_init() - { - Broker::subscribe("test"); - Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -@TEST-END-FILE diff --git a/testing/btest/broker/remote_event_vector_any.zeek b/testing/btest/broker/remote_event_vector_any.zeek new file mode 100644 index 0000000000..4736600429 --- /dev/null +++ b/testing/btest/broker/remote_event_vector_any.zeek @@ -0,0 +1,105 @@ +# @TEST-PORT: BROKER_PORT +# +# @TEST-EXEC: btest-bg-run recv "zeek -B broker -b ../recv.zeek >recv.out" +# @TEST-EXEC: btest-bg-run send "zeek -B broker -b ../send.zeek >send.out" +# +# @TEST-EXEC: btest-bg-wait 45 +# @TEST-EXEC: btest-diff recv/recv.out + +@TEST-START-FILE send.zeek + +redef exit_only_after_terminate = T; + +type myvec: vector of any; + +type myrec: record { + a: string &optional; + b: count &optional; + c: int &optional; +}; + +global bar: event(x: any); + +event zeek_init() + { + Broker::subscribe("test"); + Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + Broker::publish("test", bar, 1); + Broker::publish("test", bar, "two"); + Broker::publish("test", bar, myvec("one", "two", 3)); + Broker::publish("test", bar, myrec($a 
= "bye")); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +@TEST-END-FILE + +@TEST-START-FILE recv.zeek + +redef exit_only_after_terminate = T; + +type myvec: vector of any; + +type myrec: record { + a: string &optional; + b: count &optional; + c: int &optional; +}; + +function process(x: any) + { + switch ( x ) { + case type myrec as r: + print "record", r; + + if ( r$a == "bye" ) + terminate(); + + break; + case type string as s: + print "string", s; + break; + case type int as i: + print "int", i; + break; + case type count as c: + print "count", c; + break; + case type myvec as v: + { + print "vector", v; + + for ( i in v ) + process(v[i]); + } + break; + default: + print "got unknown type", x; + break; + } + } + +event bar(x: any) + { + process(x); + } + +event zeek_init() + { + Broker::subscribe("test"); + Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +@TEST-END-FILE diff --git a/testing/btest/broker/remote_id.bro b/testing/btest/broker/remote_id.bro deleted file mode 100644 index 62cddb9f25..0000000000 --- a/testing/btest/broker/remote_id.bro +++ /dev/null @@ -1,66 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# -# @TEST-EXEC: btest-bg-run recv "bro -B broker -b ../recv.bro >recv.out" -# @TEST-EXEC: btest-bg-run send "bro -B broker -b ../send.bro test_var=newval >send.out" -# -# @TEST-EXEC: btest-bg-wait 45 -# @TEST-EXEC: btest-diff recv/recv.out - -@TEST-START-FILE send.bro - -const test_var = "init" &redef; - -event bro_init() - { - Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - print "peer lost"; - terminate(); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print "peer added"; - Broker::publish_id("bro/ids/test", "test_var"); - } - -@TEST-END-FILE - -@TEST-START-FILE recv.bro - -const test_var = "init" &redef; - -event check_var() - { - if ( test_var == "init" ) - schedule 0.1sec { check_var() }; - else - { - print "updated val", test_var; - terminate(); - } - } - -event bro_init() - { - print "intial val", test_var; - Broker::subscribe("bro/ids"); - Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print "peer added"; - schedule 1sec { check_var() }; - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - print "peer lost"; - terminate(); - } - -@TEST-END-FILE diff --git a/testing/btest/broker/remote_id.zeek b/testing/btest/broker/remote_id.zeek new file mode 100644 index 0000000000..0357493230 --- /dev/null +++ b/testing/btest/broker/remote_id.zeek @@ -0,0 +1,66 @@ +# @TEST-PORT: BROKER_PORT +# +# @TEST-EXEC: btest-bg-run recv "zeek -B broker -b ../recv.zeek >recv.out" +# @TEST-EXEC: btest-bg-run send "zeek -B broker -b ../send.zeek test_var=newval >send.out" +# +# @TEST-EXEC: btest-bg-wait 45 +# @TEST-EXEC: btest-diff recv/recv.out + +@TEST-START-FILE send.zeek + +const test_var = "init" &redef; + +event zeek_init() + { + Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + print "peer lost"; + terminate(); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print "peer added"; + Broker::publish_id("zeek/ids/test", "test_var"); + } + +@TEST-END-FILE + 
+@TEST-START-FILE recv.zeek + +const test_var = "init" &redef; + +event check_var() + { + if ( test_var == "init" ) + schedule 0.1sec { check_var() }; + else + { + print "updated val", test_var; + terminate(); + } + } + +event zeek_init() + { + print "intial val", test_var; + Broker::subscribe("zeek/ids"); + Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print "peer added"; + schedule 1sec { check_var() }; + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + print "peer lost"; + terminate(); + } + +@TEST-END-FILE diff --git a/testing/btest/broker/remote_log.bro b/testing/btest/broker/remote_log.bro deleted file mode 100644 index dae89d42b2..0000000000 --- a/testing/btest/broker/remote_log.bro +++ /dev/null @@ -1,98 +0,0 @@ -# @TEST-PORT: BROKER_PORT - -# @TEST-EXEC: btest-bg-run recv "bro -B broker -b ../recv.bro >recv.out" -# @TEST-EXEC: btest-bg-run send "bro -B broker -b ../send.bro >send.out" - -# @TEST-EXEC: btest-bg-wait 45 -# @TEST-EXEC: btest-diff recv/recv.out -# @TEST-EXEC: btest-diff recv/test.log -# @TEST-EXEC: btest-diff send/send.out -# @TEST-EXEC: btest-diff send/test.log - -@TEST-START-FILE common.bro - -redef exit_only_after_terminate = T; - -module Test; - -export { - redef enum Log::ID += { LOG }; - - type Info: record { - msg: string &log; - nolog: string &default="no"; - num: count &log; - }; -} - -event bro_init() &priority=5 - { - Log::create_stream(Test::LOG, [$columns=Test::Info]); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -@TEST-END-FILE - -@TEST-START-FILE recv.bro - - -@load ./common.bro - -event bro_init() - { - Broker::subscribe("bro/"); - Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event Broker::peer_removed(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -@TEST-END-FILE - -@TEST-START-FILE send.bro - - - -@load ./common.bro - -event bro_init() - { - Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -global n = 0; - -event die() - { - terminate(); - } - -event do_write() - { - if ( n == 6 ) - { - Broker::flush_logs(); - schedule 1sec { die() }; - } - else - { - Log::write(Test::LOG, [$msg = "ping", $num = n]); - ++n; - schedule 0.1secs { do_write() }; - } - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print "Broker::peer_added", endpoint$network$address; - event do_write(); - } - - -@TEST-END-FILE diff --git a/testing/btest/broker/remote_log.zeek b/testing/btest/broker/remote_log.zeek new file mode 100644 index 0000000000..5a632d2f6f --- /dev/null +++ b/testing/btest/broker/remote_log.zeek @@ -0,0 +1,98 @@ +# @TEST-PORT: BROKER_PORT + +# @TEST-EXEC: btest-bg-run recv "zeek -B broker -b ../recv.zeek >recv.out" +# @TEST-EXEC: btest-bg-run send "zeek -B broker -b ../send.zeek >send.out" + +# @TEST-EXEC: btest-bg-wait 45 +# @TEST-EXEC: btest-diff recv/recv.out +# @TEST-EXEC: btest-diff recv/test.log +# @TEST-EXEC: btest-diff send/send.out +# @TEST-EXEC: btest-diff send/test.log + +@TEST-START-FILE common.zeek + +redef exit_only_after_terminate = T; + +module Test; + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + msg: string &log; + nolog: string &default="no"; + num: count &log; + }; +} + +event zeek_init() &priority=5 + { + Log::create_stream(Test::LOG, [$columns=Test::Info]); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + 
terminate(); + } + +@TEST-END-FILE + +@TEST-START-FILE recv.zeek + + +@load ./common + +event zeek_init() + { + Broker::subscribe("zeek/"); + Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_removed(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +@TEST-END-FILE + +@TEST-START-FILE send.zeek + + + +@load ./common + +event zeek_init() + { + Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +global n = 0; + +event die() + { + terminate(); + } + +event do_write() + { + if ( n == 6 ) + { + Broker::flush_logs(); + schedule 1sec { die() }; + } + else + { + Log::write(Test::LOG, [$msg = "ping", $num = n]); + ++n; + schedule 0.1secs { do_write() }; + } + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print "Broker::peer_added", endpoint$network$address; + event do_write(); + } + + +@TEST-END-FILE diff --git a/testing/btest/broker/remote_log_late_join.bro b/testing/btest/broker/remote_log_late_join.bro deleted file mode 100644 index aea7846996..0000000000 --- a/testing/btest/broker/remote_log_late_join.bro +++ /dev/null @@ -1,105 +0,0 @@ -# @TEST-PORT: BROKER_PORT - -# @TEST-EXEC: btest-bg-run recv "bro -b ../recv.bro >recv.out" -# @TEST-EXEC: btest-bg-run send "bro -b ../send.bro >send.out" - -# @TEST-EXEC: btest-bg-wait 45 -# @TEST-EXEC: btest-diff recv/recv.out -# @TEST-EXEC: btest-diff recv/test.log -# @TEST-EXEC: btest-diff send/send.out -# @TEST-EXEC: btest-diff send/test.log - -@TEST-START-FILE common.bro - -redef exit_only_after_terminate = T; - -module Test; - -export { - redef enum Log::ID += { LOG }; - - type Info: record { - msg: string &log; - nolog: string &default="no"; - num: count &log; - }; -} - -event bro_init() &priority=5 - { - Log::create_stream(Test::LOG, [$columns=Test::Info]); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -@TEST-END-FILE - -@TEST-START-FILE recv.bro - - -@load ./common.bro - -event bro_init() - { - Broker::subscribe("bro/"); - Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event Broker::peer_removed(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -@TEST-END-FILE - -@TEST-START-FILE send.bro - - - -@load ./common.bro - -event doconnect() - { - Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -global n = 0; - -event bro_init() - { - schedule 2secs { doconnect() }; - Log::write(Test::LOG, [$msg = "ping", $num = n]); - ++n; - } - -event die() - { - terminate(); - } - -event do_write() - { - if ( n == 6 ) - { - Broker::flush_logs(); - schedule 1sec { die() }; - } - else - { - Log::write(Test::LOG, [$msg = "ping", $num = n]); - ++n; - schedule 0.1secs { do_write() }; - } - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print "Broker::peer_added", endpoint$network$address; - event do_write(); - } - - -@TEST-END-FILE diff --git a/testing/btest/broker/remote_log_late_join.zeek b/testing/btest/broker/remote_log_late_join.zeek new file mode 100644 index 0000000000..7e69bdd496 --- /dev/null +++ b/testing/btest/broker/remote_log_late_join.zeek @@ -0,0 +1,105 @@ +# @TEST-PORT: BROKER_PORT + +# @TEST-EXEC: btest-bg-run recv "zeek -b ../recv.zeek >recv.out" +# @TEST-EXEC: btest-bg-run send "zeek -b ../send.zeek >send.out" + +# @TEST-EXEC: btest-bg-wait 45 +# @TEST-EXEC: btest-diff recv/recv.out +# @TEST-EXEC: btest-diff recv/test.log +# @TEST-EXEC: btest-diff send/send.out +# @TEST-EXEC: btest-diff send/test.log + 
+@TEST-START-FILE common.zeek + +redef exit_only_after_terminate = T; + +module Test; + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + msg: string &log; + nolog: string &default="no"; + num: count &log; + }; +} + +event zeek_init() &priority=5 + { + Log::create_stream(Test::LOG, [$columns=Test::Info]); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +@TEST-END-FILE + +@TEST-START-FILE recv.zeek + + +@load ./common + +event zeek_init() + { + Broker::subscribe("zeek/"); + Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_removed(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +@TEST-END-FILE + +@TEST-START-FILE send.zeek + + + +@load ./common + +event doconnect() + { + Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +global n = 0; + +event zeek_init() + { + schedule 2secs { doconnect() }; + Log::write(Test::LOG, [$msg = "ping", $num = n]); + ++n; + } + +event die() + { + terminate(); + } + +event do_write() + { + if ( n == 6 ) + { + Broker::flush_logs(); + schedule 1sec { die() }; + } + else + { + Log::write(Test::LOG, [$msg = "ping", $num = n]); + ++n; + schedule 0.1secs { do_write() }; + } + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print "Broker::peer_added", endpoint$network$address; + event do_write(); + } + + +@TEST-END-FILE diff --git a/testing/btest/broker/remote_log_types.bro b/testing/btest/broker/remote_log_types.bro deleted file mode 100644 index 8bbc66eaa2..0000000000 --- a/testing/btest/broker/remote_log_types.bro +++ /dev/null @@ -1,131 +0,0 @@ -# @TEST-PORT: BROKER_PORT - -# @TEST-EXEC: btest-bg-run recv "bro -b ../recv.bro >recv.out" -# @TEST-EXEC: btest-bg-run send "bro -b ../send.bro >send.out" - -# @TEST-EXEC: btest-bg-wait 45 -# @TEST-EXEC: btest-diff recv/recv.out -# @TEST-EXEC: btest-diff recv/test.log -# @TEST-EXEC: btest-diff send/send.out -# @TEST-EXEC: btest-diff send/test.log -# @TEST-EXEC: cat send/test.log | grep -v '#close' | grep -v '#open' >send/test.log.filtered -# @TEST-EXEC: cat recv/test.log | grep -v '#close' | grep -v '#open' >recv/test.log.filtered -# @TEST-EXEC: diff -u send/test.log.filtered recv/test.log.filtered - -@TEST-START-FILE common.bro - -redef exit_only_after_terminate = T; - -global quit_receiver: event(); -global quit_sender: event(); - - -module Test; - -export { - redef enum Log::ID += { LOG }; - - type Info: record { - b: bool; - i: int; - e: Log::ID; - c: count; - p: port; - sn: subnet; - a: addr; - d: double; - t: time; - iv: interval; - s: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of count; - ve: vector of string; - f: function(i: count) : string; - } &log; - -} - -event bro_init() &priority=5 - { - Log::create_stream(Test::LOG, [$columns=Test::Info]); - } - -@TEST-END-FILE - -@TEST-START-FILE recv.bro - -@load ./common.bro - -event bro_init() - { - Broker::subscribe("bro/"); - Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event quit_receiver() - { - terminate(); - } - -@TEST-END-FILE - -@TEST-START-FILE send.bro - - - -@load ./common.bro - -event bro_init() - { - Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event quit_sender() - { - terminate(); - } - -function foo(i : count) : string - { - if ( i > 0 ) - return "Foo"; - else - return "Bar"; - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print "Broker::peer_added", 
endpoint$network$address; - - local empty_set: set[string]; - local empty_vector: vector of string; - - Log::write(Test::LOG, [ - $b=T, - $i=-42, - $e=Test::LOG, - $c=21, - $p=123/tcp, - $sn=10.0.0.1/24, - $a=1.2.3.4, - $d=3.14, - $t=network_time(), - $iv=100secs, - $s="hurz", - $sc=set(1), # set(1,2,3,4), # Output not stable for multi-element sets. - $ss=set("AA"), # set("AA", "BB", "CC") # Output not stable for multi-element sets. - $se=empty_set, - $vc=vector(10, 20, 30), - $ve=empty_vector, - $f=foo - ]); - - local e = Broker::make_event(quit_receiver); - Broker::publish("bro/", e); - schedule 1sec { quit_sender() }; - } - - -@TEST-END-FILE diff --git a/testing/btest/broker/remote_log_types.zeek b/testing/btest/broker/remote_log_types.zeek new file mode 100644 index 0000000000..2417c75a41 --- /dev/null +++ b/testing/btest/broker/remote_log_types.zeek @@ -0,0 +1,131 @@ +# @TEST-PORT: BROKER_PORT + +# @TEST-EXEC: btest-bg-run recv "zeek -b ../recv.zeek >recv.out" +# @TEST-EXEC: btest-bg-run send "zeek -b ../send.zeek >send.out" + +# @TEST-EXEC: btest-bg-wait 45 +# @TEST-EXEC: btest-diff recv/recv.out +# @TEST-EXEC: btest-diff recv/test.log +# @TEST-EXEC: btest-diff send/send.out +# @TEST-EXEC: btest-diff send/test.log +# @TEST-EXEC: cat send/test.log | grep -v '#close' | grep -v '#open' >send/test.log.filtered +# @TEST-EXEC: cat recv/test.log | grep -v '#close' | grep -v '#open' >recv/test.log.filtered +# @TEST-EXEC: diff -u send/test.log.filtered recv/test.log.filtered + +@TEST-START-FILE common.zeek + +redef exit_only_after_terminate = T; + +global quit_receiver: event(); +global quit_sender: event(); + + +module Test; + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + b: bool; + i: int; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of count; + ve: vector of string; + f: function(i: count) : string; + } &log; + +} + +event zeek_init() &priority=5 + { + Log::create_stream(Test::LOG, [$columns=Test::Info]); + } + +@TEST-END-FILE + +@TEST-START-FILE recv.zeek + +@load ./common + +event zeek_init() + { + Broker::subscribe("zeek/"); + Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event quit_receiver() + { + terminate(); + } + +@TEST-END-FILE + +@TEST-START-FILE send.zeek + + + +@load ./common + +event zeek_init() + { + Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event quit_sender() + { + terminate(); + } + +function foo(i : count) : string + { + if ( i > 0 ) + return "Foo"; + else + return "Bar"; + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print "Broker::peer_added", endpoint$network$address; + + local empty_set: set[string]; + local empty_vector: vector of string; + + Log::write(Test::LOG, [ + $b=T, + $i=-42, + $e=Test::LOG, + $c=21, + $p=123/tcp, + $sn=10.0.0.1/24, + $a=1.2.3.4, + $d=3.14, + $t=network_time(), + $iv=100secs, + $s="hurz", + $sc=set(1), # set(1,2,3,4), # Output not stable for multi-element sets. + $ss=set("AA"), # set("AA", "BB", "CC") # Output not stable for multi-element sets. 
+ $se=empty_set, + $vc=vector(10, 20, 30), + $ve=empty_vector, + $f=foo + ]); + + local e = Broker::make_event(quit_receiver); + Broker::publish("zeek/", e); + schedule 1sec { quit_sender() }; + } + + +@TEST-END-FILE diff --git a/testing/btest/broker/ssl_auth_failure.bro b/testing/btest/broker/ssl_auth_failure.bro deleted file mode 100644 index bc90d86298..0000000000 --- a/testing/btest/broker/ssl_auth_failure.bro +++ /dev/null @@ -1,164 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# -# @TEST-EXEC: btest-bg-run recv "bro -B broker -b ../recv.bro >recv.out" -# @TEST-EXEC: btest-bg-run send "bro -B broker -b ../send.bro >send.out" -# -# @TEST-EXEC: btest-bg-wait 45 -# @TEST-EXEC: btest-diff recv/recv.out -# @TEST-EXEC: btest-diff send/send.out - -@TEST-START-FILE ca.pem ------BEGIN CERTIFICATE----- -MIIDmzCCAoOgAwIBAgIJAPLZ3e3WR0LLMA0GCSqGSIb3DQEBCwUAMGQxCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJDQTERMA8GA1UEBwwIQmVya2VsZXkxIzAhBgNVBAoM -GkFDTUUgU2lnbmluZyBBdXRob3JpdHkgSW5jMRAwDgYDVQQDDAdmb28uYmFyMB4X -DTE3MDQyMTIzMjM0OFoXDTQyMDQyMTIzMjM0OFowZDELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMREwDwYDVQQHDAhCZXJrZWxleTEjMCEGA1UECgwaQUNNRSBTaWdu -aW5nIEF1dGhvcml0eSBJbmMxEDAOBgNVBAMMB2Zvby5iYXIwggEiMA0GCSqGSIb3 -DQEBAQUAA4IBDwAwggEKAoIBAQC6ah79JvrN3LtcPzc9bX5THdzfidWncSmowotG -SZA3gcIhlsYD3P3RCaUR9g+f2Z/l0l7ciKgWetpNtN9hRBbg5/9tFzSpCb/Y0SSG -mwtHHovEqN2MWV+Od/MUcYSlL6MmPjSDc8Ls5NSniTr9OBE9J1jm72AsuzHasjPQ -D84TlWeTSs0HW3H5VxDb15xWYFnmgBo0JylDWj0+VWI+G41Xr7Ubu9699lWSFYF9 -FCtdjzM5e1CGZOMvqUbUBus38BhUAdQ4fE7Dwnn8seKh+7HpJ70omIgqG87e4DBo -HbnMAkZaekk8+LBl0Hfu8c66Utw9mNoMIlFf/AMlJyLDIpNxAgMBAAGjUDBOMB0G -A1UdDgQWBBRc6Cbyshtny6jFWZtd/cEUUfMQ3DAfBgNVHSMEGDAWgBRc6Cbyshtn -y6jFWZtd/cEUUfMQ3DAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCY -numHau9XYH5h4R2CoMdnKPMGk6V7UZZdbidLLcE4roQrYhnBdyhT69b/ySJK2Ee4 -mt8T+E0wcg3k8Pr3aJEJA8eYYaJTqZvvv+TwuMBPjmE2rYSIpgMZv2tRD3XWMaQu -duLbwkclfejQHDD26xNXsxuU+WNB5kuvtNAg0oKFyFdNKElLQEcjyYzfxmCF4YX5 -WmElijr1Tzuzd59rWPqC/tVIsh42vQ+P6g8Y1PDmo8eTUFveZ+wcr/eEPW6IOMrg -OW7tATcrgzNuXZ1umiuGgAPuIVqPfr9ssZHBqi9UOK9L/8MQrnOxecNUpPohcTFR -vq+Zqu15QV9T4BVWKHv0 ------END CERTIFICATE----- -@TEST-END-FILE - - -@TEST-START-FILE cert.1.pem ------BEGIN CERTIFICATE----- -MIIDOjCCAiICCQDz7oMOR7Wm7jANBgkqhkiG9w0BAQsFADBkMQswCQYDVQQGEwJV -UzELMAkGA1UECAwCQ0ExETAPBgNVBAcMCEJlcmtlbGV5MSMwIQYDVQQKDBpBQ01F -IFNpZ25pbmcgQXV0aG9yaXR5IEluYzEQMA4GA1UEAwwHZm9vLmJhcjAgFw0xNzA0 -MjEyMzI2MzhaGA80NzU1MDMxOTIzMjYzOFowWDELMAkGA1UEBhMCVVMxCzAJBgNV -BAgMAkNBMREwDwYDVQQHDAhCZXJrZWxleTEVMBMGA1UECgwMQUNNRSBTZXJ2aWNl -MRIwEAYDVQQDDAkxLmZvby5iYXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQDHobccAQQqbZANdOdx852W/nUzGcwpurOi8zbh9yCxMwnFMogW9AsqKEnd -sypV6Ah/cIz45PAgCdEg+1pc2DG7+E0+QlV4ChNwCDuk+FSWB6pqMTCdZcLeIwlA -GPp6Ow9v40dW7IFpDetFKXEo6kqEzR5P58Q0a6KpCtpsSMqhk57Py83wB9gPA1vp -s77kN7D5CI3oay86TA5j5nfFMT1X/77Hs24csW6CLnW/OD4f1RK79UgPd/kpPKQ1 -jNq+hsR7NZTcfrAF1hcfScxnKaznO7WopSt1k75NqLdnSN1GIci2GpiXYKtXZ9l5 -TErv2Oucpw/u+a/wjKlXjrgLL9lfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAKuW -yKA2uuiNc9MKU+yVbNaP8kPaMb/wMvVaFG8FFFpCTZ0MFMLsqRpeqtj7gMK/gaJC -CQm4EyadjzfWFYDLkHzm6b7gI8digvvhjr/C2RJ5Qxr2P0iFP1buGq0CqnF20XgQ -Q+ecS43CZ77CfKfS6ZLPmAZMAwgFLImVyo5mkaTECo3+9oCnjDYBapvXLJqCJRhk -NosoTmGCV0HecWN4l38ojnXd44aSktQIND9iCLus3S6++nFnX5DHGZiv6/SnSO/6 -+Op7nV0A6zKVcMOYQ0SGZPD8UQs5wDJgrR9LY29Ox5QBwu/5NqyvNSrMQaTop5vb -wkMInaq5lLxEYQDSLBc= ------END CERTIFICATE----- -@TEST-END-FILE - -@TEST-START-FILE key.1.pem ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEAx6G3HAEEKm2QDXTncfOdlv51MxnMKbqzovM24fcgsTMJxTKI 
-FvQLKihJ3bMqVegIf3CM+OTwIAnRIPtaXNgxu/hNPkJVeAoTcAg7pPhUlgeqajEw -nWXC3iMJQBj6ejsPb+NHVuyBaQ3rRSlxKOpKhM0eT+fENGuiqQrabEjKoZOez8vN -8AfYDwNb6bO+5Dew+QiN6GsvOkwOY+Z3xTE9V/++x7NuHLFugi51vzg+H9USu/VI -D3f5KTykNYzavobEezWU3H6wBdYXH0nMZyms5zu1qKUrdZO+Tai3Z0jdRiHIthqY -l2CrV2fZeUxK79jrnKcP7vmv8IypV464Cy/ZXwIDAQABAoIBAC0Y7jmoTR2clJ9F -modWhnI215kMqd9/atdT5EEVx8/f/MQMj0vII8GJSm6H6/duLIVFksMjTM+gCBtQ -TPCOcmXJSQHYkGBGvm9fnMG+y7T81FWa+SWFeIkgFxXgzqzQLMOU72fGk9F8sHp2 -Szb3/o+TmtZoQB2rdxqC9ibiJsxrG5IBVKkzlSPv3POkPXwSb1HcETqrTwefuioj -WMuMrqtm5Y3HddJ5l4JEF5VA3KrsfXWl3JLHH0UViemVahiNjXQAVTKAXIL1PHAV -J2MCEvlpA7sIgXREbmvPvZUTkt3pIqhVjZVJ7tHiSnSecqNTbuxcocnhKhZrHNtC -v2zYKHkCgYEA6cAIhz0qOGDycZ1lf9RSWw0RO1hO8frATMQNVoFVuJJCVL22u96u -0FvJ0JGyYbjthULnlOKyRe7DUL5HRLVS4D7vvKCrgwDmsJp1VFxMdASUdaBfq6aX -oKLUW4q7kC2lQcmK/PVRYwp2GQSx8bodWe+DtXUY/GcN03znY8mhSB0CgYEA2qJK -1GSZsm6kFbDek3BiMMHfO+X819owB2FmXiH+GQckyIZu9xA3HWrkOWTqwglEvzfO -qzFF96E9iEEtseAxhcM8gPvfFuXiUj9t2nH/7SzMnVGikhtYi0p6jrgHmscc4NBx -AOUA15kYEFOGqpZfl2uuKqgHidrHdGkJzzSUBqsCgYAVCjb6TVQejQNlnKBFOExN -a8iwScuZVlO21TLKJYwct/WGgSkQkgO0N37b6jFfQHEIvLPxn9IiH1KvUuFBWvzh -uGiF1wR5HzykitKizEgJbVwbllrmLXGagO2Sa9NkL+efG1AKYt53hrqIl/aYZoM7 -1CZL0AV2uqPw9F4zijOdNQKBgH1WmvWGMsKjQTgaLI9z1ybCjjqlj70jHXOtt+Tx -Md2hRcobn5PN3PrlY68vlpHkhF/nG3jzB3x+GGt7ijm2IE3h7la3jl5vLb8fE9gu -kJykmSz7Nurx+GHqMbaN8/Ycfga4GIB9yGzRHIWHjOVQzb5eAfv8Vk4GeV/YM8Jx -Dwd/AoGAILn8pVC9dIFac2BDOFU5y9ZvMmZAvwRxh9vEWewNvkzg27vdYc+rCHNm -I7H0S/RqfqVeo0ApE5PQ8Sll6RvxN/mbSQo9YeCDGQ1r1rNe4Vs12GAYXAbE4ipf -BTdqMbieumB/zL97iK5baHUFEJ4VRtLQhh/SOXgew/BF8ccpilI= ------END RSA PRIVATE KEY----- -@TEST-END-FILE - -@TEST-START-FILE send.bro - -redef exit_only_after_terminate = T; - -redef Broker::ssl_cafile = "../ca.pem"; -redef Broker::ssl_keyfile = "../key.1.pem"; -redef Broker::ssl_certificate = "../cert.1.pem"; - -global event_count = 0; - -global ping: event(msg: string, c: count); - -event do_terminate() - { - terminate(); - } - -event bro_init() - { - Broker::subscribe("bro/event/my_topic"); - Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); - schedule 5secs { do_terminate() }; - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("sender added peer: endpoint=%s msg=%s", endpoint$network$address, msg); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("sender lost peer: endpoint=%s msg=%s", endpoint$network$address, msg); - terminate(); - } - -event Broker::error(code: Broker::ErrorCode, msg: string) - { - print fmt("sender error: code=%s msg=%s", code, gsub(msg, /127.0.0.1:[0-9]+/, "")); - terminate(); - } - -@TEST-END-FILE - - -@TEST-START-FILE recv.bro - -redef exit_only_after_terminate = T; - -# No cert here. 
-# -# redef Broker::ssl_cafile = "../ca.pem"; -# redef Broker::ssl_keyfile = "../key.2.pem"; -# redef Broker::ssl_certificate = "../cert.2.pem"; - -event do_terminate() - { - terminate(); - } - -event bro_init() - { - Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); - schedule 10secs { do_terminate() }; - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("receiver added peer: endpoint=%s msg=%s", endpoint$network$address, msg); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - print fmt("receiver lost peer: endpoint=%s msg=%s", endpoint$network$address, msg); - } - -@TEST-END-FILE diff --git a/testing/btest/broker/ssl_auth_failure.zeek b/testing/btest/broker/ssl_auth_failure.zeek new file mode 100644 index 0000000000..6260616763 --- /dev/null +++ b/testing/btest/broker/ssl_auth_failure.zeek @@ -0,0 +1,164 @@ +# @TEST-PORT: BROKER_PORT +# +# @TEST-EXEC: btest-bg-run recv "zeek -B broker -b ../recv.zeek >recv.out" +# @TEST-EXEC: btest-bg-run send "zeek -B broker -b ../send.zeek >send.out" +# +# @TEST-EXEC: btest-bg-wait 45 +# @TEST-EXEC: btest-diff recv/recv.out +# @TEST-EXEC: btest-diff send/send.out + +@TEST-START-FILE ca.pem +-----BEGIN CERTIFICATE----- +MIIDmzCCAoOgAwIBAgIJAPLZ3e3WR0LLMA0GCSqGSIb3DQEBCwUAMGQxCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJDQTERMA8GA1UEBwwIQmVya2VsZXkxIzAhBgNVBAoM +GkFDTUUgU2lnbmluZyBBdXRob3JpdHkgSW5jMRAwDgYDVQQDDAdmb28uYmFyMB4X +DTE3MDQyMTIzMjM0OFoXDTQyMDQyMTIzMjM0OFowZDELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMREwDwYDVQQHDAhCZXJrZWxleTEjMCEGA1UECgwaQUNNRSBTaWdu +aW5nIEF1dGhvcml0eSBJbmMxEDAOBgNVBAMMB2Zvby5iYXIwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC6ah79JvrN3LtcPzc9bX5THdzfidWncSmowotG +SZA3gcIhlsYD3P3RCaUR9g+f2Z/l0l7ciKgWetpNtN9hRBbg5/9tFzSpCb/Y0SSG +mwtHHovEqN2MWV+Od/MUcYSlL6MmPjSDc8Ls5NSniTr9OBE9J1jm72AsuzHasjPQ +D84TlWeTSs0HW3H5VxDb15xWYFnmgBo0JylDWj0+VWI+G41Xr7Ubu9699lWSFYF9 +FCtdjzM5e1CGZOMvqUbUBus38BhUAdQ4fE7Dwnn8seKh+7HpJ70omIgqG87e4DBo +HbnMAkZaekk8+LBl0Hfu8c66Utw9mNoMIlFf/AMlJyLDIpNxAgMBAAGjUDBOMB0G +A1UdDgQWBBRc6Cbyshtny6jFWZtd/cEUUfMQ3DAfBgNVHSMEGDAWgBRc6Cbyshtn +y6jFWZtd/cEUUfMQ3DAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCY +numHau9XYH5h4R2CoMdnKPMGk6V7UZZdbidLLcE4roQrYhnBdyhT69b/ySJK2Ee4 +mt8T+E0wcg3k8Pr3aJEJA8eYYaJTqZvvv+TwuMBPjmE2rYSIpgMZv2tRD3XWMaQu +duLbwkclfejQHDD26xNXsxuU+WNB5kuvtNAg0oKFyFdNKElLQEcjyYzfxmCF4YX5 +WmElijr1Tzuzd59rWPqC/tVIsh42vQ+P6g8Y1PDmo8eTUFveZ+wcr/eEPW6IOMrg +OW7tATcrgzNuXZ1umiuGgAPuIVqPfr9ssZHBqi9UOK9L/8MQrnOxecNUpPohcTFR +vq+Zqu15QV9T4BVWKHv0 +-----END CERTIFICATE----- +@TEST-END-FILE + + +@TEST-START-FILE cert.1.pem +-----BEGIN CERTIFICATE----- +MIIDOjCCAiICCQDz7oMOR7Wm7jANBgkqhkiG9w0BAQsFADBkMQswCQYDVQQGEwJV +UzELMAkGA1UECAwCQ0ExETAPBgNVBAcMCEJlcmtlbGV5MSMwIQYDVQQKDBpBQ01F +IFNpZ25pbmcgQXV0aG9yaXR5IEluYzEQMA4GA1UEAwwHZm9vLmJhcjAgFw0xNzA0 +MjEyMzI2MzhaGA80NzU1MDMxOTIzMjYzOFowWDELMAkGA1UEBhMCVVMxCzAJBgNV +BAgMAkNBMREwDwYDVQQHDAhCZXJrZWxleTEVMBMGA1UECgwMQUNNRSBTZXJ2aWNl +MRIwEAYDVQQDDAkxLmZvby5iYXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQDHobccAQQqbZANdOdx852W/nUzGcwpurOi8zbh9yCxMwnFMogW9AsqKEnd +sypV6Ah/cIz45PAgCdEg+1pc2DG7+E0+QlV4ChNwCDuk+FSWB6pqMTCdZcLeIwlA +GPp6Ow9v40dW7IFpDetFKXEo6kqEzR5P58Q0a6KpCtpsSMqhk57Py83wB9gPA1vp +s77kN7D5CI3oay86TA5j5nfFMT1X/77Hs24csW6CLnW/OD4f1RK79UgPd/kpPKQ1 +jNq+hsR7NZTcfrAF1hcfScxnKaznO7WopSt1k75NqLdnSN1GIci2GpiXYKtXZ9l5 +TErv2Oucpw/u+a/wjKlXjrgLL9lfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAKuW +yKA2uuiNc9MKU+yVbNaP8kPaMb/wMvVaFG8FFFpCTZ0MFMLsqRpeqtj7gMK/gaJC 
+CQm4EyadjzfWFYDLkHzm6b7gI8digvvhjr/C2RJ5Qxr2P0iFP1buGq0CqnF20XgQ +Q+ecS43CZ77CfKfS6ZLPmAZMAwgFLImVyo5mkaTECo3+9oCnjDYBapvXLJqCJRhk +NosoTmGCV0HecWN4l38ojnXd44aSktQIND9iCLus3S6++nFnX5DHGZiv6/SnSO/6 ++Op7nV0A6zKVcMOYQ0SGZPD8UQs5wDJgrR9LY29Ox5QBwu/5NqyvNSrMQaTop5vb +wkMInaq5lLxEYQDSLBc= +-----END CERTIFICATE----- +@TEST-END-FILE + +@TEST-START-FILE key.1.pem +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAx6G3HAEEKm2QDXTncfOdlv51MxnMKbqzovM24fcgsTMJxTKI +FvQLKihJ3bMqVegIf3CM+OTwIAnRIPtaXNgxu/hNPkJVeAoTcAg7pPhUlgeqajEw +nWXC3iMJQBj6ejsPb+NHVuyBaQ3rRSlxKOpKhM0eT+fENGuiqQrabEjKoZOez8vN +8AfYDwNb6bO+5Dew+QiN6GsvOkwOY+Z3xTE9V/++x7NuHLFugi51vzg+H9USu/VI +D3f5KTykNYzavobEezWU3H6wBdYXH0nMZyms5zu1qKUrdZO+Tai3Z0jdRiHIthqY +l2CrV2fZeUxK79jrnKcP7vmv8IypV464Cy/ZXwIDAQABAoIBAC0Y7jmoTR2clJ9F +modWhnI215kMqd9/atdT5EEVx8/f/MQMj0vII8GJSm6H6/duLIVFksMjTM+gCBtQ +TPCOcmXJSQHYkGBGvm9fnMG+y7T81FWa+SWFeIkgFxXgzqzQLMOU72fGk9F8sHp2 +Szb3/o+TmtZoQB2rdxqC9ibiJsxrG5IBVKkzlSPv3POkPXwSb1HcETqrTwefuioj +WMuMrqtm5Y3HddJ5l4JEF5VA3KrsfXWl3JLHH0UViemVahiNjXQAVTKAXIL1PHAV +J2MCEvlpA7sIgXREbmvPvZUTkt3pIqhVjZVJ7tHiSnSecqNTbuxcocnhKhZrHNtC +v2zYKHkCgYEA6cAIhz0qOGDycZ1lf9RSWw0RO1hO8frATMQNVoFVuJJCVL22u96u +0FvJ0JGyYbjthULnlOKyRe7DUL5HRLVS4D7vvKCrgwDmsJp1VFxMdASUdaBfq6aX +oKLUW4q7kC2lQcmK/PVRYwp2GQSx8bodWe+DtXUY/GcN03znY8mhSB0CgYEA2qJK +1GSZsm6kFbDek3BiMMHfO+X819owB2FmXiH+GQckyIZu9xA3HWrkOWTqwglEvzfO +qzFF96E9iEEtseAxhcM8gPvfFuXiUj9t2nH/7SzMnVGikhtYi0p6jrgHmscc4NBx +AOUA15kYEFOGqpZfl2uuKqgHidrHdGkJzzSUBqsCgYAVCjb6TVQejQNlnKBFOExN +a8iwScuZVlO21TLKJYwct/WGgSkQkgO0N37b6jFfQHEIvLPxn9IiH1KvUuFBWvzh +uGiF1wR5HzykitKizEgJbVwbllrmLXGagO2Sa9NkL+efG1AKYt53hrqIl/aYZoM7 +1CZL0AV2uqPw9F4zijOdNQKBgH1WmvWGMsKjQTgaLI9z1ybCjjqlj70jHXOtt+Tx +Md2hRcobn5PN3PrlY68vlpHkhF/nG3jzB3x+GGt7ijm2IE3h7la3jl5vLb8fE9gu +kJykmSz7Nurx+GHqMbaN8/Ycfga4GIB9yGzRHIWHjOVQzb5eAfv8Vk4GeV/YM8Jx +Dwd/AoGAILn8pVC9dIFac2BDOFU5y9ZvMmZAvwRxh9vEWewNvkzg27vdYc+rCHNm +I7H0S/RqfqVeo0ApE5PQ8Sll6RvxN/mbSQo9YeCDGQ1r1rNe4Vs12GAYXAbE4ipf +BTdqMbieumB/zL97iK5baHUFEJ4VRtLQhh/SOXgew/BF8ccpilI= +-----END RSA PRIVATE KEY----- +@TEST-END-FILE + +@TEST-START-FILE send.zeek + +redef exit_only_after_terminate = T; + +redef Broker::ssl_cafile = "../ca.pem"; +redef Broker::ssl_keyfile = "../key.1.pem"; +redef Broker::ssl_certificate = "../cert.1.pem"; + +global event_count = 0; + +global ping: event(msg: string, c: count); + +event do_terminate() + { + terminate(); + } + +event zeek_init() + { + Broker::subscribe("zeek/event/my_topic"); + Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); + schedule 5secs { do_terminate() }; + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("sender added peer: endpoint=%s msg=%s", endpoint$network$address, msg); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("sender lost peer: endpoint=%s msg=%s", endpoint$network$address, msg); + terminate(); + } + +event Broker::error(code: Broker::ErrorCode, msg: string) + { + print fmt("sender error: code=%s msg=%s", code, gsub(msg, /127.0.0.1:[0-9]+/, "")); + terminate(); + } + +@TEST-END-FILE + + +@TEST-START-FILE recv.zeek + +redef exit_only_after_terminate = T; + +# No cert here. 
+# +# redef Broker::ssl_cafile = "../ca.pem"; +# redef Broker::ssl_keyfile = "../key.2.pem"; +# redef Broker::ssl_certificate = "../cert.2.pem"; + +event do_terminate() + { + terminate(); + } + +event zeek_init() + { + Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); + schedule 10secs { do_terminate() }; + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("receiver added peer: endpoint=%s msg=%s", endpoint$network$address, msg); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("receiver lost peer: endpoint=%s msg=%s", endpoint$network$address, msg); + } + +@TEST-END-FILE diff --git a/testing/btest/broker/store/clone.bro b/testing/btest/broker/store/clone.bro deleted file mode 100644 index 5620303410..0000000000 --- a/testing/btest/broker/store/clone.bro +++ /dev/null @@ -1,145 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# -# @TEST-EXEC: btest-bg-run clone "bro -B broker -b ../clone-main.bro >clone.out" -# @TEST-EXEC: btest-bg-run master "bro -B broker -b ../master-main.bro >master.out" -# -# @TEST-EXEC: btest-bg-wait 45 -# @TEST-EXEC: btest-diff clone/clone.out -# @TEST-EXEC: btest-diff master/master.out - -@TEST-START-FILE master-main.bro - -redef exit_only_after_terminate = T; -global query_timeout = 1sec; - -global ready: event(); - -global h: opaque of Broker::Store; - -function print_index(k: any) - { - when ( local r = Broker::get(h, k) ) - { - print "master", k, r$status, r$result; - } - timeout query_timeout - { - print "master", fmt("clone ", k); - } - } - -event done() - { - terminate(); - } - -event inserted() - { - Broker::erase(h, "four"); - - print("----"); - print_index("one"); - print_index("two"); - print_index(vector(1,2)); - print_index("three"); - print_index("four"); - print_index("five"); - print_index("six"); - schedule 6secs { done() }; - } - -event bro_init() - { - Broker::auto_publish("bro/events", done); - Broker::subscribe("bro/"); - - h = Broker::create_master("test"); - Broker::put(h, "one", "110"); - Broker::put(h, "two", 223); - Broker::put(h, vector(1,2), 1947/tcp); - - Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event insert_more() - { - Broker::put(h, "three", 3.14); - Broker::put(h, "four", 1.2.3.4); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - schedule 4secs { insert_more() }; - } - -@TEST-END-FILE - - -@TEST-START-FILE clone-main.bro - -redef exit_only_after_terminate = T; - -global query_timeout = 1sec; - -global h: opaque of Broker::Store; - - -global inserted: event(); - -function print_index(k: any) - { - when ( local r = Broker::get(h, k) ) - { - print "clone", k, r$status, r$result; - } - timeout query_timeout - { - print "clone", fmt("clone ", k); - } - } - -event done() - { - terminate(); - } - -event lookup(stage: count) - { - print("----"); - print_index("one"); - print_index("two"); - print_index(vector(1,2)); - print_index("three"); - print_index("four"); - print_index("five"); - print_index("six"); - - if ( stage == 1 ) - schedule 4secs { lookup(2) }; - - if ( stage == 2 ) - { - Broker::put(h, "five", "555"); - Broker::put(h, "six", "666"); - schedule 4sec { inserted() }; - schedule 8secs { lookup(3) }; - } - - if ( stage == 3 ) - schedule 4sec { done() }; - } - -event bro_init() - { - Broker::auto_publish("bro/events", inserted); - Broker::subscribe("bro/"); - Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: 
string) - { - h = Broker::create_clone("test"); - schedule 2secs { lookup(1) }; - } - -@TEST-END-FILE diff --git a/testing/btest/broker/store/clone.zeek b/testing/btest/broker/store/clone.zeek new file mode 100644 index 0000000000..d22b8b9632 --- /dev/null +++ b/testing/btest/broker/store/clone.zeek @@ -0,0 +1,145 @@ +# @TEST-PORT: BROKER_PORT +# +# @TEST-EXEC: btest-bg-run clone "zeek -B broker -b ../clone-main.zeek >clone.out" +# @TEST-EXEC: btest-bg-run master "zeek -B broker -b ../master-main.zeek >master.out" +# +# @TEST-EXEC: btest-bg-wait 45 +# @TEST-EXEC: btest-diff clone/clone.out +# @TEST-EXEC: btest-diff master/master.out + +@TEST-START-FILE master-main.zeek + +redef exit_only_after_terminate = T; +global query_timeout = 1sec; + +global ready: event(); + +global h: opaque of Broker::Store; + +function print_index(k: any) + { + when ( local r = Broker::get(h, k) ) + { + print "master", k, r$status, r$result; + } + timeout query_timeout + { + print "master", fmt("clone ", k); + } + } + +event done() + { + terminate(); + } + +event inserted() + { + Broker::erase(h, "four"); + + print("----"); + print_index("one"); + print_index("two"); + print_index(vector(1,2)); + print_index("three"); + print_index("four"); + print_index("five"); + print_index("six"); + schedule 6secs { done() }; + } + +event zeek_init() + { + Broker::auto_publish("zeek/events", done); + Broker::subscribe("zeek/"); + + h = Broker::create_master("test"); + Broker::put(h, "one", "110"); + Broker::put(h, "two", 223); + Broker::put(h, vector(1,2), 1947/tcp); + + Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event insert_more() + { + Broker::put(h, "three", 3.14); + Broker::put(h, "four", 1.2.3.4); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + schedule 4secs { insert_more() }; + } + +@TEST-END-FILE + + +@TEST-START-FILE clone-main.zeek + +redef exit_only_after_terminate = T; + +global query_timeout = 1sec; + +global h: opaque of Broker::Store; + + +global inserted: event(); + +function print_index(k: any) + { + when ( local r = Broker::get(h, k) ) + { + print "clone", k, r$status, r$result; + } + timeout query_timeout + { + print "clone", fmt("clone ", k); + } + } + +event done() + { + terminate(); + } + +event lookup(stage: count) + { + print("----"); + print_index("one"); + print_index("two"); + print_index(vector(1,2)); + print_index("three"); + print_index("four"); + print_index("five"); + print_index("six"); + + if ( stage == 1 ) + schedule 4secs { lookup(2) }; + + if ( stage == 2 ) + { + Broker::put(h, "five", "555"); + Broker::put(h, "six", "666"); + schedule 4sec { inserted() }; + schedule 8secs { lookup(3) }; + } + + if ( stage == 3 ) + schedule 4sec { done() }; + } + +event zeek_init() + { + Broker::auto_publish("zeek/events", inserted); + Broker::subscribe("zeek/"); + Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + h = Broker::create_clone("test"); + schedule 2secs { lookup(1) }; + } + +@TEST-END-FILE diff --git a/testing/btest/broker/store/local.bro b/testing/btest/broker/store/local.bro deleted file mode 100644 index b352df93f2..0000000000 --- a/testing/btest/broker/store/local.bro +++ /dev/null @@ -1,43 +0,0 @@ -# @TEST-EXEC: btest-bg-run master "bro -b %INPUT >out" -# @TEST-EXEC: btest-bg-wait 60 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff master/out - -redef exit_only_after_terminate = T; - -global query_timeout = 1sec; - -global 
h: opaque of Broker::Store; - -event done() - { - terminate(); - } - -event bro_init() - { - h = Broker::create_master("master"); - Broker::put(h, "one", "110"); - Broker::put(h, "two", 223); - - when ( local res1 = Broker::get(h, "one") ) - { - local s = (res1$result as string); - print "string", s; - } - timeout query_timeout - { - print "timeout"; - } - - when ( local res2 = Broker::get(h, "two") ) - { - local c = (res2$result as count); - print "count", c; - } - timeout query_timeout - { - print "timeout"; - } - - schedule 2secs { done() }; - } diff --git a/testing/btest/broker/store/local.zeek b/testing/btest/broker/store/local.zeek new file mode 100644 index 0000000000..9ec3140c10 --- /dev/null +++ b/testing/btest/broker/store/local.zeek @@ -0,0 +1,43 @@ +# @TEST-EXEC: btest-bg-run master "zeek -b %INPUT >out" +# @TEST-EXEC: btest-bg-wait 60 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff master/out + +redef exit_only_after_terminate = T; + +global query_timeout = 1sec; + +global h: opaque of Broker::Store; + +event done() + { + terminate(); + } + +event zeek_init() + { + h = Broker::create_master("master"); + Broker::put(h, "one", "110"); + Broker::put(h, "two", 223); + + when ( local res1 = Broker::get(h, "one") ) + { + local s = (res1$result as string); + print "string", s; + } + timeout query_timeout + { + print "timeout"; + } + + when ( local res2 = Broker::get(h, "two") ) + { + local c = (res2$result as count); + print "count", c; + } + timeout query_timeout + { + print "timeout"; + } + + schedule 2secs { done() }; + } diff --git a/testing/btest/broker/store/ops.bro b/testing/btest/broker/store/ops.bro deleted file mode 100644 index 070a0f2ed3..0000000000 --- a/testing/btest/broker/store/ops.bro +++ /dev/null @@ -1,145 +0,0 @@ -# @TEST-EXEC: btest-bg-run master "bro -B broker -b %INPUT >out" -# @TEST-EXEC: btest-bg-wait 60 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff master/out - -redef exit_only_after_terminate = T; - -global query_timeout = 1sec; - -global h: opaque of Broker::Store; - -global step: count = 0; - -function print_index(k: any) - { - when ( local r = Broker::get(h, k) ) - { - step += 1; - print fmt("[%d]", step), k, r$status, r$result; - } - timeout query_timeout - { - step += 1; - print fmt("[%d] ", step, k); - } - } - -function print_exists(k: any) - { - when ( local r = Broker::exists(h, k) ) - { - step += 1; - print fmt("[%d]", step), k, r; - } - timeout query_timeout - { - step += 1; - print fmt("[%d] ", step, k); - } - } - -function print_index_from_value(k: any, i: any) - { - when ( local r = Broker::get_index_from_value(h, k, i) ) - { - step += 1; - print fmt("[%d]", step), k, r$status, r$result; - } - timeout query_timeout - { - step += 1; - print fmt("[%d] ", step, k); - } - } - -function print_keys() - { - when ( local s = Broker::keys(h) ) - { - step += 1; - print "keys", s; - } - timeout query_timeout - { - step += 1; - print fmt("[%d] ", step); - } - } - -event done() - { - terminate(); - } - -event pk2() - { - print_keys(); - } - -event pk1() - { - print_keys(); - Broker::clear(h); - schedule 1sec { pk2() }; - } - -event bro_init() - { - h = Broker::create_master("master"); - Broker::put(h, "one", "110"); - Broker::put(h, "two", 220); - Broker::put(h, "three", 330); - Broker::put(h, "four", set(1, 2,3)); - Broker::put(h, set("x", "y"), vector(1/tcp, 2/tcp, 3/tcp)); - - Broker::put(h, "str", "foo"); - Broker::put(h, "vec", vector(1, 2,3)); - Broker::put(h, "set", set("A", "B")); - Broker::put(h, "table", 
table(["a"] = 1, ["b"] = 2)); - - print_index("one"); - print_index("two"); - print_index("three"); - print_index("four"); - print_index("five"); - print_index(set("x", "y")); - - when ( step == 6 ) - { - Broker::increment(h, "two"); - Broker::increment(h, "two", 9); - Broker::decrement(h, "three"); - Broker::decrement(h, "three", 9); - print_index("two"); - print_index("three"); - print_index("four"); - print_keys(); - Broker::erase(h, "four"); - - Broker::append(h, "str", "bar"); - Broker::insert_into_set(h, "set", "C"); - Broker::insert_into_table(h, "table", "c", 3); - Broker::remove_from(h, "set", 2); - Broker::remove_from(h, "table", "b"); - Broker::push(h, "vec", 4); - Broker::push(h, "vec", 5); - Broker::pop(h, "vec"); - - print_index("str"); - print_index("set"); - print_index("table"); - print_index("vec"); - - print_exists("one"); - print_exists("NOPE"); - - print_index_from_value("vec", 1); - print_index_from_value("set", "A"); - print_index_from_value("table", "a"); - print_index_from_value("table", "X"); - - schedule 1sec { pk1() }; - } - - schedule 4secs { done() }; - } diff --git a/testing/btest/broker/store/ops.zeek b/testing/btest/broker/store/ops.zeek new file mode 100644 index 0000000000..aed9ab5d9a --- /dev/null +++ b/testing/btest/broker/store/ops.zeek @@ -0,0 +1,145 @@ +# @TEST-EXEC: btest-bg-run master "zeek -B broker -b %INPUT >out" +# @TEST-EXEC: btest-bg-wait 60 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff master/out + +redef exit_only_after_terminate = T; + +global query_timeout = 1sec; + +global h: opaque of Broker::Store; + +global step: count = 0; + +function print_index(k: any) + { + when ( local r = Broker::get(h, k) ) + { + step += 1; + print fmt("[%d]", step), k, r$status, r$result; + } + timeout query_timeout + { + step += 1; + print fmt("[%d] ", step, k); + } + } + +function print_exists(k: any) + { + when ( local r = Broker::exists(h, k) ) + { + step += 1; + print fmt("[%d]", step), k, r; + } + timeout query_timeout + { + step += 1; + print fmt("[%d] ", step, k); + } + } + +function print_index_from_value(k: any, i: any) + { + when ( local r = Broker::get_index_from_value(h, k, i) ) + { + step += 1; + print fmt("[%d]", step), k, r$status, r$result; + } + timeout query_timeout + { + step += 1; + print fmt("[%d] ", step, k); + } + } + +function print_keys() + { + when ( local s = Broker::keys(h) ) + { + step += 1; + print "keys", s; + } + timeout query_timeout + { + step += 1; + print fmt("[%d] ", step); + } + } + +event done() + { + terminate(); + } + +event pk2() + { + print_keys(); + } + +event pk1() + { + print_keys(); + Broker::clear(h); + schedule 1sec { pk2() }; + } + +event zeek_init() + { + h = Broker::create_master("master"); + Broker::put(h, "one", "110"); + Broker::put(h, "two", 220); + Broker::put(h, "three", 330); + Broker::put(h, "four", set(1, 2,3)); + Broker::put(h, set("x", "y"), vector(1/tcp, 2/tcp, 3/tcp)); + + Broker::put(h, "str", "foo"); + Broker::put(h, "vec", vector(1, 2,3)); + Broker::put(h, "set", set("A", "B")); + Broker::put(h, "table", table(["a"] = 1, ["b"] = 2)); + + print_index("one"); + print_index("two"); + print_index("three"); + print_index("four"); + print_index("five"); + print_index(set("x", "y")); + + when ( step == 6 ) + { + Broker::increment(h, "two"); + Broker::increment(h, "two", 9); + Broker::decrement(h, "three"); + Broker::decrement(h, "three", 9); + print_index("two"); + print_index("three"); + print_index("four"); + print_keys(); + Broker::erase(h, "four"); + + Broker::append(h, 
"str", "bar"); + Broker::insert_into_set(h, "set", "C"); + Broker::insert_into_table(h, "table", "c", 3); + Broker::remove_from(h, "set", 2); + Broker::remove_from(h, "table", "b"); + Broker::push(h, "vec", 4); + Broker::push(h, "vec", 5); + Broker::pop(h, "vec"); + + print_index("str"); + print_index("set"); + print_index("table"); + print_index("vec"); + + print_exists("one"); + print_exists("NOPE"); + + print_index_from_value("vec", 1); + print_index_from_value("set", "A"); + print_index_from_value("table", "a"); + print_index_from_value("table", "X"); + + schedule 1sec { pk1() }; + } + + schedule 4secs { done() }; + } diff --git a/testing/btest/broker/store/record.bro b/testing/btest/broker/store/record.bro deleted file mode 100644 index ab862012a6..0000000000 --- a/testing/btest/broker/store/record.bro +++ /dev/null @@ -1,38 +0,0 @@ -# @TEST-EXEC: btest-bg-run master "bro -b %INPUT >out" -# @TEST-EXEC: btest-bg-wait 60 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff master/out - -type R: record { - s1: string; - s2: string; - c: count; -}; - -event bro_init() - { - local cr = Broker::record_create(3); - print Broker::record_size(cr); - print Broker::record_assign(cr, 0, "hi"); - print Broker::record_assign(cr, 1, "hello"); - print Broker::record_assign(cr, 2, 37); - print cr, (cr as R); - print ""; - - print Broker::record_lookup(cr, 0); - print Broker::record_lookup(cr, 1); - print Broker::record_lookup(cr, 2); - print Broker::record_size(cr); - print Broker::record_assign(cr, 1, "goodbye"); - print Broker::record_size(cr); - print Broker::record_lookup(cr, 1); - print cr, (cr as R); - print ""; - - local i = Broker::record_iterator(cr); - while ( ! Broker::record_iterator_last(i) ) - { - print fmt("| %s", Broker::record_iterator_value(i)); - Broker::record_iterator_next(i); - } - print ""; - } diff --git a/testing/btest/broker/store/record.zeek b/testing/btest/broker/store/record.zeek new file mode 100644 index 0000000000..374fb7cab3 --- /dev/null +++ b/testing/btest/broker/store/record.zeek @@ -0,0 +1,38 @@ +# @TEST-EXEC: btest-bg-run master "zeek -b %INPUT >out" +# @TEST-EXEC: btest-bg-wait 60 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff master/out + +type R: record { + s1: string; + s2: string; + c: count; +}; + +event zeek_init() + { + local cr = Broker::record_create(3); + print Broker::record_size(cr); + print Broker::record_assign(cr, 0, "hi"); + print Broker::record_assign(cr, 1, "hello"); + print Broker::record_assign(cr, 2, 37); + print cr, (cr as R); + print ""; + + print Broker::record_lookup(cr, 0); + print Broker::record_lookup(cr, 1); + print Broker::record_lookup(cr, 2); + print Broker::record_size(cr); + print Broker::record_assign(cr, 1, "goodbye"); + print Broker::record_size(cr); + print Broker::record_lookup(cr, 1); + print cr, (cr as R); + print ""; + + local i = Broker::record_iterator(cr); + while ( ! 
Broker::record_iterator_last(i) ) + { + print fmt("| %s", Broker::record_iterator_value(i)); + Broker::record_iterator_next(i); + } + print ""; + } diff --git a/testing/btest/broker/store/set.bro b/testing/btest/broker/store/set.bro deleted file mode 100644 index 056b46e221..0000000000 --- a/testing/btest/broker/store/set.bro +++ /dev/null @@ -1,39 +0,0 @@ -# @TEST-EXEC: btest-bg-run master "bro -b %INPUT >out" -# @TEST-EXEC: btest-bg-wait 60 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff master/out - - -event bro_init() - { - local cs = Broker::set_create(); - - print Broker::set_size(cs); - print Broker::set_insert(cs, "hi"); - print Broker::set_size(cs); - print Broker::set_contains(cs, "hi"); - print Broker::set_contains(cs, "bye"); - print Broker::set_insert(cs, "bye"); - - print cs, (cs as set[string]); - local i = Broker::set_iterator(cs); - while ( ! Broker::set_iterator_last(i) ) - { - print fmt("| %s", Broker::set_iterator_value(i)); - Broker::set_iterator_next(i); - } - print ""; - - print Broker::set_size(cs); - print Broker::set_insert(cs, "bye"); - print Broker::set_size(cs); - print Broker::set_remove(cs, "hi"); - print Broker::set_size(cs); - print Broker::set_remove(cs, "hi"); - print cs, (cs as set[string]); - print ""; - - print Broker::set_clear(cs); - print Broker::set_size(cs); - print cs, (cs as set[string]); - print ""; - } diff --git a/testing/btest/broker/store/set.zeek b/testing/btest/broker/store/set.zeek new file mode 100644 index 0000000000..8e4b29b1da --- /dev/null +++ b/testing/btest/broker/store/set.zeek @@ -0,0 +1,39 @@ +# @TEST-EXEC: btest-bg-run master "zeek -b %INPUT >out" +# @TEST-EXEC: btest-bg-wait 60 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff master/out + + +event zeek_init() + { + local cs = Broker::set_create(); + + print Broker::set_size(cs); + print Broker::set_insert(cs, "hi"); + print Broker::set_size(cs); + print Broker::set_contains(cs, "hi"); + print Broker::set_contains(cs, "bye"); + print Broker::set_insert(cs, "bye"); + + print cs, (cs as set[string]); + local i = Broker::set_iterator(cs); + while ( ! 
Broker::set_iterator_last(i) ) + { + print fmt("| %s", Broker::set_iterator_value(i)); + Broker::set_iterator_next(i); + } + print ""; + + print Broker::set_size(cs); + print Broker::set_insert(cs, "bye"); + print Broker::set_size(cs); + print Broker::set_remove(cs, "hi"); + print Broker::set_size(cs); + print Broker::set_remove(cs, "hi"); + print cs, (cs as set[string]); + print ""; + + print Broker::set_clear(cs); + print Broker::set_size(cs); + print cs, (cs as set[string]); + print ""; + } diff --git a/testing/btest/broker/store/sqlite.bro b/testing/btest/broker/store/sqlite.bro deleted file mode 100644 index fbce1a693a..0000000000 --- a/testing/btest/broker/store/sqlite.bro +++ /dev/null @@ -1,59 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT RUN=1 >out -# @TEST-EXEC: bro -b %INPUT RUN=2 >>out -# @TEST-EXEC: btest-diff out - -global RUN = 0 &redef; - -redef exit_only_after_terminate = T; - -global query_timeout = 1sec; - -global h: opaque of Broker::Store; - -function print_index(k: any) - { - when ( local r = Broker::get(h, k) ) - { - print k, r$status, r$result; - } - timeout query_timeout - { - print fmt("", k); - } - } - -event done() - { - terminate(); - } - -event bro_init() - { - h = Broker::create_master("master", Broker::SQLITE); - - print "Run", RUN; - - if ( RUN == 1 ) - { - print "Inserting"; - Broker::put(h, "one", "110"); - Broker::put(h, "two", 220); - Broker::put(h, "three", 330); - Broker::put(h, "four", set(1, 2,3)); - Broker::put(h, set("x", "y"), vector(1/tcp, 2/tcp, 3/tcp)); - terminate(); - } - - if ( RUN == 2 ) - { - print "Retrieving"; - print_index("one"); - print_index("two"); - print_index("three"); - print_index("four"); - print_index("five"); - print_index(set("x", "y")); - } - - schedule 2secs { done() }; - } diff --git a/testing/btest/broker/store/sqlite.zeek b/testing/btest/broker/store/sqlite.zeek new file mode 100644 index 0000000000..613f348550 --- /dev/null +++ b/testing/btest/broker/store/sqlite.zeek @@ -0,0 +1,59 @@ +# @TEST-EXEC: zeek -b %INPUT RUN=1 >out +# @TEST-EXEC: zeek -b %INPUT RUN=2 >>out +# @TEST-EXEC: btest-diff out + +global RUN = 0 &redef; + +redef exit_only_after_terminate = T; + +global query_timeout = 1sec; + +global h: opaque of Broker::Store; + +function print_index(k: any) + { + when ( local r = Broker::get(h, k) ) + { + print k, r$status, r$result; + } + timeout query_timeout + { + print fmt("", k); + } + } + +event done() + { + terminate(); + } + +event zeek_init() + { + h = Broker::create_master("master", Broker::SQLITE); + + print "Run", RUN; + + if ( RUN == 1 ) + { + print "Inserting"; + Broker::put(h, "one", "110"); + Broker::put(h, "two", 220); + Broker::put(h, "three", 330); + Broker::put(h, "four", set(1, 2,3)); + Broker::put(h, set("x", "y"), vector(1/tcp, 2/tcp, 3/tcp)); + terminate(); + } + + if ( RUN == 2 ) + { + print "Retrieving"; + print_index("one"); + print_index("two"); + print_index("three"); + print_index("four"); + print_index("five"); + print_index(set("x", "y")); + } + + schedule 2secs { done() }; + } diff --git a/testing/btest/broker/store/table.bro b/testing/btest/broker/store/table.bro deleted file mode 100644 index 11bd00028b..0000000000 --- a/testing/btest/broker/store/table.bro +++ /dev/null @@ -1,42 +0,0 @@ -# @TEST-EXEC: btest-bg-run master "bro -b %INPUT >out" -# @TEST-EXEC: btest-bg-wait 60 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff master/out - - -event bro_init() - { - local ct = Broker::table_create(); - - print Broker::table_size(ct); - print Broker::table_insert(ct, "hi", 42); 
- print Broker::table_size(ct); - print Broker::table_contains(ct, "hi"); - print (Broker::table_lookup(ct, "hi") as count); - print Broker::table_contains(ct, "bye"); - print Broker::table_insert(ct, "bye", 7); - print Broker::table_size(ct); - - print ct, (ct as table[string] of count); - local i = Broker::table_iterator(ct); - while ( ! Broker::table_iterator_last(i) ) - { - print fmt("| %s", Broker::table_iterator_value(i)); - Broker::table_iterator_next(i); - } - print ""; - - print Broker::table_insert(ct, "bye", 37); - print ct, (ct as table[string] of count); - print ""; - - print Broker::table_size(ct); - print (Broker::table_lookup(ct, "bye") as count); - print Broker::table_remove(ct, "hi"); - print Broker::table_size(ct); - print Broker::table_remove(ct, "hi"); - print Broker::table_size(ct); - print Broker::table_clear(ct); - print Broker::table_size(ct); - print ct, (ct as table[string] of count); - print ""; - } diff --git a/testing/btest/broker/store/table.zeek b/testing/btest/broker/store/table.zeek new file mode 100644 index 0000000000..acedef0318 --- /dev/null +++ b/testing/btest/broker/store/table.zeek @@ -0,0 +1,42 @@ +# @TEST-EXEC: btest-bg-run master "zeek -b %INPUT >out" +# @TEST-EXEC: btest-bg-wait 60 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff master/out + + +event zeek_init() + { + local ct = Broker::table_create(); + + print Broker::table_size(ct); + print Broker::table_insert(ct, "hi", 42); + print Broker::table_size(ct); + print Broker::table_contains(ct, "hi"); + print (Broker::table_lookup(ct, "hi") as count); + print Broker::table_contains(ct, "bye"); + print Broker::table_insert(ct, "bye", 7); + print Broker::table_size(ct); + + print ct, (ct as table[string] of count); + local i = Broker::table_iterator(ct); + while ( ! 
Broker::table_iterator_last(i) ) + { + print fmt("| %s", Broker::table_iterator_value(i)); + Broker::table_iterator_next(i); + } + print ""; + + print Broker::table_insert(ct, "bye", 37); + print ct, (ct as table[string] of count); + print ""; + + print Broker::table_size(ct); + print (Broker::table_lookup(ct, "bye") as count); + print Broker::table_remove(ct, "hi"); + print Broker::table_size(ct); + print Broker::table_remove(ct, "hi"); + print Broker::table_size(ct); + print Broker::table_clear(ct); + print Broker::table_size(ct); + print ct, (ct as table[string] of count); + print ""; + } diff --git a/testing/btest/broker/store/type-conversion.bro b/testing/btest/broker/store/type-conversion.bro deleted file mode 100644 index c92c1ea4c9..0000000000 --- a/testing/btest/broker/store/type-conversion.bro +++ /dev/null @@ -1,80 +0,0 @@ -# @TEST-EXEC: btest-bg-run master "bro -b %INPUT >out" -# @TEST-EXEC: btest-bg-wait 60 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff master/out - -type R1: record { - s: string; -}; - -type R2: record { - c: count; - r1: R1; -}; - -event bro_init() - { - ### Print every broker data type - print Broker::data_type(Broker::data(T)); - print Broker::data_type(Broker::data(+1)); - print Broker::data_type(Broker::data(1)); - print Broker::data_type(Broker::data(1.1)); - print Broker::data_type(Broker::data("1 (how creative)")); - print Broker::data_type(Broker::data(1.1.1.1)); - print Broker::data_type(Broker::data(1.1.1.1/1)); - print Broker::data_type(Broker::data(1/udp)); - print Broker::data_type(Broker::data(double_to_time(1))); - print Broker::data_type(Broker::data(1sec)); - print Broker::data_type(Broker::data(Broker::BOOL)); - print Broker::data_type(Broker::data(set("one", "two", "three"))); - print Broker::data_type(Broker::data(table(["one"] = 1, ["two"] = 2, ["three"] = 3))); - print Broker::data_type(Broker::data(vector("zero", "one", "two"))); - print Broker::data_type(Broker::data(R1($s="abc"))); - print Broker::data_type(Broker::data(R2($c=123, $r1=R1($s="xyz")))); - - print "***************************"; - - ### Convert a Bro value to a broker value, then print the result - - print (Broker::data(T) as bool); - print (Broker::data(F) as bool); - print (Broker::data(+1) as int); - print (Broker::data(+0) as int); - print (Broker::data(-1) as int); - print (Broker::data(1) as count); - print (Broker::data(0) as count); - print (Broker::data(1.1) as double); - print (Broker::data(-11.1) as double); - print (Broker::data("hello") as string); - print (Broker::data(1.2.3.4) as addr); - print (Broker::data(192.168.1.1/16) as subnet); - print (Broker::data(22/tcp) as port); - print (Broker::data(double_to_time(42)) as time); - print (Broker::data(3min) as interval); - print (Broker::data(Broker::BOOL) as Broker::DataType); - print (Broker::data(set("one", "two", "three")) as set[string]); - print (Broker::data(table(["one"] = 1, ["two"] = 2, ["three"] = 3)) as table[string] of count); - print (Broker::data(vector("zero", "one", "two")) as vector of string); - print (Broker::data(R1($s="abc")) as R1); - print (Broker::data(R2($c=123, $r1=R1($s="xyz"))) as R2); - - local md5h1 = md5_hash_init(); - md5_hash_update(md5h1, "abc"); - local md5h2 = (Broker::data(md5h1) as opaque of md5); - local md5s1 = md5_hash_finish(md5h1); - local md5s2 = md5_hash_finish(md5h2); - print "opaque of md5", md5s1 == md5s2; - - local sha1h1 = sha1_hash_init(); - sha1_hash_update(sha1h1, "abc"); - local sha1h2 = (Broker::data(sha1h1) as opaque of sha1); - local 
sha1s1 = sha1_hash_finish(sha1h1); - local sha1s2 = sha1_hash_finish(sha1h2); - print "opaque of sha1", sha1s1 == sha1s2; - - local h1 = sha256_hash_init(); - sha256_hash_update(h1, "abc"); - local h2 = (Broker::data(h1) as opaque of sha256); - local s1 = sha256_hash_finish(h1); - local s2 = sha256_hash_finish(h2); - print "opaque of sha256", s1 == s2; - } diff --git a/testing/btest/broker/store/type-conversion.zeek b/testing/btest/broker/store/type-conversion.zeek new file mode 100644 index 0000000000..733a10af73 --- /dev/null +++ b/testing/btest/broker/store/type-conversion.zeek @@ -0,0 +1,80 @@ +# @TEST-EXEC: btest-bg-run master "zeek -b %INPUT >out" +# @TEST-EXEC: btest-bg-wait 60 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff master/out + +type R1: record { + s: string; +}; + +type R2: record { + c: count; + r1: R1; +}; + +event zeek_init() + { + ### Print every Broker data type + print Broker::data_type(Broker::data(T)); + print Broker::data_type(Broker::data(+1)); + print Broker::data_type(Broker::data(1)); + print Broker::data_type(Broker::data(1.1)); + print Broker::data_type(Broker::data("1 (how creative)")); + print Broker::data_type(Broker::data(1.1.1.1)); + print Broker::data_type(Broker::data(1.1.1.1/1)); + print Broker::data_type(Broker::data(1/udp)); + print Broker::data_type(Broker::data(double_to_time(1))); + print Broker::data_type(Broker::data(1sec)); + print Broker::data_type(Broker::data(Broker::BOOL)); + print Broker::data_type(Broker::data(set("one", "two", "three"))); + print Broker::data_type(Broker::data(table(["one"] = 1, ["two"] = 2, ["three"] = 3))); + print Broker::data_type(Broker::data(vector("zero", "one", "two"))); + print Broker::data_type(Broker::data(R1($s="abc"))); + print Broker::data_type(Broker::data(R2($c=123, $r1=R1($s="xyz")))); + + print "***************************"; + + ### Convert a Zeek value to a Broker value, then print the result + + print (Broker::data(T) as bool); + print (Broker::data(F) as bool); + print (Broker::data(+1) as int); + print (Broker::data(+0) as int); + print (Broker::data(-1) as int); + print (Broker::data(1) as count); + print (Broker::data(0) as count); + print (Broker::data(1.1) as double); + print (Broker::data(-11.1) as double); + print (Broker::data("hello") as string); + print (Broker::data(1.2.3.4) as addr); + print (Broker::data(192.168.1.1/16) as subnet); + print (Broker::data(22/tcp) as port); + print (Broker::data(double_to_time(42)) as time); + print (Broker::data(3min) as interval); + print (Broker::data(Broker::BOOL) as Broker::DataType); + print (Broker::data(set("one", "two", "three")) as set[string]); + print (Broker::data(table(["one"] = 1, ["two"] = 2, ["three"] = 3)) as table[string] of count); + print (Broker::data(vector("zero", "one", "two")) as vector of string); + print (Broker::data(R1($s="abc")) as R1); + print (Broker::data(R2($c=123, $r1=R1($s="xyz"))) as R2); + + local md5h1 = md5_hash_init(); + md5_hash_update(md5h1, "abc"); + local md5h2 = (Broker::data(md5h1) as opaque of md5); + local md5s1 = md5_hash_finish(md5h1); + local md5s2 = md5_hash_finish(md5h2); + print "opaque of md5", md5s1 == md5s2; + + local sha1h1 = sha1_hash_init(); + sha1_hash_update(sha1h1, "abc"); + local sha1h2 = (Broker::data(sha1h1) as opaque of sha1); + local sha1s1 = sha1_hash_finish(sha1h1); + local sha1s2 = sha1_hash_finish(sha1h2); + print "opaque of sha1", sha1s1 == sha1s2; + + local h1 = sha256_hash_init(); + sha256_hash_update(h1, "abc"); + local h2 = (Broker::data(h1) as opaque of 
sha256); + local s1 = sha256_hash_finish(h1); + local s2 = sha256_hash_finish(h2); + print "opaque of sha256", s1 == s2; + } diff --git a/testing/btest/broker/store/vector.bro b/testing/btest/broker/store/vector.bro deleted file mode 100644 index 7edc4ba050..0000000000 --- a/testing/btest/broker/store/vector.bro +++ /dev/null @@ -1,42 +0,0 @@ -# @TEST-EXEC: btest-bg-run master "bro -b %INPUT >out" -# @TEST-EXEC: btest-bg-wait 60 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff master/out - - -event bro_init() - { - local cv = Broker::vector_create(); - print Broker::vector_size(cv); - print Broker::vector_insert(cv, 0, "hi"); - print Broker::vector_insert(cv, 1, "hello"); - print Broker::vector_insert(cv, 2, "greetings"); - print Broker::vector_insert(cv, 1, "salutations"); - print Broker::vector_size(cv); - print cv, (cv as vector of string); - local i = Broker::vector_iterator(cv); - while ( ! Broker::vector_iterator_last(i) ) - { - print fmt("| %s", Broker::vector_iterator_value(i)); - Broker::vector_iterator_next(i); - } - print ""; - - print Broker::vector_replace(cv, 2, "bah"); - print cv, (cv as vector of string); - print ""; - - print Broker::vector_lookup(cv, 2); - print Broker::vector_lookup(cv, 0); - print cv, (cv as vector of string); - print ""; - - print Broker::vector_remove(cv, 2); - print cv, (cv as vector of string); - print ""; - - print Broker::vector_size(cv); - print Broker::vector_clear(cv); - print Broker::vector_size(cv); - print cv, (cv as vector of string); - print ""; - } diff --git a/testing/btest/broker/store/vector.zeek b/testing/btest/broker/store/vector.zeek new file mode 100644 index 0000000000..b896524ea8 --- /dev/null +++ b/testing/btest/broker/store/vector.zeek @@ -0,0 +1,42 @@ +# @TEST-EXEC: btest-bg-run master "zeek -b %INPUT >out" +# @TEST-EXEC: btest-bg-wait 60 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff master/out + + +event zeek_init() + { + local cv = Broker::vector_create(); + print Broker::vector_size(cv); + print Broker::vector_insert(cv, 0, "hi"); + print Broker::vector_insert(cv, 1, "hello"); + print Broker::vector_insert(cv, 2, "greetings"); + print Broker::vector_insert(cv, 1, "salutations"); + print Broker::vector_size(cv); + print cv, (cv as vector of string); + local i = Broker::vector_iterator(cv); + while ( ! 
Broker::vector_iterator_last(i) ) + { + print fmt("| %s", Broker::vector_iterator_value(i)); + Broker::vector_iterator_next(i); + } + print ""; + + print Broker::vector_replace(cv, 2, "bah"); + print cv, (cv as vector of string); + print ""; + + print Broker::vector_lookup(cv, 2); + print Broker::vector_lookup(cv, 0); + print cv, (cv as vector of string); + print ""; + + print Broker::vector_remove(cv, 2); + print cv, (cv as vector of string); + print ""; + + print Broker::vector_size(cv); + print Broker::vector_clear(cv); + print Broker::vector_size(cv); + print cv, (cv as vector of string); + print ""; + } diff --git a/testing/btest/broker/unpeer.bro b/testing/btest/broker/unpeer.bro deleted file mode 100644 index b591815955..0000000000 --- a/testing/btest/broker/unpeer.bro +++ /dev/null @@ -1,76 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# -# @TEST-EXEC: btest-bg-run recv "bro -b ../recv.bro >recv.out" -# @TEST-EXEC: btest-bg-run send "bro -b ../send.bro >send.out" -# -# @TEST-EXEC: btest-bg-wait 45 -# @TEST-EXEC: btest-diff recv/recv.out -# @TEST-EXEC: btest-diff send/send.out -# -# @TEST-EXEC: cat recv/broker.log | awk '/Broker::STATUS/ { $5="XXX"; print; }' >recv/broker.filtered.log -# @TEST-EXEC: cat send/broker.log | awk '/Broker::STATUS/ { $5="XXX"; print; }' >send/broker.filtered.log -# @TEST-EXEC: btest-diff recv/broker.filtered.log -# @TEST-EXEC: btest-diff send/broker.filtered.log - -@TEST-START-FILE send.bro - -redef exit_only_after_terminate = T; - -event do_terminate() - { - terminate(); - } - -event print_something(i: int) - { - print "Something sender", i; - } - -event unpeer(endpoint: Broker::EndpointInfo) - { - print "unpeering"; - Broker::unpeer("127.0.0.1", endpoint$network$bound_port); - schedule 2secs { print_something(2) }; - schedule 4secs { do_terminate() }; - } - -event bro_init() - { - Broker::subscribe("bro/event/my_topic"); - Broker::auto_publish("bro/event/my_topic", print_something); - Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - schedule 2secs { print_something(1) }; - schedule 4secs { unpeer(endpoint) }; - } - - -@TEST-END-FILE - - -@TEST-START-FILE recv.bro - -redef exit_only_after_terminate = T; - -event do_terminate() - { - terminate(); - } - -event print_something(i: int) - { - print "Something receiver", i; - } - -event bro_init() - { - Broker::subscribe("bro/event/my_topic"); - Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); - schedule 10secs { do_terminate() }; - } - - -@TEST-END-FILE diff --git a/testing/btest/broker/unpeer.zeek b/testing/btest/broker/unpeer.zeek new file mode 100644 index 0000000000..e246b3ddc5 --- /dev/null +++ b/testing/btest/broker/unpeer.zeek @@ -0,0 +1,76 @@ +# @TEST-PORT: BROKER_PORT +# +# @TEST-EXEC: btest-bg-run recv "zeek -b ../recv.zeek >recv.out" +# @TEST-EXEC: btest-bg-run send "zeek -b ../send.zeek >send.out" +# +# @TEST-EXEC: btest-bg-wait 45 +# @TEST-EXEC: btest-diff recv/recv.out +# @TEST-EXEC: btest-diff send/send.out +# +# @TEST-EXEC: cat recv/broker.log | awk '/Broker::STATUS/ { $5="XXX"; print; }' >recv/broker.filtered.log +# @TEST-EXEC: cat send/broker.log | awk '/Broker::STATUS/ { $5="XXX"; print; }' >send/broker.filtered.log +# @TEST-EXEC: btest-diff recv/broker.filtered.log +# @TEST-EXEC: btest-diff send/broker.filtered.log + +@TEST-START-FILE send.zeek + +redef exit_only_after_terminate = T; + +event do_terminate() + { + terminate(); + } + +event print_something(i: int) + { + print "Something sender", i; 
+ } + +event unpeer(endpoint: Broker::EndpointInfo) + { + print "unpeering"; + Broker::unpeer("127.0.0.1", endpoint$network$bound_port); + schedule 2secs { print_something(2) }; + schedule 4secs { do_terminate() }; + } + +event zeek_init() + { + Broker::subscribe("zeek/event/my_topic"); + Broker::auto_publish("zeek/event/my_topic", print_something); + Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + schedule 2secs { print_something(1) }; + schedule 4secs { unpeer(endpoint) }; + } + + +@TEST-END-FILE + + +@TEST-START-FILE recv.zeek + +redef exit_only_after_terminate = T; + +event do_terminate() + { + terminate(); + } + +event print_something(i: int) + { + print "Something receiver", i; + } + +event zeek_init() + { + Broker::subscribe("zeek/event/my_topic"); + Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); + schedule 10secs { do_terminate() }; + } + + +@TEST-END-FILE diff --git a/testing/btest/btest.cfg b/testing/btest/btest.cfg index 5a570d9021..ef56fd2afa 100644 --- a/testing/btest/btest.cfg +++ b/testing/btest/btest.cfg @@ -6,13 +6,13 @@ IgnoreDirs = .svn CVS .tmp IgnoreFiles = *.tmp *.swp #* *.trace .DS_Store [environment] -BROPATH=`bash -c %(testbase)s/../../build/bro-path-dev` -BRO_SEED_FILE=%(testbase)s/random.seed -BRO_PLUGIN_PATH= +ZEEKPATH=`bash -c %(testbase)s/../../build/zeek-path-dev` +ZEEK_SEED_FILE=%(testbase)s/random.seed +ZEEK_PLUGIN_PATH= TZ=UTC LC_ALL=C BTEST_PATH=%(testbase)s/../../aux/btest -PATH=%(testbase)s/../../build/src:%(testbase)s/../scripts:%(testbase)s/../../aux/btest:%(testbase)s/../../build/aux/bro-aux/bro-cut:%(testbase)s/../../aux/btest/sphinx:%(default_path)s:/sbin +PATH=%(testbase)s/../../build/src:%(testbase)s/../scripts:%(testbase)s/../../aux/btest:%(testbase)s/../../build/aux/zeek-aux/zeek-cut:%(testbase)s/../../aux/btest/sphinx:%(default_path)s:/sbin TRACES=%(testbase)s/Traces FILES=%(testbase)s/Files SCRIPTS=%(testbase)s/../scripts @@ -21,11 +21,11 @@ DIST=%(testbase)s/../.. 
BUILD=%(testbase)s/../../build TEST_DIFF_CANONIFIER=%(testbase)s/../scripts/diff-canonifier TMPDIR=%(testbase)s/.tmp -BRO_PROFILER_FILE=%(testbase)s/.tmp/script-coverage/XXXXXX +ZEEK_PROFILER_FILE=%(testbase)s/.tmp/script-coverage/XXXXXX BTEST_RST_FILTER=$SCRIPTS/rst-filter -BRO_DNS_FAKE=1 -BRO_DEFAULT_LISTEN_ADDRESS=127.0.0.1 -BRO_DEFAULT_LISTEN_RETRY=1 -BRO_DEFAULT_CONNECT_RETRY=1 -BRO_DISABLE_BROXYGEN=1 +ZEEK_DNS_FAKE=1 +ZEEK_DEFAULT_LISTEN_ADDRESS=127.0.0.1 +ZEEK_DEFAULT_LISTEN_RETRY=1 +ZEEK_DEFAULT_CONNECT_RETRY=1 +ZEEK_DISABLE_ZEEKYGEN=1 ZEEK_ALLOW_INIT_ERRORS=1 diff --git a/testing/btest/core/bits_per_uid.bro b/testing/btest/core/bits_per_uid.bro deleted file mode 100644 index 6e997907de..0000000000 --- a/testing/btest/core/bits_per_uid.bro +++ /dev/null @@ -1,21 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/ftp/ipv4.trace %INPUT bits_per_uid=32 >32 -# @TEST-EXEC: btest-diff 32 -# @TEST-EXEC: bro -r $TRACES/ftp/ipv4.trace %INPUT bits_per_uid=64 >64 -# @TEST-EXEC: btest-diff 64 -# @TEST-EXEC: bro -r $TRACES/ftp/ipv4.trace %INPUT bits_per_uid=96 >96 -# @TEST-EXEC: btest-diff 96 -# @TEST-EXEC: bro -r $TRACES/ftp/ipv4.trace %INPUT bits_per_uid=128 >128 -# @TEST-EXEC: btest-diff 128 -# @TEST-EXEC: bro -r $TRACES/ftp/ipv4.trace %INPUT bits_per_uid=256 >256 -# @TEST-EXEC: btest-diff 256 -# @TEST-EXEC: cmp 128 256 - -event new_connection(c: connection) - { - print c$uid; - } - -event file_new(f: fa_file) - { - print f$id; - } diff --git a/testing/btest/core/bits_per_uid.zeek b/testing/btest/core/bits_per_uid.zeek new file mode 100644 index 0000000000..d252eefe23 --- /dev/null +++ b/testing/btest/core/bits_per_uid.zeek @@ -0,0 +1,21 @@ +# @TEST-EXEC: zeek -r $TRACES/ftp/ipv4.trace %INPUT bits_per_uid=32 >32 +# @TEST-EXEC: btest-diff 32 +# @TEST-EXEC: zeek -r $TRACES/ftp/ipv4.trace %INPUT bits_per_uid=64 >64 +# @TEST-EXEC: btest-diff 64 +# @TEST-EXEC: zeek -r $TRACES/ftp/ipv4.trace %INPUT bits_per_uid=96 >96 +# @TEST-EXEC: btest-diff 96 +# @TEST-EXEC: zeek -r $TRACES/ftp/ipv4.trace %INPUT bits_per_uid=128 >128 +# @TEST-EXEC: btest-diff 128 +# @TEST-EXEC: zeek -r $TRACES/ftp/ipv4.trace %INPUT bits_per_uid=256 >256 +# @TEST-EXEC: btest-diff 256 +# @TEST-EXEC: cmp 128 256 + +event new_connection(c: connection) + { + print c$uid; + } + +event file_new(f: fa_file) + { + print f$id; + } diff --git a/testing/btest/core/check-unused-event-handlers.test b/testing/btest/core/check-unused-event-handlers.test index 3836414054..7d3a581d6c 100644 --- a/testing/btest/core/check-unused-event-handlers.test +++ b/testing/btest/core/check-unused-event-handlers.test @@ -1,5 +1,5 @@ # This test should print a warning that the event handler is never invoked. 
-# @TEST-EXEC: bro -b %INPUT check_for_unused_event_handlers=T +# @TEST-EXEC: zeek -b %INPUT check_for_unused_event_handlers=T # @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff .stderr event this_is_never_used() diff --git a/testing/btest/core/checksums.test b/testing/btest/core/checksums.test index 77fe2a62d3..6d5d286097 100644 --- a/testing/btest/core/checksums.test +++ b/testing/btest/core/checksums.test @@ -1,41 +1,41 @@ -# @TEST-EXEC: bro -r $TRACES/chksums/ip4-bad-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip4-bad-chksum.pcap # @TEST-EXEC: mv weird.log bad.out -# @TEST-EXEC: bro -r $TRACES/chksums/ip4-tcp-bad-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip4-tcp-bad-chksum.pcap # @TEST-EXEC: cat weird.log >> bad.out -# @TEST-EXEC: bro -r $TRACES/chksums/ip4-udp-bad-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip4-udp-bad-chksum.pcap # @TEST-EXEC: cat weird.log >> bad.out -# @TEST-EXEC: bro -r $TRACES/chksums/ip4-icmp-bad-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip4-icmp-bad-chksum.pcap # @TEST-EXEC: cat weird.log >> bad.out -# @TEST-EXEC: bro -r $TRACES/chksums/ip6-route0-tcp-bad-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip6-route0-tcp-bad-chksum.pcap # @TEST-EXEC: cat weird.log >> bad.out -# @TEST-EXEC: bro -r $TRACES/chksums/ip6-route0-udp-bad-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip6-route0-udp-bad-chksum.pcap # @TEST-EXEC: cat weird.log >> bad.out -# @TEST-EXEC: bro -r $TRACES/chksums/ip6-route0-icmp6-bad-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip6-route0-icmp6-bad-chksum.pcap # @TEST-EXEC: cat weird.log >> bad.out -# @TEST-EXEC: bro -r $TRACES/chksums/ip6-tcp-bad-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip6-tcp-bad-chksum.pcap # @TEST-EXEC: cat weird.log >> bad.out -# @TEST-EXEC: bro -r $TRACES/chksums/ip6-udp-bad-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip6-udp-bad-chksum.pcap # @TEST-EXEC: cat weird.log >> bad.out -# @TEST-EXEC: bro -r $TRACES/chksums/ip6-icmp6-bad-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip6-icmp6-bad-chksum.pcap # @TEST-EXEC: cat weird.log >> bad.out -# @TEST-EXEC: bro -r $TRACES/chksums/ip4-tcp-good-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip4-tcp-good-chksum.pcap # @TEST-EXEC: mv weird.log good.out -# @TEST-EXEC: bro -r $TRACES/chksums/ip4-udp-good-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip4-udp-good-chksum.pcap # @TEST-EXEC: test ! -e weird.log -# @TEST-EXEC: bro -r $TRACES/chksums/ip4-icmp-good-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip4-icmp-good-chksum.pcap # @TEST-EXEC: test ! 
-e weird.log -# @TEST-EXEC: bro -r $TRACES/chksums/ip6-route0-tcp-good-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip6-route0-tcp-good-chksum.pcap # @TEST-EXEC: cat weird.log >> good.out -# @TEST-EXEC: bro -r $TRACES/chksums/ip6-route0-udp-good-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip6-route0-udp-good-chksum.pcap # @TEST-EXEC: cat weird.log >> good.out -# @TEST-EXEC: bro -r $TRACES/chksums/ip6-route0-icmp6-good-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip6-route0-icmp6-good-chksum.pcap # @TEST-EXEC: cat weird.log >> good.out -# @TEST-EXEC: bro -r $TRACES/chksums/ip6-tcp-good-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip6-tcp-good-chksum.pcap # @TEST-EXEC: cat weird.log >> good.out -# @TEST-EXEC: bro -r $TRACES/chksums/ip6-udp-good-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip6-udp-good-chksum.pcap # @TEST-EXEC: cat weird.log >> good.out -# @TEST-EXEC: bro -r $TRACES/chksums/ip6-icmp6-good-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip6-icmp6-good-chksum.pcap # @TEST-EXEC: cat weird.log >> good.out # @TEST-EXEC: btest-diff bad.out diff --git a/testing/btest/core/cisco-fabric-path.bro b/testing/btest/core/cisco-fabric-path.bro deleted file mode 100644 index ff7fa298e3..0000000000 --- a/testing/btest/core/cisco-fabric-path.bro +++ /dev/null @@ -1,2 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/cisco-fabric-path.pcap -# @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/core/cisco-fabric-path.zeek b/testing/btest/core/cisco-fabric-path.zeek new file mode 100644 index 0000000000..183c16f84d --- /dev/null +++ b/testing/btest/core/cisco-fabric-path.zeek @@ -0,0 +1,2 @@ +# @TEST-EXEC: zeek -C -r $TRACES/cisco-fabric-path.pcap +# @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/core/conn-size-threshold.bro b/testing/btest/core/conn-size-threshold.bro deleted file mode 100644 index ce83e5939d..0000000000 --- a/testing/btest/core/conn-size-threshold.bro +++ /dev/null @@ -1,32 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/irc-dcc-send.trace %INPUT -# @TEST-EXEC: btest-diff .stdout - -event connection_established(c: connection) - { - print get_current_conn_bytes_threshold(c$id, T); - print get_current_conn_bytes_threshold(c$id, F); - print get_current_conn_packets_threshold(c$id, T); - print get_current_conn_packets_threshold(c$id, F); - - print fmt("Threshold set for %s", cat(c$id)); - set_current_conn_bytes_threshold(c$id, 3000, T); - set_current_conn_bytes_threshold(c$id, 2000, F); - - set_current_conn_packets_threshold(c$id, 50, F); - set_current_conn_packets_threshold(c$id, 63, T); - - print get_current_conn_bytes_threshold(c$id, T); - print get_current_conn_bytes_threshold(c$id, F); - print get_current_conn_packets_threshold(c$id, T); - print get_current_conn_packets_threshold(c$id, F); - } - -event conn_bytes_threshold_crossed(c: connection, threshold: count, is_orig: bool) - { - print "triggered bytes", c$id, threshold, is_orig; - } - -event conn_packets_threshold_crossed(c: connection, threshold: count, is_orig: bool) - { - print "triggered packets", c$id, threshold, is_orig; - } diff --git a/testing/btest/core/conn-size-threshold.zeek b/testing/btest/core/conn-size-threshold.zeek new file mode 100644 index 0000000000..d886846df5 --- /dev/null +++ b/testing/btest/core/conn-size-threshold.zeek @@ -0,0 +1,32 @@ +# @TEST-EXEC: zeek -r $TRACES/irc-dcc-send.trace %INPUT +# @TEST-EXEC: btest-diff .stdout + +event connection_established(c: connection) + { + print get_current_conn_bytes_threshold(c$id, T); + print 
get_current_conn_bytes_threshold(c$id, F); + print get_current_conn_packets_threshold(c$id, T); + print get_current_conn_packets_threshold(c$id, F); + + print fmt("Threshold set for %s", cat(c$id)); + set_current_conn_bytes_threshold(c$id, 3000, T); + set_current_conn_bytes_threshold(c$id, 2000, F); + + set_current_conn_packets_threshold(c$id, 50, F); + set_current_conn_packets_threshold(c$id, 63, T); + + print get_current_conn_bytes_threshold(c$id, T); + print get_current_conn_bytes_threshold(c$id, F); + print get_current_conn_packets_threshold(c$id, T); + print get_current_conn_packets_threshold(c$id, F); + } + +event conn_bytes_threshold_crossed(c: connection, threshold: count, is_orig: bool) + { + print "triggered bytes", c$id, threshold, is_orig; + } + +event conn_packets_threshold_crossed(c: connection, threshold: count, is_orig: bool) + { + print "triggered packets", c$id, threshold, is_orig; + } diff --git a/testing/btest/core/conn-uid.bro b/testing/btest/core/conn-uid.bro deleted file mode 100644 index 52ff8fc4d3..0000000000 --- a/testing/btest/core/conn-uid.bro +++ /dev/null @@ -1,21 +0,0 @@ -# -# In "normal" test mode, connection uids should be determistic. -# -# @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace %INPUT >output -# @TEST-EXEC: btest-diff output -# -# Without a seed, they should differ each time: -# -# @TEST-EXEC: unset BRO_SEED_FILE && bro -C -r $TRACES/wikipedia.trace %INPUT >output2 -# @TEST-EXEC: cat output output2 | sort | uniq -c | wc -l | sed 's/ //g' >counts -# @TEST-EXEC: btest-diff counts - -event new_connection(c: connection) - { - print c$id, c$uid; - } - -event connection_established(c: connection) - { - print c$id, c$uid; - } diff --git a/testing/btest/core/conn-uid.zeek b/testing/btest/core/conn-uid.zeek new file mode 100644 index 0000000000..b52587ad43 --- /dev/null +++ b/testing/btest/core/conn-uid.zeek @@ -0,0 +1,21 @@ +# +# In "normal" test mode, connection uids should be determistic. +# +# @TEST-EXEC: zeek -C -r $TRACES/wikipedia.trace %INPUT >output +# @TEST-EXEC: btest-diff output +# +# Without a seed, they should differ each time: +# +# @TEST-EXEC: unset ZEEK_SEED_FILE && unset BRO_SEED_FILE && zeek -C -r $TRACES/wikipedia.trace %INPUT >output2 +# @TEST-EXEC: cat output output2 | sort | uniq -c | wc -l | sed 's/ //g' >counts +# @TEST-EXEC: btest-diff counts + +event new_connection(c: connection) + { + print c$id, c$uid; + } + +event connection_established(c: connection) + { + print c$id, c$uid; + } diff --git a/testing/btest/core/connection_flip_roles.bro b/testing/btest/core/connection_flip_roles.bro deleted file mode 100644 index e68d94c5fe..0000000000 --- a/testing/btest/core/connection_flip_roles.bro +++ /dev/null @@ -1,39 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/tcp/handshake-reorder.trace %INPUT >out -# @TEST-EXEC: btest-diff out - -# This tests the Connection::FlipRoles code path (SYN/SYN-ACK reversal). - -# The check of likely_server_ports is before Connection::FlipRoles, so -# need to make sure that isn't the mechanism used to flip src/dst stuff. -redef likely_server_ports = {}; - -global first_packet: bool = T; - -event new_packet(c: connection, p: pkt_hdr) - { - if ( ! first_packet ) - return; - - first_packet = F; - - print "schedule_analyzer, current conn_id", c$id; - # Anticipate roles getting flipped in next packet. 
- Analyzer::schedule_analyzer(141.142.228.5, 192.150.187.43, 80/tcp, - Analyzer::ANALYZER_HTTP, 2mins); - } - -event connection_state_remove(c: connection) - { - print "connection_state_remove", c$id; - } - -event http_request(c: connection, method: string, original_URI: string, - unescaped_URI: string, version: string) - { - print "http_request", version, method, original_URI; - } - -event http_reply(c: connection, version: string, code: count, reason: string) - { - print "http_reply", version, code, reason; - } diff --git a/testing/btest/core/connection_flip_roles.zeek b/testing/btest/core/connection_flip_roles.zeek new file mode 100644 index 0000000000..e5e52671eb --- /dev/null +++ b/testing/btest/core/connection_flip_roles.zeek @@ -0,0 +1,39 @@ +# @TEST-EXEC: zeek -b -r $TRACES/tcp/handshake-reorder.trace %INPUT >out +# @TEST-EXEC: btest-diff out + +# This tests the Connection::FlipRoles code path (SYN/SYN-ACK reversal). + +# The check of likely_server_ports is before Connection::FlipRoles, so +# need to make sure that isn't the mechanism used to flip src/dst stuff. +redef likely_server_ports = {}; + +global first_packet: bool = T; + +event new_packet(c: connection, p: pkt_hdr) + { + if ( ! first_packet ) + return; + + first_packet = F; + + print "schedule_analyzer, current conn_id", c$id; + # Anticipate roles getting flipped in next packet. + Analyzer::schedule_analyzer(141.142.228.5, 192.150.187.43, 80/tcp, + Analyzer::ANALYZER_HTTP, 2mins); + } + +event connection_state_remove(c: connection) + { + print "connection_state_remove", c$id; + } + +event http_request(c: connection, method: string, original_URI: string, + unescaped_URI: string, version: string) + { + print "http_request", version, method, original_URI; + } + +event http_reply(c: connection, version: string, code: count, reason: string) + { + print "http_reply", version, code, reason; + } diff --git a/testing/btest/core/disable-mobile-ipv6.test b/testing/btest/core/disable-mobile-ipv6.test index 88eb2b853f..eace575cca 100644 --- a/testing/btest/core/disable-mobile-ipv6.test +++ b/testing/btest/core/disable-mobile-ipv6.test @@ -1,5 +1,6 @@ -# @TEST-REQUIRES: grep -q "#undef ENABLE_MOBILE_IPV6" $BUILD/bro-config.h -# @TEST-EXEC: bro -r $TRACES/mobile-ipv6/mip6_back.trace %INPUT +# @TEST-REQUIRES: grep -q "#undef ENABLE_MOBILE_IPV6" $BUILD/zeek-config.h +# +# @TEST-EXEC: zeek -r $TRACES/mobile-ipv6/mip6_back.trace %INPUT # @TEST-EXEC: btest-diff weird.log event mobile_ipv6_message(p: pkt_hdr) diff --git a/testing/btest/core/discarder.bro b/testing/btest/core/discarder.bro deleted file mode 100644 index 9e8f5e7a2f..0000000000 --- a/testing/btest/core/discarder.bro +++ /dev/null @@ -1,92 +0,0 @@ -# @TEST-EXEC: bro -b -C -r $TRACES/wikipedia.trace discarder-ip.bro >output -# @TEST-EXEC: bro -b -C -r $TRACES/wikipedia.trace discarder-tcp.bro >>output -# @TEST-EXEC: bro -b -C -r $TRACES/wikipedia.trace discarder-udp.bro >>output -# @TEST-EXEC: bro -b -C -r $TRACES/icmp/icmp-destunreach-udp.pcap discarder-icmp.bro >>output -# @TEST-EXEC: btest-diff output - -@TEST-START-FILE discarder-ip.bro - -event bro_init() - { - print "################ IP Discarder ################"; - } - -function discarder_check_ip(p: pkt_hdr): bool - { - if ( p?$ip && p$ip$src == 141.142.220.118 && p$ip$dst == 208.80.152.2 ) - return F; - return T; - } - - -event new_packet(c: connection, p: pkt_hdr) - { - print c$id; - } - -@TEST-END-FILE - -@TEST-START-FILE discarder-tcp.bro - -event bro_init() - { - print "################ TCP Discarder 
################"; - } - -function discarder_check_tcp(p: pkt_hdr, d: string): bool - { - if ( p$tcp$flags == TH_SYN ) - return F; - return T; - } - -event new_packet(c: connection, p: pkt_hdr) - { - if ( p?$tcp ) - print c$id; - } - -@TEST-END-FILE - -@TEST-START-FILE discarder-udp.bro - -event bro_init() - { - print "################ UDP Discarder ################"; - } - -function discarder_check_udp(p: pkt_hdr, d: string): bool - { - if ( p?$ip6 ) - return F; - return T; - } - -event new_packet(c: connection, p: pkt_hdr) - { - if ( p?$udp ) - print c$id; - } - -@TEST-END-FILE - -@TEST-START-FILE discarder-icmp.bro - -event bro_init() - { - print "################ ICMP Discarder ################"; - } - -function discarder_check_icmp(p: pkt_hdr): bool - { - print fmt("Discard icmp packet: %s", p$icmp); - return T; - } - -event new_packet(c: connection, p: pkt_hdr) - { - if ( p?$icmp ) - print c$id; - } - -@TEST-END-FILE diff --git a/testing/btest/core/discarder.zeek b/testing/btest/core/discarder.zeek new file mode 100644 index 0000000000..21bae33541 --- /dev/null +++ b/testing/btest/core/discarder.zeek @@ -0,0 +1,92 @@ +# @TEST-EXEC: zeek -b -C -r $TRACES/wikipedia.trace discarder-ip.zeek >output +# @TEST-EXEC: zeek -b -C -r $TRACES/wikipedia.trace discarder-tcp.zeek >>output +# @TEST-EXEC: zeek -b -C -r $TRACES/wikipedia.trace discarder-udp.zeek >>output +# @TEST-EXEC: zeek -b -C -r $TRACES/icmp/icmp-destunreach-udp.pcap discarder-icmp.zeek >>output +# @TEST-EXEC: btest-diff output + +@TEST-START-FILE discarder-ip.zeek + +event zeek_init() + { + print "################ IP Discarder ################"; + } + +function discarder_check_ip(p: pkt_hdr): bool + { + if ( p?$ip && p$ip$src == 141.142.220.118 && p$ip$dst == 208.80.152.2 ) + return F; + return T; + } + + +event new_packet(c: connection, p: pkt_hdr) + { + print c$id; + } + +@TEST-END-FILE + +@TEST-START-FILE discarder-tcp.zeek + +event zeek_init() + { + print "################ TCP Discarder ################"; + } + +function discarder_check_tcp(p: pkt_hdr, d: string): bool + { + if ( p$tcp$flags == TH_SYN ) + return F; + return T; + } + +event new_packet(c: connection, p: pkt_hdr) + { + if ( p?$tcp ) + print c$id; + } + +@TEST-END-FILE + +@TEST-START-FILE discarder-udp.zeek + +event zeek_init() + { + print "################ UDP Discarder ################"; + } + +function discarder_check_udp(p: pkt_hdr, d: string): bool + { + if ( p?$ip6 ) + return F; + return T; + } + +event new_packet(c: connection, p: pkt_hdr) + { + if ( p?$udp ) + print c$id; + } + +@TEST-END-FILE + +@TEST-START-FILE discarder-icmp.zeek + +event zeek_init() + { + print "################ ICMP Discarder ################"; + } + +function discarder_check_icmp(p: pkt_hdr): bool + { + print fmt("Discard icmp packet: %s", p$icmp); + return T; + } + +event new_packet(c: connection, p: pkt_hdr) + { + if ( p?$icmp ) + print c$id; + } + +@TEST-END-FILE diff --git a/testing/btest/core/div-by-zero.bro b/testing/btest/core/div-by-zero.bro deleted file mode 100644 index d1221638d6..0000000000 --- a/testing/btest/core/div-by-zero.bro +++ /dev/null @@ -1,36 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -event div_int(a: int, b: int) - { - print a / b; - } - -event div_count(a: count, b: count) - { - print a / b; - } - -event div_double(a: double, b: double) - { - print a / b; - } - -event mod_int(a: int, b: int) - { - print a % b; - } - -event mod_count(a: count, b: count) - { - print a % b; - 
} - -event bro_init() - { - event div_int(10, 0); - event div_count(10, 0); - event div_double(10.0, 0.0); - event mod_int(10, 0); - event mod_count(10, 0); - } diff --git a/testing/btest/core/div-by-zero.zeek b/testing/btest/core/div-by-zero.zeek new file mode 100644 index 0000000000..d1c95db88c --- /dev/null +++ b/testing/btest/core/div-by-zero.zeek @@ -0,0 +1,36 @@ +# @TEST-EXEC: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +event div_int(a: int, b: int) + { + print a / b; + } + +event div_count(a: count, b: count) + { + print a / b; + } + +event div_double(a: double, b: double) + { + print a / b; + } + +event mod_int(a: int, b: int) + { + print a % b; + } + +event mod_count(a: count, b: count) + { + print a % b; + } + +event zeek_init() + { + event div_int(10, 0); + event div_count(10, 0); + event div_double(10.0, 0.0); + event mod_int(10, 0); + event mod_count(10, 0); + } diff --git a/testing/btest/core/dns-init.bro b/testing/btest/core/dns-init.bro deleted file mode 100644 index 5a7efff6fb..0000000000 --- a/testing/btest/core/dns-init.bro +++ /dev/null @@ -1,9 +0,0 @@ -# We once had a bug where DNS lookups at init time lead to an immediate crash. -# -# @TEST-EXEC: bro %INPUT >output 2>&1 -# @TEST-EXEC: btest-diff output - -const foo: set[addr] = { - google.com -}; - diff --git a/testing/btest/core/dns-init.zeek b/testing/btest/core/dns-init.zeek new file mode 100644 index 0000000000..0372bbf7b8 --- /dev/null +++ b/testing/btest/core/dns-init.zeek @@ -0,0 +1,9 @@ +# We once had a bug where DNS lookups at init time lead to an immediate crash. +# +# @TEST-EXEC: zeek %INPUT >output 2>&1 +# @TEST-EXEC: btest-diff output + +const foo: set[addr] = { + google.com +}; + diff --git a/testing/btest/core/embedded-null.bro b/testing/btest/core/embedded-null.bro deleted file mode 100644 index 95a4c965a9..0000000000 --- a/testing/btest/core/embedded-null.bro +++ /dev/null @@ -1,8 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT 2>&1 -# @TEST-EXEC: btest-diff .stdout - -event bro_init() - { - local a = "hi\x00there"; - unique_id(a); - } diff --git a/testing/btest/core/embedded-null.zeek b/testing/btest/core/embedded-null.zeek new file mode 100644 index 0000000000..bae3767d8c --- /dev/null +++ b/testing/btest/core/embedded-null.zeek @@ -0,0 +1,8 @@ +# @TEST-EXEC: zeek -b %INPUT 2>&1 +# @TEST-EXEC: btest-diff .stdout + +event zeek_init() + { + local a = "hi\x00there"; + unique_id(a); + } diff --git a/testing/btest/core/enum-redef-exists.bro b/testing/btest/core/enum-redef-exists.bro deleted file mode 100644 index 69c331c74d..0000000000 --- a/testing/btest/core/enum-redef-exists.bro +++ /dev/null @@ -1,7 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output - -module SSH; - -export { - redef enum Log::ID += { LOG }; -} diff --git a/testing/btest/core/enum-redef-exists.zeek b/testing/btest/core/enum-redef-exists.zeek new file mode 100644 index 0000000000..d9b1cc2415 --- /dev/null +++ b/testing/btest/core/enum-redef-exists.zeek @@ -0,0 +1,7 @@ +# @TEST-EXEC: zeek -b %INPUT >output + +module SSH; + +export { + redef enum Log::ID += { LOG }; +} diff --git a/testing/btest/core/erspan.bro b/testing/btest/core/erspan.bro deleted file mode 100644 index eb05cdcf5a..0000000000 --- a/testing/btest/core/erspan.bro +++ /dev/null @@ -1,4 +0,0 @@ -# @TEST-EXEC: bro -C -b -r $TRACES/erspan.trace %INPUT -# @TEST-EXEC: btest-diff tunnel.log - -@load base/frameworks/tunnels diff --git a/testing/btest/core/erspan.zeek b/testing/btest/core/erspan.zeek new file mode 100644 index 
0000000000..379afb55fb --- /dev/null +++ b/testing/btest/core/erspan.zeek @@ -0,0 +1,4 @@ +# @TEST-EXEC: zeek -C -b -r $TRACES/erspan.trace %INPUT +# @TEST-EXEC: btest-diff tunnel.log + +@load base/frameworks/tunnels diff --git a/testing/btest/core/erspanII.bro b/testing/btest/core/erspanII.bro deleted file mode 100644 index b59c0ecf08..0000000000 --- a/testing/btest/core/erspanII.bro +++ /dev/null @@ -1,6 +0,0 @@ -# @TEST-EXEC: bro -C -b -r $TRACES/erspanII.pcap %INPUT -# @TEST-EXEC: btest-diff tunnel.log -# @TEST-EXEC: btest-diff conn.log - -@load base/frameworks/tunnels -@load base/protocols/conn diff --git a/testing/btest/core/erspanII.zeek b/testing/btest/core/erspanII.zeek new file mode 100644 index 0000000000..945a8ff3d2 --- /dev/null +++ b/testing/btest/core/erspanII.zeek @@ -0,0 +1,6 @@ +# @TEST-EXEC: zeek -C -b -r $TRACES/erspanII.pcap %INPUT +# @TEST-EXEC: btest-diff tunnel.log +# @TEST-EXEC: btest-diff conn.log + +@load base/frameworks/tunnels +@load base/protocols/conn diff --git a/testing/btest/core/erspanIII.bro b/testing/btest/core/erspanIII.bro deleted file mode 100644 index 3215f4b9da..0000000000 --- a/testing/btest/core/erspanIII.bro +++ /dev/null @@ -1,6 +0,0 @@ -# @TEST-EXEC: bro -C -b -r $TRACES/erspanIII.pcap %INPUT -# @TEST-EXEC: btest-diff tunnel.log -# @TEST-EXEC: btest-diff conn.log - -@load base/frameworks/tunnels -@load base/protocols/conn diff --git a/testing/btest/core/erspanIII.zeek b/testing/btest/core/erspanIII.zeek new file mode 100644 index 0000000000..de3072e022 --- /dev/null +++ b/testing/btest/core/erspanIII.zeek @@ -0,0 +1,6 @@ +# @TEST-EXEC: zeek -C -b -r $TRACES/erspanIII.pcap %INPUT +# @TEST-EXEC: btest-diff tunnel.log +# @TEST-EXEC: btest-diff conn.log + +@load base/frameworks/tunnels +@load base/protocols/conn diff --git a/testing/btest/core/ether-addrs.bro b/testing/btest/core/ether-addrs.bro deleted file mode 100644 index 2cb1d42b6f..0000000000 --- a/testing/btest/core/ether-addrs.bro +++ /dev/null @@ -1,11 +0,0 @@ -# @TEST-EXEC: bro -C -b -r $TRACES/wikipedia.trace %INPUT >>output -# @TEST-EXEC: bro -C -b -r $TRACES/radiotap.pcap %INPUT >>output -# @TEST-EXEC: btest-diff output - -event new_connection(c: connection) - { - if ( c$orig?$l2_addr && c$resp?$l2_addr ) - print c$orig$l2_addr, c$resp$l2_addr; - else - print "-", "-"; - } diff --git a/testing/btest/core/ether-addrs.zeek b/testing/btest/core/ether-addrs.zeek new file mode 100644 index 0000000000..d905d97baa --- /dev/null +++ b/testing/btest/core/ether-addrs.zeek @@ -0,0 +1,11 @@ +# @TEST-EXEC: zeek -C -b -r $TRACES/wikipedia.trace %INPUT >>output +# @TEST-EXEC: zeek -C -b -r $TRACES/radiotap.pcap %INPUT >>output +# @TEST-EXEC: btest-diff output + +event new_connection(c: connection) + { + if ( c$orig?$l2_addr && c$resp?$l2_addr ) + print c$orig$l2_addr, c$resp$l2_addr; + else + print "-", "-"; + } diff --git a/testing/btest/core/event-arg-reuse.bro b/testing/btest/core/event-arg-reuse.bro deleted file mode 100644 index ba8e0f0677..0000000000 --- a/testing/btest/core/event-arg-reuse.bro +++ /dev/null @@ -1,20 +0,0 @@ -# @TEST-DOC: Check that assignment to event parameters isn't visible to other handlers. 
-# -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -event f(a: int) &priority=5 - { - a = 2; - print "f1", a; - } - -event f(a: int) &priority=-5 - { - print "f2", a; - } - -event bro_init() - { - event f(1); - } diff --git a/testing/btest/core/event-arg-reuse.zeek b/testing/btest/core/event-arg-reuse.zeek new file mode 100644 index 0000000000..b96f4a5a18 --- /dev/null +++ b/testing/btest/core/event-arg-reuse.zeek @@ -0,0 +1,20 @@ +# @TEST-DOC: Check that assignment to event parameters isn't visible to other handlers. +# +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +event f(a: int) &priority=5 + { + a = 2; + print "f1", a; + } + +event f(a: int) &priority=-5 + { + print "f2", a; + } + +event zeek_init() + { + event f(1); + } diff --git a/testing/btest/core/expr-exception.bro b/testing/btest/core/expr-exception.bro deleted file mode 100644 index 9e84717935..0000000000 --- a/testing/btest/core/expr-exception.bro +++ /dev/null @@ -1,25 +0,0 @@ -# Expressions in an event handler that raise interpreter exceptions -# shouldn't abort Bro entirely, but just return from the function body. -# -# @TEST-EXEC: bro -r $TRACES/wikipedia.trace %INPUT >output -# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath | $SCRIPTS/diff-remove-timestamps" btest-diff reporter.log -# @TEST-EXEC: btest-diff output - -event connection_established(c: connection) - { - print c$ftp; - print "not reached"; - } - -event connection_established(c: connection) - { - if ( c?$ftp ) - print c$ftp; - else - print "ftp field missing"; - } - -event connection_established(c: connection) - { - print c$id; - } diff --git a/testing/btest/core/expr-exception.zeek b/testing/btest/core/expr-exception.zeek new file mode 100644 index 0000000000..79f460b1e4 --- /dev/null +++ b/testing/btest/core/expr-exception.zeek @@ -0,0 +1,25 @@ +# Expressions in an event handler that raise interpreter exceptions +# shouldn't abort Zeek entirely, but just return from the function body. 
+# +# @TEST-EXEC: zeek -r $TRACES/wikipedia.trace %INPUT >output +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath | $SCRIPTS/diff-remove-timestamps" btest-diff reporter.log +# @TEST-EXEC: btest-diff output + +event connection_established(c: connection) + { + print c$ftp; + print "not reached"; + } + +event connection_established(c: connection) + { + if ( c?$ftp ) + print c$ftp; + else + print "ftp field missing"; + } + +event connection_established(c: connection) + { + print c$id; + } diff --git a/testing/btest/core/fake_dns.bro b/testing/btest/core/fake_dns.bro deleted file mode 100644 index f4d8c46777..0000000000 --- a/testing/btest/core/fake_dns.bro +++ /dev/null @@ -1,41 +0,0 @@ -# @TEST-EXEC: BRO_DNS_FAKE=1 bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -redef exit_only_after_terminate = T; - -global addrs: set[addr] = { - google.com, - bing.com, - yahoo.com -}; - -global c: count = 0; - -function check_terminate() - { - ++c; - - if ( c > 2 ) - terminate(); - } - -event bro_init() - { - print addrs; - - when ( local result = lookup_hostname_txt("bro.wp.dg.cx") ) - { - print "lookup_hostname_txt", result; - check_terminate(); - } - when ( local result2 = lookup_hostname("example.com") ) - { - print "lookup_hostname", result2; - check_terminate(); - } - when ( local result3 = lookup_addr(1.2.3.4) ) - { - print "lookup_addr", result3; - check_terminate(); - } - } diff --git a/testing/btest/core/fake_dns.zeek b/testing/btest/core/fake_dns.zeek new file mode 100644 index 0000000000..46dd50c5ee --- /dev/null +++ b/testing/btest/core/fake_dns.zeek @@ -0,0 +1,41 @@ +# @TEST-EXEC: ZEEK_DNS_FAKE=1 zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +redef exit_only_after_terminate = T; + +global addrs: set[addr] = { + google.com, + bing.com, + yahoo.com +}; + +global c: count = 0; + +function check_terminate() + { + ++c; + + if ( c > 2 ) + terminate(); + } + +event zeek_init() + { + print addrs; + + when ( local result = lookup_hostname_txt("bro.wp.dg.cx") ) + { + print "lookup_hostname_txt", result; + check_terminate(); + } + when ( local result2 = lookup_hostname("example.com") ) + { + print "lookup_hostname", result2; + check_terminate(); + } + when ( local result3 = lookup_addr(1.2.3.4) ) + { + print "lookup_addr", result3; + check_terminate(); + } + } diff --git a/testing/btest/core/file-caching-cloning.test b/testing/btest/core/file-caching-cloning.test new file mode 100644 index 0000000000..03d0f5021e --- /dev/null +++ b/testing/btest/core/file-caching-cloning.test @@ -0,0 +1,49 @@ +# This checks that the interactions between open-file caching and +# cloning works ok. In the first case, all files can fit +# in the cache, but get cloned before every write. In the +# second case, files are eventually forced out of the cache; later writing +# requires re-opening. + +# @TEST-EXEC: zeek -b %INPUT "test_file_prefix=one" +# @TEST-EXEC: btest-diff one0 +# @TEST-EXEC: btest-diff one1 +# @TEST-EXEC: btest-diff one2 +# @TEST-EXEC: zeek -b %INPUT "test_file_prefix=two" "max_files_in_cache=2" +# @TEST-EXEC: btest-diff two0 +# @TEST-EXEC: btest-diff two1 +# @TEST-EXEC: btest-diff two2 + +const test_file_prefix = "" &redef; +global file_table: table[string] of file; +global iterations: vector of count = vector(0,1,2,3,4,5,6,7,8); + +function write_to_file(c: count) + { + local f: file; + # Take turns writing across three output files. 
+ local filename = fmt("%s%s", test_file_prefix, c % 3 ); + + if ( filename in file_table ) + f = file_table[filename]; + else + { + f = open(filename); + file_table[filename] = f; + } + + # This when block is a trick to get the frame cloned + # and thus serialize the local file value + when ( local s = fmt("write %d", c) ) + print f, s; + } + +event file_opened(f: file) + { + print f, "opened"; + } + +event zeek_init() + { + for ( i in iterations ) + write_to_file(iterations[i]); + } diff --git a/testing/btest/core/file-caching-serialization.test b/testing/btest/core/file-caching-serialization.test deleted file mode 100644 index 7ff1d8be8d..0000000000 --- a/testing/btest/core/file-caching-serialization.test +++ /dev/null @@ -1,49 +0,0 @@ -# This checks that the interactions between open-file caching and -# serialization works ok. In the first case, all files can fit -# in the cache, but get serialized before every write. In the -# second case, files are eventually forced out of the cache and -# undergo serialization, which requires re-opening. - -# @TEST-EXEC: bro -b %INPUT "test_file_prefix=one" -# @TEST-EXEC: btest-diff one0 -# @TEST-EXEC: btest-diff one1 -# @TEST-EXEC: btest-diff one2 -# @TEST-EXEC: bro -b %INPUT "test_file_prefix=two" "max_files_in_cache=2" -# @TEST-EXEC: btest-diff two0 -# @TEST-EXEC: btest-diff two1 -# @TEST-EXEC: btest-diff two2 - -const test_file_prefix = "" &redef; -global file_table: table[string] of file; -global iterations: vector of count = vector(0,1,2,3,4,5,6,7,8); - -function write_to_file(c: count) - { - local f: file; - # Take turns writing across three output files. - local filename = fmt("%s%s", test_file_prefix, c % 3 ); - - if ( filename in file_table ) - f = file_table[filename]; - else - { - f = open(filename); - file_table[filename] = f; - } - - # This when block is a trick to get the frame cloned - # and thus serialize the local file value - when ( local s = fmt("write %d", c) ) - print f, s; - } - -event file_opened(f: file) - { - print f, "opened"; - } - -event bro_init() - { - for ( i in iterations ) - write_to_file(iterations[i]); - } diff --git a/testing/btest/core/global_opaque_val.bro b/testing/btest/core/global_opaque_val.bro deleted file mode 100644 index 84087d8295..0000000000 --- a/testing/btest/core/global_opaque_val.bro +++ /dev/null @@ -1,12 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -global test = md5_hash_init(); - -event bro_init() - { - md5_hash_update(test, "one"); - md5_hash_update(test, "two"); - md5_hash_update(test, "three"); - print md5_hash_finish(test); - } diff --git a/testing/btest/core/global_opaque_val.zeek b/testing/btest/core/global_opaque_val.zeek new file mode 100644 index 0000000000..4bc0607029 --- /dev/null +++ b/testing/btest/core/global_opaque_val.zeek @@ -0,0 +1,12 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +global test = md5_hash_init(); + +event zeek_init() + { + md5_hash_update(test, "one"); + md5_hash_update(test, "two"); + md5_hash_update(test, "three"); + print md5_hash_finish(test); + } diff --git a/testing/btest/core/history-flip.bro b/testing/btest/core/history-flip.bro deleted file mode 100644 index e9769d99b5..0000000000 --- a/testing/btest/core/history-flip.bro +++ /dev/null @@ -1,4 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/tcp/missing-syn.pcap %INPUT -# @TEST-EXEC: btest-diff conn.log - -@load policy/protocols/conn/mac-logging diff --git a/testing/btest/core/history-flip.zeek b/testing/btest/core/history-flip.zeek new file mode 
100644 index 0000000000..3895c3e2c6 --- /dev/null +++ b/testing/btest/core/history-flip.zeek @@ -0,0 +1,4 @@ +# @TEST-EXEC: zeek -C -r $TRACES/tcp/missing-syn.pcap %INPUT +# @TEST-EXEC: btest-diff conn.log + +@load policy/protocols/conn/mac-logging diff --git a/testing/btest/core/icmp/icmp-context.test b/testing/btest/core/icmp/icmp-context.test index ca7a34c5aa..58e696cf9c 100644 --- a/testing/btest/core/icmp/icmp-context.test +++ b/testing/btest/core/icmp/icmp-context.test @@ -1,8 +1,8 @@ # These tests all check that IPv6 context packet construction for ICMP6 works. -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp-destunreach-no-context.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp-destunreach-ip.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp-destunreach-udp.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp-destunreach-no-context.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp-destunreach-ip.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp-destunreach-udp.pcap %INPUT >>output 2>&1 # @TEST-EXEC: btest-diff output event icmp_unreachable(c: connection, icmp: icmp_conn, code: count, context: icmp_context) diff --git a/testing/btest/core/icmp/icmp-events.test b/testing/btest/core/icmp/icmp-events.test index 1a54f05fba..3aa0ee1177 100644 --- a/testing/btest/core/icmp/icmp-events.test +++ b/testing/btest/core/icmp/icmp-events.test @@ -1,8 +1,8 @@ # These tests all check that ICMP6 events get raised with correct arguments. -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp-destunreach-udp.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp-timeexceeded.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp-ping.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp-destunreach-udp.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp-timeexceeded.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp-ping.pcap %INPUT >>output 2>&1 # @TEST-EXEC: btest-diff output diff --git a/testing/btest/core/icmp/icmp6-context.test b/testing/btest/core/icmp/icmp6-context.test index dfa8271cbc..66d57b527b 100644 --- a/testing/btest/core/icmp/icmp6-context.test +++ b/testing/btest/core/icmp/icmp6-context.test @@ -1,9 +1,9 @@ # These tests all check that IPv6 context packet construction for ICMP6 works. 
-# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-destunreach-no-context.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-destunreach-ip6ext-trunc.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-destunreach-ip6ext-udp.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-destunreach-ip6ext.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp6-destunreach-no-context.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp6-destunreach-ip6ext-trunc.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp6-destunreach-ip6ext-udp.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp6-destunreach-ip6ext.pcap %INPUT >>output 2>&1 # @TEST-EXEC: btest-diff output event icmp_unreachable(c: connection, icmp: icmp_conn, code: count, context: icmp_context) diff --git a/testing/btest/core/icmp/icmp6-events.test b/testing/btest/core/icmp/icmp6-events.test index 5263dd6e7f..6174e697fd 100644 --- a/testing/btest/core/icmp/icmp6-events.test +++ b/testing/btest/core/icmp/icmp6-events.test @@ -1,15 +1,15 @@ # These tests all check that ICMP6 events get raised with correct arguments. -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-destunreach-ip6ext-udp.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-toobig.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-timeexceeded.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-paramprob.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-ping.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-redirect.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-router-advert.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-neighbor-advert.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-router-solicit.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-neighbor-solicit.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp6-destunreach-ip6ext-udp.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp6-toobig.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp6-timeexceeded.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp6-paramprob.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp6-ping.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp6-redirect.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp6-router-advert.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp6-neighbor-advert.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp6-router-solicit.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp6-neighbor-solicit.pcap %INPUT >>output 2>&1 # @TEST-EXEC: btest-diff output diff --git a/testing/btest/core/icmp/icmp6-nd-options.test b/testing/btest/core/icmp/icmp6-nd-options.test index 64543852a3..93f1931524 100644 --- a/testing/btest/core/icmp/icmp6-nd-options.test +++ b/testing/btest/core/icmp/icmp6-nd-options.test @@ -1,7 +1,7 @@ # These tests all check that ICMP6 events get raised with correct arguments. 
-# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-redirect-hdr-opt.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp6-nd-options.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp6-redirect-hdr-opt.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp6-nd-options.pcap %INPUT >>output 2>&1 # @TEST-EXEC: btest-diff output diff --git a/testing/btest/core/icmp/icmp_sent.bro b/testing/btest/core/icmp/icmp_sent.bro deleted file mode 100644 index 406ca637ba..0000000000 --- a/testing/btest/core/icmp/icmp_sent.bro +++ /dev/null @@ -1,12 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/icmp/icmp_sent.pcap %INPUT >out -# @TEST-EXEC: btest-diff out - -event icmp_sent(c: connection, icmp: icmp_conn) - { - print "icmp_sent", c$id, icmp; - } - -event icmp_sent_payload(c: connection, icmp: icmp_conn, payload: string) - { - print "icmp_sent_payload", c$id, icmp, |payload|; - } diff --git a/testing/btest/core/icmp/icmp_sent.zeek b/testing/btest/core/icmp/icmp_sent.zeek new file mode 100644 index 0000000000..72e6ab543b --- /dev/null +++ b/testing/btest/core/icmp/icmp_sent.zeek @@ -0,0 +1,12 @@ +# @TEST-EXEC: zeek -b -r $TRACES/icmp/icmp_sent.pcap %INPUT >out +# @TEST-EXEC: btest-diff out + +event icmp_sent(c: connection, icmp: icmp_conn) + { + print "icmp_sent", c$id, icmp; + } + +event icmp_sent_payload(c: connection, icmp: icmp_conn, payload: string) + { + print "icmp_sent_payload", c$id, icmp, |payload|; + } diff --git a/testing/btest/core/init-error.bro b/testing/btest/core/init-error.bro deleted file mode 100644 index c415ca16b1..0000000000 --- a/testing/btest/core/init-error.bro +++ /dev/null @@ -1,21 +0,0 @@ -# The default is for an initialization error to be a hard failure. - -# @TEST-EXEC-FAIL: unset ZEEK_ALLOW_INIT_ERRORS && bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -event bro_init() &priority=10 - { - print "1st event"; - } - -event bro_init() &priority=10 - { - print "2nd event"; - local v = vector(1, 2, 3); - print v[10]; - } - -event bro_init() &priority=-10 - { - print "3rd event"; - } diff --git a/testing/btest/core/init-error.zeek b/testing/btest/core/init-error.zeek new file mode 100644 index 0000000000..82226e9dfa --- /dev/null +++ b/testing/btest/core/init-error.zeek @@ -0,0 +1,21 @@ +# The default is for an initialization error to be a hard failure. + +# @TEST-EXEC-FAIL: unset ZEEK_ALLOW_INIT_ERRORS && zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +event zeek_init() &priority=10 + { + print "1st event"; + } + +event zeek_init() &priority=10 + { + print "2nd event"; + local v = vector(1, 2, 3); + print v[10]; + } + +event zeek_init() &priority=-10 + { + print "3rd event"; + } diff --git a/testing/btest/core/ip-broken-header.bro b/testing/btest/core/ip-broken-header.bro deleted file mode 100644 index 426e7a7bc0..0000000000 --- a/testing/btest/core/ip-broken-header.bro +++ /dev/null @@ -1,7 +0,0 @@ -# This test has a trace that was generated from fuzzing which used to cause -# OOB reads in Bro. It has a number of packets broken in weird ways. 
-# -# @TEST-EXEC: gunzip -c $TRACES/trunc/mpls-6in6-broken.pcap.gz | bro -C -b -r - %INPUT -# @TEST-EXEC: btest-diff weird.log - -@load base/frameworks/notice/weird.bro diff --git a/testing/btest/core/ip-broken-header.zeek b/testing/btest/core/ip-broken-header.zeek new file mode 100644 index 0000000000..08c72b06f1 --- /dev/null +++ b/testing/btest/core/ip-broken-header.zeek @@ -0,0 +1,7 @@ +# This test has a trace that was generated from fuzzing which used to cause +# OOB reads in Zeek. It has a number of packets broken in weird ways. +# +# @TEST-EXEC: gunzip -c $TRACES/trunc/mpls-6in6-broken.pcap.gz | zeek -C -b -r - %INPUT +# @TEST-EXEC: btest-diff weird.log + +@load base/frameworks/notice/weird diff --git a/testing/btest/core/ipv6-atomic-frag.test b/testing/btest/core/ipv6-atomic-frag.test index 8c8fe6ca64..a247d50cec 100644 --- a/testing/btest/core/ipv6-atomic-frag.test +++ b/testing/btest/core/ipv6-atomic-frag.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/ipv6-http-atomic-frag.trace %INPUT >output +# @TEST-EXEC: zeek -r $TRACES/ipv6-http-atomic-frag.trace %INPUT >output # @TEST-EXEC: btest-diff output event new_connection(c: connection) diff --git a/testing/btest/core/ipv6-flow-labels.test b/testing/btest/core/ipv6-flow-labels.test index 2265cd55d4..332a684cc9 100644 --- a/testing/btest/core/ipv6-flow-labels.test +++ b/testing/btest/core/ipv6-flow-labels.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b -r $TRACES/ftp/ipv6.trace %INPUT >output +# @TEST-EXEC: zeek -b -r $TRACES/ftp/ipv6.trace %INPUT >output # @TEST-EXEC: btest-diff output function print_connection(c: connection, event_name: string) diff --git a/testing/btest/core/ipv6-frag.test b/testing/btest/core/ipv6-frag.test index 32c7c0a8c1..815dd9910b 100644 --- a/testing/btest/core/ipv6-frag.test +++ b/testing/btest/core/ipv6-frag.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/ipv6-fragmented-dns.trace %INPUT >output +# @TEST-EXEC: zeek -r $TRACES/ipv6-fragmented-dns.trace %INPUT >output # @TEST-EXEC: btest-diff output # @TEST-EXEC: btest-diff dns.log diff --git a/testing/btest/core/ipv6_esp.test b/testing/btest/core/ipv6_esp.test index 508a4597f2..4f8b3a4b69 100644 --- a/testing/btest/core/ipv6_esp.test +++ b/testing/btest/core/ipv6_esp.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b -r $TRACES/ip6_esp.trace %INPUT >output +# @TEST-EXEC: zeek -b -r $TRACES/ip6_esp.trace %INPUT >output # @TEST-EXEC: btest-diff output # Just check that the event is raised correctly for a packet containing diff --git a/testing/btest/core/ipv6_ext_headers.test b/testing/btest/core/ipv6_ext_headers.test index 32a0f5d558..100410510b 100644 --- a/testing/btest/core/ipv6_ext_headers.test +++ b/testing/btest/core/ipv6_ext_headers.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b -r $TRACES/ipv6-hbh-routing0.trace %INPUT >output +# @TEST-EXEC: zeek -b -r $TRACES/ipv6-hbh-routing0.trace %INPUT >output # @TEST-EXEC: btest-diff output # Just check that the event is raised correctly for a packet containing diff --git a/testing/btest/core/ipv6_zero_len_ah.test b/testing/btest/core/ipv6_zero_len_ah.test index 014ba7b3cc..28c612992f 100644 --- a/testing/btest/core/ipv6_zero_len_ah.test +++ b/testing/btest/core/ipv6_zero_len_ah.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b -r $TRACES/ipv6_zero_len_ah.trace %INPUT >output +# @TEST-EXEC: zeek -b -r $TRACES/ipv6_zero_len_ah.trace %INPUT >output # @TEST-EXEC: btest-diff output # Shouldn't crash, but we also won't have seq and data fields set of the ip6_ah diff --git a/testing/btest/core/leaks/ayiya.test 
b/testing/btest/core/leaks/ayiya.test index 3572cf98ba..abbf46e6d8 100644 --- a/testing/btest/core/leaks/ayiya.test +++ b/testing/btest/core/leaks/ayiya.test @@ -1,8 +1,8 @@ # Needs perftools support. # -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # # @TEST-GROUP: leaks # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -r $TRACES/tunnels/ayiya3.trace +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -r $TRACES/tunnels/ayiya3.trace # @TEST-EXEC: btest-bg-wait 60 diff --git a/testing/btest/core/leaks/basic-cluster.bro b/testing/btest/core/leaks/basic-cluster.bro deleted file mode 100644 index fa73fb9a96..0000000000 --- a/testing/btest/core/leaks/basic-cluster.bro +++ /dev/null @@ -1,90 +0,0 @@ -# Needs perftools support. -# -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: btest-bg-run manager-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro -m %INPUT -# @TEST-EXEC: btest-bg-run worker-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro -m %INPUT -# @TEST-EXEC: btest-bg-run worker-2 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro -m %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], -}; -@TEST-END-FILE - -redef Log::default_rotation_interval = 0secs; - -global n = 0; - -event bro_init() &priority=5 - { - local r1: SumStats::Reducer = [$stream="test", $apply=set(SumStats::SUM, SumStats::MIN, SumStats::MAX, SumStats::AVERAGE, SumStats::STD_DEV, SumStats::VARIANCE, SumStats::UNIQUE)]; - SumStats::create([$name="test", - $epoch=5secs, - $reducers=set(r1), - $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) = - { - local r = result["test"]; - print fmt("Host: %s - num:%d - sum:%.1f - avg:%.1f - max:%.1f - min:%.1f - var:%.1f - std_dev:%.1f - unique:%d", key$host, r$num, r$sum, r$average, r$max, r$min, r$variance, r$std_dev, r$unique); - }, - $epoch_finished(ts: time) = - { - terminate(); - }]); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -global ready_for_data: event(); - -event bro_init() - { - Broker::auto_publish(Cluster::worker_topic, ready_for_data); - } - -event ready_for_data() - { - if ( Cluster::node == "worker-1" ) - { - SumStats::observe("test", [$host=1.2.3.4], [$num=34]); - SumStats::observe("test", [$host=1.2.3.4], [$num=30]); - SumStats::observe("test", [$host=6.5.4.3], [$num=1]); - SumStats::observe("test", [$host=7.2.1.5], [$num=54]); - } - if ( Cluster::node == "worker-2" ) - { - SumStats::observe("test", [$host=1.2.3.4], [$num=75]); - SumStats::observe("test", [$host=1.2.3.4], [$num=30]); - SumStats::observe("test", [$host=1.2.3.4], [$num=3]); - SumStats::observe("test", [$host=1.2.3.4], [$num=57]); - SumStats::observe("test", [$host=1.2.3.4], [$num=52]); - SumStats::observe("test", 
[$host=1.2.3.4], [$num=61]); - SumStats::observe("test", [$host=1.2.3.4], [$num=95]); - SumStats::observe("test", [$host=6.5.4.3], [$num=5]); - SumStats::observe("test", [$host=7.2.1.5], [$num=91]); - SumStats::observe("test", [$host=10.10.10.10], [$num=5]); - } - } - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) - -global peer_count = 0; -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - ++peer_count; - if ( peer_count == 2 ) - event ready_for_data(); - } - -@endif diff --git a/testing/btest/core/leaks/basic-cluster.zeek b/testing/btest/core/leaks/basic-cluster.zeek new file mode 100644 index 0000000000..e77a0ec417 --- /dev/null +++ b/testing/btest/core/leaks/basic-cluster.zeek @@ -0,0 +1,90 @@ +# Needs perftools support. +# +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: btest-bg-run manager-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek -m %INPUT +# @TEST-EXEC: btest-bg-run worker-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-1 zeek -m %INPUT +# @TEST-EXEC: btest-bg-run worker-2 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-2 zeek -m %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], +}; +@TEST-END-FILE + +redef Log::default_rotation_interval = 0secs; + +global n = 0; + +event zeek_init() &priority=5 + { + local r1: SumStats::Reducer = [$stream="test", $apply=set(SumStats::SUM, SumStats::MIN, SumStats::MAX, SumStats::AVERAGE, SumStats::STD_DEV, SumStats::VARIANCE, SumStats::UNIQUE)]; + SumStats::create([$name="test", + $epoch=5secs, + $reducers=set(r1), + $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) = + { + local r = result["test"]; + print fmt("Host: %s - num:%d - sum:%.1f - avg:%.1f - max:%.1f - min:%.1f - var:%.1f - std_dev:%.1f - unique:%d", key$host, r$num, r$sum, r$average, r$max, r$min, r$variance, r$std_dev, r$unique); + }, + $epoch_finished(ts: time) = + { + terminate(); + }]); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +global ready_for_data: event(); + +event zeek_init() + { + Broker::auto_publish(Cluster::worker_topic, ready_for_data); + } + +event ready_for_data() + { + if ( Cluster::node == "worker-1" ) + { + SumStats::observe("test", [$host=1.2.3.4], [$num=34]); + SumStats::observe("test", [$host=1.2.3.4], [$num=30]); + SumStats::observe("test", [$host=6.5.4.3], [$num=1]); + SumStats::observe("test", [$host=7.2.1.5], [$num=54]); + } + if ( Cluster::node == "worker-2" ) + { + SumStats::observe("test", [$host=1.2.3.4], [$num=75]); + SumStats::observe("test", [$host=1.2.3.4], [$num=30]); + SumStats::observe("test", [$host=1.2.3.4], [$num=3]); + SumStats::observe("test", [$host=1.2.3.4], [$num=57]); + SumStats::observe("test", [$host=1.2.3.4], [$num=52]); + SumStats::observe("test", [$host=1.2.3.4], [$num=61]); + SumStats::observe("test", [$host=1.2.3.4], [$num=95]); + 
SumStats::observe("test", [$host=6.5.4.3], [$num=5]); + SumStats::observe("test", [$host=7.2.1.5], [$num=91]); + SumStats::observe("test", [$host=10.10.10.10], [$num=5]); + } + } + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + +global peer_count = 0; +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + ++peer_count; + if ( peer_count == 2 ) + event ready_for_data(); + } + +@endif diff --git a/testing/btest/core/leaks/bloomfilter.bro b/testing/btest/core/leaks/bloomfilter.bro deleted file mode 100644 index e93bfe23cc..0000000000 --- a/testing/btest/core/leaks/bloomfilter.bro +++ /dev/null @@ -1,101 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -b -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -function test_basic_bloom_filter() - { - # Basic usage with counts. - local bf_cnt = bloomfilter_basic_init(0.1, 1000); - bloomfilter_add(bf_cnt, 42); - bloomfilter_add(bf_cnt, 84); - bloomfilter_add(bf_cnt, 168); - print bloomfilter_lookup(bf_cnt, 0); - print bloomfilter_lookup(bf_cnt, 42); - print bloomfilter_lookup(bf_cnt, 168); - print bloomfilter_lookup(bf_cnt, 336); - bloomfilter_add(bf_cnt, 0.5); # Type mismatch - bloomfilter_add(bf_cnt, "foo"); # Type mismatch - - # Alternative constructor. - local bf_dbl = bloomfilter_basic_init2(4, 10); - bloomfilter_add(bf_dbl, 4.2); - bloomfilter_add(bf_dbl, 3.14); - print bloomfilter_lookup(bf_dbl, 4.2); - print bloomfilter_lookup(bf_dbl, 3.14); - - # Basic usage with strings. - local bf_str = bloomfilter_basic_init(0.9, 10); - bloomfilter_add(bf_str, "foo"); - bloomfilter_add(bf_str, "bar"); - print bloomfilter_lookup(bf_str, "foo"); - print bloomfilter_lookup(bf_str, "bar"); - print bloomfilter_lookup(bf_str, "bazzz"), "fp"; # FP - print bloomfilter_lookup(bf_str, "quuux"), "fp"; # FP - bloomfilter_add(bf_str, 0.5); # Type mismatch - bloomfilter_add(bf_str, 100); # Type mismatch - - # Edge cases. - local bf_edge0 = bloomfilter_basic_init(0.000000000001, 1); - local bf_edge1 = bloomfilter_basic_init(0.00000001, 100000000); - local bf_edge2 = bloomfilter_basic_init(0.9999999, 1); - local bf_edge3 = bloomfilter_basic_init(0.9999999, 100000000000); - - # Invalid parameters. 
- local bf_bug0 = bloomfilter_basic_init(-0.5, 42); - local bf_bug1 = bloomfilter_basic_init(1.1, 42); - - # Merging - local bf_cnt2 = bloomfilter_basic_init(0.1, 1000); - bloomfilter_add(bf_cnt2, 42); - bloomfilter_add(bf_cnt, 100); - local bf_merged = bloomfilter_merge(bf_cnt, bf_cnt2); - print bloomfilter_lookup(bf_merged, 42); - print bloomfilter_lookup(bf_merged, 84); - print bloomfilter_lookup(bf_merged, 100); - print bloomfilter_lookup(bf_merged, 168); - - #empty filter tests - local bf_empty = bloomfilter_basic_init(0.1, 1000); - local bf_empty_merged = bloomfilter_merge(bf_merged, bf_empty); - print bloomfilter_lookup(bf_empty_merged, 42); - } - -function test_counting_bloom_filter() - { - local bf = bloomfilter_counting_init(3, 32, 3); - bloomfilter_add(bf, "foo"); - print bloomfilter_lookup(bf, "foo"); # 1 - bloomfilter_add(bf, "foo"); - print bloomfilter_lookup(bf, "foo"); # 2 - bloomfilter_add(bf, "foo"); - print bloomfilter_lookup(bf, "foo"); # 3 - bloomfilter_add(bf, "foo"); - print bloomfilter_lookup(bf, "foo"); # still 3 - - - bloomfilter_add(bf, "bar"); - bloomfilter_add(bf, "bar"); - print bloomfilter_lookup(bf, "bar"); # 2 - print bloomfilter_lookup(bf, "foo"); # still 3 - - # Merging - local bf2 = bloomfilter_counting_init(3, 32, 3); - bloomfilter_add(bf2, "baz"); - bloomfilter_add(bf2, "baz"); - bloomfilter_add(bf2, "bar"); - local bf_merged = bloomfilter_merge(bf, bf2); - print bloomfilter_lookup(bf_merged, "foo"); - print bloomfilter_lookup(bf_merged, "bar"); - print bloomfilter_lookup(bf_merged, "baz"); - } - -event new_connection(c: connection) - { - test_basic_bloom_filter(); - test_counting_bloom_filter(); - } diff --git a/testing/btest/core/leaks/bloomfilter.zeek b/testing/btest/core/leaks/bloomfilter.zeek new file mode 100644 index 0000000000..6318251767 --- /dev/null +++ b/testing/btest/core/leaks/bloomfilter.zeek @@ -0,0 +1,101 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -b -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +function test_basic_bloom_filter() + { + # Basic usage with counts. + local bf_cnt = bloomfilter_basic_init(0.1, 1000); + bloomfilter_add(bf_cnt, 42); + bloomfilter_add(bf_cnt, 84); + bloomfilter_add(bf_cnt, 168); + print bloomfilter_lookup(bf_cnt, 0); + print bloomfilter_lookup(bf_cnt, 42); + print bloomfilter_lookup(bf_cnt, 168); + print bloomfilter_lookup(bf_cnt, 336); + bloomfilter_add(bf_cnt, 0.5); # Type mismatch + bloomfilter_add(bf_cnt, "foo"); # Type mismatch + + # Alternative constructor. + local bf_dbl = bloomfilter_basic_init2(4, 10); + bloomfilter_add(bf_dbl, 4.2); + bloomfilter_add(bf_dbl, 3.14); + print bloomfilter_lookup(bf_dbl, 4.2); + print bloomfilter_lookup(bf_dbl, 3.14); + + # Basic usage with strings. + local bf_str = bloomfilter_basic_init(0.9, 10); + bloomfilter_add(bf_str, "foo"); + bloomfilter_add(bf_str, "bar"); + print bloomfilter_lookup(bf_str, "foo"); + print bloomfilter_lookup(bf_str, "bar"); + print bloomfilter_lookup(bf_str, "bazzz"), "fp"; # FP + print bloomfilter_lookup(bf_str, "quuux"), "fp"; # FP + bloomfilter_add(bf_str, 0.5); # Type mismatch + bloomfilter_add(bf_str, 100); # Type mismatch + + # Edge cases. 
+ local bf_edge0 = bloomfilter_basic_init(0.000000000001, 1); + local bf_edge1 = bloomfilter_basic_init(0.00000001, 100000000); + local bf_edge2 = bloomfilter_basic_init(0.9999999, 1); + local bf_edge3 = bloomfilter_basic_init(0.9999999, 100000000000); + + # Invalid parameters. + local bf_bug0 = bloomfilter_basic_init(-0.5, 42); + local bf_bug1 = bloomfilter_basic_init(1.1, 42); + + # Merging + local bf_cnt2 = bloomfilter_basic_init(0.1, 1000); + bloomfilter_add(bf_cnt2, 42); + bloomfilter_add(bf_cnt, 100); + local bf_merged = bloomfilter_merge(bf_cnt, bf_cnt2); + print bloomfilter_lookup(bf_merged, 42); + print bloomfilter_lookup(bf_merged, 84); + print bloomfilter_lookup(bf_merged, 100); + print bloomfilter_lookup(bf_merged, 168); + + #empty filter tests + local bf_empty = bloomfilter_basic_init(0.1, 1000); + local bf_empty_merged = bloomfilter_merge(bf_merged, bf_empty); + print bloomfilter_lookup(bf_empty_merged, 42); + } + +function test_counting_bloom_filter() + { + local bf = bloomfilter_counting_init(3, 32, 3); + bloomfilter_add(bf, "foo"); + print bloomfilter_lookup(bf, "foo"); # 1 + bloomfilter_add(bf, "foo"); + print bloomfilter_lookup(bf, "foo"); # 2 + bloomfilter_add(bf, "foo"); + print bloomfilter_lookup(bf, "foo"); # 3 + bloomfilter_add(bf, "foo"); + print bloomfilter_lookup(bf, "foo"); # still 3 + + + bloomfilter_add(bf, "bar"); + bloomfilter_add(bf, "bar"); + print bloomfilter_lookup(bf, "bar"); # 2 + print bloomfilter_lookup(bf, "foo"); # still 3 + + # Merging + local bf2 = bloomfilter_counting_init(3, 32, 3); + bloomfilter_add(bf2, "baz"); + bloomfilter_add(bf2, "baz"); + bloomfilter_add(bf2, "bar"); + local bf_merged = bloomfilter_merge(bf, bf2); + print bloomfilter_lookup(bf_merged, "foo"); + print bloomfilter_lookup(bf_merged, "bar"); + print bloomfilter_lookup(bf_merged, "baz"); + } + +event new_connection(c: connection) + { + test_basic_bloom_filter(); + test_counting_bloom_filter(); + } diff --git a/testing/btest/core/leaks/broker/clone_store.bro b/testing/btest/core/leaks/broker/clone_store.bro deleted file mode 100644 index 68235c7bab..0000000000 --- a/testing/btest/core/leaks/broker/clone_store.bro +++ /dev/null @@ -1,144 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# @TEST-GROUP: leaks - -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run clone "bro -m -b ../clone.bro >clone.out" -# @TEST-EXEC: btest-bg-run master "bro -b ../master.bro >master.out" - -# @TEST-EXEC: btest-bg-wait 45 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff clone/clone.out - -@TEST-START-FILE master.bro - -redef exit_only_after_terminate = T; -global query_timeout = 1sec; - -global ready: event(); - -global h: opaque of Broker::Store; - -function print_index(k: any) - { - when ( local r = Broker::get(h, k) ) - { - print "master", k, r$status, r$result; - } - timeout query_timeout - { - print "master", fmt("clone ", k); - } - } - -event done() - { - terminate(); - } - -event inserted() - { - Broker::erase(h, "four"); - - print("----"); - print_index("one"); - print_index("two"); - print_index(vector(1,2)); - print_index("three"); - print_index("four"); - print_index("five"); - print_index("six"); - schedule 2secs { done() }; - } - -event bro_init() - { - Broker::auto_publish("bro/events", done); - Broker::subscribe("bro/"); - - h = Broker::create_master("test"); - Broker::put(h, "one", "110"); - Broker::put(h, "two", 223); - Broker::put(h, vector(1,2), 1947/tcp); - - Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event insert_more() - { - Broker::put(h, "three", 3.14); - Broker::put(h, "four", 1.2.3.4); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - schedule 4secs { insert_more() }; - } - -@TEST-END-FILE - - -@TEST-START-FILE clone.bro - -redef exit_only_after_terminate = T; - -global query_timeout = 1sec; - -global h: opaque of Broker::Store; - - -global inserted: event(); - -function print_index(k: any) - { - when ( local r = Broker::get(h, k) ) - { - print "clone", k, r$status, r$result; - } - timeout query_timeout - { - print "clone", fmt("clone ", k); - } - } - -event lookup(stage: count) - { - print("----"); - print_index("one"); - print_index("two"); - print_index(vector(1,2)); - print_index("three"); - print_index("four"); - print_index("five"); - print_index("six"); - - if ( stage == 1 ) - schedule 4secs { lookup(2) }; - - if ( stage == 2 ) - { - Broker::put(h, "five", "555"); - Broker::put(h, "six", "666"); - event inserted(); - schedule 2secs { lookup(3) }; - } - } - -event done() - { - terminate(); - } - -event bro_init() - { - Broker::auto_publish("bro/events", inserted); - Broker::subscribe("bro/"); - Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - h = Broker::create_clone("test"); - schedule 2secs { lookup(1) }; - } - -@TEST-END-FILE - diff --git a/testing/btest/core/leaks/broker/clone_store.zeek b/testing/btest/core/leaks/broker/clone_store.zeek new file mode 100644 index 0000000000..bf8732a60f --- /dev/null +++ b/testing/btest/core/leaks/broker/clone_store.zeek @@ -0,0 +1,144 @@ +# @TEST-PORT: BROKER_PORT +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# @TEST-GROUP: leaks + +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run clone "zeek -m -b ../clone.zeek >clone.out" +# @TEST-EXEC: btest-bg-run master "zeek -b ../master.zeek >master.out" + +# @TEST-EXEC: btest-bg-wait 45 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff clone/clone.out + +@TEST-START-FILE master.zeek + +redef exit_only_after_terminate = T; +global query_timeout = 1sec; + +global ready: event(); + +global h: opaque of Broker::Store; + +function print_index(k: any) + { + when ( local r = Broker::get(h, k) ) + { + print "master", k, r$status, r$result; + } + timeout query_timeout + { + print "master", fmt("clone ", k); + } + } + +event done() + { + terminate(); + } + +event inserted() + { + Broker::erase(h, "four"); + + print("----"); + print_index("one"); + print_index("two"); + print_index(vector(1,2)); + print_index("three"); + print_index("four"); + print_index("five"); + print_index("six"); + schedule 2secs { done() }; + } + +event zeek_init() + { + Broker::auto_publish("bro/events", done); + Broker::subscribe("bro/"); + + h = Broker::create_master("test"); + Broker::put(h, "one", "110"); + Broker::put(h, "two", 223); + Broker::put(h, vector(1,2), 1947/tcp); + + Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event insert_more() + { + Broker::put(h, "three", 3.14); + Broker::put(h, "four", 1.2.3.4); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + schedule 4secs { insert_more() }; + } + +@TEST-END-FILE + + +@TEST-START-FILE clone.zeek + +redef exit_only_after_terminate = T; + +global query_timeout = 1sec; + +global h: opaque of Broker::Store; + + +global inserted: event(); + +function print_index(k: any) + { + when ( local r = Broker::get(h, k) ) + { + print "clone", k, r$status, r$result; + } + timeout query_timeout + { + print "clone", fmt("clone ", k); + } + } + +event lookup(stage: count) + { + print("----"); + print_index("one"); + print_index("two"); + print_index(vector(1,2)); + print_index("three"); + print_index("four"); + print_index("five"); + print_index("six"); + + if ( stage == 1 ) + schedule 4secs { lookup(2) }; + + if ( stage == 2 ) + { + Broker::put(h, "five", "555"); + Broker::put(h, "six", "666"); + event inserted(); + schedule 2secs { lookup(3) }; + } + } + +event done() + { + terminate(); + } + +event zeek_init() + { + Broker::auto_publish("bro/events", inserted); + Broker::subscribe("bro/"); + Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + h = Broker::create_clone("test"); + schedule 2secs { lookup(1) }; + } + +@TEST-END-FILE + diff --git a/testing/btest/core/leaks/broker/data.bro b/testing/btest/core/leaks/broker/data.bro deleted file mode 100644 index 590d041ff1..0000000000 --- a/testing/btest/core/leaks/broker/data.bro +++ /dev/null @@ -1,261 +0,0 @@ -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# @TEST-GROUP: leaks - -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run bro bro -m -b -r $TRACES/http/get.trace %INPUT -# @TEST-EXEC: btest-bg-wait 45 -# @TEST-EXEC: btest-diff bro/.stdout - -type bro_set: set[string]; -type bro_table: table[string] of count; -type bro_vector: vector of string; - -type bro_record : record { - a: string &optional; - b: string &default = "bee"; - c: count; -}; - -function broker_to_bro_record_recurse(it: opaque of Broker::RecordIterator, - rval: bro_record, - idx: count): bro_record - { - if ( Broker::record_iterator_last(it) ) - return rval; - - local field_value = Broker::record_iterator_value(it); - - if ( field_value?$data ) - switch ( idx ) { - case 0: - rval$a = field_value as string; - break; - case 1: - rval$b = field_value as string; - break; - case 2: - rval$c = field_value as count; - break; - }; - - ++idx; - Broker::record_iterator_next(it); - return broker_to_bro_record_recurse(it, rval, idx); - } - -function broker_to_bro_record(d: Broker::Data): bro_record - { - return broker_to_bro_record_recurse(Broker::record_iterator(d), - bro_record($c = 0), 0); - } - -function -broker_to_bro_set_recurse(it: opaque of Broker::SetIterator, - rval: bro_set): bro_set - { - if ( Broker::set_iterator_last(it) ) - return rval; - - add rval[Broker::set_iterator_value(it) as string]; - Broker::set_iterator_next(it); - return broker_to_bro_set_recurse(it, rval); - } - - -function broker_to_bro_set(d: Broker::Data): bro_set - { - return broker_to_bro_set_recurse(Broker::set_iterator(d), bro_set()); - } - -function -broker_to_bro_table_recurse(it: opaque of Broker::TableIterator, - rval: bro_table): bro_table - { - if ( Broker::table_iterator_last(it) ) - return rval; - - local item = Broker::table_iterator_value(it); - rval[item$key as string] = item$val as count; - Broker::table_iterator_next(it); - return broker_to_bro_table_recurse(it, rval); - } - -function broker_to_bro_table(d: Broker::Data): bro_table - { - return broker_to_bro_table_recurse(Broker::table_iterator(d), - bro_table()); - } - -function broker_to_bro_vector_recurse(it: opaque of Broker::VectorIterator, - rval: bro_vector): bro_vector - { - if ( Broker::vector_iterator_last(it) ) - return rval; - - rval += Broker::vector_iterator_value(it) as string; - Broker::vector_iterator_next(it); - return broker_to_bro_vector_recurse(it, rval); - } - -function broker_to_bro_vector(d: Broker::Data): bro_vector - { - return broker_to_bro_vector_recurse(Broker::vector_iterator(d), - bro_vector()); - } - -global did_it = F; - -event new_connection(c: connection) -{ -if ( did_it ) return; - -did_it = T; - -### Print every broker data type - -print Broker::data_type(Broker::data(T)); -print Broker::data_type(Broker::data(+1)); -print Broker::data_type(Broker::data(1)); -print Broker::data_type(Broker::data(1.1)); -print Broker::data_type(Broker::data("1 (how creative)")); -print Broker::data_type(Broker::data(1.1.1.1)); -print Broker::data_type(Broker::data(1.1.1.1/1)); -print Broker::data_type(Broker::data(1/udp)); -print Broker::data_type(Broker::data(double_to_time(1))); -print Broker::data_type(Broker::data(1sec)); -print Broker::data_type(Broker::data(Broker::BOOL)); -local s: bro_set = bro_set("one", "two", "three"); -local t: bro_table = bro_table(["one"] = 1, ["two"] = 2, ["three"] = 3); -local v: bro_vector = bro_vector("zero", "one", "two"); -local r: bro_record = bro_record($c = 1); -print Broker::data_type(Broker::data(s)); -print Broker::data_type(Broker::data(t)); -print Broker::data_type(Broker::data(v)); -print 
Broker::data_type(Broker::data(r)); - -print "***************************"; - -### Convert a Bro value to a broker value, then print the result - -print (Broker::data(T)) as bool; -print (Broker::data(F)) as bool; -print (Broker::data(+1)) as int; -print (Broker::data(+0)) as int; -print (Broker::data(-1)) as int; -print (Broker::data(1)) as count; -print (Broker::data(0)) as count; -print (Broker::data(1.1)) as double; -print (Broker::data(-11.1)) as double; -print (Broker::data("hello")) as string; -print (Broker::data(1.2.3.4)) as addr; -print (Broker::data(192.168.1.1/16)) as subnet; -print (Broker::data(22/tcp)) as port; -print (Broker::data(double_to_time(42))) as time; -print (Broker::data(3min)) as interval; -print (Broker::data(Broker::BOOL)) as Broker::DataType; - -local cs = Broker::data(s); -print broker_to_bro_set(cs); - -local ct = Broker::data(t); -print broker_to_bro_table(ct); - -local cv = Broker::data(v); -print broker_to_bro_vector(cv); - -local cr = Broker::data(r); -print broker_to_bro_record(cr); - -r$a = "test"; -cr = Broker::data(r); -print broker_to_bro_record(cr); - -r$b = "testagain"; -cr = Broker::data(r); -print broker_to_bro_record(cr); - -print "***************************"; - -### Test the broker set BIFs - -cs = Broker::set_create(); -print Broker::set_size(cs); -print Broker::set_insert(cs, ("hi")); -print Broker::set_size(cs); -print Broker::set_contains(cs, ("hi")); -print Broker::set_contains(cs, ("bye")); -print Broker::set_insert(cs, ("bye")); -print Broker::set_size(cs); -print Broker::set_insert(cs, ("bye")); -print Broker::set_size(cs); -print Broker::set_remove(cs, ("hi")); -print Broker::set_size(cs); -print Broker::set_remove(cs, ("hi")); -print broker_to_bro_set(cs); -print Broker::set_clear(cs); -print Broker::set_size(cs); -print broker_to_bro_set(cs); - -print "***************************"; - -### Test the broker table BIFs - -ct = Broker::table_create(); -print Broker::table_size(ct); -print Broker::table_insert(ct, ("hi"), (42)); -print Broker::table_size(ct); -print Broker::table_contains(ct, ("hi")); -print (Broker::table_lookup(ct, ("hi"))) as count; -print Broker::table_contains(ct, ("bye")); -print Broker::table_insert(ct, ("bye"), (7)); -print Broker::table_size(ct); -print Broker::table_insert(ct, ("bye"), (37)); -print Broker::table_size(ct); -print (Broker::table_lookup(ct, ("bye"))) as count; -print Broker::table_remove(ct, ("hi")); -print Broker::table_size(ct); -print Broker::table_remove(ct, ("hi")); -print Broker::table_size(ct); -print Broker::table_clear(ct); -print Broker::table_size(ct); -print broker_to_bro_table(ct); - -print "***************************"; - -### Test the broker vector BIFs - -cv = Broker::vector_create(); -print Broker::vector_size(cv); -print Broker::vector_insert(cv, 0, ("hi")); -print Broker::vector_insert(cv, 1, ("hello")); -print Broker::vector_insert(cv, 2, ("greetings")); -print Broker::vector_insert(cv, 1, ("salutations")); -print broker_to_bro_vector(cv); -print Broker::vector_size(cv); -print Broker::vector_replace(cv, 2, ("bah")); -print Broker::vector_lookup(cv, 2); -print Broker::vector_lookup(cv, 0); -print broker_to_bro_vector(cv); -print Broker::vector_remove(cv, 2); -print broker_to_bro_vector(cv); -print Broker::vector_size(cv); -print Broker::vector_clear(cv); -print Broker::vector_size(cv); -print broker_to_bro_vector(cv); - -print "***************************"; - -### Test the broker record BIFs - -cr = Broker::record_create(3); -print Broker::record_size(cr); -print 
Broker::record_assign(cr, 0, ("hi")); -print Broker::record_assign(cr, 1, ("hello")); -print Broker::record_assign(cr, 2, (37)); -print Broker::record_lookup(cr, 0); -print Broker::record_lookup(cr, 1); -print Broker::record_lookup(cr, 2); -print Broker::record_size(cr); -print Broker::record_assign(cr, 1, ("goodbye")); -print Broker::record_size(cr); -print Broker::record_lookup(cr, 1); -} diff --git a/testing/btest/core/leaks/broker/data.zeek b/testing/btest/core/leaks/broker/data.zeek new file mode 100644 index 0000000000..c70120b360 --- /dev/null +++ b/testing/btest/core/leaks/broker/data.zeek @@ -0,0 +1,270 @@ +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# @TEST-GROUP: leaks + +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -b -r $TRACES/http/get.trace %INPUT +# @TEST-EXEC: btest-bg-wait 45 +# @TEST-EXEC: btest-diff zeek/.stdout + +type bro_set: set[string]; +type bro_table: table[string] of count; +type bro_vector: vector of string; + +type bro_record : record { + a: string &optional; + b: string &default = "bee"; + c: count; +}; + +function broker_to_bro_record_recurse(it: opaque of Broker::RecordIterator, + rval: bro_record, + idx: count): bro_record + { + if ( Broker::record_iterator_last(it) ) + return rval; + + local field_value = Broker::record_iterator_value(it); + + if ( field_value?$data ) + switch ( idx ) { + case 0: + rval$a = field_value as string; + break; + case 1: + rval$b = field_value as string; + break; + case 2: + rval$c = field_value as count; + break; + }; + + ++idx; + Broker::record_iterator_next(it); + return broker_to_bro_record_recurse(it, rval, idx); + } + +function broker_to_bro_record(d: Broker::Data): bro_record + { + return broker_to_bro_record_recurse(Broker::record_iterator(d), + bro_record($c = 0), 0); + } + +function +broker_to_bro_set_recurse(it: opaque of Broker::SetIterator, + rval: bro_set): bro_set + { + if ( Broker::set_iterator_last(it) ) + return rval; + + add rval[Broker::set_iterator_value(it) as string]; + Broker::set_iterator_next(it); + return broker_to_bro_set_recurse(it, rval); + } + + +function broker_to_bro_set(d: Broker::Data): bro_set + { + return broker_to_bro_set_recurse(Broker::set_iterator(d), bro_set()); + } + +function +broker_to_bro_table_recurse(it: opaque of Broker::TableIterator, + rval: bro_table): bro_table + { + if ( Broker::table_iterator_last(it) ) + return rval; + + local item = Broker::table_iterator_value(it); + rval[item$key as string] = item$val as count; + Broker::table_iterator_next(it); + return broker_to_bro_table_recurse(it, rval); + } + +function broker_to_bro_table(d: Broker::Data): bro_table + { + return broker_to_bro_table_recurse(Broker::table_iterator(d), + bro_table()); + } + +function broker_to_bro_vector_recurse(it: opaque of Broker::VectorIterator, + rval: bro_vector): bro_vector + { + if ( Broker::vector_iterator_last(it) ) + return rval; + + rval += Broker::vector_iterator_value(it) as string; + Broker::vector_iterator_next(it); + return broker_to_bro_vector_recurse(it, rval); + } + +function broker_to_bro_vector(d: Broker::Data): bro_vector + { + return broker_to_bro_vector_recurse(Broker::vector_iterator(d), + bro_vector()); + } + +global did_it = F; + +event new_connection(c: connection) +{ +if ( did_it ) return; + +did_it = T; + +### Print every Broker data type + +print Broker::data_type(Broker::data(T)); +print Broker::data_type(Broker::data(+1)); +print Broker::data_type(Broker::data(1)); +print Broker::data_type(Broker::data(1.1)); +print 
Broker::data_type(Broker::data("1 (how creative)")); +print Broker::data_type(Broker::data(1.1.1.1)); +print Broker::data_type(Broker::data(1.1.1.1/1)); +print Broker::data_type(Broker::data(1/udp)); +print Broker::data_type(Broker::data(double_to_time(1))); +print Broker::data_type(Broker::data(1sec)); +print Broker::data_type(Broker::data(Broker::BOOL)); +local s: bro_set = bro_set("one", "two", "three"); +local t: bro_table = bro_table(["one"] = 1, ["two"] = 2, ["three"] = 3); +local v: bro_vector = bro_vector("zero", "one", "two"); +local r: bro_record = bro_record($c = 1); +print Broker::data_type(Broker::data(s)); +print Broker::data_type(Broker::data(t)); +print Broker::data_type(Broker::data(v)); +print Broker::data_type(Broker::data(r)); + +print "***************************"; + +### Convert a Zeek value to a Broker value, then print the result + +print (Broker::data(T)) as bool; +print (Broker::data(F)) as bool; +print (Broker::data(+1)) as int; +print (Broker::data(+0)) as int; +print (Broker::data(-1)) as int; +print (Broker::data(1)) as count; +print (Broker::data(0)) as count; +print (Broker::data(1.1)) as double; +print (Broker::data(-11.1)) as double; +print (Broker::data("hello")) as string; +print (Broker::data(1.2.3.4)) as addr; +print (Broker::data(192.168.1.1/16)) as subnet; +print (Broker::data(22/tcp)) as port; +print (Broker::data(double_to_time(42))) as time; +print (Broker::data(3min)) as interval; +print (Broker::data(Broker::BOOL)) as Broker::DataType; + +local cs = Broker::data(s); +print broker_to_bro_set(cs); + +local ct = Broker::data(t); +print broker_to_bro_table(ct); + +local cv = Broker::data(v); +print broker_to_bro_vector(cv); + +local cr = Broker::data(r); +print broker_to_bro_record(cr); + +r$a = "test"; +cr = Broker::data(r); +print broker_to_bro_record(cr); + +r$b = "testagain"; +cr = Broker::data(r); +print broker_to_bro_record(cr); + +print "***************************"; + +### Test the Broker set BIFs + +cs = Broker::set_create(); +print Broker::set_size(cs); +print Broker::set_insert(cs, ("hi")); +print Broker::set_size(cs); +print Broker::set_contains(cs, ("hi")); +print Broker::set_contains(cs, ("bye")); +print Broker::set_insert(cs, ("bye")); +print Broker::set_size(cs); +print Broker::set_insert(cs, ("bye")); +print Broker::set_size(cs); +print Broker::set_remove(cs, ("hi")); +print Broker::set_size(cs); +print Broker::set_remove(cs, ("hi")); +print broker_to_bro_set(cs); +print Broker::set_clear(cs); +print Broker::set_size(cs); +print broker_to_bro_set(cs); + +print "***************************"; + +### Test the Broker table BIFs + +ct = Broker::table_create(); +print Broker::table_size(ct); +print Broker::table_insert(ct, ("hi"), (42)); +print Broker::table_size(ct); +print Broker::table_contains(ct, ("hi")); +print (Broker::table_lookup(ct, ("hi"))) as count; +print Broker::table_contains(ct, ("bye")); +print Broker::table_insert(ct, ("bye"), (7)); +print Broker::table_size(ct); +print Broker::table_insert(ct, ("bye"), (37)); +print Broker::table_size(ct); +print (Broker::table_lookup(ct, ("bye"))) as count; +print Broker::table_remove(ct, ("hi")); +print Broker::table_size(ct); +print Broker::table_remove(ct, ("hi")); +print Broker::table_size(ct); +print Broker::table_clear(ct); +print Broker::table_size(ct); +print broker_to_bro_table(ct); + +print "***************************"; + +### Test the Broker vector BIFs + +cv = Broker::vector_create(); +print Broker::vector_size(cv); +print Broker::vector_insert(cv, 0, ("hi")); +print 
Broker::vector_insert(cv, 1, ("hello")); +print Broker::vector_insert(cv, 2, ("greetings")); +print Broker::vector_insert(cv, 1, ("salutations")); +print broker_to_bro_vector(cv); +print Broker::vector_size(cv); +print Broker::vector_replace(cv, 2, ("bah")); +print Broker::vector_lookup(cv, 2); +print Broker::vector_lookup(cv, 0); +print broker_to_bro_vector(cv); +print Broker::vector_remove(cv, 2); +print broker_to_bro_vector(cv); +print Broker::vector_size(cv); +print Broker::vector_clear(cv); +print Broker::vector_size(cv); +print broker_to_bro_vector(cv); + +print "***************************"; + +### Test the Broker record BIFs + +cr = Broker::record_create(3); +print Broker::record_size(cr); +print Broker::record_assign(cr, 0, ("hi")); +print Broker::record_assign(cr, 1, ("hello")); +print Broker::record_assign(cr, 2, (37)); +print Broker::record_lookup(cr, 0); +print Broker::record_lookup(cr, 1); +print Broker::record_lookup(cr, 2); +print Broker::record_size(cr); +print Broker::record_assign(cr, 1, ("goodbye")); +print Broker::record_size(cr); +print Broker::record_lookup(cr, 1); + +print "***************************"; + +### Test an opaque value + +local k1: opaque of topk = topk_init(4); +topk_add(k1, "a"); +print Broker::data(k1) is opaque of topk; +Broker::data(k1) as opaque of topk; +} diff --git a/testing/btest/core/leaks/broker/master_store.bro b/testing/btest/core/leaks/broker/master_store.bro deleted file mode 100644 index 583f80413b..0000000000 --- a/testing/btest/core/leaks/broker/master_store.bro +++ /dev/null @@ -1,149 +0,0 @@ -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# @TEST-GROUP: leaks - -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -b -r $TRACES/http/get.trace %INPUT -# @TEST-EXEC: btest-bg-wait 45 - -redef exit_only_after_terminate = T; - -global query_timeout = 45sec; - -global h: opaque of Broker::Store; - -global step: count = 0; - -function print_index(k: any) - { - when ( local r = Broker::get(h, k) ) - { - step += 1; - print fmt("[%d]", step), k, r$status, r$result; - } - timeout query_timeout - { - step += 1; - print fmt("[%d] ", step, k); - } - } - -function print_exists(k: any) - { - when ( local r = Broker::exists(h, k) ) - { - step += 1; - print fmt("[%d]", step), k, r; - } - timeout query_timeout - { - step += 1; - print fmt("[%d] ", step, k); - } - } - -function print_index_from_value(k: any, i: any) - { - when ( local r = Broker::get_index_from_value(h, k, i) ) - { - step += 1; - print fmt("[%d]", step), k, r$status, r$result; - } - timeout query_timeout - { - step += 1; - print fmt("[%d] ", step, k); - } - } - -function print_keys() - { - when ( local s = Broker::keys(h) ) - { - step += 1; - print "keys", s; - } - timeout query_timeout - { - step += 1; - print fmt("[%d] ", step); - } - } - -event done() - { - terminate(); - } - -event pk2() - { - print_keys(); - } - -event pk1() - { - print_keys(); - Broker::clear(h); - schedule 1sec { pk2() }; - } - -event bro_init() - { - h = Broker::create_master("master"); - Broker::put(h, "one", "110"); - Broker::put(h, "two", 220); - Broker::put(h, "three", 330); - Broker::put(h, "four", set(1, 2,3)); - Broker::put(h, set("x", "y"), vector(1/tcp, 2/tcp, 3/tcp)); - - Broker::put(h, "str", "foo"); - Broker::put(h, "vec", vector(1, 2,3)); - Broker::put(h, "set", set("A", "B")); - Broker::put(h, "table", table(["a"] = 1, ["b"] = 2)); - - print_index("one"); - print_index("two"); - print_index("three"); - print_index("four"); - print_index("five"); - 
print_index(set("x", "y")); - - when ( step == 6 ) - { - Broker::increment(h, "two"); - Broker::increment(h, "two", 9); - Broker::decrement(h, "three"); - Broker::decrement(h, "three", 9); - print_index("two"); - print_index("three"); - print_index("four"); - print_keys(); - Broker::erase(h, "four"); - - Broker::append(h, "str", "bar"); - Broker::insert_into_set(h, "set", "C"); - Broker::insert_into_table(h, "table", "c", 3); - Broker::remove_from(h, "set", 2); - Broker::remove_from(h, "table", "b"); - Broker::push(h, "vec", 4); - Broker::push(h, "vec", 5); - Broker::pop(h, "vec"); - - print_index("str"); - print_index("set"); - print_index("table"); - print_index("vec"); - - print_exists("one"); - print_exists("NOPE"); - - print_index_from_value("vec", 1); - print_index_from_value("set", "A"); - print_index_from_value("table", "a"); - print_index_from_value("table", "X"); - - schedule 1sec { pk1() }; - } - - schedule 15secs { done() }; - } - - diff --git a/testing/btest/core/leaks/broker/master_store.zeek b/testing/btest/core/leaks/broker/master_store.zeek new file mode 100644 index 0000000000..c8527b8d73 --- /dev/null +++ b/testing/btest/core/leaks/broker/master_store.zeek @@ -0,0 +1,149 @@ +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# @TEST-GROUP: leaks + +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -b -r $TRACES/http/get.trace %INPUT +# @TEST-EXEC: btest-bg-wait 45 + +redef exit_only_after_terminate = T; + +global query_timeout = 45sec; + +global h: opaque of Broker::Store; + +global step: count = 0; + +function print_index(k: any) + { + when ( local r = Broker::get(h, k) ) + { + step += 1; + print fmt("[%d]", step), k, r$status, r$result; + } + timeout query_timeout + { + step += 1; + print fmt("[%d] ", step, k); + } + } + +function print_exists(k: any) + { + when ( local r = Broker::exists(h, k) ) + { + step += 1; + print fmt("[%d]", step), k, r; + } + timeout query_timeout + { + step += 1; + print fmt("[%d] ", step, k); + } + } + +function print_index_from_value(k: any, i: any) + { + when ( local r = Broker::get_index_from_value(h, k, i) ) + { + step += 1; + print fmt("[%d]", step), k, r$status, r$result; + } + timeout query_timeout + { + step += 1; + print fmt("[%d] ", step, k); + } + } + +function print_keys() + { + when ( local s = Broker::keys(h) ) + { + step += 1; + print "keys", s; + } + timeout query_timeout + { + step += 1; + print fmt("[%d] ", step); + } + } + +event done() + { + terminate(); + } + +event pk2() + { + print_keys(); + } + +event pk1() + { + print_keys(); + Broker::clear(h); + schedule 1sec { pk2() }; + } + +event zeek_init() + { + h = Broker::create_master("master"); + Broker::put(h, "one", "110"); + Broker::put(h, "two", 220); + Broker::put(h, "three", 330); + Broker::put(h, "four", set(1, 2,3)); + Broker::put(h, set("x", "y"), vector(1/tcp, 2/tcp, 3/tcp)); + + Broker::put(h, "str", "foo"); + Broker::put(h, "vec", vector(1, 2,3)); + Broker::put(h, "set", set("A", "B")); + Broker::put(h, "table", table(["a"] = 1, ["b"] = 2)); + + print_index("one"); + print_index("two"); + print_index("three"); + print_index("four"); + print_index("five"); + print_index(set("x", "y")); + + when ( step == 6 ) + { + Broker::increment(h, "two"); + Broker::increment(h, "two", 9); + Broker::decrement(h, "three"); + Broker::decrement(h, "three", 9); + print_index("two"); + print_index("three"); + print_index("four"); + print_keys(); + Broker::erase(h, "four"); + + Broker::append(h, "str", "bar"); + Broker::insert_into_set(h, 
"set", "C"); + Broker::insert_into_table(h, "table", "c", 3); + Broker::remove_from(h, "set", 2); + Broker::remove_from(h, "table", "b"); + Broker::push(h, "vec", 4); + Broker::push(h, "vec", 5); + Broker::pop(h, "vec"); + + print_index("str"); + print_index("set"); + print_index("table"); + print_index("vec"); + + print_exists("one"); + print_exists("NOPE"); + + print_index_from_value("vec", 1); + print_index_from_value("set", "A"); + print_index_from_value("table", "a"); + print_index_from_value("table", "X"); + + schedule 1sec { pk1() }; + } + + schedule 15secs { done() }; + } + + diff --git a/testing/btest/core/leaks/broker/remote_event.test b/testing/btest/core/leaks/broker/remote_event.test index 5000bd98d7..470fc0837a 100644 --- a/testing/btest/core/leaks/broker/remote_event.test +++ b/testing/btest/core/leaks/broker/remote_event.test @@ -1,22 +1,22 @@ # @TEST-PORT: BROKER_PORT -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # @TEST-GROUP: leaks -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run recv "bro -m -b ../recv.bro >recv.out" -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run send "bro -m -b ../send.bro >send.out" +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run recv "zeek -m -b ../recv.zeek >recv.out" +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run send "zeek -m -b ../send.zeek >send.out" # @TEST-EXEC: btest-bg-wait 45 # @TEST-EXEC: btest-diff recv/recv.out # @TEST-EXEC: btest-diff send/send.out -@TEST-START-FILE recv.bro +@TEST-START-FILE recv.zeek redef exit_only_after_terminate = T; global event_handler: event(msg: string, c: count); global auto_event_handler: event(msg: string, c: count); -event bro_init() +event zeek_init() { Broker::subscribe("bro/event/"); Broker::auto_publish("bro/event/my_topic", auto_event_handler); @@ -43,14 +43,14 @@ event event_handler(msg: string, n: count) @TEST-END-FILE -@TEST-START-FILE send.bro +@TEST-START-FILE send.zeek redef exit_only_after_terminate = T; global event_handler: event(msg: string, c: count); global auto_event_handler: event(msg: string, c: count); -event bro_init() +event zeek_init() { Broker::subscribe("bro/event/my_topic"); Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT")), 1secs); diff --git a/testing/btest/core/leaks/broker/remote_log.test b/testing/btest/core/leaks/broker/remote_log.test index 12abc1a313..5c303fbfb4 100644 --- a/testing/btest/core/leaks/broker/remote_log.test +++ b/testing/btest/core/leaks/broker/remote_log.test @@ -1,9 +1,9 @@ # @TEST-PORT: BROKER_PORT -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # @TEST-GROUP: leaks -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run recv "bro -m -b ../recv.bro >recv.out" -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run send "bro -m -b ../send.bro >send.out" +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run recv "zeek -m -b ../recv.zeek >recv.out" +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run send "zeek -m -b ../send.zeek >send.out" # @TEST-EXEC: btest-bg-wait 45 # @TEST-EXEC: btest-diff recv/recv.out @@ -11,7 +11,7 @@ # @TEST-EXEC: btest-diff send/send.out # @TEST-EXEC: btest-diff send/test.log -@TEST-START-FILE common.bro +@TEST-START-FILE common.zeek redef exit_only_after_terminate = T; @@ -27,7 +27,7 @@ export { }; } -event bro_init() &priority=5 +event zeek_init() &priority=5 { Log::create_stream(Test::LOG, [$columns=Test::Info]); } @@ -39,13 +39,13 @@ event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) @TEST-END-FILE -@TEST-START-FILE recv.bro +@TEST-START-FILE recv.zeek -@load ./common.bro +@load ./common -event bro_init() +event zeek_init() { - Broker::subscribe("bro/"); + Broker::subscribe("zeek/"); Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); } @@ -56,11 +56,11 @@ event Broker::peer_removed(endpoint: Broker::EndpointInfo, msg: string) @TEST-END-FILE -@TEST-START-FILE send.bro +@TEST-START-FILE send.zeek -@load ./common.bro +@load ./common -event bro_init() +event zeek_init() { Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); } diff --git a/testing/btest/core/leaks/copy-all-opaques.zeek b/testing/btest/core/leaks/copy-all-opaques.zeek new file mode 100644 index 0000000000..259d971bb2 --- /dev/null +++ b/testing/btest/core/leaks/copy-all-opaques.zeek @@ -0,0 +1,90 @@ +# @TEST-GROUP: leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks + +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -b -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +global did_it = F; + +event new_connection(c: connection) + { + if ( did_it ) + return; + + did_it = T; + + print "============ Topk"; + local k1: opaque of topk = topk_init(4); + topk_add(k1, "a"); + topk_add(k1, "b"); + topk_add(k1, "b"); + topk_add(k1, "c"); + local k2 = copy(k1); + print topk_get_top(k1, 5); + topk_add(k1, "shoulnotshowup"); + print topk_get_top(k2, 5); + + + print "============ HLL"; + local c1 = hll_cardinality_init(0.01, 0.95); + hll_cardinality_add(c1, 2001); + hll_cardinality_add(c1, 2002); + hll_cardinality_add(c1, 2003); + + print hll_cardinality_estimate(c1); + local c2 = copy(c1); + hll_cardinality_add(c1, 2004); + print hll_cardinality_estimate(c2); + + print "============ Bloom"; + local bf_cnt = bloomfilter_basic_init(0.1, 1000); + bloomfilter_add(bf_cnt, 42); + bloomfilter_add(bf_cnt, 84); + bloomfilter_add(bf_cnt, 168); + print bloomfilter_lookup(bf_cnt, 0); + print bloomfilter_lookup(bf_cnt, 42); + local bf_copy = copy(bf_cnt); + bloomfilter_add(bf_cnt, 0); + print bloomfilter_lookup(bf_copy, 0); + print bloomfilter_lookup(bf_copy, 42); + # check that the type information transferred.
+ bloomfilter_add(bf_copy, 0.5); # causes stderr output + + print "============ Hashes"; + local md5a = md5_hash_init(); + md5_hash_update(md5a, "one"); + local md5b = copy(md5a); + md5_hash_update(md5a, "two"); + md5_hash_update(md5b, "two"); + print md5_hash_finish(md5a); + print md5_hash_finish(md5b); + + local sha1a = sha1_hash_init(); + sha1_hash_update(sha1a, "one"); + local sha1b = copy(sha1a); + sha1_hash_update(sha1a, "two"); + sha1_hash_update(sha1b, "two"); + print sha1_hash_finish(sha1a); + print sha1_hash_finish(sha1b); + + local sha256a = sha256_hash_init(); + sha256_hash_update(sha256a, "one"); + local sha256b = copy(sha256a); + sha256_hash_update(sha256a, "two"); + sha256_hash_update(sha256b, "two"); + print sha256_hash_finish(sha256a); + print sha256_hash_finish(sha256b); + + print "============ X509"; + local x509 = x509_from_der("\x30\x82\x03\x75\x30\x82\x02\x5D\xA0\x03\x02\x01\x02\x02\x0B\x04\x00\x00\x00\x00\x01\x15\x4B\x5A\xC3\x94\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x57\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x42\x45\x31\x19\x30\x17\x06\x03\x55\x04\x0A\x13\x10\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x20\x6E\x76\x2D\x73\x61\x31\x10\x30\x0E\x06\x03\x55\x04\x0B\x13\x07\x52\x6F\x6F\x74\x20\x43\x41\x31\x1B\x30\x19\x06\x03\x55\x04\x03\x13\x12\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x1E\x17\x0D\x39\x38\x30\x39\x30\x31\x31\x32\x30\x30\x30\x30\x5A\x17\x0D\x32\x38\x30\x31\x32\x38\x31\x32\x30\x30\x30\x30\x5A\x30\x57\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x42\x45\x31\x19\x30\x17\x06\x03\x55\x04\x0A\x13\x10\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x20\x6E\x76\x2D\x73\x61\x31\x10\x30\x0E\x06\x03\x55\x04\x0B\x13\x07\x52\x6F\x6F\x74\x20\x43\x41\x31\x1B\x30\x19\x06\x03\x55\x04\x03\x13\x12\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xDA\x0E\xE6\x99\x8D\xCE\xA3\xE3\x4F\x8A\x7E\xFB\xF1\x8B\x83\x25\x6B\xEA\x48\x1F\xF1\x2A\xB0\xB9\x95\x11\x04\xBD\xF0\x63\xD1\xE2\x67\x66\xCF\x1C\xDD\xCF\x1B\x48\x2B\xEE\x8D\x89\x8E\x9A\xAF\x29\x80\x65\xAB\xE9\xC7\x2D\x12\xCB\xAB\x1C\x4C\x70\x07\xA1\x3D\x0A\x30\xCD\x15\x8D\x4F\xF8\xDD\xD4\x8C\x50\x15\x1C\xEF\x50\xEE\xC4\x2E\xF7\xFC\xE9\x52\xF2\x91\x7D\xE0\x6D\xD5\x35\x30\x8E\x5E\x43\x73\xF2\x41\xE9\xD5\x6A\xE3\xB2\x89\x3A\x56\x39\x38\x6F\x06\x3C\x88\x69\x5B\x2A\x4D\xC5\xA7\x54\xB8\x6C\x89\xCC\x9B\xF9\x3C\xCA\xE5\xFD\x89\xF5\x12\x3C\x92\x78\x96\xD6\xDC\x74\x6E\x93\x44\x61\xD1\x8D\xC7\x46\xB2\x75\x0E\x86\xE8\x19\x8A\xD5\x6D\x6C\xD5\x78\x16\x95\xA2\xE9\xC8\x0A\x38\xEB\xF2\x24\x13\x4F\x73\x54\x93\x13\x85\x3A\x1B\xBC\x1E\x34\xB5\x8B\x05\x8C\xB9\x77\x8B\xB1\xDB\x1F\x20\x91\xAB\x09\x53\x6E\x90\xCE\x7B\x37\x74\xB9\x70\x47\x91\x22\x51\x63\x16\x79\xAE\xB1\xAE\x41\x26\x08\xC8\x19\x2B\xD1\x46\xAA\x48\xD6\x64\x2A\xD7\x83\x34\xFF\x2C\x2A\xC1\x6C\x19\x43\x4A\x07\x85\xE7\xD3\x7C\xF6\x21\x68\xEF\xEA\xF2\x52\x9F\x7F\x93\x90\xCF\x02\x03\x01\x00\x01\xA3\x42\x30\x40\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x60\x7B\x66\x1A\x45\x0D\x97\xCA\x89\x50\x2F\x7D\x04\xCD\x34\xA8\xFF\xFC\xFD\x4B\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\xD6\x73\xE7\x7C\x4F\x76\xD0\x8D\xBF\xEC\xBA\xA2\xBE\x34\xC5\x28\x32\xB5\x7C\xFC\x6C\x9C\x2C\x2B\xBD\x09\x9E\x53\xBF\x6B\x5E\xAA\x11\x48\xB6\xE5\x08\xA3\xB3\xCA\x
3D\x61\x4D\xD3\x46\x09\xB3\x3E\xC3\xA0\xE3\x63\x55\x1B\xF2\xBA\xEF\xAD\x39\xE1\x43\xB9\x38\xA3\xE6\x2F\x8A\x26\x3B\xEF\xA0\x50\x56\xF9\xC6\x0A\xFD\x38\xCD\xC4\x0B\x70\x51\x94\x97\x98\x04\xDF\xC3\x5F\x94\xD5\x15\xC9\x14\x41\x9C\xC4\x5D\x75\x64\x15\x0D\xFF\x55\x30\xEC\x86\x8F\xFF\x0D\xEF\x2C\xB9\x63\x46\xF6\xAA\xFC\xDF\xBC\x69\xFD\x2E\x12\x48\x64\x9A\xE0\x95\xF0\xA6\xEF\x29\x8F\x01\xB1\x15\xB5\x0C\x1D\xA5\xFE\x69\x2C\x69\x24\x78\x1E\xB3\xA7\x1C\x71\x62\xEE\xCA\xC8\x97\xAC\x17\x5D\x8A\xC2\xF8\x47\x86\x6E\x2A\xC4\x56\x31\x95\xD0\x67\x89\x85\x2B\xF9\x6C\xA6\x5D\x46\x9D\x0C\xAA\x82\xE4\x99\x51\xDD\x70\xB7\xDB\x56\x3D\x61\xE4\x6A\xE1\x5C\xD6\xF6\xFE\x3D\xDE\x41\xCC\x07\xAE\x63\x52\xBF\x53\x53\xF4\x2B\xE9\xC7\xFD\xB6\xF7\x82\x5F\x85\xD2\x41\x18\xDB\x81\xB3\x04\x1C\xC5\x1F\xA4\x80\x6F\x15\x20\xC9\xDE\x0C\x88\x0A\x1D\xD6\x66\x55\xE2\xFC\x48\xC9\x29\x26\x69\xE0"); + local x5092 = copy(x509); + print x509_parse(x509); + print x509_parse(x5092); + + print "============ Entropy"; + local handle = entropy_test_init(); + entropy_test_add(handle, "dh3Hie02uh^s#Sdf9L3frd243h$d78r2G4cM6*Q05d(7rh46f!0|4-f"); + local handle2 = copy(handle); + print entropy_test_finish(handle); + print entropy_test_finish(handle2); + } diff --git a/testing/btest/core/leaks/copy-all-types.zeek b/testing/btest/core/leaks/copy-all-types.zeek new file mode 100644 index 0000000000..3d5d9b958f --- /dev/null +++ b/testing/btest/core/leaks/copy-all-types.zeek @@ -0,0 +1,196 @@ +# Note: opaque types in separate test +# @TEST-GROUP: leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks + +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -b -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +type MyEnum: enum { ENUMME }; + +type InnerTestRecord: record { + a: string; +}; + +type TestRecord: record { + s1: string; + s2: string; + i1: InnerTestRecord; + i2: InnerTestRecord &optional; + donotset: InnerTestRecord &optional; + def: count &default=5; +}; + +function join_count_set(ss: set[count], j: string): string + { + local output=""; + local i=0; + for ( s in ss ) + { + if ( i > 0 ) + output = cat(output, j); + + output = cat(output, s); + ++i; + } + return output; + } + +function do_format(i: any): any + { + local tpe = type_name(i); + + switch ( tpe ) + { + case "set[count]": + return join_count_set(i, ","); + case "table[string] of string": + local cast: table[string] of string = i; + local vout: vector of string = vector(); + for ( el in cast ) + { + vout += cat(el, "=", cast[el]); + } + return join_string_vec(vout, ";"); + } + return i; + } + +function check(o1: any, o2: any, equal: bool, expect_same: bool) + { + local expect_msg = (equal ? "ok" : "FAIL0"); + local same = same_object(o1, o2); + + if ( expect_same && ! same ) + expect_msg = "FAIL1"; + + if ( ! 
expect_same && same ) + expect_msg = "FAIL2"; + + print fmt("orig=%s (%s) clone=%s (%s) equal=%s same_object=%s (%s)", do_format(o1), type_name(o1), do_format(o2), type_name(o2), equal, same, expect_msg); + } + +function check_vector_equal(a: vector of count, b: vector of count): bool + { + if ( |a| != |b| ) + return F; + + for ( i in a ) + { + if ( a[i] != b[i] ) + return F; + } + + return T; + } + +function check_string_table_equal(a: table[string] of string, b: table[string] of string): bool + { + if ( |a| != |b| ) + return F; + + for ( i in a ) + { + if ( a[i] != b[i] ) + return F; + } + + return T; + } + +function compare_otr(a: TestRecord, b: TestRecord): bool + { + if ( a$s1 != b$s1 ) + return F; + if ( a$s2 != b$s2 ) + return F; + if ( a$i1$a != b$i1$a ) + return F; + if ( a$i2$a != b$i2$a ) + return F; + + if ( same_object(a$i1, b$i1) ) + return F; + if ( same_object(a$i2, b$i2) ) + return F; + + # check that we restore that i1 & i2 point to the same object + if ( ! same_object(a$i1, a$i2) ) + return F; + if ( ! same_object(b$i1, b$i2) ) + return F; + + if ( a$def != b$def ) + return F; + + return T; + } + +global did_it = F; + +event new_connection(c: connection) + { + if ( did_it ) + return; + + did_it = T; + + local i1 = -42; + local i2 = copy(i1); + check(i1, i2, i1 == i2, T); + + local c1 : count = 42; + local c2 = copy(c1); + check(c1, c2, c1 == c2, T); + + local a1 = 127.0.0.1; + local a2 = copy(a1); + check(a1, a2, a1 == a2, T); + + local p1 = 42/tcp; + local p2 = copy(p1); + check(p1, p2, p1 == p2, T); + + local sn1 = 127.0.0.1/24; + local sn2 = copy(sn1); + check(sn1, sn2, sn1 == sn2, T); + + local s1 = "Foo"; + local s2 = copy(s1); + check(s1, s2, s1 == s2, F); + + local pat1 = /.*PATTERN.*/; + local pat2 = copy(pat1); + # patterns cannot be directly compared + if ( same_object(pat1, pat2) ) + print "FAIL P1"; + if ( ! ( pat1 == "PATTERN" ) ) + print "FAIL P2"; + if ( ! ( pat2 == "PATTERN" ) ) + print "FAIL P3"; + if ( pat2 == "PATERN" ) + print "FAIL P4"; + print fmt("orig=%s (%s) clone=%s (%s) same_object=%s", pat1, type_name(pat1), pat2, type_name(pat2), same_object(pat1, pat2)); + + local set1 = [1, 2, 3, 4, 5]; + local set2 = copy(set1); + check(set1, set2, set1 == set2, F); + + local v1 = vector(1, 2, 3, 4, 5); + local v2 = copy(v1); + check(v1, v2, check_vector_equal(v1, v2), F); + + local t1 : table[string] of string = table(); + t1["a"] = "va"; + t1["b"] = "vb"; + local t2 = copy(t1); + check(t1, t2, check_string_table_equal(t1, t2), F); + + local e1 = ENUMME; + local e2 = copy(ENUMME); + check(e1, e2, e1 == e2, T); + + local itr = InnerTestRecord($a="a"); + local otr1 = TestRecord($s1="s1", $s2="s2", $i1=itr, $i2=itr); + local otr2 = copy(otr1); + check(otr1, otr2, compare_otr(otr1, otr2), F); + } diff --git a/testing/btest/core/leaks/dns-nsec3.bro b/testing/btest/core/leaks/dns-nsec3.bro deleted file mode 100644 index 16be0103e6..0000000000 --- a/testing/btest/core/leaks/dns-nsec3.bro +++ /dev/null @@ -1,40 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run bro bro -b -C -m -r $TRACES/dnssec/nsec3.pcap %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -@load policy/protocols/dns/auth-addl - -event dns_RRSIG(c: connection, msg: dns_msg, ans: dns_answer, rrsig: dns_rrsig_rr) - { - print "RRSIG", rrsig, bytestring_to_hexstr(rrsig$signature); - } - -event dns_DNSKEY(c: connection, msg: dns_msg, ans: dns_answer, dnskey: dns_dnskey_rr) - { - print "DNSKEY", dnskey, bytestring_to_hexstr(dnskey$public_key); - } - -event dns_NSEC(c: connection, msg: dns_msg, ans: dns_answer, next_name: string, bitmaps: string_vec) - { - print "NSEC", next_name, bitmaps; - - for ( i in bitmaps ) - print bytestring_to_hexstr(bitmaps[i]); - } - -event dns_NSEC3(c: connection, msg: dns_msg, ans: dns_answer, nsec3: dns_nsec3_rr) - { - print "NSEC3", nsec3, - bytestring_to_hexstr(nsec3$nsec_salt), - bytestring_to_hexstr(nsec3$nsec_hash); - } - -event dns_DS(c: connection, msg: dns_msg, ans: dns_answer, ds: dns_ds_rr) - { - print "DS", ds, bytestring_to_hexstr(ds$digest_val); - } diff --git a/testing/btest/core/leaks/dns-nsec3.zeek b/testing/btest/core/leaks/dns-nsec3.zeek new file mode 100644 index 0000000000..29b591b0ee --- /dev/null +++ b/testing/btest/core/leaks/dns-nsec3.zeek @@ -0,0 +1,40 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -b -C -m -r $TRACES/dnssec/nsec3.pcap %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +@load policy/protocols/dns/auth-addl + +event dns_RRSIG(c: connection, msg: dns_msg, ans: dns_answer, rrsig: dns_rrsig_rr) + { + print "RRSIG", rrsig, bytestring_to_hexstr(rrsig$signature); + } + +event dns_DNSKEY(c: connection, msg: dns_msg, ans: dns_answer, dnskey: dns_dnskey_rr) + { + print "DNSKEY", dnskey, bytestring_to_hexstr(dnskey$public_key); + } + +event dns_NSEC(c: connection, msg: dns_msg, ans: dns_answer, next_name: string, bitmaps: string_vec) + { + print "NSEC", next_name, bitmaps; + + for ( i in bitmaps ) + print bytestring_to_hexstr(bitmaps[i]); + } + +event dns_NSEC3(c: connection, msg: dns_msg, ans: dns_answer, nsec3: dns_nsec3_rr) + { + print "NSEC3", nsec3, + bytestring_to_hexstr(nsec3$nsec_salt), + bytestring_to_hexstr(nsec3$nsec_hash); + } + +event dns_DS(c: connection, msg: dns_msg, ans: dns_answer, ds: dns_ds_rr) + { + print "DS", ds, bytestring_to_hexstr(ds$digest_val); + } diff --git a/testing/btest/core/leaks/dns-txt.bro b/testing/btest/core/leaks/dns-txt.bro deleted file mode 100644 index c04e5df6ea..0000000000 --- a/testing/btest/core/leaks/dns-txt.bro +++ /dev/null @@ -1,42 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run bro bro -b -m -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -redef exit_only_after_terminate = T; - -global n1 = 0; - -function check_term_conditions() - { - if ( n1 > 7 ) - terminate(); - } - - -event do_txt(s: string) - { - when ( local t1 = lookup_hostname_txt(s) ) - { - print "t1", t1; - ++n1; - check_term_conditions(); - } - timeout 100secs - { - print "t1 timeout"; - ++n1; - check_term_conditions(); - } - } - -event connection_established(c: connection) - { - event do_txt("localhost"); - schedule 5sec { do_txt("localhost") }; - } - diff --git a/testing/btest/core/leaks/dns-txt.zeek b/testing/btest/core/leaks/dns-txt.zeek new file mode 100644 index 0000000000..93d049a40b --- /dev/null +++ b/testing/btest/core/leaks/dns-txt.zeek @@ -0,0 +1,42 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -b -m -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +redef exit_only_after_terminate = T; + +global n1 = 0; + +function check_term_conditions() + { + if ( n1 > 7 ) + terminate(); + } + + +event do_txt(s: string) + { + when ( local t1 = lookup_hostname_txt(s) ) + { + print "t1", t1; + ++n1; + check_term_conditions(); + } + timeout 100secs + { + print "t1 timeout"; + ++n1; + check_term_conditions(); + } + } + +event connection_established(c: connection) + { + event do_txt("localhost"); + schedule 5sec { do_txt("localhost") }; + } + diff --git a/testing/btest/core/leaks/dns.bro b/testing/btest/core/leaks/dns.bro deleted file mode 100644 index f16a4ca3bb..0000000000 --- a/testing/btest/core/leaks/dns.bro +++ /dev/null @@ -1,81 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -b -m -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -redef exit_only_after_terminate = T; - -const foo: set[addr] = { - google.com -}; - -global n1 = 0; -global n2 = 0; -global n3 = 0; -global n4 = 0; - -function check_term_conditions() - { - if ( n1 > 4 && n2 > 4 && n3 > 4 && n4 > 4 ) - terminate(); - } - -event connection_established(c: connection) - { - when ( local addrs = lookup_hostname("localhost") ) - { - print "1a", c$id$resp_h, addrs; - ++n1; - check_term_conditions(); - } - timeout 100secs - { - print "1b", c$id$resp_h; - ++n1; - check_term_conditions(); - } - - when ( local addrs2 = lookup_hostname("qq.ww.ee.rrrrr") ) - { - print "2a", c$id$resp_h, addrs2; - ++n2; - check_term_conditions(); - } - timeout 100secs - { - print "2b", c$id$resp_h; - ++n2; - check_term_conditions(); - } - - when ( local a = lookup_addr(c$id$resp_h) ) - { - print "3a", c$id$resp_h, a; - ++n3; - check_term_conditions(); - } - timeout 100secs - { - print "3b", c$id$resp_h; - ++n3; - check_term_conditions(); - } - - when ( local a2 = lookup_addr(1.2.3.4) ) - { - print "4a", c$id$resp_h, a2; - ++n4; - check_term_conditions(); - } - timeout 100secs - { - print "4b", c$id$resp_h; - ++n4; - check_term_conditions(); - } - } - diff --git a/testing/btest/core/leaks/dns.zeek b/testing/btest/core/leaks/dns.zeek new file mode 100644 index 0000000000..e4f8c92cdb --- /dev/null +++ b/testing/btest/core/leaks/dns.zeek @@ -0,0 +1,81 @@ +# Needs perftools support. 
+# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -b -m -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +redef exit_only_after_terminate = T; + +const foo: set[addr] = { + google.com +}; + +global n1 = 0; +global n2 = 0; +global n3 = 0; +global n4 = 0; + +function check_term_conditions() + { + if ( n1 > 4 && n2 > 4 && n3 > 4 && n4 > 4 ) + terminate(); + } + +event connection_established(c: connection) + { + when ( local addrs = lookup_hostname("localhost") ) + { + print "1a", c$id$resp_h, addrs; + ++n1; + check_term_conditions(); + } + timeout 100secs + { + print "1b", c$id$resp_h; + ++n1; + check_term_conditions(); + } + + when ( local addrs2 = lookup_hostname("qq.ww.ee.rrrrr") ) + { + print "2a", c$id$resp_h, addrs2; + ++n2; + check_term_conditions(); + } + timeout 100secs + { + print "2b", c$id$resp_h; + ++n2; + check_term_conditions(); + } + + when ( local a = lookup_addr(c$id$resp_h) ) + { + print "3a", c$id$resp_h, a; + ++n3; + check_term_conditions(); + } + timeout 100secs + { + print "3b", c$id$resp_h; + ++n3; + check_term_conditions(); + } + + when ( local a2 = lookup_addr(1.2.3.4) ) + { + print "4a", c$id$resp_h, a2; + ++n4; + check_term_conditions(); + } + timeout 100secs + { + print "4b", c$id$resp_h; + ++n4; + check_term_conditions(); + } + } + diff --git a/testing/btest/core/leaks/dtls.bro b/testing/btest/core/leaks/dtls.bro deleted file mode 100644 index e7f75a530e..0000000000 --- a/testing/btest/core/leaks/dtls.bro +++ /dev/null @@ -1,15 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -b -m -r $TRACES/tls/dtls1_0.pcap %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -@load base/protocols/ssl - -event ssl_established(c: connection) &priority=3 - { - print "established"; - } diff --git a/testing/btest/core/leaks/dtls.zeek b/testing/btest/core/leaks/dtls.zeek new file mode 100644 index 0000000000..b7f27de91d --- /dev/null +++ b/testing/btest/core/leaks/dtls.zeek @@ -0,0 +1,15 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -b -m -r $TRACES/tls/dtls1_0.pcap %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +@load base/protocols/ssl + +event ssl_established(c: connection) &priority=3 + { + print "established"; + } diff --git a/testing/btest/core/leaks/exec.test b/testing/btest/core/leaks/exec.test index 4cc8240012..793954a9dc 100644 --- a/testing/btest/core/leaks/exec.test +++ b/testing/btest/core/leaks/exec.test @@ -2,12 +2,12 @@ # # @TEST-GROUP: leaks # -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -b ../exectest.bro +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run zeek zeek -m -b ../exectest.zeek # @TEST-EXEC: btest-bg-wait 60 -@TEST-START-FILE exectest.bro +@TEST-START-FILE exectest.zeek @load base/utils/exec redef exit_only_after_terminate = T; @@ -31,7 +31,7 @@ function test_cmd(label: string, cmd: Exec::Command) } } -event bro_init() +event zeek_init() { test_cmd("test1", [$cmd="bash ../somescript.sh", $read_files=set("out1", "out2")]); diff --git a/testing/btest/core/leaks/file-analysis-http-get.bro b/testing/btest/core/leaks/file-analysis-http-get.bro deleted file mode 100644 index 29aa6535a3..0000000000 --- a/testing/btest/core/leaks/file-analysis-http-get.bro +++ /dev/null @@ -1,15 +0,0 @@ -# Needs perftools support. -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-GROUP: leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -r $TRACES/http/get.trace $SCRIPTS/file-analysis-test.bro %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -redef test_file_analysis_source = "HTTP"; - -redef test_get_file_name = function(f: fa_file): string - { - return fmt("%s-file", f$id); - }; diff --git a/testing/btest/core/leaks/file-analysis-http-get.zeek b/testing/btest/core/leaks/file-analysis-http-get.zeek new file mode 100644 index 0000000000..6e0dae16be --- /dev/null +++ b/testing/btest/core/leaks/file-analysis-http-get.zeek @@ -0,0 +1,15 @@ +# Needs perftools support. +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-GROUP: leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -r $TRACES/http/get.trace $SCRIPTS/file-analysis-test.zeek %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +redef test_file_analysis_source = "HTTP"; + +redef test_get_file_name = function(f: fa_file): string + { + return fmt("%s-file", f$id); + }; diff --git a/testing/btest/core/leaks/gridftp.test b/testing/btest/core/leaks/gridftp.test index 4c7d31937d..4028df6b33 100644 --- a/testing/btest/core/leaks/gridftp.test +++ b/testing/btest/core/leaks/gridftp.test @@ -1,10 +1,10 @@ # Needs perftools support. # -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # # @TEST-GROUP: leaks # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -r $TRACES/globus-url-copy.trace %INPUT +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -r $TRACES/globus-url-copy.trace %INPUT # @TEST-EXEC: btest-bg-wait 60 @load base/protocols/ftp/gridftp diff --git a/testing/btest/core/leaks/gtp_opt_header.test b/testing/btest/core/leaks/gtp_opt_header.test index 79cc50d752..e11ecf1942 100644 --- a/testing/btest/core/leaks/gtp_opt_header.test +++ b/testing/btest/core/leaks/gtp_opt_header.test @@ -1,10 +1,10 @@ # Needs perftools support. # -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # # @TEST-GROUP: leaks # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -r $TRACES/tunnels/gtp/gtp6_gtp_0x32.pcap %INPUT >out +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run zeek zeek -m -r $TRACES/tunnels/gtp/gtp6_gtp_0x32.pcap %INPUT >out # @TEST-EXEC: btest-bg-wait 60 # Some GTPv1 headers have some optional fields totaling to a 4-byte extension diff --git a/testing/btest/core/leaks/hll_cluster.bro b/testing/btest/core/leaks/hll_cluster.bro deleted file mode 100644 index e565778fbc..0000000000 --- a/testing/btest/core/leaks/hll_cluster.bro +++ /dev/null @@ -1,115 +0,0 @@ -# Needs perftools support. -# -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: bro -m %INPUT>out -# @TEST-EXEC: btest-bg-run manager-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro -m %INPUT -# @TEST-EXEC: btest-bg-run worker-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro -m runnumber=1 %INPUT -# @TEST-EXEC: btest-bg-run worker-2 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro -m runnumber=2 %INPUT -# @TEST-EXEC: btest-bg-wait 60 -# -# @TEST-EXEC: btest-diff manager-1/.stdout -# @TEST-EXEC: btest-diff worker-1/.stdout -# @TEST-EXEC: btest-diff worker-2/.stdout - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], -}; -@TEST-END-FILE - -redef Log::default_rotation_interval = 0secs; - -global hll_data: event(data: opaque of cardinality); - -@if ( Cluster::local_node_type() == Cluster::WORKER ) - -event bro_init() - { - Broker::auto_publish(Cluster::manager_topic, hll_data); - } - -global runnumber: count &redef; # differentiate runs - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - local c = hll_cardinality_init(0.01, 0.95); - - local add1 = 2001; - local add2 = 2002; - local add3 = 2003; - - if ( runnumber == 1 ) - { - hll_cardinality_add(c, add1); - hll_cardinality_add(c, add2); - hll_cardinality_add(c, add3); - hll_cardinality_add(c, 1000); - hll_cardinality_add(c, 1001); - hll_cardinality_add(c, 101); - hll_cardinality_add(c, 1003); - hll_cardinality_add(c, 1004); - hll_cardinality_add(c, 1005); - hll_cardinality_add(c, 1006); - hll_cardinality_add(c, 1007); - hll_cardinality_add(c, 1008); - hll_cardinality_add(c, 1009); - print "This value should be around 13:"; - print hll_cardinality_estimate(c); - } - else if ( runnumber == 2 ) - { - hll_cardinality_add(c, add1); - hll_cardinality_add(c, add2); - hll_cardinality_add(c, add3); - hll_cardinality_add(c, 1); - hll_cardinality_add(c, 101); - hll_cardinality_add(c, 2); - hll_cardinality_add(c, 3); - hll_cardinality_add(c, 4); - hll_cardinality_add(c, 5); - hll_cardinality_add(c, 6); - hll_cardinality_add(c, 7); - hll_cardinality_add(c, 8); - print "This value should be about 12:"; - print hll_cardinality_estimate(c); - } - - event hll_data(c); - - terminate(); - } - -@endif - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) - -global result_count = 0; -global hll: opaque of cardinality; - -event bro_init() - { - hll = hll_cardinality_init(0.01, 0.95); - } - -event hll_data(data: opaque of cardinality) - { - hll_cardinality_merge_into(hll, data); - ++result_count; - - if ( 
result_count == 2 ) - { - print "This value should be about 21:"; - print hll_cardinality_estimate(hll); - terminate(); - } - } - -@endif diff --git a/testing/btest/core/leaks/hll_cluster.zeek b/testing/btest/core/leaks/hll_cluster.zeek new file mode 100644 index 0000000000..7d2b4c8850 --- /dev/null +++ b/testing/btest/core/leaks/hll_cluster.zeek @@ -0,0 +1,115 @@ +# Needs perftools support. +# +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: zeek -m %INPUT>out +# @TEST-EXEC: btest-bg-run manager-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek -m %INPUT +# @TEST-EXEC: btest-bg-run worker-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-1 zeek -m runnumber=1 %INPUT +# @TEST-EXEC: btest-bg-run worker-2 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-2 zeek -m runnumber=2 %INPUT +# @TEST-EXEC: btest-bg-wait 60 +# +# @TEST-EXEC: btest-diff manager-1/.stdout +# @TEST-EXEC: btest-diff worker-1/.stdout +# @TEST-EXEC: btest-diff worker-2/.stdout + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], +}; +@TEST-END-FILE + +redef Log::default_rotation_interval = 0secs; + +global hll_data: event(data: opaque of cardinality); + +@if ( Cluster::local_node_type() == Cluster::WORKER ) + +event zeek_init() + { + Broker::auto_publish(Cluster::manager_topic, hll_data); + } + +global runnumber: count &redef; # differentiate runs + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + local c = hll_cardinality_init(0.01, 0.95); + + local add1 = 2001; + local add2 = 2002; + local add3 = 2003; + + if ( runnumber == 1 ) + { + hll_cardinality_add(c, add1); + hll_cardinality_add(c, add2); + hll_cardinality_add(c, add3); + hll_cardinality_add(c, 1000); + hll_cardinality_add(c, 1001); + hll_cardinality_add(c, 101); + hll_cardinality_add(c, 1003); + hll_cardinality_add(c, 1004); + hll_cardinality_add(c, 1005); + hll_cardinality_add(c, 1006); + hll_cardinality_add(c, 1007); + hll_cardinality_add(c, 1008); + hll_cardinality_add(c, 1009); + print "This value should be around 13:"; + print hll_cardinality_estimate(c); + } + else if ( runnumber == 2 ) + { + hll_cardinality_add(c, add1); + hll_cardinality_add(c, add2); + hll_cardinality_add(c, add3); + hll_cardinality_add(c, 1); + hll_cardinality_add(c, 101); + hll_cardinality_add(c, 2); + hll_cardinality_add(c, 3); + hll_cardinality_add(c, 4); + hll_cardinality_add(c, 5); + hll_cardinality_add(c, 6); + hll_cardinality_add(c, 7); + hll_cardinality_add(c, 8); + print "This value should be about 12:"; + print hll_cardinality_estimate(c); + } + + event hll_data(c); + + terminate(); + } + +@endif + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + +global result_count = 0; +global hll: opaque of cardinality; + +event zeek_init() + { + hll = hll_cardinality_init(0.01, 0.95); + } + +event hll_data(data: opaque of cardinality) + { + hll_cardinality_merge_into(hll, data); + ++result_count; + + if ( result_count == 2 ) + { + print "This value should be 
about 21:"; + print hll_cardinality_estimate(hll); + terminate(); + } + } + +@endif diff --git a/testing/btest/core/leaks/hook.bro b/testing/btest/core/leaks/hook.bro deleted file mode 100644 index 0d991bc9a0..0000000000 --- a/testing/btest/core/leaks/hook.bro +++ /dev/null @@ -1,101 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -b -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -type rec: record { - a: count; - b: string; -}; - -global myhook: hook(r: rec); -global myhook2: hook(s: string); -# a hook doesn't have to take any arguments -global myhook4: hook(); - -hook myhook(r: rec) &priority=5 - { - print "myhook, &priority=5", r; - # break statement short-circuits the hook handling chain. - break; - print "ERROR: break statement should return from hook handler body"; - } - -hook myhook(r: rec) - { - # This handler shouldn't execute ever because of the handler at priority=5 - # exiting the body from a "break" statement. - print "myhook, &priority=0", rec; - } - -hook myhook(r: rec) &priority=10 - { - print "myhook, &priority=10", r; - # modifications to the record argument will be seen by remaining handlers. - r$a = 37; - r$b = "goobye world"; - # returning from the handler early, is fine, remaining handlers still run. - return; - print "ERROR: return statement should return from hook handler body"; - } - -hook myhook(r: rec) &priority=9 - { - print "myhook return F"; - # return value is ignored, remaining handlers still run, final return - # value is whether any hook body returned via break statement - return F; - print "ERROR: return statement should return from hook handler body"; - } - -hook myhook(r: rec) &priority=8 - { - print "myhook return T"; - # return value is ignored, remaining handlers still run, final return - # value is whether any hook body returned via break statement - return T; - print "ERROR: return statement should return from hook handler body"; - } - -# hook function doesn't need a declaration, we can go straight to defining -# a handler body. -hook myhook3(i: count) - { - print "myhook3", i; - } - -hook myhook4() &priority=1 - { - print "myhook4", 1; - } - -hook myhook4() &priority=2 - { - print "myhook4", 2; - } - -event new_connection(c: connection) - { - print "new_connection", c$id; - - print hook myhook([$a=1156, $b="hello world"]); - - # A hook with no handlers is fine, it's just a no-op. - print hook myhook2("nope"); - - print hook myhook3(8); - print hook myhook4(); - if ( hook myhook4() ) - { - print "myhook4 all handlers ran"; - } - - # A hook can be treated like other data types and doesn't have to be - # invoked directly by name. - local h = myhook; - print hook h([$a=2, $b="it works"]); - } diff --git a/testing/btest/core/leaks/hook.zeek b/testing/btest/core/leaks/hook.zeek new file mode 100644 index 0000000000..5f25a8a011 --- /dev/null +++ b/testing/btest/core/leaks/hook.zeek @@ -0,0 +1,101 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run zeek zeek -m -b -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +type rec: record { + a: count; + b: string; +}; + +global myhook: hook(r: rec); +global myhook2: hook(s: string); +# a hook doesn't have to take any arguments +global myhook4: hook(); + +hook myhook(r: rec) &priority=5 + { + print "myhook, &priority=5", r; + # break statement short-circuits the hook handling chain. + break; + print "ERROR: break statement should return from hook handler body"; + } + +hook myhook(r: rec) + { + # This handler shouldn't execute ever because of the handler at priority=5 + # exiting the body from a "break" statement. + print "myhook, &priority=0", rec; + } + +hook myhook(r: rec) &priority=10 + { + print "myhook, &priority=10", r; + # modifications to the record argument will be seen by remaining handlers. + r$a = 37; + r$b = "goobye world"; + # returning from the handler early, is fine, remaining handlers still run. + return; + print "ERROR: return statement should return from hook handler body"; + } + +hook myhook(r: rec) &priority=9 + { + print "myhook return F"; + # return value is ignored, remaining handlers still run, final return + # value is whether any hook body returned via break statement + return F; + print "ERROR: return statement should return from hook handler body"; + } + +hook myhook(r: rec) &priority=8 + { + print "myhook return T"; + # return value is ignored, remaining handlers still run, final return + # value is whether any hook body returned via break statement + return T; + print "ERROR: return statement should return from hook handler body"; + } + +# hook function doesn't need a declaration, we can go straight to defining +# a handler body. +hook myhook3(i: count) + { + print "myhook3", i; + } + +hook myhook4() &priority=1 + { + print "myhook4", 1; + } + +hook myhook4() &priority=2 + { + print "myhook4", 2; + } + +event new_connection(c: connection) + { + print "new_connection", c$id; + + print hook myhook([$a=1156, $b="hello world"]); + + # A hook with no handlers is fine, it's just a no-op. + print hook myhook2("nope"); + + print hook myhook3(8); + print hook myhook4(); + if ( hook myhook4() ) + { + print "myhook4 all handlers ran"; + } + + # A hook can be treated like other data types and doesn't have to be + # invoked directly by name. + local h = myhook; + print hook h([$a=2, $b="it works"]); + } diff --git a/testing/btest/core/leaks/http-connect.bro b/testing/btest/core/leaks/http-connect.bro deleted file mode 100644 index 8a7f1c8146..0000000000 --- a/testing/btest/core/leaks/http-connect.bro +++ /dev/null @@ -1,14 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -b -m -r $TRACES/http/connect-with-smtp.trace %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -@load base/protocols/conn -@load base/protocols/http -@load base/protocols/smtp -@load base/protocols/tunnels -@load base/frameworks/dpd diff --git a/testing/btest/core/leaks/http-connect.zeek b/testing/btest/core/leaks/http-connect.zeek new file mode 100644 index 0000000000..c18871c55d --- /dev/null +++ b/testing/btest/core/leaks/http-connect.zeek @@ -0,0 +1,14 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run zeek zeek -b -m -r $TRACES/http/connect-with-smtp.trace %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +@load base/protocols/conn +@load base/protocols/http +@load base/protocols/smtp +@load base/protocols/tunnels +@load base/frameworks/dpd diff --git a/testing/btest/core/leaks/incr-vec-expr.test b/testing/btest/core/leaks/incr-vec-expr.test index 42d9d9f820..ff6117feea 100644 --- a/testing/btest/core/leaks/incr-vec-expr.test +++ b/testing/btest/core/leaks/incr-vec-expr.test @@ -1,10 +1,10 @@ # Needs perftools support. # -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # # @TEST-GROUP: leaks # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -b -m -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -b -m -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT # @TEST-EXEC: btest-bg-wait 60 type rec: record { diff --git a/testing/btest/core/leaks/input-basic.bro b/testing/btest/core/leaks/input-basic.bro deleted file mode 100644 index 2f2ecf802d..0000000000 --- a/testing/btest/core/leaks/input-basic.bro +++ /dev/null @@ -1,67 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -b %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -redef exit_only_after_terminate = T; - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc ss se vc ve ns -#types bool int enum count port subnet addr double time interval string table table table vector vector string -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY 4242 -@TEST-END-FILE - -@load base/protocols/ssh - -global outfile: file; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - b: bool; - e: Log::ID; - c: count; - p: port; - sn: subnet; - a: addr; - d: double; - t: time; - iv: interval; - s: string; - ns: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of int; - ve: vector of int; -}; - -global servers: table[int] of Val = table(); - -event bro_init() - { - outfile = open("../out"); - # first read in the old stuff into the table... - Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); - } - -event Input::end_of_data(name: string, source:string) - { - print outfile, servers; - print outfile, to_count(servers[-42]$ns); # try to actually use a string. If null-termination is wrong this will fail. - Input::remove("ssh"); - close(outfile); - terminate(); - } diff --git a/testing/btest/core/leaks/input-basic.zeek b/testing/btest/core/leaks/input-basic.zeek new file mode 100644 index 0000000000..8903fa0409 --- /dev/null +++ b/testing/btest/core/leaks/input-basic.zeek @@ -0,0 +1,67 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run zeek zeek -m -b %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +redef exit_only_after_terminate = T; + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve ns +#types bool int enum count port subnet addr double time interval string table table table vector vector string +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY 4242 +@TEST-END-FILE + +@load base/protocols/ssh + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + ns: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of int; + ve: vector of int; +}; + +global servers: table[int] of Val = table(); + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, servers; + print outfile, to_count(servers[-42]$ns); # try to actually use a string. If null-termination is wrong this will fail. + Input::remove("ssh"); + close(outfile); + terminate(); + } diff --git a/testing/btest/core/leaks/input-errors.bro b/testing/btest/core/leaks/input-errors.bro deleted file mode 100644 index 589579779f..0000000000 --- a/testing/btest/core/leaks/input-errors.bro +++ /dev/null @@ -1,196 +0,0 @@ -# Needs perftools support. -# Test different kinds of errors of the input framework -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run bro bro -m -b %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc ss se vc ve ns -#types bool int enum count port subnet addr double time interval string table table table vector vector string -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY 4242 -@TEST-END-FILE - -redef Input::accept_unsupported_types = T; - -redef exit_only_after_terminate = T; - -module Test; - -global outfile: file; - -type Idx: record { - c: count; -}; - -type Idx2: record { - c: count; - i: int; -}; - -type FileVal: record { - i: int; - s: file; -}; - -type Val: record { - i: int; - s: string; - a: addr; -}; - -type OptionalRecordVal: record { - i: int; - r: FileVal &optional; -}; - -type OptionalFileVal: record { - i: int; - s: file &optional; -}; - -global file_table: table[count] of FileVal = table(); -global optional_file_table: table[count] of OptionalFileVal = table(); -global record_table: table[count] of OptionalRecordVal = table(); -global string_table: table[string] of OptionalRecordVal = table(); - -global val_table: table[count] of Val = table(); -global val_table2: table[count, int] of Val = table(); -global val_table3: table[count, int] of int = table(); -global val_table4: table[count] of int; - -event line_file(description: Input::EventDescription, tpe: Input::Event, r:FileVal) - { - print outfile, description$name; - print outfile, r; - } - -event optional_line_file(description: Input::EventDescription, tpe: Input::Event, r:OptionalFileVal) - { - print outfile, description$name; - print outfile, r; - } - -event line_record(description: Input::EventDescription, tpe: Input::Event, r: OptionalRecordVal) - { - print outfile, description$name; - print outfile, r; - } - -event event1(description: Input::EventDescription, tpe: Input::Event, r: OptionalRecordVal, r2: OptionalRecordVal) - { - } - -event event2(description: Input::TableDescription, tpe: string, r: OptionalRecordVal, r2: OptionalRecordVal) - { - } - -event event3(description: Input::TableDescription, tpe: Input::Event, r: OptionalRecordVal, r2: OptionalRecordVal) - { - } - -event event4(description: Input::TableDescription, tpe: Input::Event, r: Idx, r2: OptionalRecordVal) - { - } - -event event5(description: Input::EventDescription, tpe: string, r: OptionalRecordVal, r2: OptionalRecordVal) - { - } - -event event6(description: Input::EventDescription, tpe: Input::Event, r: OptionalRecordVal) - { - } - -event event7(description: Input::EventDescription, tpe: Input::Event, r: OptionalRecordVal, r2:OptionalRecordVal) - { - } - -event event8(description: Input::EventDescription, tpe: Input::Event, i: int, s:string, a:string) - { - } - -event event9(description: Input::EventDescription, tpe: Input::Event, i: int, s:string, a:addr, ii: int) - { - } - -event event10(description: Input::TableDescription, tpe: Input::Event, i: Idx, c: count) - { - } - -# these are legit to test the error events -event event11(description: Input::EventDescription, tpe: Input::Event, v: Val) - { - } - -event errorhandler1(desc: Input::TableDescription, msg: string, level: Reporter::Level) - { - } - -event errorhandler2(desc: Input::EventDescription, msg: string, level: Reporter::Level) - { - } - -event errorhandler3(desc: string, msg: string, level: Reporter::Level) - { - } - -event errorhandler4(desc: Input::EventDescription, msg: count, level: Reporter::Level) - { - } - -event 
errorhandler5(desc: Input::EventDescription, msg: string, level: count) - { - } - -event kill_me() - { - terminate(); - } - -event bro_init() - { - outfile = open("out"); - Input::add_event([$source="input.log", $name="file", $fields=FileVal, $ev=line_file, $want_record=T]); - Input::add_event([$source="input.log", $name="optionalrecord", $fields=OptionalRecordVal, $ev=line_record, $want_record=T]); - Input::add_event([$source="input.log", $name="optionalfile", $fields=OptionalFileVal, $ev=optional_line_file, $want_record=T]); - Input::add_table([$source="input.log", $name="filetable", $idx=Idx, $val=FileVal, $destination=file_table]); - Input::add_table([$source="input.log", $name="optionalrecordtable", $idx=Idx, $val=OptionalRecordVal, $destination=record_table]); - Input::add_table([$source="input.log", $name="optionalfiletable", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table]); - Input::add_table([$source="input.log", $name="optionalfiletable", $idx=Idx, $val=OptionalFileVal, $destination=record_table]); - Input::add_table([$source="input.log", $name="optionalfiletable2", $idx=Idx, $val=OptionalFileVal, $destination=string_table]); - Input::add_table([$source="input.log", $name="optionalfiletable3", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=terminate]); - Input::add_table([$source="input.log", $name="optionalfiletable3", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=kill_me]); - Input::add_table([$source="input.log", $name="optionalfiletable4", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event1]); - Input::add_table([$source="input.log", $name="optionalfiletable5", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event2]); - Input::add_table([$source="input.log", $name="optionalfiletable6", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event3]); - Input::add_table([$source="input.log", $name="optionalfiletable7", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event4]); - Input::add_table([$source="input.log", $name="optionalfiletable8", $idx=Idx, $val=Val, $destination=val_table4, $want_record=F]); - Input::add_table([$source="input.log", $name="optionalfiletable9", $idx=Idx2, $val=Val, $destination=val_table, $want_record=F]); - Input::add_table([$source="input.log", $name="optionalfiletable10", $idx=Idx, $val=Val, $destination=val_table2, $want_record=F]); - Input::add_table([$source="input.log", $name="optionalfiletable11", $idx=Idx2, $val=Idx, $destination=val_table3, $want_record=F]); - Input::add_table([$source="input.log", $name="optionalfiletable12", $idx=Idx2, $val=Idx, $destination=val_table2, $want_record=F]); - Input::add_table([$source="input.log", $name="optionalfiletable14", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event10, $want_record=F]); - Input::add_table([$source="input.log", $name="optionalfiletable15", $idx=Idx2, $val=Idx, $destination=val_table2, $want_record=T]); - Input::add_event([$source="input.log", $name="event1", $fields=OptionalFileVal, $ev=terminate, $want_record=T]); - Input::add_event([$source="input.log", $name="event2", $fields=OptionalFileVal, $ev=kill_me, $want_record=T]); - Input::add_event([$source="input.log", $name="event3", $fields=OptionalFileVal, $ev=event3, $want_record=T]); - Input::add_event([$source="input.log", $name="event4", $fields=OptionalFileVal, $ev=event5, $want_record=T]); - Input::add_event([$source="input.log", $name="event5", 
$fields=OptionalFileVal, $ev=event6, $want_record=T]); - Input::add_event([$source="input.log", $name="event6", $fields=OptionalFileVal, $ev=event7, $want_record=T]); - Input::add_event([$source="input.log", $name="event7", $fields=OptionalFileVal, $ev=event7, $want_record=F]); - Input::add_event([$source="input.log", $name="event8", $fields=Val, $ev=event8, $want_record=F]); - Input::add_event([$source="input.log", $name="event9", $fields=Val, $ev=event9, $want_record=F]); - - Input::add_event([$source="input.log", $name="error1", $fields=Val, $ev=event11, $want_record=T, $error_ev=errorhandler1]); - Input::add_table([$source="input.log", $name="error2", $idx=Idx, $val=Val, $destination=val_table, $error_ev=errorhandler2]); - Input::add_event([$source="input.log", $name="error3", $fields=Val, $ev=event11, $want_record=T, $error_ev=errorhandler3]); - Input::add_event([$source="input.log", $name="error4", $fields=Val, $ev=event11, $want_record=T, $error_ev=errorhandler4]); - Input::add_event([$source="input.log", $name="error5", $fields=Val, $ev=event11, $want_record=T, $error_ev=errorhandler5]); - - schedule 3secs { kill_me() }; - } diff --git a/testing/btest/core/leaks/input-errors.zeek b/testing/btest/core/leaks/input-errors.zeek new file mode 100644 index 0000000000..7262e16c06 --- /dev/null +++ b/testing/btest/core/leaks/input-errors.zeek @@ -0,0 +1,196 @@ +# Needs perftools support. +# Test different kinds of errors of the input framework +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -b %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve ns +#types bool int enum count port subnet addr double time interval string table table table vector vector string +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY 4242 +@TEST-END-FILE + +redef Input::accept_unsupported_types = T; + +redef exit_only_after_terminate = T; + +module Test; + +global outfile: file; + +type Idx: record { + c: count; +}; + +type Idx2: record { + c: count; + i: int; +}; + +type FileVal: record { + i: int; + s: file; +}; + +type Val: record { + i: int; + s: string; + a: addr; +}; + +type OptionalRecordVal: record { + i: int; + r: FileVal &optional; +}; + +type OptionalFileVal: record { + i: int; + s: file &optional; +}; + +global file_table: table[count] of FileVal = table(); +global optional_file_table: table[count] of OptionalFileVal = table(); +global record_table: table[count] of OptionalRecordVal = table(); +global string_table: table[string] of OptionalRecordVal = table(); + +global val_table: table[count] of Val = table(); +global val_table2: table[count, int] of Val = table(); +global val_table3: table[count, int] of int = table(); +global val_table4: table[count] of int; + +event line_file(description: Input::EventDescription, tpe: Input::Event, r:FileVal) + { + print outfile, description$name; + print outfile, r; + } + +event optional_line_file(description: Input::EventDescription, tpe: Input::Event, r:OptionalFileVal) + { + print outfile, description$name; + print outfile, r; + } + +event line_record(description: Input::EventDescription, tpe: Input::Event, r: OptionalRecordVal) + { + print outfile, description$name; + print outfile, r; + } + +event event1(description: Input::EventDescription, tpe: Input::Event, r: 
OptionalRecordVal, r2: OptionalRecordVal) + { + } + +event event2(description: Input::TableDescription, tpe: string, r: OptionalRecordVal, r2: OptionalRecordVal) + { + } + +event event3(description: Input::TableDescription, tpe: Input::Event, r: OptionalRecordVal, r2: OptionalRecordVal) + { + } + +event event4(description: Input::TableDescription, tpe: Input::Event, r: Idx, r2: OptionalRecordVal) + { + } + +event event5(description: Input::EventDescription, tpe: string, r: OptionalRecordVal, r2: OptionalRecordVal) + { + } + +event event6(description: Input::EventDescription, tpe: Input::Event, r: OptionalRecordVal) + { + } + +event event7(description: Input::EventDescription, tpe: Input::Event, r: OptionalRecordVal, r2:OptionalRecordVal) + { + } + +event event8(description: Input::EventDescription, tpe: Input::Event, i: int, s:string, a:string) + { + } + +event event9(description: Input::EventDescription, tpe: Input::Event, i: int, s:string, a:addr, ii: int) + { + } + +event event10(description: Input::TableDescription, tpe: Input::Event, i: Idx, c: count) + { + } + +# these are legit to test the error events +event event11(description: Input::EventDescription, tpe: Input::Event, v: Val) + { + } + +event errorhandler1(desc: Input::TableDescription, msg: string, level: Reporter::Level) + { + } + +event errorhandler2(desc: Input::EventDescription, msg: string, level: Reporter::Level) + { + } + +event errorhandler3(desc: string, msg: string, level: Reporter::Level) + { + } + +event errorhandler4(desc: Input::EventDescription, msg: count, level: Reporter::Level) + { + } + +event errorhandler5(desc: Input::EventDescription, msg: string, level: count) + { + } + +event kill_me() + { + terminate(); + } + +event zeek_init() + { + outfile = open("out"); + Input::add_event([$source="input.log", $name="file", $fields=FileVal, $ev=line_file, $want_record=T]); + Input::add_event([$source="input.log", $name="optionalrecord", $fields=OptionalRecordVal, $ev=line_record, $want_record=T]); + Input::add_event([$source="input.log", $name="optionalfile", $fields=OptionalFileVal, $ev=optional_line_file, $want_record=T]); + Input::add_table([$source="input.log", $name="filetable", $idx=Idx, $val=FileVal, $destination=file_table]); + Input::add_table([$source="input.log", $name="optionalrecordtable", $idx=Idx, $val=OptionalRecordVal, $destination=record_table]); + Input::add_table([$source="input.log", $name="optionalfiletable", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table]); + Input::add_table([$source="input.log", $name="optionalfiletable", $idx=Idx, $val=OptionalFileVal, $destination=record_table]); + Input::add_table([$source="input.log", $name="optionalfiletable2", $idx=Idx, $val=OptionalFileVal, $destination=string_table]); + Input::add_table([$source="input.log", $name="optionalfiletable3", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=terminate]); + Input::add_table([$source="input.log", $name="optionalfiletable3", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=kill_me]); + Input::add_table([$source="input.log", $name="optionalfiletable4", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event1]); + Input::add_table([$source="input.log", $name="optionalfiletable5", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event2]); + Input::add_table([$source="input.log", $name="optionalfiletable6", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event3]); + 
Input::add_table([$source="input.log", $name="optionalfiletable7", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event4]); + Input::add_table([$source="input.log", $name="optionalfiletable8", $idx=Idx, $val=Val, $destination=val_table4, $want_record=F]); + Input::add_table([$source="input.log", $name="optionalfiletable9", $idx=Idx2, $val=Val, $destination=val_table, $want_record=F]); + Input::add_table([$source="input.log", $name="optionalfiletable10", $idx=Idx, $val=Val, $destination=val_table2, $want_record=F]); + Input::add_table([$source="input.log", $name="optionalfiletable11", $idx=Idx2, $val=Idx, $destination=val_table3, $want_record=F]); + Input::add_table([$source="input.log", $name="optionalfiletable12", $idx=Idx2, $val=Idx, $destination=val_table2, $want_record=F]); + Input::add_table([$source="input.log", $name="optionalfiletable14", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event10, $want_record=F]); + Input::add_table([$source="input.log", $name="optionalfiletable15", $idx=Idx2, $val=Idx, $destination=val_table2, $want_record=T]); + Input::add_event([$source="input.log", $name="event1", $fields=OptionalFileVal, $ev=terminate, $want_record=T]); + Input::add_event([$source="input.log", $name="event2", $fields=OptionalFileVal, $ev=kill_me, $want_record=T]); + Input::add_event([$source="input.log", $name="event3", $fields=OptionalFileVal, $ev=event3, $want_record=T]); + Input::add_event([$source="input.log", $name="event4", $fields=OptionalFileVal, $ev=event5, $want_record=T]); + Input::add_event([$source="input.log", $name="event5", $fields=OptionalFileVal, $ev=event6, $want_record=T]); + Input::add_event([$source="input.log", $name="event6", $fields=OptionalFileVal, $ev=event7, $want_record=T]); + Input::add_event([$source="input.log", $name="event7", $fields=OptionalFileVal, $ev=event7, $want_record=F]); + Input::add_event([$source="input.log", $name="event8", $fields=Val, $ev=event8, $want_record=F]); + Input::add_event([$source="input.log", $name="event9", $fields=Val, $ev=event9, $want_record=F]); + + Input::add_event([$source="input.log", $name="error1", $fields=Val, $ev=event11, $want_record=T, $error_ev=errorhandler1]); + Input::add_table([$source="input.log", $name="error2", $idx=Idx, $val=Val, $destination=val_table, $error_ev=errorhandler2]); + Input::add_event([$source="input.log", $name="error3", $fields=Val, $ev=event11, $want_record=T, $error_ev=errorhandler3]); + Input::add_event([$source="input.log", $name="error4", $fields=Val, $ev=event11, $want_record=T, $error_ev=errorhandler4]); + Input::add_event([$source="input.log", $name="error5", $fields=Val, $ev=event11, $want_record=T, $error_ev=errorhandler5]); + + schedule 3secs { kill_me() }; + } diff --git a/testing/btest/core/leaks/input-missing-enum.bro b/testing/btest/core/leaks/input-missing-enum.bro deleted file mode 100644 index 9037e15ed0..0000000000 --- a/testing/btest/core/leaks/input-missing-enum.bro +++ /dev/null @@ -1,41 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run bro bro -m -b %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -@TEST-START-FILE input.log -#fields e i -IdoNot::Exist 1 -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - e: Log::ID; -}; - -global etable: table[int] of Log::ID = table(); - -event bro_init() - { - # first read in the old stuff into the table... - Input::add_table([$source="../input.log", $name="enum", $idx=Idx, $val=Val, $destination=etable, $want_record=F]); - } - -event Input::end_of_data(name: string, source:string) - { - print "Table:"; - print etable; - Input::remove("enum"); - terminate(); - } diff --git a/testing/btest/core/leaks/input-missing-enum.zeek b/testing/btest/core/leaks/input-missing-enum.zeek new file mode 100644 index 0000000000..9c34d163dd --- /dev/null +++ b/testing/btest/core/leaks/input-missing-enum.zeek @@ -0,0 +1,41 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -b %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +@TEST-START-FILE input.log +#fields e i +IdoNot::Exist 1 +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + e: Log::ID; +}; + +global etable: table[int] of Log::ID = table(); + +event zeek_init() + { + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="enum", $idx=Idx, $val=Val, $destination=etable, $want_record=F]); + } + +event Input::end_of_data(name: string, source:string) + { + print "Table:"; + print etable; + Input::remove("enum"); + terminate(); + } diff --git a/testing/btest/core/leaks/input-optional-event.bro b/testing/btest/core/leaks/input-optional-event.bro deleted file mode 100644 index ca141e1c4e..0000000000 --- a/testing/btest/core/leaks/input-optional-event.bro +++ /dev/null @@ -1,65 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -b %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields i b r.a r.b r.c -#types int bool string string string -1 T a b c -2 T a b c -3 F ba bb bc -4 T bb bd - -5 F a b c -6 T a b c -7 T a b c -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Sub: record { - a: string; - aa: string &optional; - b : string; - bb: string &optional; - c: string &optional; - d: string &optional; -}; - -type Val: record { - i: int; - b: bool; - notb: bool &optional; - r: Sub; -}; - -event servers(desc: Input::EventDescription, tpe: Input::Event, item: Val) - { - print outfile, item; - } - -event bro_init() - { - outfile = open("../out"); - # first read in the old stuff into the table... - Input::add_event([$source="../input.log", $name="input", $fields=Val, $ev=servers]); - } - -event Input::end_of_data(name: string, source: string) - { - Input::remove("input"); - close(outfile); - terminate(); - } diff --git a/testing/btest/core/leaks/input-optional-event.zeek b/testing/btest/core/leaks/input-optional-event.zeek new file mode 100644 index 0000000000..500a076ed6 --- /dev/null +++ b/testing/btest/core/leaks/input-optional-event.zeek @@ -0,0 +1,65 @@ +# Needs perftools support. 
+# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -b %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields i b r.a r.b r.c +#types int bool string string string +1 T a b c +2 T a b c +3 F ba bb bc +4 T bb bd - +5 F a b c +6 T a b c +7 T a b c +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Sub: record { + a: string; + aa: string &optional; + b : string; + bb: string &optional; + c: string &optional; + d: string &optional; +}; + +type Val: record { + i: int; + b: bool; + notb: bool &optional; + r: Sub; +}; + +event servers(desc: Input::EventDescription, tpe: Input::Event, item: Val) + { + print outfile, item; + } + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... + Input::add_event([$source="../input.log", $name="input", $fields=Val, $ev=servers]); + } + +event Input::end_of_data(name: string, source: string) + { + Input::remove("input"); + close(outfile); + terminate(); + } diff --git a/testing/btest/core/leaks/input-optional-table.bro b/testing/btest/core/leaks/input-optional-table.bro deleted file mode 100644 index 95871b1516..0000000000 --- a/testing/btest/core/leaks/input-optional-table.bro +++ /dev/null @@ -1,68 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -b %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields i b r.a r.b r.c -#types int bool string string string -1 T a b c -2 T a b c -3 F ba bb bc -4 T bb bd - -5 T a b c -6 F a b c -7 T a b c -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Sub: record { - a: string; - aa: string &optional; - b : string; - bb: string &optional; - c: string &optional; - d: string &optional; -}; - -type Idx: record { - i: int; -}; - -type Val: record { - b: bool; - notb: bool &optional; - r: Sub; -}; - -global servers: table[int] of Val = table(); - -event bro_init() - { - outfile = open("../out"); - # first read in the old stuff into the table... - Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, - $pred(typ: Input::Event, left: Idx, right: Val) = { right$notb = !right$b; return T; } - ]); - } - -event Input::end_of_data(name: string, source: string) - { - print outfile, servers; - Input::remove("input"); - close(outfile); - terminate(); - } diff --git a/testing/btest/core/leaks/input-optional-table.zeek b/testing/btest/core/leaks/input-optional-table.zeek new file mode 100644 index 0000000000..09f50fb8c8 --- /dev/null +++ b/testing/btest/core/leaks/input-optional-table.zeek @@ -0,0 +1,68 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run zeek zeek -m -b %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields i b r.a r.b r.c +#types int bool string string string +1 T a b c +2 T a b c +3 F ba bb bc +4 T bb bd - +5 T a b c +6 F a b c +7 T a b c +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Sub: record { + a: string; + aa: string &optional; + b : string; + bb: string &optional; + c: string &optional; + d: string &optional; +}; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; + notb: bool &optional; + r: Sub; +}; + +global servers: table[int] of Val = table(); + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, + $pred(typ: Input::Event, left: Idx, right: Val) = { right$notb = !right$b; return T; } + ]); + } + +event Input::end_of_data(name: string, source: string) + { + print outfile, servers; + Input::remove("input"); + close(outfile); + terminate(); + } diff --git a/testing/btest/core/leaks/input-patterns.zeek b/testing/btest/core/leaks/input-patterns.zeek new file mode 100644 index 0000000000..62a7976434 --- /dev/null +++ b/testing/btest/core/leaks/input-patterns.zeek @@ -0,0 +1,52 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -b %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +redef exit_only_after_terminate = T; + +@TEST-START-FILE input.log +#separator \x09 +#fields i p +#types count pattern +1 /dog/ +2 /cat/ +3 /foo|bar/ +4 /^oob/ +@TEST-END-FILE + +global outfile: file; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + p: pattern; +}; + +global pats: table[int] of Val = table(); + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="pats", $idx=Idx, $val=Val, $destination=pats]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, (pats[3]$p in "foobar"); # T + print outfile, (pats[4]$p in "foobar"); # F + print outfile, (pats[3]$p == "foo"); # T + print outfile, pats; + Input::remove("pats"); + close(outfile); + terminate(); + } diff --git a/testing/btest/core/leaks/input-raw.bro b/testing/btest/core/leaks/input-raw.bro deleted file mode 100644 index 608ea25030..0000000000 --- a/testing/btest/core/leaks/input-raw.bro +++ /dev/null @@ -1,72 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: cp input1.log input.log -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -b %INPUT -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got2 60 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: cat input2.log >> input.log -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got6 15 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: cat input3.log >> input.log -# @TEST-EXEC: btest-bg-wait 60 - -redef exit_only_after_terminate = T; - -@TEST-START-FILE input1.log -sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF -@TEST-END-FILE - -@TEST-START-FILE input2.log -DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF -q3r3057fdf -@TEST-END-FILE - -@TEST-START-FILE input3.log -sdfs\d - -dfsdf -sdf -3rw43wRRERLlL#RWERERERE. 
-@TEST-END-FILE - - -module A; - -type Val: record { - s: string; -}; - -global try: count; -global outfile: file; - -event line(description: Input::EventDescription, tpe: Input::Event, s: string) - { - print outfile, description$name; - print outfile, tpe; - print outfile, s; - - try = try + 1; - - if ( try == 2 ) - system("touch got2"); - else if ( try == 6 ) - system("touch got6"); - else if ( try == 16 ) - { - print outfile, "done"; - close(outfile); - Input::remove("input"); - Input::remove("tail"); - terminate(); - } - } - -event bro_init() - { - outfile = open("../out"); - try = 0; - Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $want_record=F]); - Input::add_event([$source="tail -f ../input.log |", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="tail", $fields=Val, $ev=line, $want_record=F]); - } diff --git a/testing/btest/core/leaks/input-raw.zeek b/testing/btest/core/leaks/input-raw.zeek new file mode 100644 index 0000000000..938875987c --- /dev/null +++ b/testing/btest/core/leaks/input-raw.zeek @@ -0,0 +1,72 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: cp input1.log input.log +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -b %INPUT +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got2 60 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: cat input2.log >> input.log +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got6 15 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: cat input3.log >> input.log +# @TEST-EXEC: btest-bg-wait 60 + +redef exit_only_after_terminate = T; + +@TEST-START-FILE input1.log +sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF +@TEST-END-FILE + +@TEST-START-FILE input2.log +DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF +q3r3057fdf +@TEST-END-FILE + +@TEST-START-FILE input3.log +sdfs\d + +dfsdf +sdf +3rw43wRRERLlL#RWERERERE. +@TEST-END-FILE + + +module A; + +type Val: record { + s: string; +}; + +global try: count; +global outfile: file; + +event line(description: Input::EventDescription, tpe: Input::Event, s: string) + { + print outfile, description$name; + print outfile, tpe; + print outfile, s; + + try = try + 1; + + if ( try == 2 ) + system("touch got2"); + else if ( try == 6 ) + system("touch got6"); + else if ( try == 16 ) + { + print outfile, "done"; + close(outfile); + Input::remove("input"); + Input::remove("tail"); + terminate(); + } + } + +event zeek_init() + { + outfile = open("../out"); + try = 0; + Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $want_record=F]); + Input::add_event([$source="tail -f ../input.log |", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="tail", $fields=Val, $ev=line, $want_record=F]); + } diff --git a/testing/btest/core/leaks/input-reread.bro b/testing/btest/core/leaks/input-reread.bro deleted file mode 100644 index 8b6295c15d..0000000000 --- a/testing/btest/core/leaks/input-reread.bro +++ /dev/null @@ -1,164 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: cp input1.log input.log -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run bro bro -m -b %INPUT -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got2 60 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: cp input2.log input.log -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got4 10 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: cp input3.log input.log -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got6 10 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: cp input4.log input.log -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got8 10 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: cp input5.log input.log -# @TEST-EXEC: btest-bg-wait 120 - -@TEST-START-FILE input1.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc ss se vc ve r.a r.b -#types bool int enum count port subnet addr double time interval string table table table vector vector string string -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fortytwo - -@TEST-END-FILE -@TEST-START-FILE input2.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc ss se vc ve r.a r.b -#types bool int enum count port subnet addr double time interval string table table table vector vector string string -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fortytwo - -T -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fortythree 43 -@TEST-END-FILE -@TEST-START-FILE input3.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc ss se vc ve r.a r.b -#types bool int enum count port subnet addr double time interval string table table table vector vector string string -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fortytwo - -F -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fortythree 43 -@TEST-END-FILE -@TEST-START-FILE input4.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc ss se vc ve r.a r.b r.d -#types bool int enum count port subnet addr double time interval string table table table vector vector string string string -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fortytwo - - -F -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fortythree 43 - -F -44 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fortyfour - - -F -45 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fourtyfive - - -F -46 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fourtysix - - -F -47 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fourtyseven - - -F -48 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fourtyeight 48 f -@TEST-END-FILE -@TEST-START-FILE input5.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc ss se vc ve r.a r.b r.d -#types bool int enum count port subnet addr double time interval string table table table vector vector string string string -F -48 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY 
fourtyeight 48 f -@TEST-END-FILE - -@load base/protocols/ssh - -redef exit_only_after_terminate = T; -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Sub: record { - a: string; - b: string &optional; - c: string &optional; - d: string &optional; -}; - -type Idx: record { - i: int; -}; - -type Val: record { - b: bool; - e: Log::ID; - c: count; - p: port; - sn: subnet; - a: addr; - d: double; - t: time; - iv: interval; - s: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of int; - ve: vector of int; - r: Sub; -}; - -global servers: table[int] of Val = table(); - -global outfile: file; - -global try: count; - -event servers_ev(description: Input::EventDescription, tpe: Input::Event, item: Val) - { - print outfile, "============EVENT EVENT============"; - print outfile, item; - } - -event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) - { - print outfile, "============TABLE EVENT============"; - print outfile, "Left"; - print outfile, left; - print outfile, "Right"; - print outfile, right; - } - -event bro_init() - { - outfile = open("../out"); - try = 0; - # first read in the old stuff into the table... - Input::add_table([$source="../input.log", $mode=Input::REREAD, $name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line, - $pred(typ: Input::Event, left: Idx, right: Val) = { - print outfile, "============PREDICATE============"; - print outfile, left; - print outfile, right; - return T; - } - ]); - Input::add_event([$source="../input.log", $mode=Input::REREAD, $name="sshevent", $fields=Val, $ev=servers_ev]); - } - - -event Input::end_of_data(name: string, source: string) - { - if ( name == "ssh" ) { - print outfile, "==========SERVERS============"; - print outfile, servers; - } else { - print outfile, "==========END OF EVENTS EVENTS==========="; - } - - try = try + 1; - - if ( try == 2 ) - system("touch got2"); - else if ( try == 4 ) - system("touch got4"); - else if ( try == 6 ) - system("touch got6"); - else if ( try == 8 ) - system("touch got8"); - else if ( try == 10 ) - { - print outfile, "done"; - close(outfile); - Input::remove("input"); - terminate(); - } - } diff --git a/testing/btest/core/leaks/input-reread.zeek b/testing/btest/core/leaks/input-reread.zeek new file mode 100644 index 0000000000..6621c14574 --- /dev/null +++ b/testing/btest/core/leaks/input-reread.zeek @@ -0,0 +1,164 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: cp input1.log input.log +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run zeek zeek -m -b %INPUT +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got2 60 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: cp input2.log input.log +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got4 10 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: cp input3.log input.log +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got6 10 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: cp input4.log input.log +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got8 10 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: cp input5.log input.log +# @TEST-EXEC: btest-bg-wait 120 + +@TEST-START-FILE input1.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve r.a r.b +#types bool int enum count port subnet addr double time interval string table table table vector vector string string +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fortytwo - +@TEST-END-FILE +@TEST-START-FILE input2.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve r.a r.b +#types bool int enum count port subnet addr double time interval string table table table vector vector string string +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fortytwo - +T -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fortythree 43 +@TEST-END-FILE +@TEST-START-FILE input3.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve r.a r.b +#types bool int enum count port subnet addr double time interval string table table table vector vector string string +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fortytwo - +F -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fortythree 43 +@TEST-END-FILE +@TEST-START-FILE input4.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve r.a r.b r.d +#types bool int enum count port subnet addr double time interval string table table table vector vector string string string +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fortytwo - - +F -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fortythree 43 - +F -44 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fortyfour - - +F -45 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fourtyfive - - +F -46 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fourtysix - - +F -47 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fourtyseven - - +F -48 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY fourtyeight 48 f +@TEST-END-FILE +@TEST-START-FILE input5.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve r.a r.b r.d +#types bool int enum count port subnet addr double time interval string table table table vector vector string string string +F -48 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY 
fourtyeight 48 f +@TEST-END-FILE + +@load base/protocols/ssh + +redef exit_only_after_terminate = T; +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Sub: record { + a: string; + b: string &optional; + c: string &optional; + d: string &optional; +}; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of int; + ve: vector of int; + r: Sub; +}; + +global servers: table[int] of Val = table(); + +global outfile: file; + +global try: count; + +event servers_ev(description: Input::EventDescription, tpe: Input::Event, item: Val) + { + print outfile, "============EVENT EVENT============"; + print outfile, item; + } + +event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) + { + print outfile, "============TABLE EVENT============"; + print outfile, "Left"; + print outfile, left; + print outfile, "Right"; + print outfile, right; + } + +event zeek_init() + { + outfile = open("../out"); + try = 0; + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $mode=Input::REREAD, $name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line, + $pred(typ: Input::Event, left: Idx, right: Val) = { + print outfile, "============PREDICATE============"; + print outfile, left; + print outfile, right; + return T; + } + ]); + Input::add_event([$source="../input.log", $mode=Input::REREAD, $name="sshevent", $fields=Val, $ev=servers_ev]); + } + + +event Input::end_of_data(name: string, source: string) + { + if ( name == "ssh" ) { + print outfile, "==========SERVERS============"; + print outfile, servers; + } else { + print outfile, "==========END OF EVENTS EVENTS==========="; + } + + try = try + 1; + + if ( try == 2 ) + system("touch got2"); + else if ( try == 4 ) + system("touch got4"); + else if ( try == 6 ) + system("touch got6"); + else if ( try == 8 ) + system("touch got8"); + else if ( try == 10 ) + { + print outfile, "done"; + close(outfile); + Input::remove("input"); + terminate(); + } + } diff --git a/testing/btest/core/leaks/input-sqlite.bro b/testing/btest/core/leaks/input-sqlite.bro deleted file mode 100644 index ae1df163c8..0000000000 --- a/testing/btest/core/leaks/input-sqlite.bro +++ /dev/null @@ -1,105 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# @TEST-REQUIRES: which sqlite3 -# -# @TEST-EXEC: cat conn.sql | sqlite3 conn.sqlite -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run bro bro -m -b %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -@TEST-START-FILE conn.sql -PRAGMA foreign_keys=OFF; -BEGIN TRANSACTION; -CREATE TABLE conn ( -'ts' double precision, -'uid' text, -'id.orig_h' text, -'id.orig_p' integer, -'id.resp_h' text, -'id.resp_p' integer, -'proto' text, -'service' text, -'duration' double precision, -'orig_bytes' integer, -'resp_bytes' integer, -'conn_state' text, -'local_orig' boolean, -'local_resp' boolean, -'missed_bytes' integer, -'history' text, -'orig_pkts' integer, -'orig_ip_bytes' integer, -'resp_pkts' integer, -'resp_ip_bytes' integer, -'tunnel_parents' text -); -INSERT INTO "conn" VALUES(1.30047516709653496744e+09,'dnGM1AdIVyh','141.142.220.202',5353,'224.0.0.251',5353,'udp','dns',NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,73,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516709701204296e+09,'fv9q7WjEgp1','fe80::217:f2ff:fed7:cf65',5353,'ff02::fb',5353,'udp',NULL,NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,199,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516709981608392e+09,'0Ox0H56yl88','141.142.220.50',5353,'224.0.0.251',5353,'udp',NULL,NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,179,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516885389900212e+09,'rvmSc7rDQub','141.142.220.118',43927,'141.142.2.2',53,'udp','dns',4.351139068603515625e-04,38,89,'SF',NULL,NULL,0,'Dd',1,66,1,117,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516885437798497e+09,'ogkztouSArh','141.142.220.118',37676,'141.142.2.2',53,'udp','dns',4.20093536376953125e-04,52,99,'SF',NULL,NULL,0,'Dd',1,80,1,127,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516885483694076e+09,'0UIDdXFt7Tb','141.142.220.118',40526,'141.142.2.2',53,'udp','dns',3.9196014404296875e-04,38,183,'SF',NULL,NULL,0,'Dd',1,66,1,211,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516885795593258e+09,'WqFYV51UIq7','141.142.220.118',32902,'141.142.2.2',53,'udp','dns',3.17096710205078125e-04,38,89,'SF',NULL,NULL,0,'Dd',1,66,1,117,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516885830593104e+09,'ylcqZpbz6K2','141.142.220.118',59816,'141.142.2.2',53,'udp','dns',3.430843353271484375e-04,52,99,'SF',NULL,NULL,0,'Dd',1,80,1,127,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516885871291159e+09,'blhldTzA7Y6','141.142.220.118',59714,'141.142.2.2',53,'udp','dns',3.750324249267578125e-04,38,183,'SF',NULL,NULL,0,'Dd',1,66,1,211,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516889164400098e+09,'Sc34cGJo3Kg','141.142.220.118',58206,'141.142.2.2',53,'udp','dns',3.39031219482421875e-04,38,89,'SF',NULL,NULL,0,'Dd',1,66,1,117,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516889203691487e+09,'RzvFrfXSRfk','141.142.220.118',38911,'141.142.2.2',53,'udp','dns',3.349781036376953125e-04,52,99,'SF',NULL,NULL,0,'Dd',1,80,1,127,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516889241409298e+09,'GaaFI58mpbe','141.142.220.118',59746,'141.142.2.2',53,'udp','dns',4.208087921142578125e-04,38,183,'SF',NULL,NULL,0,'Dd',1,66,1,211,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516889398789407e+09,'tr7M6tvAIQa','141.142.220.118',45000,'141.142.2.2',53,'udp','dns',3.840923309326171875e-04,38,89,'SF',NULL,NULL,0,'Dd',1,66,1,117,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516889442205426e+09,'gV0TcSc2pb4','141.142.220.118',48479,'141.142.2.2',53,'udp','dns',3.168582916259765625e-04,52,99,'SF',NULL,NULL,0,'Dd',1,80,1,127,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516889478707315e+09,'MOG0z4PYOhk','141.142.220.118',48128,'141.142.2.2',53,'udp','dns',4.22954559326171875e-04,38,183,'SF',NULL,NULL,0,'Dd',1,66,1,211,'(empty)'); -INSERT INTO 
"conn" VALUES(1.30047516890174889565e+09,'PlehgEduUyj','141.142.220.118',56056,'141.142.2.2',53,'udp','dns',4.022121429443359375e-04,36,131,'SF',NULL,NULL,0,'Dd',1,64,1,159,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516890219497676e+09,'4eZgk09f2Re','141.142.220.118',55092,'141.142.2.2',53,'udp','dns',3.740787506103515625e-04,36,198,'SF',NULL,NULL,0,'Dd',1,64,1,226,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516989943790432e+09,'3xwJPc7mQ9a','141.142.220.44',5353,'224.0.0.251',5353,'udp','dns',NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,85,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047517086238408089e+09,'yxTcvvTKWQ4','141.142.220.226',137,'141.142.220.255',137,'udp','dns',2.61301684379577636718e+00,350,0,'S0',NULL,NULL,0,'D',7,546,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047517167537188525e+09,'8bLW3XNfhCj','fe80::3074:17d5:2052:c324',65373,'ff02::1:3',5355,'udp','dns',1.00096225738525390625e-01,66,0,'S0',NULL,NULL,0,'D',2,162,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047517167708110807e+09,'rqjhiiRPjEe','141.142.220.226',55131,'224.0.0.252',5355,'udp','dns',1.00020885467529296875e-01,66,0,'S0',NULL,NULL,0,'D',2,122,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047517311674904827e+09,'hTPyfL3QSGa','fe80::3074:17d5:2052:c324',54213,'ff02::1:3',5355,'udp','dns',9.980106353759765625e-02,66,0,'S0',NULL,NULL,0,'D',2,162,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047517311736202235e+09,'EruUQ9AJRj4','141.142.220.226',55671,'224.0.0.252',5355,'udp','dns',9.98489856719970703125e-02,66,0,'S0',NULL,NULL,0,'D',2,122,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047517315367889406e+09,'sw1bKJOMjuk','141.142.220.238',56641,'141.142.220.255',137,'udp','dns',NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,78,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516872400689127e+09,'NPHCuyWykE7','141.142.220.118',48649,'208.80.152.118',80,'tcp','http',1.19904994964599609375e-01,525,232,'S1',NULL,NULL,0,'ShADad',4,741,3,396,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516889293599126e+09,'VapPqRhPgJ4','141.142.220.118',50000,'208.80.152.3',80,'tcp','http',2.29603052139282226562e-01,1148,734,'S1',NULL,NULL,0,'ShADad',6,1468,4,950,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516885916304588e+09,'3607hh8C3bc','141.142.220.118',49998,'208.80.152.3',80,'tcp','http',2.15893030166625976562e-01,1130,734,'S1',NULL,NULL,0,'ShADad',6,1450,4,950,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516885530495647e+09,'tgYMrIvzDSg','141.142.220.118',49996,'208.80.152.3',80,'tcp','http',2.1850109100341796875e-01,1171,733,'S1',NULL,NULL,0,'ShADad',6,1491,4,949,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516889526700977e+09,'xQsjPwNBrXd','141.142.220.118',50001,'208.80.152.3',80,'tcp','http',2.27283954620361328125e-01,1178,734,'S1',NULL,NULL,0,'ShADad',6,1498,4,950,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516890263509747e+09,'Ap3GzMI1vM9','141.142.220.118',35642,'208.80.152.2',80,'tcp','http',1.200408935546875e-01,534,412,'S1',NULL,NULL,0,'ShADad',4,750,3,576,'(empty)'); -INSERT INTO "conn" VALUES(1300475168.85533,'FTVcgrmNy52','141.142.220.118',49997,'208.80.152.3',80,'tcp','http',2.19720125198364257812e-01,1125,734,'S1',NULL,NULL,0,'ShADad',6,1445,4,950,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516978033089643e+09,'1xFx4PGdeq5','141.142.220.235',6705,'173.192.163.128',80,'tcp',NULL,NULL,NULL,NULL,'OTH',NULL,NULL,0,'h',0,0,1,48,'(empty)'); -INSERT INTO "conn" 
VALUES(1.3004751686520030498e+09,'WIG1ud65z22','141.142.220.118',35634,'208.80.152.2',80,'tcp',NULL,6.1328887939453125e-02,463,350,'OTH',NULL,NULL,0,'DdA',2,567,1,402,'(empty)'); -INSERT INTO "conn" VALUES(1.3004751688929131031e+09,'o2gAkl4V7sa','141.142.220.118',49999,'208.80.152.3',80,'tcp','http',2.20960855484008789062e-01,1137,733,'S1',NULL,NULL,0,'ShADad',6,1457,4,949,'(empty)'); -COMMIT; -@TEST-END-FILE - -@load base/protocols/conn - -redef exit_only_after_terminate = T; -redef Input::accept_unsupported_types = T; - -global outfile: file; - -module A; - -event line(description: Input::EventDescription, tpe: Input::Event, r: Conn::Info) - { - print outfile, r; - print outfile, |r$tunnel_parents|; # to make sure I got empty right - } - -event bro_init() - { - local config_strings: table[string] of string = { - ["query"] = "select * from conn;", - }; - - outfile = open("../out"); - Input::add_event([$source="../conn", $name="conn", $fields=Conn::Info, $ev=line, $want_record=T, $reader=Input::READER_SQLITE, $config=config_strings]); - } - -event Input::end_of_data(name: string, source:string) - { - print outfile, "End of data"; - close(outfile); - terminate(); - } diff --git a/testing/btest/core/leaks/input-sqlite.zeek b/testing/btest/core/leaks/input-sqlite.zeek new file mode 100644 index 0000000000..9606779c7b --- /dev/null +++ b/testing/btest/core/leaks/input-sqlite.zeek @@ -0,0 +1,105 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: which sqlite3 +# +# @TEST-EXEC: cat conn.sql | sqlite3 conn.sqlite +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -b %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +@TEST-START-FILE conn.sql +PRAGMA foreign_keys=OFF; +BEGIN TRANSACTION; +CREATE TABLE conn ( +'ts' double precision, +'uid' text, +'id.orig_h' text, +'id.orig_p' integer, +'id.resp_h' text, +'id.resp_p' integer, +'proto' text, +'service' text, +'duration' double precision, +'orig_bytes' integer, +'resp_bytes' integer, +'conn_state' text, +'local_orig' boolean, +'local_resp' boolean, +'missed_bytes' integer, +'history' text, +'orig_pkts' integer, +'orig_ip_bytes' integer, +'resp_pkts' integer, +'resp_ip_bytes' integer, +'tunnel_parents' text +); +INSERT INTO "conn" VALUES(1.30047516709653496744e+09,'dnGM1AdIVyh','141.142.220.202',5353,'224.0.0.251',5353,'udp','dns',NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,73,0,0,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516709701204296e+09,'fv9q7WjEgp1','fe80::217:f2ff:fed7:cf65',5353,'ff02::fb',5353,'udp',NULL,NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,199,0,0,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516709981608392e+09,'0Ox0H56yl88','141.142.220.50',5353,'224.0.0.251',5353,'udp',NULL,NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,179,0,0,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516885389900212e+09,'rvmSc7rDQub','141.142.220.118',43927,'141.142.2.2',53,'udp','dns',4.351139068603515625e-04,38,89,'SF',NULL,NULL,0,'Dd',1,66,1,117,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516885437798497e+09,'ogkztouSArh','141.142.220.118',37676,'141.142.2.2',53,'udp','dns',4.20093536376953125e-04,52,99,'SF',NULL,NULL,0,'Dd',1,80,1,127,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516885483694076e+09,'0UIDdXFt7Tb','141.142.220.118',40526,'141.142.2.2',53,'udp','dns',3.9196014404296875e-04,38,183,'SF',NULL,NULL,0,'Dd',1,66,1,211,'(empty)'); +INSERT INTO "conn" 
VALUES(1.30047516885795593258e+09,'WqFYV51UIq7','141.142.220.118',32902,'141.142.2.2',53,'udp','dns',3.17096710205078125e-04,38,89,'SF',NULL,NULL,0,'Dd',1,66,1,117,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516885830593104e+09,'ylcqZpbz6K2','141.142.220.118',59816,'141.142.2.2',53,'udp','dns',3.430843353271484375e-04,52,99,'SF',NULL,NULL,0,'Dd',1,80,1,127,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516885871291159e+09,'blhldTzA7Y6','141.142.220.118',59714,'141.142.2.2',53,'udp','dns',3.750324249267578125e-04,38,183,'SF',NULL,NULL,0,'Dd',1,66,1,211,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516889164400098e+09,'Sc34cGJo3Kg','141.142.220.118',58206,'141.142.2.2',53,'udp','dns',3.39031219482421875e-04,38,89,'SF',NULL,NULL,0,'Dd',1,66,1,117,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516889203691487e+09,'RzvFrfXSRfk','141.142.220.118',38911,'141.142.2.2',53,'udp','dns',3.349781036376953125e-04,52,99,'SF',NULL,NULL,0,'Dd',1,80,1,127,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516889241409298e+09,'GaaFI58mpbe','141.142.220.118',59746,'141.142.2.2',53,'udp','dns',4.208087921142578125e-04,38,183,'SF',NULL,NULL,0,'Dd',1,66,1,211,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516889398789407e+09,'tr7M6tvAIQa','141.142.220.118',45000,'141.142.2.2',53,'udp','dns',3.840923309326171875e-04,38,89,'SF',NULL,NULL,0,'Dd',1,66,1,117,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516889442205426e+09,'gV0TcSc2pb4','141.142.220.118',48479,'141.142.2.2',53,'udp','dns',3.168582916259765625e-04,52,99,'SF',NULL,NULL,0,'Dd',1,80,1,127,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516889478707315e+09,'MOG0z4PYOhk','141.142.220.118',48128,'141.142.2.2',53,'udp','dns',4.22954559326171875e-04,38,183,'SF',NULL,NULL,0,'Dd',1,66,1,211,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516890174889565e+09,'PlehgEduUyj','141.142.220.118',56056,'141.142.2.2',53,'udp','dns',4.022121429443359375e-04,36,131,'SF',NULL,NULL,0,'Dd',1,64,1,159,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516890219497676e+09,'4eZgk09f2Re','141.142.220.118',55092,'141.142.2.2',53,'udp','dns',3.740787506103515625e-04,36,198,'SF',NULL,NULL,0,'Dd',1,64,1,226,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516989943790432e+09,'3xwJPc7mQ9a','141.142.220.44',5353,'224.0.0.251',5353,'udp','dns',NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,85,0,0,'(empty)'); +INSERT INTO "conn" VALUES(1.30047517086238408089e+09,'yxTcvvTKWQ4','141.142.220.226',137,'141.142.220.255',137,'udp','dns',2.61301684379577636718e+00,350,0,'S0',NULL,NULL,0,'D',7,546,0,0,'(empty)'); +INSERT INTO "conn" VALUES(1.30047517167537188525e+09,'8bLW3XNfhCj','fe80::3074:17d5:2052:c324',65373,'ff02::1:3',5355,'udp','dns',1.00096225738525390625e-01,66,0,'S0',NULL,NULL,0,'D',2,162,0,0,'(empty)'); +INSERT INTO "conn" VALUES(1.30047517167708110807e+09,'rqjhiiRPjEe','141.142.220.226',55131,'224.0.0.252',5355,'udp','dns',1.00020885467529296875e-01,66,0,'S0',NULL,NULL,0,'D',2,122,0,0,'(empty)'); +INSERT INTO "conn" VALUES(1.30047517311674904827e+09,'hTPyfL3QSGa','fe80::3074:17d5:2052:c324',54213,'ff02::1:3',5355,'udp','dns',9.980106353759765625e-02,66,0,'S0',NULL,NULL,0,'D',2,162,0,0,'(empty)'); +INSERT INTO "conn" VALUES(1.30047517311736202235e+09,'EruUQ9AJRj4','141.142.220.226',55671,'224.0.0.252',5355,'udp','dns',9.98489856719970703125e-02,66,0,'S0',NULL,NULL,0,'D',2,122,0,0,'(empty)'); +INSERT INTO "conn" VALUES(1.30047517315367889406e+09,'sw1bKJOMjuk','141.142.220.238',56641,'141.142.220.255',137,'udp','dns',NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,78,0,0,'(empty)'); +INSERT INTO "conn" 
VALUES(1.30047516872400689127e+09,'NPHCuyWykE7','141.142.220.118',48649,'208.80.152.118',80,'tcp','http',1.19904994964599609375e-01,525,232,'S1',NULL,NULL,0,'ShADad',4,741,3,396,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516889293599126e+09,'VapPqRhPgJ4','141.142.220.118',50000,'208.80.152.3',80,'tcp','http',2.29603052139282226562e-01,1148,734,'S1',NULL,NULL,0,'ShADad',6,1468,4,950,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516885916304588e+09,'3607hh8C3bc','141.142.220.118',49998,'208.80.152.3',80,'tcp','http',2.15893030166625976562e-01,1130,734,'S1',NULL,NULL,0,'ShADad',6,1450,4,950,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516885530495647e+09,'tgYMrIvzDSg','141.142.220.118',49996,'208.80.152.3',80,'tcp','http',2.1850109100341796875e-01,1171,733,'S1',NULL,NULL,0,'ShADad',6,1491,4,949,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516889526700977e+09,'xQsjPwNBrXd','141.142.220.118',50001,'208.80.152.3',80,'tcp','http',2.27283954620361328125e-01,1178,734,'S1',NULL,NULL,0,'ShADad',6,1498,4,950,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516890263509747e+09,'Ap3GzMI1vM9','141.142.220.118',35642,'208.80.152.2',80,'tcp','http',1.200408935546875e-01,534,412,'S1',NULL,NULL,0,'ShADad',4,750,3,576,'(empty)'); +INSERT INTO "conn" VALUES(1300475168.85533,'FTVcgrmNy52','141.142.220.118',49997,'208.80.152.3',80,'tcp','http',2.19720125198364257812e-01,1125,734,'S1',NULL,NULL,0,'ShADad',6,1445,4,950,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516978033089643e+09,'1xFx4PGdeq5','141.142.220.235',6705,'173.192.163.128',80,'tcp',NULL,NULL,NULL,NULL,'OTH',NULL,NULL,0,'h',0,0,1,48,'(empty)'); +INSERT INTO "conn" VALUES(1.3004751686520030498e+09,'WIG1ud65z22','141.142.220.118',35634,'208.80.152.2',80,'tcp',NULL,6.1328887939453125e-02,463,350,'OTH',NULL,NULL,0,'DdA',2,567,1,402,'(empty)'); +INSERT INTO "conn" VALUES(1.3004751688929131031e+09,'o2gAkl4V7sa','141.142.220.118',49999,'208.80.152.3',80,'tcp','http',2.20960855484008789062e-01,1137,733,'S1',NULL,NULL,0,'ShADad',6,1457,4,949,'(empty)'); +COMMIT; +@TEST-END-FILE + +@load base/protocols/conn + +redef exit_only_after_terminate = T; +redef Input::accept_unsupported_types = T; + +global outfile: file; + +module A; + +event line(description: Input::EventDescription, tpe: Input::Event, r: Conn::Info) + { + print outfile, r; + print outfile, |r$tunnel_parents|; # to make sure I got empty right + } + +event zeek_init() + { + local config_strings: table[string] of string = { + ["query"] = "select * from conn;", + }; + + outfile = open("../out"); + Input::add_event([$source="../conn", $name="conn", $fields=Conn::Info, $ev=line, $want_record=T, $reader=Input::READER_SQLITE, $config=config_strings]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, "End of data"; + close(outfile); + terminate(); + } diff --git a/testing/btest/core/leaks/input-with-remove.bro b/testing/btest/core/leaks/input-with-remove.bro deleted file mode 100644 index ba58d7b2f6..0000000000 --- a/testing/btest/core/leaks/input-with-remove.bro +++ /dev/null @@ -1,63 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run bro bro -b -m -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -@load base/frameworks/input - -redef exit_only_after_terminate = T; - -global c: count = 0; - - -type OneLine: record { - s: string; -}; - -event line(description: Input::EventDescription, tpe: Input::Event, s: string) - { - print "1", "Line"; - } - -event InputRaw::process_finished(name: string, source:string, exit_code:count, signal_exit:bool) - { - Input::remove(name); - print "2", name; - } - -function run(): count - { - Input::add_event([$name=unique_id(""), - $source=fmt("%s |", "date"), - $reader=Input::READER_RAW, - $mode=Input::STREAM, - $fields=OneLine, - $ev=line, - $want_record=F]); - - return 1; - } - - -event do() - { - run(); - } - -event do_term() { - terminate(); -} - -event bro_init() { - schedule 1sec { - do() - }; - schedule 3sec { - do_term() - }; -} - diff --git a/testing/btest/core/leaks/input-with-remove.zeek b/testing/btest/core/leaks/input-with-remove.zeek new file mode 100644 index 0000000000..2a55c8a3fa --- /dev/null +++ b/testing/btest/core/leaks/input-with-remove.zeek @@ -0,0 +1,63 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -b -m -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +@load base/frameworks/input + +redef exit_only_after_terminate = T; + +global c: count = 0; + + +type OneLine: record { + s: string; +}; + +event line(description: Input::EventDescription, tpe: Input::Event, s: string) + { + print "1", "Line"; + } + +event InputRaw::process_finished(name: string, source:string, exit_code:count, signal_exit:bool) + { + Input::remove(name); + print "2", name; + } + +function run(): count + { + Input::add_event([$name=unique_id(""), + $source=fmt("%s |", "date"), + $reader=Input::READER_RAW, + $mode=Input::STREAM, + $fields=OneLine, + $ev=line, + $want_record=F]); + + return 1; + } + + +event do() + { + run(); + } + +event do_term() { + terminate(); +} + +event zeek_init() { + schedule 1sec { + do() + }; + schedule 3sec { + do_term() + }; +} + diff --git a/testing/btest/core/leaks/ip-in-ip.test b/testing/btest/core/leaks/ip-in-ip.test index 3ceae55d49..41cc6a7724 100644 --- a/testing/btest/core/leaks/ip-in-ip.test +++ b/testing/btest/core/leaks/ip-in-ip.test @@ -1,12 +1,12 @@ # Needs perftools support. # -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # # @TEST-GROUP: leaks # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro1 bro -m -b -r $TRACES/tunnels/6in6.pcap %INPUT -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro2 bro -m -b -r $TRACES/tunnels/6in6in6.pcap %INPUT -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro3 bro -m -b -r $TRACES/tunnels/6in6-tunnel-change.pcap %INPUT +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek1 zeek -m -b -r $TRACES/tunnels/6in6.pcap %INPUT +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek2 zeek -m -b -r $TRACES/tunnels/6in6in6.pcap %INPUT +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run zeek3 zeek -m -b -r $TRACES/tunnels/6in6-tunnel-change.pcap %INPUT # @TEST-EXEC: btest-bg-wait 60 event new_connection(c: connection) diff --git a/testing/btest/core/leaks/ipv6_ext_headers.test b/testing/btest/core/leaks/ipv6_ext_headers.test index 3b6f8d467c..84ad8e69a8 100644 --- a/testing/btest/core/leaks/ipv6_ext_headers.test +++ b/testing/btest/core/leaks/ipv6_ext_headers.test @@ -2,9 +2,9 @@ # # @TEST-GROUP: leaks # -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -b -r $TRACES/ipv6-hbh-routing0.trace %INPUT +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -b -r $TRACES/ipv6-hbh-routing0.trace %INPUT # @TEST-EXEC: btest-bg-wait 60 # Just check that the event is raised correctly for a packet containing diff --git a/testing/btest/core/leaks/irc.test b/testing/btest/core/leaks/irc.test index 7b2ac389d4..7b3130a553 100644 --- a/testing/btest/core/leaks/irc.test +++ b/testing/btest/core/leaks/irc.test @@ -2,9 +2,9 @@ # # @TEST-GROUP: leaks # -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -r $TRACES/irc-dcc-send.trace %INPUT +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -r $TRACES/irc-dcc-send.trace %INPUT # @TEST-EXEC: btest-bg-wait 60 event irc_names_info(c: connection, is_orig: bool, c_type: string, channel: string, users: string_set) diff --git a/testing/btest/core/leaks/krb-service-name.test b/testing/btest/core/leaks/krb-service-name.test index a0d8a84322..5b07a48633 100644 --- a/testing/btest/core/leaks/krb-service-name.test +++ b/testing/btest/core/leaks/krb-service-name.test @@ -1,8 +1,8 @@ # Needs perftools support. # -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # # @TEST-GROUP: leaks # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -r $TRACES/krb/optional-service-name.pcap +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -r $TRACES/krb/optional-service-name.pcap # @TEST-EXEC: btest-bg-wait 60 diff --git a/testing/btest/core/leaks/krb.test b/testing/btest/core/leaks/krb.test index 7bfb7a550d..a16711b850 100644 --- a/testing/btest/core/leaks/krb.test +++ b/testing/btest/core/leaks/krb.test @@ -1,10 +1,10 @@ # Needs perftools support. # -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # # @TEST-GROUP: leaks # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -b -m -r $TRACES/krb/kinit.trace %INPUT +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -b -m -r $TRACES/krb/kinit.trace %INPUT # @TEST-EXEC: btest-bg-wait 30 @load base/protocols/krb \ No newline at end of file diff --git a/testing/btest/core/leaks/kv-iteration.bro b/testing/btest/core/leaks/kv-iteration.bro deleted file mode 100644 index 5c7a9f1f62..0000000000 --- a/testing/btest/core/leaks/kv-iteration.bro +++ /dev/null @@ -1,22 +0,0 @@ -# @TEST-GROUP: leaks -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks - -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run bro bro -m -b -r $TRACES/http/get.trace %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -event new_connection(c: connection) - { - local t: table[count] of string = table(); - t[1] = "hello"; - t[55] = "goodbye"; - - for (key, value in t) - print key, value; - - local tkk: table[string, string] of count = table(); - tkk["hello", "world"] = 1; - tkk["goodbye", "world"] = 55; - - for ([k1, k2], val in tkk) - print k1, k2, val; - } diff --git a/testing/btest/core/leaks/kv-iteration.zeek b/testing/btest/core/leaks/kv-iteration.zeek new file mode 100644 index 0000000000..7496698e42 --- /dev/null +++ b/testing/btest/core/leaks/kv-iteration.zeek @@ -0,0 +1,22 @@ +# @TEST-GROUP: leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks + +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -b -r $TRACES/http/get.trace %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +event new_connection(c: connection) + { + local t: table[count] of string = table(); + t[1] = "hello"; + t[55] = "goodbye"; + + for (key, value in t) + print key, value; + + local tkk: table[string, string] of count = table(); + tkk["hello", "world"] = 1; + tkk["goodbye", "world"] = 55; + + for ([k1, k2], val in tkk) + print k1, k2, val; + } diff --git a/testing/btest/core/leaks/mysql.test b/testing/btest/core/leaks/mysql.test index 2e9ec6990f..07f3239885 100644 --- a/testing/btest/core/leaks/mysql.test +++ b/testing/btest/core/leaks/mysql.test @@ -1,10 +1,10 @@ # Needs perftools support. # -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # # @TEST-GROUP: leaks # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -b -m -r $TRACES/mysql/mysql.trace %INPUT +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -b -m -r $TRACES/mysql/mysql.trace %INPUT # @TEST-EXEC: btest-bg-wait 60 @load base/protocols/mysql diff --git a/testing/btest/core/leaks/paraglob.zeek b/testing/btest/core/leaks/paraglob.zeek new file mode 100644 index 0000000000..aac8c87038 --- /dev/null +++ b/testing/btest/core/leaks/paraglob.zeek @@ -0,0 +1,34 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run zeek zeek -m -b -r $TRACES/http/get.trace %INPUT +# @TEST-EXEC: btest-bg-wait 120 + +event new_connection (c : connection) +{ + local v1 = vector("*", "d?g", "*og", "d?", "d[!wl]g"); + local v2 = vector("once", "!o*", "once"); + local v3 = vector("https://*.google.com/*", "*malware*", "*.gov*"); + + local p1 = paraglob_init(v1); + local p2: opaque of paraglob = paraglob_init(v2); + local p3 = paraglob_init(v3); + local p_eq = paraglob_init(v1); + + # paraglob_init should not modify v1 + print (v1 == vector("*", "d?g", "*og", "d?", "d[!wl]g")); + # p_eq and p1 should be the same paraglobs + print paraglob_equals(p_eq, p1); + + print paraglob_match(p1, "dog"); + + + print paraglob_match(p2, "once"); + print paraglob_match(p3, "www.strange-malware-domain.gov"); + + local large_glob: opaque of paraglob = paraglob_init(v3); + print paraglob_match(large_glob, "www.strange-malware-domain.gov"); +} diff --git a/testing/btest/core/leaks/pattern.bro b/testing/btest/core/leaks/pattern.bro deleted file mode 100644 index f48a8f28bd..0000000000 --- a/testing/btest/core/leaks/pattern.bro +++ /dev/null @@ -1,67 +0,0 @@ -# @TEST-GROUP: leaks -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks - -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -b -r $TRACES/http/get.trace %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - -event new_connection(c: connection) - { - print "new connection"; - - local p1: pattern = /foo|bar/; - local p2: pattern = /oob/; - local p3: pattern = /^oob/; - local p4 = /foo/; - - # Type inference tests - - test_case( "type inference", type_name(p4) == "pattern" ); - - # Operator tests - - test_case( "equality operator", "foo" == p1 ); - test_case( "equality operator (order of operands)", p1 == "foo" ); - test_case( "inequality operator", "foobar" != p1 ); - test_case( "inequality operator (order of operands)", p1 != "foobar" ); - test_case( "in operator", p1 in "foobar" ); - test_case( "in operator", p2 in "foobar" ); - test_case( "!in operator", p3 !in "foobar" ); - test_case( "& operator", p1 & p2 in "baroob" ); - test_case( "& operator", p2 & p1 in "baroob" ); - test_case( "| operator", p1 | p2 in "lazybarlazy" ); - test_case( "| operator", p3 | p4 in "xoob" ); - - test_case( "/i pattern modifier", /fOO/i in "xFoObar" ); - test_case( "/i pattern modifier", /fOO/i == "Foo" ); - - test_case( "/i double-quote escape", /"fOO"/i in "xFoObar" ); - test_case( "/i double-quote escape", /"fOO"/i in "xfOObar" ); - - test_case( "case-sensitive pattern", /fOO/ in "xFoObar" ); - test_case( "case-sensitive pattern", /fOO/ == "Foo" ); - test_case( "case-sensitive pattern", /fOO/ == "fOO" ); - - test_case( "/i pattern disjunction", /bar/i | /bez/ == "bez" ); - test_case( "/i pattern disjunction", /bar/i | /bez/ == "bEz" ); - test_case( "/i pattern disjunction", /bar/i | /bez/ == "bar" ); - test_case( "/i pattern disjunction", /bar/i | /bez/ == "bAr" ); - - test_case( "/i pattern concatenation", /bar/i & /bez/ == "barbez" ); - test_case( "/i pattern concatenation", /bar/i & /bez/ == "barbEz" ); - test_case( "/i pattern concatenation", /BAR/i & /bez/ == "barbEz" ); - test_case( "/i pattern concatenation", /bar/i & /bez/ == "bArbez" ); - test_case( "/i pattern concatenation", /BAR/i & /bez/ == "bArbez" ); - test_case( "/i pattern concatenation", /bar/i & /bez/ == "bArbEz" ); - - test_case( "/i pattern character class", 
/ba[0a-c99S-Z0]/i & /bEz/ == "bArbEz" ); - test_case( "/i pattern character class", /ba[0a-c99M-S0]/i & /bEz/ == "bArbEz" ); - - test_case( "(?i:...) pattern construct", /foo|(?i:bar)/ in "xBAry" ); - test_case( "(?i:...) pattern construct", /foo|(?i:bar)/ in "xFOoy" ); - test_case( "(?i:...) pattern construct", /foo|(?i:bar)/ | /foo/i in "xFOoy" ); - } diff --git a/testing/btest/core/leaks/pattern.zeek b/testing/btest/core/leaks/pattern.zeek new file mode 100644 index 0000000000..e223e64b57 --- /dev/null +++ b/testing/btest/core/leaks/pattern.zeek @@ -0,0 +1,67 @@ +# @TEST-GROUP: leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks + +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -b -r $TRACES/http/get.trace %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + +event new_connection(c: connection) + { + print "new connection"; + + local p1: pattern = /foo|bar/; + local p2: pattern = /oob/; + local p3: pattern = /^oob/; + local p4 = /foo/; + + # Type inference tests + + test_case( "type inference", type_name(p4) == "pattern" ); + + # Operator tests + + test_case( "equality operator", "foo" == p1 ); + test_case( "equality operator (order of operands)", p1 == "foo" ); + test_case( "inequality operator", "foobar" != p1 ); + test_case( "inequality operator (order of operands)", p1 != "foobar" ); + test_case( "in operator", p1 in "foobar" ); + test_case( "in operator", p2 in "foobar" ); + test_case( "!in operator", p3 !in "foobar" ); + test_case( "& operator", p1 & p2 in "baroob" ); + test_case( "& operator", p2 & p1 in "baroob" ); + test_case( "| operator", p1 | p2 in "lazybarlazy" ); + test_case( "| operator", p3 | p4 in "xoob" ); + + test_case( "/i pattern modifier", /fOO/i in "xFoObar" ); + test_case( "/i pattern modifier", /fOO/i == "Foo" ); + + test_case( "/i double-quote escape", /"fOO"/i in "xFoObar" ); + test_case( "/i double-quote escape", /"fOO"/i in "xfOObar" ); + + test_case( "case-sensitive pattern", /fOO/ in "xFoObar" ); + test_case( "case-sensitive pattern", /fOO/ == "Foo" ); + test_case( "case-sensitive pattern", /fOO/ == "fOO" ); + + test_case( "/i pattern disjunction", /bar/i | /bez/ == "bez" ); + test_case( "/i pattern disjunction", /bar/i | /bez/ == "bEz" ); + test_case( "/i pattern disjunction", /bar/i | /bez/ == "bar" ); + test_case( "/i pattern disjunction", /bar/i | /bez/ == "bAr" ); + + test_case( "/i pattern concatenation", /bar/i & /bez/ == "barbez" ); + test_case( "/i pattern concatenation", /bar/i & /bez/ == "barbEz" ); + test_case( "/i pattern concatenation", /BAR/i & /bez/ == "barbEz" ); + test_case( "/i pattern concatenation", /bar/i & /bez/ == "bArbez" ); + test_case( "/i pattern concatenation", /BAR/i & /bez/ == "bArbez" ); + test_case( "/i pattern concatenation", /bar/i & /bez/ == "bArbEz" ); + + test_case( "/i pattern character class", /ba[0a-c99S-Z0]/i & /bEz/ == "bArbEz" ); + test_case( "/i pattern character class", /ba[0a-c99M-S0]/i & /bEz/ == "bArbEz" ); + + test_case( "(?i:...) pattern construct", /foo|(?i:bar)/ in "xBAry" ); + test_case( "(?i:...) pattern construct", /foo|(?i:bar)/ in "xFOoy" ); + test_case( "(?i:...) 
pattern construct", /foo|(?i:bar)/ | /foo/i in "xFOoy" ); + } diff --git a/testing/btest/core/leaks/pe.test b/testing/btest/core/leaks/pe.test index d951cdbd47..3ff64b587f 100644 --- a/testing/btest/core/leaks/pe.test +++ b/testing/btest/core/leaks/pe.test @@ -2,9 +2,9 @@ # # @TEST-GROUP: leaks # -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -b -m -r $TRACES/pe/pe.trace %INPUT +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -b -m -r $TRACES/pe/pe.trace %INPUT # @TEST-EXEC: btest-bg-wait 60 @load base/protocols/ftp diff --git a/testing/btest/core/leaks/radius.test b/testing/btest/core/leaks/radius.test index 228973c47e..e6d1d66bea 100644 --- a/testing/btest/core/leaks/radius.test +++ b/testing/btest/core/leaks/radius.test @@ -1,10 +1,10 @@ # Needs perftools support. # -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # # @TEST-GROUP: leaks # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -b -m -r $TRACES/radius/radius.trace %INPUT +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -b -m -r $TRACES/radius/radius.trace %INPUT # @TEST-EXEC: btest-bg-wait 60 @load base/protocols/radius diff --git a/testing/btest/core/leaks/returnwhen.bro b/testing/btest/core/leaks/returnwhen.bro deleted file mode 100644 index f5160ef250..0000000000 --- a/testing/btest/core/leaks/returnwhen.bro +++ /dev/null @@ -1,84 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: btest-bg-run bro HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m -b %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -redef exit_only_after_terminate = T; - -global my_set: set[string] = set(); -global flag: string = "flag"; -global done: bool = F; - -function dummyfunc(s: string): string - { - return "dummy " + s; - } - -function async_func(s: string): string - { - print dummyfunc("from async_func() " + s); - - return when ( flag in my_set ) - { - return flag + " in my_set"; - } - timeout 3sec - { - return "timeout"; - } - } - -event set_flag() - { - add my_set[flag]; - } - -event do_another() - { - delete my_set[flag]; - - local local_dummy = dummyfunc; - - local anon = function(s: string): string { return s + "!"; }; - - if ( ! done ) - schedule 1sec { set_flag() }; - - when ( local result = async_func("from do_another()") ) - { - print "async_func() return result in do_another()", result; - print local_dummy("from do_another() when block"); - print anon("hi"); - if ( result == "timeout" ) - terminate(); - else - { - done = T; - schedule 10msec { do_another() }; - } - } - } - -event bro_init() - { - local local_dummy = dummyfunc; - - local anon = function(s: string): string { return s + "!"; }; - - schedule 1sec { set_flag() }; - - when ( local result = async_func("from bro_init()") ) - { - print "async_func() return result in bro_init()", result; - print local_dummy("from bro_init() when block"); - print anon("hi"); - if ( result == "timeout" ) terminate(); - schedule 10msec { do_another() }; - } - } - - diff --git a/testing/btest/core/leaks/returnwhen.zeek b/testing/btest/core/leaks/returnwhen.zeek new file mode 100644 index 0000000000..689adf1256 --- /dev/null +++ b/testing/btest/core/leaks/returnwhen.zeek @@ -0,0 +1,84 @@ +# Needs perftools support. 
+# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: btest-bg-run zeek HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local zeek -m -b %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +redef exit_only_after_terminate = T; + +global my_set: set[string] = set(); +global flag: string = "flag"; +global done: bool = F; + +function dummyfunc(s: string): string + { + return "dummy " + s; + } + +function async_func(s: string): string + { + print dummyfunc("from async_func() " + s); + + return when ( flag in my_set ) + { + return flag + " in my_set"; + } + timeout 3sec + { + return "timeout"; + } + } + +event set_flag() + { + add my_set[flag]; + } + +event do_another() + { + delete my_set[flag]; + + local local_dummy = dummyfunc; + + local anon = function(s: string): string { return s + "!"; }; + + if ( ! done ) + schedule 1sec { set_flag() }; + + when ( local result = async_func("from do_another()") ) + { + print "async_func() return result in do_another()", result; + print local_dummy("from do_another() when block"); + print anon("hi"); + if ( result == "timeout" ) + terminate(); + else + { + done = T; + schedule 10msec { do_another() }; + } + } + } + +event zeek_init() + { + local local_dummy = dummyfunc; + + local anon = function(s: string): string { return s + "!"; }; + + schedule 1sec { set_flag() }; + + when ( local result = async_func("from zeek_init()") ) + { + print "async_func() return result in zeek_init()", result; + print local_dummy("from zeek_init() when block"); + print anon("hi"); + if ( result == "timeout" ) terminate(); + schedule 10msec { do_another() }; + } + } + + diff --git a/testing/btest/core/leaks/set.bro b/testing/btest/core/leaks/set.bro deleted file mode 100644 index b3f2200d28..0000000000 --- a/testing/btest/core/leaks/set.bro +++ /dev/null @@ -1,194 +0,0 @@ -# @TEST-GROUP: leaks -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks - -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -b -r $TRACES/http/get.trace %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); - } - -# Note: only global sets can be initialized with curly braces -global sg1: set[string] = { "curly", "braces" }; -global sg2: set[port, string, bool] = { [10/udp, "curly", F], - [11/udp, "braces", T] }; -global sg3 = { "more", "curly", "braces" }; - -global did_once = F; - -event new_connection(cc: connection) - { - if ( did_once ) - return; - - did_once = T; - - local s1: set[string] = set( "test", "example" ); - local s2: set[string] = set(); - local s3: set[string]; - local s4 = set( "type inference" ); - local s5: set[port, string, bool] = set( [1/tcp, "test", T], - [2/tcp, "example", F] ); - local s6: set[port, string, bool] = set(); - local s7: set[port, string, bool]; - local s8 = set( [8/tcp, "type inference", T] ); - - # Type inference tests - - test_case( "type inference", type_name(s4) == "set[string]" ); - test_case( "type inference", type_name(s8) == "set[port,string,bool]" ); - test_case( "type inference", type_name(sg3) == "set[string]" ); - - # Test the size of each set - - test_case( "cardinality", |s1| == 2 ); - test_case( "cardinality", |s2| == 0 ); - test_case( "cardinality", |s3| == 0 ); - test_case( "cardinality", |s4| == 1 ); - test_case( "cardinality", |s5| == 2 ); - test_case( "cardinality", |s6| == 0 ); - test_case( "cardinality", |s7| == 0 ); - test_case( "cardinality", |s8| == 1 ); - test_case( "cardinality", |sg1| == 2 ); - test_case( "cardinality", |sg2| == 2 ); - test_case( "cardinality", |sg3| == 3 ); - - # Test iterating over each set - - local ct: count; - ct = 0; - for ( c in s1 ) - { - if ( type_name(c) != "string" ) - print "Error: wrong set element type"; - ++ct; - } - test_case( "iterate over set", ct == 2 ); - - ct = 0; - for ( c in s2 ) - { - ++ct; - } - test_case( "iterate over set", ct == 0 ); - - ct = 0; - for ( [c1,c2,c3] in s5 ) - { - ++ct; - } - test_case( "iterate over set", ct == 2 ); - - ct = 0; - for ( [c1,c2,c3] in sg2 ) - { - ++ct; - } - test_case( "iterate over set", ct == 2 ); - - # Test adding elements to each set (Note: cannot add elements to sets - # of multiple types) - - add s1["added"]; - add s1["added"]; # element already exists (nothing happens) - test_case( "add element", |s1| == 3 ); - test_case( "in operator", "added" in s1 ); - - add s2["another"]; - test_case( "add element", |s2| == 1 ); - add s2["test"]; - test_case( "add element", |s2| == 2 ); - test_case( "in operator", "another" in s2 ); - test_case( "in operator", "test" in s2 ); - - add s3["foo"]; - test_case( "add element", |s3| == 1 ); - test_case( "in operator", "foo" in s3 ); - - add s4["local"]; - test_case( "add element", |s4| == 2 ); - test_case( "in operator", "local" in s4 ); - - add sg1["global"]; - test_case( "add element", |sg1| == 3 ); - test_case( "in operator", "global" in sg1 ); - - add sg3["more global"]; - test_case( "add element", |sg3| == 4 ); - test_case( "in operator", "more global" in sg3 ); - - # Test removing elements from each set (Note: cannot remove elements - # from sets of multiple types) - - delete s1["test"]; - delete s1["foobar"]; # element does not exist (nothing happens) - test_case( "remove element", |s1| == 2 ); - test_case( "!in operator", "test" !in s1 ); - - delete s2["test"]; - test_case( "remove element", |s2| == 1 ); - test_case( "!in operator", "test" !in s2 ); - - delete s3["foo"]; - test_case( "remove element", |s3| == 0 ); - test_case( "!in operator", "foo" !in s3 ); - - delete s4["type inference"]; - test_case( "remove element", |s4| == 1 ); - test_case( "!in operator", "type inference" !in s4 ); 
- - delete sg1["braces"]; - test_case( "remove element", |sg1| == 2 ); - test_case( "!in operator", "braces" !in sg1 ); - - delete sg3["curly"]; - test_case( "remove element", |sg3| == 3 ); - test_case( "!in operator", "curly" !in sg3 ); - - - local a = set(1,5,7,9,8,14); - local b = set(1,7,9,2); - - local a_plus_b = set(1,2,5,7,9,8,14); - local a_also_b = set(1,7,9); - local a_sans_b = set(5,8,14); - local b_sans_a = set(2); - - local a_or_b = a | b; - local a_and_b = a & b; - - test_case( "union", a_or_b == a_plus_b ); - test_case( "intersection", a_and_b == a_plus_b ); - test_case( "difference", a - b == a_sans_b ); - test_case( "difference", b - a == b_sans_a ); - - test_case( "union/inter.", |b & set(1,7,9,2)| == |b | set(1,7,2,9)| ); - test_case( "relational", |b & a_or_b| == |b| && |b| < |a_or_b| ); - test_case( "relational", b < a_or_b && a < a_or_b && a_or_b > a_and_b ); - - test_case( "subset", b < a ); - test_case( "subset", a < b ); - test_case( "subset", b < (a | set(2)) ); - test_case( "superset", b > a ); - test_case( "superset", b > (a | set(2)) ); - test_case( "superset", b | set(8, 14, 5) > (a | set(2)) ); - test_case( "superset", b | set(8, 14, 99, 5) > (a | set(2)) ); - - test_case( "non-ordering", (a <= b) || (a >= b) ); - test_case( "non-ordering", (a <= a_or_b) && (a_or_b >= b) ); - - test_case( "superset", (b | set(14, 5)) > a - set(8) ); - test_case( "superset", (b | set(14)) > a - set(8) ); - test_case( "superset", (b | set(14)) > a - set(8,5) ); - test_case( "superset", b >= a - set(5,8,14) ); - test_case( "superset", b > a - set(5,8,14) ); - test_case( "superset", (b - set(2)) > a - set(5,8,14) ); - test_case( "equality", a == a | set(5) ); - test_case( "equality", a == a | set(5,11) ); - test_case( "non-equality", a != a | set(5,11) ); - test_case( "equality", a == a | set(5,11) ); - - test_case( "magnitude", |a_and_b| == |a_or_b|); - } - diff --git a/testing/btest/core/leaks/set.zeek b/testing/btest/core/leaks/set.zeek new file mode 100644 index 0000000000..a902fe9797 --- /dev/null +++ b/testing/btest/core/leaks/set.zeek @@ -0,0 +1,194 @@ +# @TEST-GROUP: leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks + +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -b -r $TRACES/http/get.trace %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + +# Note: only global sets can be initialized with curly braces +global sg1: set[string] = { "curly", "braces" }; +global sg2: set[port, string, bool] = { [10/udp, "curly", F], + [11/udp, "braces", T] }; +global sg3 = { "more", "curly", "braces" }; + +global did_once = F; + +event new_connection(cc: connection) + { + if ( did_once ) + return; + + did_once = T; + + local s1: set[string] = set( "test", "example" ); + local s2: set[string] = set(); + local s3: set[string]; + local s4 = set( "type inference" ); + local s5: set[port, string, bool] = set( [1/tcp, "test", T], + [2/tcp, "example", F] ); + local s6: set[port, string, bool] = set(); + local s7: set[port, string, bool]; + local s8 = set( [8/tcp, "type inference", T] ); + + # Type inference tests + + test_case( "type inference", type_name(s4) == "set[string]" ); + test_case( "type inference", type_name(s8) == "set[port,string,bool]" ); + test_case( "type inference", type_name(sg3) == "set[string]" ); + + # Test the size of each set + + test_case( "cardinality", |s1| == 2 ); + test_case( "cardinality", |s2| == 0 ); + test_case( "cardinality", |s3| == 0 ); + test_case( "cardinality", |s4| == 1 ); + test_case( "cardinality", |s5| == 2 ); + test_case( "cardinality", |s6| == 0 ); + test_case( "cardinality", |s7| == 0 ); + test_case( "cardinality", |s8| == 1 ); + test_case( "cardinality", |sg1| == 2 ); + test_case( "cardinality", |sg2| == 2 ); + test_case( "cardinality", |sg3| == 3 ); + + # Test iterating over each set + + local ct: count; + ct = 0; + for ( c in s1 ) + { + if ( type_name(c) != "string" ) + print "Error: wrong set element type"; + ++ct; + } + test_case( "iterate over set", ct == 2 ); + + ct = 0; + for ( c in s2 ) + { + ++ct; + } + test_case( "iterate over set", ct == 0 ); + + ct = 0; + for ( [c1,c2,c3] in s5 ) + { + ++ct; + } + test_case( "iterate over set", ct == 2 ); + + ct = 0; + for ( [c1,c2,c3] in sg2 ) + { + ++ct; + } + test_case( "iterate over set", ct == 2 ); + + # Test adding elements to each set (Note: cannot add elements to sets + # of multiple types) + + add s1["added"]; + add s1["added"]; # element already exists (nothing happens) + test_case( "add element", |s1| == 3 ); + test_case( "in operator", "added" in s1 ); + + add s2["another"]; + test_case( "add element", |s2| == 1 ); + add s2["test"]; + test_case( "add element", |s2| == 2 ); + test_case( "in operator", "another" in s2 ); + test_case( "in operator", "test" in s2 ); + + add s3["foo"]; + test_case( "add element", |s3| == 1 ); + test_case( "in operator", "foo" in s3 ); + + add s4["local"]; + test_case( "add element", |s4| == 2 ); + test_case( "in operator", "local" in s4 ); + + add sg1["global"]; + test_case( "add element", |sg1| == 3 ); + test_case( "in operator", "global" in sg1 ); + + add sg3["more global"]; + test_case( "add element", |sg3| == 4 ); + test_case( "in operator", "more global" in sg3 ); + + # Test removing elements from each set (Note: cannot remove elements + # from sets of multiple types) + + delete s1["test"]; + delete s1["foobar"]; # element does not exist (nothing happens) + test_case( "remove element", |s1| == 2 ); + test_case( "!in operator", "test" !in s1 ); + + delete s2["test"]; + test_case( "remove element", |s2| == 1 ); + test_case( "!in operator", "test" !in s2 ); + + delete s3["foo"]; + test_case( "remove element", |s3| == 0 ); + test_case( "!in operator", "foo" !in s3 ); + + delete s4["type inference"]; + test_case( "remove element", |s4| == 1 ); + test_case( "!in operator", "type inference" !in s4 ); 
+ + delete sg1["braces"]; + test_case( "remove element", |sg1| == 2 ); + test_case( "!in operator", "braces" !in sg1 ); + + delete sg3["curly"]; + test_case( "remove element", |sg3| == 3 ); + test_case( "!in operator", "curly" !in sg3 ); + + + local a = set(1,5,7,9,8,14); + local b = set(1,7,9,2); + + local a_plus_b = set(1,2,5,7,9,8,14); + local a_also_b = set(1,7,9); + local a_sans_b = set(5,8,14); + local b_sans_a = set(2); + + local a_or_b = a | b; + local a_and_b = a & b; + + test_case( "union", a_or_b == a_plus_b ); + test_case( "intersection", a_and_b == a_plus_b ); + test_case( "difference", a - b == a_sans_b ); + test_case( "difference", b - a == b_sans_a ); + + test_case( "union/inter.", |b & set(1,7,9,2)| == |b | set(1,7,2,9)| ); + test_case( "relational", |b & a_or_b| == |b| && |b| < |a_or_b| ); + test_case( "relational", b < a_or_b && a < a_or_b && a_or_b > a_and_b ); + + test_case( "subset", b < a ); + test_case( "subset", a < b ); + test_case( "subset", b < (a | set(2)) ); + test_case( "superset", b > a ); + test_case( "superset", b > (a | set(2)) ); + test_case( "superset", b | set(8, 14, 5) > (a | set(2)) ); + test_case( "superset", b | set(8, 14, 99, 5) > (a | set(2)) ); + + test_case( "non-ordering", (a <= b) || (a >= b) ); + test_case( "non-ordering", (a <= a_or_b) && (a_or_b >= b) ); + + test_case( "superset", (b | set(14, 5)) > a - set(8) ); + test_case( "superset", (b | set(14)) > a - set(8) ); + test_case( "superset", (b | set(14)) > a - set(8,5) ); + test_case( "superset", b >= a - set(5,8,14) ); + test_case( "superset", b > a - set(5,8,14) ); + test_case( "superset", (b - set(2)) > a - set(5,8,14) ); + test_case( "equality", a == a | set(5) ); + test_case( "equality", a == a | set(5,11) ); + test_case( "non-equality", a != a | set(5,11) ); + test_case( "equality", a == a | set(5,11) ); + + test_case( "magnitude", |a_and_b| == |a_or_b|); + } + diff --git a/testing/btest/core/leaks/sip.test b/testing/btest/core/leaks/sip.test index 1aac2b30e0..25125e1816 100644 --- a/testing/btest/core/leaks/sip.test +++ b/testing/btest/core/leaks/sip.test @@ -1,10 +1,10 @@ # Needs perftools support. # -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # # @TEST-GROUP: leaks # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -b -m -r $TRACES/sip/wireshark.trace %INPUT +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -b -m -r $TRACES/sip/wireshark.trace %INPUT # @TEST-EXEC: btest-bg-wait 60 @load base/protocols/sip diff --git a/testing/btest/core/leaks/smtp_attachment.test b/testing/btest/core/leaks/smtp_attachment.test index 3094deb65c..63eb1e8b5c 100644 --- a/testing/btest/core/leaks/smtp_attachment.test +++ b/testing/btest/core/leaks/smtp_attachment.test @@ -1,10 +1,10 @@ # Needs perftools support. # -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # # @TEST-GROUP: leaks # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -b -m -r $TRACES/smtp.trace %INPUT +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run zeek zeek -b -m -r $TRACES/smtp.trace %INPUT # @TEST-EXEC: btest-bg-wait 60 @load base/protocols/smtp diff --git a/testing/btest/core/leaks/snmp.test b/testing/btest/core/leaks/snmp.test index 4f212d2699..f6769f2602 100644 --- a/testing/btest/core/leaks/snmp.test +++ b/testing/btest/core/leaks/snmp.test @@ -1,10 +1,10 @@ # Needs perftools support. # -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # # @TEST-GROUP: leaks # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -b -m -r $TRACES/snmp/snmpv1_get.pcap -r $TRACES/snmp/snmpv1_get_short.pcap -r $TRACES/snmp/snmpv1_set.pcap -r $TRACES/snmp/snmpv1_trap.pcap -r $TRACES/snmp/snmpv2_get_bulk.pcap -r $TRACES/snmp/snmpv2_get_next.pcap -r $TRACES/snmp/snmpv2_get.pcap -r $TRACES/snmp/snmpv3_get_next.pcap $SCRIPTS/snmp-test.bro %INPUT +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -b -m -r $TRACES/snmp/snmpv1_get.pcap -r $TRACES/snmp/snmpv1_get_short.pcap -r $TRACES/snmp/snmpv1_set.pcap -r $TRACES/snmp/snmpv1_trap.pcap -r $TRACES/snmp/snmpv2_get_bulk.pcap -r $TRACES/snmp/snmpv2_get_next.pcap -r $TRACES/snmp/snmpv2_get.pcap -r $TRACES/snmp/snmpv3_get_next.pcap $SCRIPTS/snmp-test.zeek %INPUT # @TEST-EXEC: btest-bg-wait 60 @load base/protocols/snmp diff --git a/testing/btest/core/leaks/ssh.test b/testing/btest/core/leaks/ssh.test index 714d7bb3eb..a43654705d 100644 --- a/testing/btest/core/leaks/ssh.test +++ b/testing/btest/core/leaks/ssh.test @@ -1,10 +1,10 @@ # Needs perftools support. # -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # # @TEST-GROUP: leaks # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -b -m -r $TRACES/ssh/ssh.trace %INPUT +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -b -m -r $TRACES/ssh/ssh.trace %INPUT # @TEST-EXEC: btest-bg-wait 60 @load base/protocols/ssh diff --git a/testing/btest/core/leaks/stats.bro b/testing/btest/core/leaks/stats.bro deleted file mode 100644 index a3459fdc93..0000000000 --- a/testing/btest/core/leaks/stats.bro +++ /dev/null @@ -1,15 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -@load policy/misc/stats.bro - -event load_sample(samples: load_sample_info, CPU: interval, dmem: int) - { - print CPU; - } diff --git a/testing/btest/core/leaks/stats.zeek b/testing/btest/core/leaks/stats.zeek new file mode 100644 index 0000000000..f541b4fb79 --- /dev/null +++ b/testing/btest/core/leaks/stats.zeek @@ -0,0 +1,15 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +@load policy/misc/stats + +event load_sample(samples: load_sample_info, CPU: interval, dmem: int) + { + print CPU; + } diff --git a/testing/btest/core/leaks/string-indexing.bro b/testing/btest/core/leaks/string-indexing.bro deleted file mode 100644 index 37f7868190..0000000000 --- a/testing/btest/core/leaks/string-indexing.bro +++ /dev/null @@ -1,27 +0,0 @@ -# Needs perftools support. 
-# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -b -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: btest-bg-wait 60 - - -event new_connection(c: connection) - { - local s = "0123456789"; - print s[1]; - print s[1:2]; - print s[1:6]; - print s[0:20]; - print s[-2]; - print s[-3:1]; - print s[-1:10]; - print s[-1:0]; - print s[-1:5]; - print s[20:23]; - print s[-20:23]; - print s[0:5][2]; - print s[0:5][1:3][0]; - } diff --git a/testing/btest/core/leaks/string-indexing.zeek b/testing/btest/core/leaks/string-indexing.zeek new file mode 100644 index 0000000000..1ac28efe63 --- /dev/null +++ b/testing/btest/core/leaks/string-indexing.zeek @@ -0,0 +1,27 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -b -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-bg-wait 60 + + +event new_connection(c: connection) + { + local s = "0123456789"; + print s[1]; + print s[1:2]; + print s[1:6]; + print s[0:20]; + print s[-2]; + print s[-3:1]; + print s[-1:10]; + print s[-1:0]; + print s[-1:5]; + print s[20:23]; + print s[-20:23]; + print s[0:5][2]; + print s[0:5][1:3][0]; + } diff --git a/testing/btest/core/leaks/switch-statement.bro b/testing/btest/core/leaks/switch-statement.bro deleted file mode 100644 index e5145f9227..0000000000 --- a/testing/btest/core/leaks/switch-statement.bro +++ /dev/null @@ -1,299 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -b -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -type MyEnum: enum { - RED, - GREEN, - BLUE, - PINK, -}; - -function switch_bool(v: bool): string - { - switch (v) { - case T: - return "true"; - case F: - return "false"; - } - return "n/a"; - } - -function switch_int(v: int): string - { - switch (v) { - case +1: - return "one"; - case +2: - return "two"; - case -3: - return "minus three"; - } - return "n/a"; - } - -function switch_enum(v: MyEnum): string - { - switch (v) { - case RED: - return "red"; - case GREEN: - return "green"; - case BLUE: - return "blue"; - } - return "n/a"; - } - -function switch_count(v: count): string - { - switch (v) { - case 1: - return "1"; - case 2: - return "2"; - case 3: - return "3"; - } - return "n/a"; - } - -function switch_port(v: port): string - { - switch (v) { - case 22/tcp: - return "ssh"; - case 53/udp: - return "dns"; - case 0/icmp: - return "echo"; - } - return "n/a"; - } - -function switch_double(v: double): string - { - switch (v) { - case 1.1: - return "1.1"; - case 2.2: - return "2.2"; - case 3.3: - return "3.3"; - } - return "n/a"; - } - -function switch_interval(v: interval): string - { - switch (v) { - case 1sec: - return "1sec"; - case 2day: - return "2day"; - case 3min: - return "3min"; - } - return "n/a"; - } - -function switch_string(v: string): string - { - switch (v) { - case "one": - return "first"; - case "two": - return "second"; - case "three": - return "third"; - } - return "n/a"; - } - -function switch_addr(v: addr): string - { - switch (v) { - case 1.2.3.4: - return "ipv4"; - case [fe80::1]: - return "ipv6"; - case 0.0.0.0: - return "unspec"; - } - return "n/a"; - } - -function switch_subnet(v: subnet): string - { - switch (v) { - case 1.2.3.0/24: 
- return "1.2.3.0/24"; - case [fe80::0]/96: - return "[fe80::0]"; - case 192.168.0.0/16: - return "192.168.0.0/16"; - } - return "n/a"; - } - -function switch_empty(v: count): string - { - switch ( v ) { - } - return "n/a"; - } - -function switch_fallthrough(v: count): string - { - local rval = ""; - switch ( v ) { - case 1: - rval += "test"; - fallthrough; - case 2: - rval += "testing"; - fallthrough; - case 3: - rval += "tested"; - break; - } - return rval + "return"; - } - -function switch_default(v: count): string - { - local rval = ""; - switch ( v ) { - case 1: - rval += "1"; - fallthrough; - case 2: - rval += "2"; - break; - case 3: - rval += "3"; - fallthrough; - default: - rval += "d"; - break; - } - return rval + "r"; - } - -function switch_default_placement(v: count): string - { - local rval = ""; - switch ( v ) { - case 1: - rval += "1"; - fallthrough; - default: - rval += "d"; - fallthrough; - case 2: - rval += "2"; - break; - case 3: - rval += "3"; - break; - } - return rval + "r"; - } - -function switch_case_list(v: count): string - { - switch ( v ) { - case 1, 2: - return "1,2"; - case 3, 4, 5: - return "3,4,5"; - case 6, 7, 8, 9: - return "6,7,8,9"; - } - return "n/a"; - } - -function test_switch(actual: string, expect: string) - { - if ( actual != expect ) - print fmt("%s != %s", actual, expect); - } - -event new_connection(c: connection) - { - test_switch( switch_bool(T) , "true" ); - test_switch( switch_bool(F) , "false" ); - test_switch( switch_int(+1) , "one" ); - test_switch( switch_int(+2) , "two" ); - test_switch( switch_int(-3) , "minus three" ); - test_switch( switch_int(40) , "n/a" ); - test_switch( switch_enum(RED) , "red" ); - test_switch( switch_enum(BLUE) , "blue" ); - test_switch( switch_enum(GREEN) , "green" ); - test_switch( switch_enum(PINK) , "n/a" ); - test_switch( switch_count(1) , "1" ); - test_switch( switch_count(2) , "2" ); - test_switch( switch_count(3) , "3" ); - test_switch( switch_count(100) , "n/a" ); - test_switch( switch_port(22/tcp) , "ssh" ); - test_switch( switch_port(53/udp) , "dns" ); - test_switch( switch_port(0/icmp) , "echo" ); - test_switch( switch_port(1000/tcp) , "n/a" ); - test_switch( switch_double(1.1) , "1.1" ); - test_switch( switch_double(2.2) , "2.2" ); - test_switch( switch_double(3.3) , "3.3" ); - test_switch( switch_interval(1sec) , "1sec" ); - test_switch( switch_interval(2day) , "2day" ); - test_switch( switch_interval(3min) , "3min" ); - test_switch( switch_string("one") , "first" ); - test_switch( switch_string("two") , "second" ); - test_switch( switch_string("three") , "third" ); - test_switch( switch_addr(1.2.3.4) , "ipv4" ); - test_switch( switch_addr([fe80::1]) , "ipv6" ); - test_switch( switch_addr(0.0.0.0) , "unspec" ); - test_switch( switch_subnet(1.2.3.4/24) , "1.2.3.0/24" ); - test_switch( switch_subnet([fe80::1]/96) , "[fe80::0]" ); - test_switch( switch_subnet(192.168.1.100/16) , "192.168.0.0/16" ); - test_switch( switch_empty(2) , "n/a" ); - test_switch( switch_fallthrough(1) , "testtestingtestedreturn" ); - test_switch( switch_fallthrough(2) , "testingtestedreturn" ); - test_switch( switch_fallthrough(3) , "testedreturn" ); - test_switch( switch_default(1) , "12r" ); - test_switch( switch_default(2) , "2r" ); - test_switch( switch_default(3) , "3dr" ); - test_switch( switch_default(4) , "dr" ); - test_switch( switch_default_placement(1) , "1d2r" ); - test_switch( switch_default_placement(2) , "2r" ); - test_switch( switch_default_placement(3) , "3r" ); - test_switch( switch_default_placement(4) , "d2r" 
); - - local v = vector(0,1,2,3,4,5,6,7,9,10); - local expect: string; - - for ( i in v ) - { - switch ( v[i] ) { - case 1, 2: - expect = "1,2"; - break; - case 3, 4, 5: - expect = "3,4,5"; - break; - case 6, 7, 8, 9: - expect = "6,7,8,9"; - break; - default: - expect = "n/a"; - break; - } - test_switch( switch_case_list(v[i]) , expect ); - } - - print "done"; - } diff --git a/testing/btest/core/leaks/switch-statement.zeek b/testing/btest/core/leaks/switch-statement.zeek new file mode 100644 index 0000000000..b0c906ec46 --- /dev/null +++ b/testing/btest/core/leaks/switch-statement.zeek @@ -0,0 +1,299 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -b -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +type MyEnum: enum { + RED, + GREEN, + BLUE, + PINK, +}; + +function switch_bool(v: bool): string + { + switch (v) { + case T: + return "true"; + case F: + return "false"; + } + return "n/a"; + } + +function switch_int(v: int): string + { + switch (v) { + case +1: + return "one"; + case +2: + return "two"; + case -3: + return "minus three"; + } + return "n/a"; + } + +function switch_enum(v: MyEnum): string + { + switch (v) { + case RED: + return "red"; + case GREEN: + return "green"; + case BLUE: + return "blue"; + } + return "n/a"; + } + +function switch_count(v: count): string + { + switch (v) { + case 1: + return "1"; + case 2: + return "2"; + case 3: + return "3"; + } + return "n/a"; + } + +function switch_port(v: port): string + { + switch (v) { + case 22/tcp: + return "ssh"; + case 53/udp: + return "dns"; + case 0/icmp: + return "echo"; + } + return "n/a"; + } + +function switch_double(v: double): string + { + switch (v) { + case 1.1: + return "1.1"; + case 2.2: + return "2.2"; + case 3.3: + return "3.3"; + } + return "n/a"; + } + +function switch_interval(v: interval): string + { + switch (v) { + case 1sec: + return "1sec"; + case 2day: + return "2day"; + case 3min: + return "3min"; + } + return "n/a"; + } + +function switch_string(v: string): string + { + switch (v) { + case "one": + return "first"; + case "two": + return "second"; + case "three": + return "third"; + } + return "n/a"; + } + +function switch_addr(v: addr): string + { + switch (v) { + case 1.2.3.4: + return "ipv4"; + case [fe80::1]: + return "ipv6"; + case 0.0.0.0: + return "unspec"; + } + return "n/a"; + } + +function switch_subnet(v: subnet): string + { + switch (v) { + case 1.2.3.0/24: + return "1.2.3.0/24"; + case [fe80::0]/96: + return "[fe80::0]"; + case 192.168.0.0/16: + return "192.168.0.0/16"; + } + return "n/a"; + } + +function switch_empty(v: count): string + { + switch ( v ) { + } + return "n/a"; + } + +function switch_fallthrough(v: count): string + { + local rval = ""; + switch ( v ) { + case 1: + rval += "test"; + fallthrough; + case 2: + rval += "testing"; + fallthrough; + case 3: + rval += "tested"; + break; + } + return rval + "return"; + } + +function switch_default(v: count): string + { + local rval = ""; + switch ( v ) { + case 1: + rval += "1"; + fallthrough; + case 2: + rval += "2"; + break; + case 3: + rval += "3"; + fallthrough; + default: + rval += "d"; + break; + } + return rval + "r"; + } + +function switch_default_placement(v: count): string + { + local rval = ""; + switch ( v ) { + case 1: + rval += "1"; + fallthrough; + default: + rval += "d"; + fallthrough; + case 2: + rval += "2"; + break; + case 3: + rval += "3"; + 
break; + } + return rval + "r"; + } + +function switch_case_list(v: count): string + { + switch ( v ) { + case 1, 2: + return "1,2"; + case 3, 4, 5: + return "3,4,5"; + case 6, 7, 8, 9: + return "6,7,8,9"; + } + return "n/a"; + } + +function test_switch(actual: string, expect: string) + { + if ( actual != expect ) + print fmt("%s != %s", actual, expect); + } + +event new_connection(c: connection) + { + test_switch( switch_bool(T) , "true" ); + test_switch( switch_bool(F) , "false" ); + test_switch( switch_int(+1) , "one" ); + test_switch( switch_int(+2) , "two" ); + test_switch( switch_int(-3) , "minus three" ); + test_switch( switch_int(40) , "n/a" ); + test_switch( switch_enum(RED) , "red" ); + test_switch( switch_enum(BLUE) , "blue" ); + test_switch( switch_enum(GREEN) , "green" ); + test_switch( switch_enum(PINK) , "n/a" ); + test_switch( switch_count(1) , "1" ); + test_switch( switch_count(2) , "2" ); + test_switch( switch_count(3) , "3" ); + test_switch( switch_count(100) , "n/a" ); + test_switch( switch_port(22/tcp) , "ssh" ); + test_switch( switch_port(53/udp) , "dns" ); + test_switch( switch_port(0/icmp) , "echo" ); + test_switch( switch_port(1000/tcp) , "n/a" ); + test_switch( switch_double(1.1) , "1.1" ); + test_switch( switch_double(2.2) , "2.2" ); + test_switch( switch_double(3.3) , "3.3" ); + test_switch( switch_interval(1sec) , "1sec" ); + test_switch( switch_interval(2day) , "2day" ); + test_switch( switch_interval(3min) , "3min" ); + test_switch( switch_string("one") , "first" ); + test_switch( switch_string("two") , "second" ); + test_switch( switch_string("three") , "third" ); + test_switch( switch_addr(1.2.3.4) , "ipv4" ); + test_switch( switch_addr([fe80::1]) , "ipv6" ); + test_switch( switch_addr(0.0.0.0) , "unspec" ); + test_switch( switch_subnet(1.2.3.4/24) , "1.2.3.0/24" ); + test_switch( switch_subnet([fe80::1]/96) , "[fe80::0]" ); + test_switch( switch_subnet(192.168.1.100/16) , "192.168.0.0/16" ); + test_switch( switch_empty(2) , "n/a" ); + test_switch( switch_fallthrough(1) , "testtestingtestedreturn" ); + test_switch( switch_fallthrough(2) , "testingtestedreturn" ); + test_switch( switch_fallthrough(3) , "testedreturn" ); + test_switch( switch_default(1) , "12r" ); + test_switch( switch_default(2) , "2r" ); + test_switch( switch_default(3) , "3dr" ); + test_switch( switch_default(4) , "dr" ); + test_switch( switch_default_placement(1) , "1d2r" ); + test_switch( switch_default_placement(2) , "2r" ); + test_switch( switch_default_placement(3) , "3r" ); + test_switch( switch_default_placement(4) , "d2r" ); + + local v = vector(0,1,2,3,4,5,6,7,9,10); + local expect: string; + + for ( i in v ) + { + switch ( v[i] ) { + case 1, 2: + expect = "1,2"; + break; + case 3, 4, 5: + expect = "3,4,5"; + break; + case 6, 7, 8, 9: + expect = "6,7,8,9"; + break; + default: + expect = "n/a"; + break; + } + test_switch( switch_case_list(v[i]) , expect ); + } + + print "done"; + } diff --git a/testing/btest/core/leaks/teredo.bro b/testing/btest/core/leaks/teredo.bro deleted file mode 100644 index c83a501705..0000000000 --- a/testing/btest/core/leaks/teredo.bro +++ /dev/null @@ -1,38 +0,0 @@ -# Needs perftools support. -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-GROUP: leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run bro bro -m -r $TRACES/tunnels/Teredo.pcap %INPUT >output -# @TEST-EXEC: btest-bg-wait 60 - -function print_teredo(name: string, outer: connection, inner: teredo_hdr) - { - print fmt("%s: %s", name, outer$id); - print fmt(" ip6: %s", inner$hdr$ip6); - if ( inner?$auth ) - print fmt(" auth: %s", inner$auth); - if ( inner?$origin ) - print fmt(" origin: %s", inner$origin); - } - -event teredo_packet(outer: connection, inner: teredo_hdr) - { - print_teredo("packet", outer, inner); - } - -event teredo_authentication(outer: connection, inner: teredo_hdr) - { - print_teredo("auth", outer, inner); - } - -event teredo_origin_indication(outer: connection, inner: teredo_hdr) - { - print_teredo("origin", outer, inner); - } - -event teredo_bubble(outer: connection, inner: teredo_hdr) - { - print_teredo("bubble", outer, inner); - } diff --git a/testing/btest/core/leaks/teredo.zeek b/testing/btest/core/leaks/teredo.zeek new file mode 100644 index 0000000000..2841679b0e --- /dev/null +++ b/testing/btest/core/leaks/teredo.zeek @@ -0,0 +1,38 @@ +# Needs perftools support. +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-GROUP: leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -r $TRACES/tunnels/Teredo.pcap %INPUT >output +# @TEST-EXEC: btest-bg-wait 60 + +function print_teredo(name: string, outer: connection, inner: teredo_hdr) + { + print fmt("%s: %s", name, outer$id); + print fmt(" ip6: %s", inner$hdr$ip6); + if ( inner?$auth ) + print fmt(" auth: %s", inner$auth); + if ( inner?$origin ) + print fmt(" origin: %s", inner$origin); + } + +event teredo_packet(outer: connection, inner: teredo_hdr) + { + print_teredo("packet", outer, inner); + } + +event teredo_authentication(outer: connection, inner: teredo_hdr) + { + print_teredo("auth", outer, inner); + } + +event teredo_origin_indication(outer: connection, inner: teredo_hdr) + { + print_teredo("origin", outer, inner); + } + +event teredo_bubble(outer: connection, inner: teredo_hdr) + { + print_teredo("bubble", outer, inner); + } diff --git a/testing/btest/core/leaks/test-all.bro b/testing/btest/core/leaks/test-all.bro deleted file mode 100644 index d4f8a040ec..0000000000 --- a/testing/btest/core/leaks/test-all.bro +++ /dev/null @@ -1,8 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -r $TRACES/wikipedia.trace test-all-policy -# @TEST-EXEC: btest-bg-wait 60 diff --git a/testing/btest/core/leaks/test-all.zeek b/testing/btest/core/leaks/test-all.zeek new file mode 100644 index 0000000000..79bc8c916a --- /dev/null +++ b/testing/btest/core/leaks/test-all.zeek @@ -0,0 +1,8 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -r $TRACES/wikipedia.trace test-all-policy +# @TEST-EXEC: btest-bg-wait 60 diff --git a/testing/btest/core/leaks/vector-indexing.zeek b/testing/btest/core/leaks/vector-indexing.zeek new file mode 100644 index 0000000000..540913bd08 --- /dev/null +++ b/testing/btest/core/leaks/vector-indexing.zeek @@ -0,0 +1,30 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run zeek zeek -m -b -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +global did_it = F; + +event new_connection(c: connection) + { + if ( did_it ) + return; + + did_it = T; + + # Slicing tests. + local v17 = vector( 1, 2, 3, 4, 5 ); + print v17[0:2]; + print v17[-3:-1]; + print v17[:2]; + print v17[2:]; + print v17[:]; + v17[0:1] = vector(6); + v17[2:4] = vector(7, 8); + v17[2:4] = vector(9, 10, 11); + v17[2:5] = vector(9); + } diff --git a/testing/btest/core/leaks/vector-val-bifs.test b/testing/btest/core/leaks/vector-val-bifs.test index 9e9caece69..a552279a57 100644 --- a/testing/btest/core/leaks/vector-val-bifs.test +++ b/testing/btest/core/leaks/vector-val-bifs.test @@ -2,13 +2,13 @@ # # @TEST-GROUP: leaks # -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks # # The BIFS used in this test originally didn't call the VectorVal() ctor right, # assuming that it didn't automatically Ref the VectorType argument and thus # leaked that memeory. # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -b -r $TRACES/ftp/ipv4.trace %INPUT +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -m -b -r $TRACES/ftp/ipv4.trace %INPUT # @TEST-EXEC: btest-bg-wait 60 function myfunc(aa: interval, bb: interval): int diff --git a/testing/btest/core/leaks/while.bro b/testing/btest/core/leaks/while.bro deleted file mode 100644 index 44f17e9b69..0000000000 --- a/testing/btest/core/leaks/while.bro +++ /dev/null @@ -1,80 +0,0 @@ -# @TEST-GROUP: leaks -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks - -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -m -b -r $TRACES/http/get.trace %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -function test_noop() - { - while ( F ) - print "noooooooooo"; - } - -function test_it() - { - local i = 0; - - while ( i < 10 ) - ++i; - - print i; - } - -function test_break() - { - local s = ""; - - while ( T ) - { - s += "s"; - print s; - - if ( s == "sss" ) - break; - } - } - -function test_next() - { - local s: set[count]; - local i = 0; - - while ( 9 !in s ) - { - ++i; - - if ( i % 2 == 0 ) - next; - - add s[i]; - } - - print s; - } - -function test_return(): vector of string - { - local i = 0; - local rval: vector of string; - - while ( T ) - { - rval[i] = fmt("number %d", i); - ++i; - - if ( i == 13 ) - return rval; - } - - rval[0] = "noooo"; - return rval; - } - -event new_connection(c: connection) - { - test_noop(); - test_it(); - test_break(); - test_next(); - print test_return(); - } diff --git a/testing/btest/core/leaks/while.zeek b/testing/btest/core/leaks/while.zeek new file mode 100644 index 0000000000..f490c9a13d --- /dev/null +++ b/testing/btest/core/leaks/while.zeek @@ -0,0 +1,80 @@ +# @TEST-GROUP: leaks +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks + +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run zeek zeek -m -b -r $TRACES/http/get.trace %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +function test_noop() + { + while ( F ) + print "noooooooooo"; + } + +function test_it() + { + local i = 0; + + while ( i < 10 ) + ++i; + + print i; + } + +function test_break() + { + local s = ""; + + while ( T ) + { + s += "s"; + print s; + + if ( s == "sss" ) + break; + } + } + +function test_next() + { + local s: set[count]; + local i = 0; + + while ( 9 !in s ) + { + ++i; + + if ( i % 2 == 0 ) + next; + + add s[i]; + } + + print s; + } + +function test_return(): vector of string + { + local i = 0; + local rval: vector of string; + + while ( T ) + { + rval[i] = fmt("number %d", i); + ++i; + + if ( i == 13 ) + return rval; + } + + rval[0] = "noooo"; + return rval; + } + +event new_connection(c: connection) + { + test_noop(); + test_it(); + test_break(); + test_next(); + print test_return(); + } diff --git a/testing/btest/core/leaks/x509_ocsp_verify.bro b/testing/btest/core/leaks/x509_ocsp_verify.bro deleted file mode 100644 index ab24f28ee8..0000000000 --- a/testing/btest/core/leaks/x509_ocsp_verify.bro +++ /dev/null @@ -1,19 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run bro bro -b -m -r $TRACES/tls/ocsp-stapling.trace %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -@load base/protocols/ssl - -event ssl_stapled_ocsp(c: connection, is_orig: bool, response: string) - { - local chain: vector of opaque of x509 = vector(); - for ( i in c$ssl$cert_chain ) - chain[i] = c$ssl$cert_chain[i]$x509$handle; - - print x509_ocsp_verify(chain, response, SSL::root_certs); - } diff --git a/testing/btest/core/leaks/x509_ocsp_verify.zeek b/testing/btest/core/leaks/x509_ocsp_verify.zeek new file mode 100644 index 0000000000..8d6cd5aa3e --- /dev/null +++ b/testing/btest/core/leaks/x509_ocsp_verify.zeek @@ -0,0 +1,19 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -b -m -r $TRACES/tls/ocsp-stapling.trace %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +@load base/protocols/ssl + +event ssl_stapled_ocsp(c: connection, is_orig: bool, response: string) + { + local chain: vector of opaque of x509 = vector(); + for ( i in c$ssl$cert_chain ) + chain[i] = c$ssl$cert_chain[i]$x509$handle; + + print x509_ocsp_verify(chain, response, SSL::root_certs); + } diff --git a/testing/btest/core/leaks/x509_verify.bro b/testing/btest/core/leaks/x509_verify.bro deleted file mode 100644 index 7db2581a8b..0000000000 --- a/testing/btest/core/leaks/x509_verify.bro +++ /dev/null @@ -1,33 +0,0 @@ -# Needs perftools support. -# -# @TEST-GROUP: leaks -# -# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks -# -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. 
HEAPCHECK=local btest-bg-run bro bro -b -m -r $TRACES/tls/tls-expired-cert.trace %INPUT -# @TEST-EXEC: btest-bg-wait 60 - -@load base/protocols/ssl - -event ssl_established(c: connection) &priority=3 - { - local chain: vector of opaque of x509 = vector(); - for ( i in c$ssl$cert_chain ) - { - chain[i] = c$ssl$cert_chain[i]$x509$handle; - } - - local result = x509_verify(chain, SSL::root_certs); - print fmt("Validation result: %s", result$result_string); - if ( result$result != 0 ) # not ok - return; - - print "Resulting chain:"; - for ( i in result$chain_certs ) - { - local cert = result$chain_certs[i]; - local certinfo = x509_parse(cert); - local sha1 = sha1_hash(x509_get_certificate_string(cert)); - print fmt("Fingerprint: %s, Subject: %s", sha1, certinfo$subject); - } - } diff --git a/testing/btest/core/leaks/x509_verify.zeek b/testing/btest/core/leaks/x509_verify.zeek new file mode 100644 index 0000000000..3989c2b850 --- /dev/null +++ b/testing/btest/core/leaks/x509_verify.zeek @@ -0,0 +1,33 @@ +# Needs perftools support. +# +# @TEST-GROUP: leaks +# +# @TEST-REQUIRES: zeek --help 2>&1 | grep -q mem-leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local btest-bg-run zeek zeek -b -m -r $TRACES/tls/tls-expired-cert.trace %INPUT +# @TEST-EXEC: btest-bg-wait 60 + +@load base/protocols/ssl + +event ssl_established(c: connection) &priority=3 + { + local chain: vector of opaque of x509 = vector(); + for ( i in c$ssl$cert_chain ) + { + chain[i] = c$ssl$cert_chain[i]$x509$handle; + } + + local result = x509_verify(chain, SSL::root_certs); + print fmt("Validation result: %s", result$result_string); + if ( result$result != 0 ) # not ok + return; + + print "Resulting chain:"; + for ( i in result$chain_certs ) + { + local cert = result$chain_certs[i]; + local certinfo = x509_parse(cert); + local sha1 = sha1_hash(x509_get_certificate_string(cert)); + print fmt("Fingerprint: %s, Subject: %s", sha1, certinfo$subject); + } + } diff --git a/testing/btest/core/load-duplicates.bro b/testing/btest/core/load-duplicates.bro deleted file mode 100644 index 8c86fbc272..0000000000 --- a/testing/btest/core/load-duplicates.bro +++ /dev/null @@ -1,14 +0,0 @@ -# This tests bro's mechanism to prevent duplicate script loading. -# -# @TEST-EXEC: mkdir -p foo/bar -# @TEST-EXEC: echo "@load bar/test" >loader.bro -# @TEST-EXEC: cp %INPUT foo/bar/test.bro -# @TEST-EXEC: BROPATH=$BROPATH:.:./foo bro -b misc/loaded-scripts loader bar/test -# @TEST-EXEC: BROPATH=$BROPATH:.:./foo bro -b misc/loaded-scripts loader bar/test.bro -# @TEST-EXEC: BROPATH=$BROPATH:.:./foo bro -b misc/loaded-scripts loader foo/bar/test -# @TEST-EXEC: BROPATH=$BROPATH:.:./foo bro -b misc/loaded-scripts loader foo/bar/test.bro -# @TEST-EXEC: BROPATH=$BROPATH:.:./foo bro -b misc/loaded-scripts loader `pwd`/foo/bar/test.bro - -type Test: enum { - TEST, -}; diff --git a/testing/btest/core/load-duplicates.zeek b/testing/btest/core/load-duplicates.zeek new file mode 100644 index 0000000000..212fc577ac --- /dev/null +++ b/testing/btest/core/load-duplicates.zeek @@ -0,0 +1,15 @@ +# This tests Zeek's mechanism to prevent duplicate script loading. 
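Aside, for reviewers of this test (not part of the patch): Zeek remembers every script it has already loaded by its resolved path, so the invocations below can name the same foo/bar/test.zeek with or without its extension, relative to ZEEKPATH or by absolute path, and it still loads only once. The @TEST-EXEC-FAIL case presumably relies on the flip side: test2.zeek is a byte-identical copy under a different path, so it counts as a distinct script, and defining the same global twice is an error. A minimal sketch of that failure mode, with hypothetical file names:

	# a.zeek
	global pi = 3.14;

	# b.zeek -- identical content but a different script, loaded alongside a.zeek
	global pi = 3.14;   # second definition of "pi": zeek reports an error and exits non-zero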
+# +# @TEST-EXEC: mkdir -p foo/bar +# @TEST-EXEC: echo "@load bar/test" >loader.zeek +# @TEST-EXEC: cp %INPUT foo/bar/test.zeek +# @TEST-EXEC: cp %INPUT foo/bar/test2.zeek +# +# @TEST-EXEC: ZEEKPATH=$ZEEKPATH:.:./foo zeek -b misc/loaded-scripts loader bar/test +# @TEST-EXEC: ZEEKPATH=$ZEEKPATH:.:./foo zeek -b misc/loaded-scripts loader bar/test.zeek +# @TEST-EXEC: ZEEKPATH=$ZEEKPATH:.:./foo zeek -b misc/loaded-scripts loader foo/bar/test +# @TEST-EXEC: ZEEKPATH=$ZEEKPATH:.:./foo zeek -b misc/loaded-scripts loader foo/bar/test.zeek +# @TEST-EXEC: ZEEKPATH=$ZEEKPATH:.:./foo zeek -b misc/loaded-scripts loader `pwd`/foo/bar/test.zeek +# @TEST-EXEC-FAIL: ZEEKPATH=$ZEEKPATH:.:./foo zeek -b misc/loaded-scripts loader bar/test2 + +global pi = 3.14; diff --git a/testing/btest/core/load-explicit-bro-suffix-fallback.zeek b/testing/btest/core/load-explicit-bro-suffix-fallback.zeek new file mode 100644 index 0000000000..d2ce412209 --- /dev/null +++ b/testing/btest/core/load-explicit-bro-suffix-fallback.zeek @@ -0,0 +1,12 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +# We don't have a foo.bro, but we'll accept foo.zeek. +@load foo.bro + +@TEST-START-FILE foo.zeek +event zeek_init() + { + print "loaded foo.zeek"; + } +@TEST-END-FILE diff --git a/testing/btest/core/load-file-extension.zeek b/testing/btest/core/load-file-extension.zeek new file mode 100644 index 0000000000..3a0f4e64c5 --- /dev/null +++ b/testing/btest/core/load-file-extension.zeek @@ -0,0 +1,89 @@ +# Test loading scripts with different file extensions. +# +# Test that either ".zeek" or ".bro" can be loaded without specifying extension +# @TEST-EXEC: cp x/foo.bro . +# @TEST-EXEC: zeek -b load_foo > bro_only +# @TEST-EXEC: btest-diff bro_only +# @TEST-EXEC: rm foo.bro +# +# @TEST-EXEC: cp x/foo.zeek . +# @TEST-EXEC: zeek -b load_foo > zeek_only +# @TEST-EXEC: btest-diff zeek_only +# @TEST-EXEC: rm foo.zeek +# +# Test that ".zeek" is the preferred file extension, unless ".bro" is specified +# @TEST-EXEC: cp x/foo.* . +# @TEST-EXEC: cp x2/foo . +# @TEST-EXEC: zeek -b load_foo > zeek_preferred +# @TEST-EXEC: btest-diff zeek_preferred +# +# @TEST-EXEC: zeek -b load_foo_bro > bro_preferred +# @TEST-EXEC: btest-diff bro_preferred +# @TEST-EXEC: rm foo* +# +# Test that ".bro" is preferred over a script with no file extension (when +# there is no ".zeek" script) +# @TEST-EXEC: cp x/foo.bro . +# @TEST-EXEC: cp x2/foo . +# @TEST-EXEC: zeek -b load_foo > bro_preferred_2 +# @TEST-EXEC: btest-diff bro_preferred_2 +# @TEST-EXEC: rm foo* +# +# Test that a script with no file extension can be loaded +# @TEST-EXEC: cp x2/foo . +# @TEST-EXEC: zeek -b load_foo > no_extension +# @TEST-EXEC: btest-diff no_extension +# @TEST-EXEC: rm foo +# +# Test that a ".zeek" script is preferred over a script package of same name +# @TEST-EXEC: cp -r x/foo* . +# @TEST-EXEC: zeek -b load_foo > zeek_script_preferred +# @TEST-EXEC: btest-diff zeek_script_preferred +# @TEST-EXEC: rm -r foo* +# +# Test that unrecognized file extensions can be loaded explicitly +# @TEST-EXEC: cp x/foo.* . +# @TEST-EXEC: zeek -b load_foo_xyz > xyz_preferred +# @TEST-EXEC: btest-diff xyz_preferred +# @TEST-EXEC: rm foo.* +# +# @TEST-EXEC: cp x/foo.xyz . 
+# @TEST-EXEC-FAIL: zeek -b load_foo +# @TEST-EXEC: rm foo.xyz + +@TEST-START-FILE load_foo +@load foo +@TEST-END-FILE + +@TEST-START-FILE load_foo_bro +@load foo.bro +@TEST-END-FILE + +@TEST-START-FILE load_foo_xyz +@load foo.xyz +@TEST-END-FILE + + +@TEST-START-FILE x/foo.bro +print "Bro script loaded"; +@TEST-END-FILE + +@TEST-START-FILE x/foo.zeek +print "Zeek script loaded"; +@TEST-END-FILE + +@TEST-START-FILE x/foo.xyz +print "Non-standard file extension script loaded"; +@TEST-END-FILE + +@TEST-START-FILE x/foo/__load__.zeek +@load ./main +@TEST-END-FILE + +@TEST-START-FILE x/foo/main.zeek +print "Script package loaded"; +@TEST-END-FILE + +@TEST-START-FILE x2/foo +print "No file extension script loaded"; +@TEST-END-FILE diff --git a/testing/btest/core/load-pkg.bro b/testing/btest/core/load-pkg.bro deleted file mode 100644 index e6671e038d..0000000000 --- a/testing/btest/core/load-pkg.bro +++ /dev/null @@ -1,10 +0,0 @@ -# @TEST-EXEC: bro -b foo >output -# @TEST-EXEC: btest-diff output - -@TEST-START-FILE foo/__load__.bro -@load ./test.bro -@TEST-END-FILE - -@TEST-START-FILE foo/test.bro -print "Foo loaded"; -@TEST-END-FILE diff --git a/testing/btest/core/load-pkg.zeek b/testing/btest/core/load-pkg.zeek new file mode 100644 index 0000000000..b97211a86a --- /dev/null +++ b/testing/btest/core/load-pkg.zeek @@ -0,0 +1,28 @@ +# Test that package loading works when a package loader script is present. +# +# Test that ".zeek" is loaded when there is also a ".bro" +# @TEST-EXEC: zeek -b foo >output +# @TEST-EXEC: btest-diff output +# +# Test that ".bro" is loaded when there is no ".zeek" +# @TEST-EXEC: rm foo/__load__.zeek +# @TEST-EXEC: zeek -b foo >output2 +# @TEST-EXEC: btest-diff output2 +# +# Test that package cannot be loaded when no package loader script exists. +# @TEST-EXEC: rm foo/__load__.bro +# @TEST-EXEC-FAIL: zeek -b foo + +@TEST-START-FILE foo/__load__.bro +@load ./test +print "__load__.bro loaded"; +@TEST-END-FILE + +@TEST-START-FILE foo/__load__.zeek +@load ./test +print "__load__.zeek loaded"; +@TEST-END-FILE + +@TEST-START-FILE foo/test.zeek +print "test.zeek loaded"; +@TEST-END-FILE diff --git a/testing/btest/core/load-prefixes.bro b/testing/btest/core/load-prefixes.bro deleted file mode 100644 index 1dfc3ac5dd..0000000000 --- a/testing/btest/core/load-prefixes.bro +++ /dev/null @@ -1,25 +0,0 @@ -# A test of prefix-based @load'ing - -# @TEST-EXEC: bro addprefixes >output -# @TEST-EXEC: btest-diff output - -@TEST-START-FILE addprefixes.bro -@prefixes += lcl -@prefixes += lcl2 -@TEST-END-FILE - -@TEST-START-FILE lcl.base.utils.site.bro -print "loaded lcl.base.utils.site.bro"; -@TEST-END-FILE - -@TEST-START-FILE lcl2.base.utils.site.bro -print "loaded lcl2.base.utils.site.bro"; -@TEST-END-FILE - -@TEST-START-FILE lcl.base.protocols.http.bro -print "loaded lcl.base.protocols.http.bro"; -@TEST-END-FILE - -@TEST-START-FILE lcl2.base.protocols.http.bro -print "loaded lcl2.base.protocols.http.bro"; -@TEST-END-FILE diff --git a/testing/btest/core/load-prefixes.zeek b/testing/btest/core/load-prefixes.zeek new file mode 100644 index 0000000000..0416319827 --- /dev/null +++ b/testing/btest/core/load-prefixes.zeek @@ -0,0 +1,29 @@ +# A test of prefix-based @load'ing + +# @TEST-EXEC: zeek addprefixes >output +# @TEST-EXEC: btest-diff output + +@TEST-START-FILE addprefixes.zeek +@prefixes += lcl +@prefixes += lcl2 +@TEST-END-FILE + +# Since base/utils/site.zeek is a script, only a script with the original file +# extension can be loaded here. 
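Aside (not part of the patch): the @prefixes mechanism means that for every prefix p and every loaded script such as base/utils/site.zeek, Zeek additionally tries to load a flattened script named p.base.utils.site from ZEEKPATH, which is exactly what the stub files below provide. A minimal sketch, reusing the prefix from this test:

	@prefixes += lcl
	# From here on, loading base/utils/site.zeek also attempts to load a script
	# called "lcl.base.utils.site" found on ZEEKPATH, if such a script exists;
	# it is skipped silently otherwise.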
+@TEST-START-FILE lcl.base.utils.site.zeek +print "loaded lcl.base.utils.site.zeek"; +@TEST-END-FILE + +@TEST-START-FILE lcl2.base.utils.site.zeek +print "loaded lcl2.base.utils.site.zeek"; +@TEST-END-FILE + +# For a script package like base/protocols/http/, either of the recognized +# file extensions can be loaded here. +@TEST-START-FILE lcl.base.protocols.http.zeek +print "loaded lcl.base.protocols.http.zeek"; +@TEST-END-FILE + +@TEST-START-FILE lcl2.base.protocols.http.bro +print "loaded lcl2.base.protocols.http.bro"; +@TEST-END-FILE diff --git a/testing/btest/core/load-relative.bro b/testing/btest/core/load-relative.bro deleted file mode 100644 index 3bd082cf8a..0000000000 --- a/testing/btest/core/load-relative.bro +++ /dev/null @@ -1,18 +0,0 @@ -# A test of relative-path-based @load'ing - -# @TEST-EXEC: bro -b foo/foo >output -# @TEST-EXEC: btest-diff output - -@TEST-START-FILE foo/foo.bro -@load ./bar -@load ../baz -print "foo loaded"; -@TEST-END-FILE - -@TEST-START-FILE foo/bar.bro -print "bar loaded"; -@TEST-END-FILE - -@TEST-START-FILE baz.bro -print "baz loaded"; -@TEST-END-FILE diff --git a/testing/btest/core/load-relative.zeek b/testing/btest/core/load-relative.zeek new file mode 100644 index 0000000000..8e1e6f8a06 --- /dev/null +++ b/testing/btest/core/load-relative.zeek @@ -0,0 +1,18 @@ +# A test of relative-path-based @load'ing + +# @TEST-EXEC: zeek -b foo/foo >output +# @TEST-EXEC: btest-diff output + +@TEST-START-FILE foo/foo.zeek +@load ./bar +@load ../baz +print "foo loaded"; +@TEST-END-FILE + +@TEST-START-FILE foo/bar.zeek +print "bar loaded"; +@TEST-END-FILE + +@TEST-START-FILE baz.zeek +print "baz loaded"; +@TEST-END-FILE diff --git a/testing/btest/core/load-unload.bro b/testing/btest/core/load-unload.bro deleted file mode 100644 index 6525a8e8ea..0000000000 --- a/testing/btest/core/load-unload.bro +++ /dev/null @@ -1,11 +0,0 @@ -# This tests the @unload directive -# -# @TEST-EXEC: bro -b %INPUT misc/loaded-scripts dontloadmebro > output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: grep -q dontloadmebro loaded_scripts.log && exit 1 || exit 0 - -@unload dontloadmebro - -@TEST-START-FILE dontloadmebro.bro -print "FAIL"; -@TEST-END-FILE diff --git a/testing/btest/core/load-unload.zeek b/testing/btest/core/load-unload.zeek new file mode 100644 index 0000000000..6199f12e8b --- /dev/null +++ b/testing/btest/core/load-unload.zeek @@ -0,0 +1,32 @@ +# This tests the @unload directive +# +# Test that @unload works with ".bro" when there is no ".zeek" script +# @TEST-EXEC: zeek -b unloadbro misc/loaded-scripts dontloadmebro > output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: grep dontloadmebro loaded_scripts.log && exit 1 || exit 0 +# +# Test that @unload looks for ".zeek" first (assuming no file extension is +# specified in the @unload) +# @TEST-EXEC: zeek -b unload misc/loaded-scripts dontloadme.zeek dontloadme.bro > output2 +# @TEST-EXEC: btest-diff output2 +# @TEST-EXEC: grep dontloadme.bro loaded_scripts.log + +@TEST-START-FILE unloadbro.bro +@unload dontloadmebro +@TEST-END-FILE + +@TEST-START-FILE dontloadmebro.bro +print "Loaded: dontloadmebro.bro"; +@TEST-END-FILE + +@TEST-START-FILE unload.zeek +@unload dontloadme +@TEST-END-FILE + +@TEST-START-FILE dontloadme.zeek +print "Loaded: dontloadme.zeek"; +@TEST-END-FILE + +@TEST-START-FILE dontloadme.bro +print "Loaded: dontloadme.bro"; +@TEST-END-FILE diff --git a/testing/btest/core/mobile-ipv6-home-addr.test b/testing/btest/core/mobile-ipv6-home-addr.test index e171a07afb..9be171074a 100644 --- 
a/testing/btest/core/mobile-ipv6-home-addr.test +++ b/testing/btest/core/mobile-ipv6-home-addr.test @@ -1,5 +1,6 @@ -# @TEST-REQUIRES: grep -q "#define ENABLE_MOBILE_IPV6" $BUILD/bro-config.h -# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/ipv6-mobile-hoa.trace %INPUT >output +# @TEST-REQUIRES: grep -q "#define ENABLE_MOBILE_IPV6" $BUILD/zeek-config.h +# +# @TEST-EXEC: zeek -b -r $TRACES/mobile-ipv6/ipv6-mobile-hoa.trace %INPUT >output # @TEST-EXEC: btest-diff output # Just check that the orig of the connection is the Home Address, but the diff --git a/testing/btest/core/mobile-ipv6-routing.test b/testing/btest/core/mobile-ipv6-routing.test index ea99a70706..cca944f9c4 100644 --- a/testing/btest/core/mobile-ipv6-routing.test +++ b/testing/btest/core/mobile-ipv6-routing.test @@ -1,5 +1,6 @@ -# @TEST-REQUIRES: grep -q "#define ENABLE_MOBILE_IPV6" $BUILD/bro-config.h -# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/ipv6-mobile-routing.trace %INPUT >output +# @TEST-REQUIRES: grep -q "#define ENABLE_MOBILE_IPV6" $BUILD/zeek-config.h +# +# @TEST-EXEC: zeek -b -r $TRACES/mobile-ipv6/ipv6-mobile-routing.trace %INPUT >output # @TEST-EXEC: btest-diff output # Just check that the responder of the connection is the final routing diff --git a/testing/btest/core/mobility-checksums.test b/testing/btest/core/mobility-checksums.test index 42877b63d4..d680fdf406 100644 --- a/testing/btest/core/mobility-checksums.test +++ b/testing/btest/core/mobility-checksums.test @@ -1,15 +1,16 @@ -# @TEST-REQUIRES: grep -q "#define ENABLE_MOBILE_IPV6" $BUILD/bro-config.h -# @TEST-EXEC: bro -r $TRACES/chksums/mip6-bad-mh-chksum.pcap +# @TEST-REQUIRES: grep -q "#define ENABLE_MOBILE_IPV6" $BUILD/zeek-config.h +# +# @TEST-EXEC: zeek -r $TRACES/chksums/mip6-bad-mh-chksum.pcap # @TEST-EXEC: mv weird.log bad.out -# @TEST-EXEC: bro -r $TRACES/chksums/ip6-hoa-tcp-bad-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip6-hoa-tcp-bad-chksum.pcap # @TEST-EXEC: cat weird.log >> bad.out -# @TEST-EXEC: bro -r $TRACES/chksums/ip6-hoa-udp-bad-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip6-hoa-udp-bad-chksum.pcap # @TEST-EXEC: cat weird.log >> bad.out # @TEST-EXEC: rm weird.log -# @TEST-EXEC: bro -r $TRACES/chksums/mip6-good-mh-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/mip6-good-mh-chksum.pcap # @TEST-EXEC: test ! -e weird.log -# @TEST-EXEC: bro -r $TRACES/chksums/ip6-hoa-tcp-good-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip6-hoa-tcp-good-chksum.pcap # @TEST-EXEC: test ! -e weird.log -# @TEST-EXEC: bro -r $TRACES/chksums/ip6-hoa-udp-good-chksum.pcap +# @TEST-EXEC: zeek -r $TRACES/chksums/ip6-hoa-udp-good-chksum.pcap # @TEST-EXEC: test ! 
-e weird.log # @TEST-EXEC: btest-diff bad.out diff --git a/testing/btest/core/mobility_msg.test b/testing/btest/core/mobility_msg.test index 1fde084dc2..89538fc667 100644 --- a/testing/btest/core/mobility_msg.test +++ b/testing/btest/core/mobility_msg.test @@ -1,12 +1,13 @@ -# @TEST-REQUIRES: grep -q "#define ENABLE_MOBILE_IPV6" $BUILD/bro-config.h -# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/mip6_back.trace %INPUT >output -# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/mip6_be.trace %INPUT >>output -# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/mip6_brr.trace %INPUT >>output -# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/mip6_bu.trace %INPUT >>output -# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/mip6_cot.trace %INPUT >>output -# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/mip6_coti.trace %INPUT >>output -# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/mip6_hot.trace %INPUT >>output -# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/mip6_hoti.trace %INPUT >>output +# @TEST-REQUIRES: grep -q "#define ENABLE_MOBILE_IPV6" $BUILD/zeek-config.h +# +# @TEST-EXEC: zeek -b -r $TRACES/mobile-ipv6/mip6_back.trace %INPUT >output +# @TEST-EXEC: zeek -b -r $TRACES/mobile-ipv6/mip6_be.trace %INPUT >>output +# @TEST-EXEC: zeek -b -r $TRACES/mobile-ipv6/mip6_brr.trace %INPUT >>output +# @TEST-EXEC: zeek -b -r $TRACES/mobile-ipv6/mip6_bu.trace %INPUT >>output +# @TEST-EXEC: zeek -b -r $TRACES/mobile-ipv6/mip6_cot.trace %INPUT >>output +# @TEST-EXEC: zeek -b -r $TRACES/mobile-ipv6/mip6_coti.trace %INPUT >>output +# @TEST-EXEC: zeek -b -r $TRACES/mobile-ipv6/mip6_hot.trace %INPUT >>output +# @TEST-EXEC: zeek -b -r $TRACES/mobile-ipv6/mip6_hoti.trace %INPUT >>output # @TEST-EXEC: btest-diff output event mobile_ipv6_message(p: pkt_hdr) diff --git a/testing/btest/core/mpls-in-vlan.bro b/testing/btest/core/mpls-in-vlan.bro deleted file mode 100644 index f57c1862ce..0000000000 --- a/testing/btest/core/mpls-in-vlan.bro +++ /dev/null @@ -1,2 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/mpls-in-vlan.trace -# @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/core/mpls-in-vlan.zeek b/testing/btest/core/mpls-in-vlan.zeek new file mode 100644 index 0000000000..9048c34c17 --- /dev/null +++ b/testing/btest/core/mpls-in-vlan.zeek @@ -0,0 +1,2 @@ +# @TEST-EXEC: zeek -C -r $TRACES/mpls-in-vlan.trace +# @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/core/negative-time.test b/testing/btest/core/negative-time.test index 5717df835c..cd1ac20240 100644 --- a/testing/btest/core/negative-time.test +++ b/testing/btest/core/negative-time.test @@ -1,2 +1,2 @@ -# @TEST-EXEC: bro -b -C -r $TRACES/negative-time.pcap base/frameworks/notice +# @TEST-EXEC: zeek -b -C -r $TRACES/negative-time.pcap base/frameworks/notice # @TEST-EXEC: btest-diff weird.log diff --git a/testing/btest/core/nflog.bro b/testing/btest/core/nflog.bro deleted file mode 100644 index 39186bbbea..0000000000 --- a/testing/btest/core/nflog.bro +++ /dev/null @@ -1,4 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/nflog-http.pcap %INPUT -# @TEST-EXEC: btest-diff http.log - -@load base/protocols/http diff --git a/testing/btest/core/nflog.zeek b/testing/btest/core/nflog.zeek new file mode 100644 index 0000000000..e3bb62e4a5 --- /dev/null +++ b/testing/btest/core/nflog.zeek @@ -0,0 +1,4 @@ +# @TEST-EXEC: zeek -C -r $TRACES/nflog-http.pcap %INPUT +# @TEST-EXEC: btest-diff http.log + +@load base/protocols/http diff --git a/testing/btest/core/nop.bro b/testing/btest/core/nop.bro deleted file mode 100644 index e42b5a7821..0000000000 --- a/testing/btest/core/nop.bro +++ /dev/null @@ -1,4 +0,0 
@@ -# Bro shouldn't crash when doing nothing, nor outputting anything. -# -# @TEST-EXEC: cat /dev/null | bro >output 2>&1 -# @TEST-EXEC: btest-diff output diff --git a/testing/btest/core/nop.zeek b/testing/btest/core/nop.zeek new file mode 100644 index 0000000000..e0f6f70323 --- /dev/null +++ b/testing/btest/core/nop.zeek @@ -0,0 +1,4 @@ +# Zeek shouldn't crash when doing nothing, nor outputting anything. +# +# @TEST-EXEC: cat /dev/null | zeek >output 2>&1 +# @TEST-EXEC: btest-diff output diff --git a/testing/btest/core/old_comm_usage.bro b/testing/btest/core/old_comm_usage.bro deleted file mode 100644 index 0e9ae2f1f6..0000000000 --- a/testing/btest/core/old_comm_usage.bro +++ /dev/null @@ -1,7 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -event bro_init() - { - terminate_communication(); - } diff --git a/testing/btest/core/option-errors.bro b/testing/btest/core/option-errors.bro deleted file mode 100644 index 6a9a8f1db6..0000000000 --- a/testing/btest/core/option-errors.bro +++ /dev/null @@ -1,13 +0,0 @@ -# @TEST-EXEC-FAIL: bro %INPUT -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr - -option testbool; - -@TEST-START-NEXT - -option testbool : bool; - -@TEST-START-NEXT - -option testopt = 5; -testopt = 6; diff --git a/testing/btest/core/option-errors.zeek b/testing/btest/core/option-errors.zeek new file mode 100644 index 0000000000..b08ba17864 --- /dev/null +++ b/testing/btest/core/option-errors.zeek @@ -0,0 +1,13 @@ +# @TEST-EXEC-FAIL: zeek %INPUT +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr + +option testbool; + +@TEST-START-NEXT + +option testbool : bool; + +@TEST-START-NEXT + +option testopt = 5; +testopt = 6; diff --git a/testing/btest/core/option-priorities.bro b/testing/btest/core/option-priorities.bro deleted file mode 100644 index fd352a5459..0000000000 --- a/testing/btest/core/option-priorities.bro +++ /dev/null @@ -1,28 +0,0 @@ -# @TEST-EXEC: bro %INPUT -# @TEST-EXEC: btest-diff .stdout - -export { - ## Test some documentation here! - option testbool: bool = T; -} - -function option_changed(ID: string, new_value: bool): bool { - print fmt("Value of %s changed from %s to %s", ID, testbool, new_value); - return new_value; -} - -function option_changed_two(ID: string, new_value: bool, location: string): bool { - print fmt("Higher prio - Value of %s changed from %s to %s at location '%s'", ID, testbool, new_value, location); - return T; -} - -event bro_init() - { - print "Old value", testbool; - Option::set_change_handler("testbool", option_changed); - Option::set_change_handler("testbool", option_changed_two, 99); - Option::set("testbool", F); - Option::set("testbool", F, "here"); - print "New value", testbool; - } - diff --git a/testing/btest/core/option-priorities.zeek b/testing/btest/core/option-priorities.zeek new file mode 100644 index 0000000000..cfc78aafe7 --- /dev/null +++ b/testing/btest/core/option-priorities.zeek @@ -0,0 +1,28 @@ +# @TEST-EXEC: zeek %INPUT +# @TEST-EXEC: btest-diff .stdout + +export { + ## Test some documentation here! 
+ option testbool: bool = T; +} + +function option_changed(ID: string, new_value: bool): bool { + print fmt("Value of %s changed from %s to %s", ID, testbool, new_value); + return new_value; +} + +function option_changed_two(ID: string, new_value: bool, location: string): bool { + print fmt("Higher prio - Value of %s changed from %s to %s at location '%s'", ID, testbool, new_value, location); + return T; +} + +event zeek_init() + { + print "Old value", testbool; + Option::set_change_handler("testbool", option_changed); + Option::set_change_handler("testbool", option_changed_two, 99); + Option::set("testbool", F); + Option::set("testbool", F, "here"); + print "New value", testbool; + } + diff --git a/testing/btest/core/option-redef.bro b/testing/btest/core/option-redef.bro deleted file mode 100644 index 3d67a9a755..0000000000 --- a/testing/btest/core/option-redef.bro +++ /dev/null @@ -1,16 +0,0 @@ -# @TEST-EXEC: bro %INPUT -# @TEST-EXEC: btest-diff .stdout - -# options are allowed to be redef-able. -# And they are even redef-able by default. - -option testopt = 5 &redef; -redef testopt = 6; -option anotheropt = 6; -redef anotheropt = 7; - -event bro_init() { - print testopt; - print anotheropt; -} - diff --git a/testing/btest/core/option-redef.zeek b/testing/btest/core/option-redef.zeek new file mode 100644 index 0000000000..e47bd7344e --- /dev/null +++ b/testing/btest/core/option-redef.zeek @@ -0,0 +1,16 @@ +# @TEST-EXEC: zeek %INPUT +# @TEST-EXEC: btest-diff .stdout + +# options are allowed to be redef-able. +# And they are even redef-able by default. + +option testopt = 5 &redef; +redef testopt = 6; +option anotheropt = 6; +redef anotheropt = 7; + +event zeek_init() { + print testopt; + print anotheropt; +} + diff --git a/testing/btest/core/option-runtime-errors.bro b/testing/btest/core/option-runtime-errors.bro deleted file mode 100644 index 8ae4b9ca40..0000000000 --- a/testing/btest/core/option-runtime-errors.bro +++ /dev/null @@ -1,104 +0,0 @@ -# @TEST-EXEC: bro %INPUT -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr - -# Errors that happen during runtime. At least at the moment we are not checking these early enough -# that Bro will bail out during startup. Perhaps we want to change this later. 
- -option A = 5; -Option::set("B", 6); - -@TEST-START-NEXT - -option A = 5; -Option::set("A", "hi"); - -@TEST-START-NEXT - -const A = 5; -Option::set("A", 6); - -@TEST-START-NEXT: - -option A = 5; - -function option_changed(ID: string, new_value: bool): bool { -} - -Option::set_change_handler("A", option_changed); - -@TEST-START-NEXT: - -option A = 5; - -function option_changed(ID: string): bool { -} - -Option::set_change_handler("A", option_changed); - -@TEST-START-NEXT: - -option A : count = 5; - -function option_changed(ID: string, new_value: count): bool { -} - -Option::set_change_handler("A", option_changed); - -@TEST-START-NEXT: - -option A : count = 5; - -hook option_changed(ID: string, new_value: count) { -} - -Option::set_change_handler("A", option_changed); - -@TEST-START-NEXT: - -option A : count = 5; - -event option_changed(ID: string, new_value: count) { -} - -Option::set_change_handler("A", option_changed); - -@TEST-START-NEXT: - -function option_changed(ID: string, new_value: count) : count { -} - -Option::set_change_handler("A", option_changed); - - -@TEST-START-NEXT: - -const A : count = 5; - -function option_changed(ID: string, new_value: count) : count { -} - -Option::set_change_handler("A", option_changed); - -@TEST-START-NEXT: - -option A : count = 5; - -Option::set_change_handler("A", A); - -@TEST-START-NEXT: - -option A : count = 5; - -function option_changed(ID: string, new_value: count, location: count) : count { -} - -Option::set_change_handler("A", option_changed); - -@TEST-START-NEXT: - -option A : count = 5; - -function option_changed(ID: string, new_value: count, location: string, a: count) : count { -} - -Option::set_change_handler("A", option_changed); diff --git a/testing/btest/core/option-runtime-errors.zeek b/testing/btest/core/option-runtime-errors.zeek new file mode 100644 index 0000000000..ef512c6a8e --- /dev/null +++ b/testing/btest/core/option-runtime-errors.zeek @@ -0,0 +1,105 @@ +# @TEST-EXEC: zeek %INPUT +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr + +# Errors that happen during runtime. At least at the moment we are not +# checking these early enough that Zeek will bail out during startup. Perhaps +# we want to change this later. 
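Aside (not part of the patch): every case below is deliberately broken. For contrast, a minimal sketch of a handler shape that Option::set_change_handler does accept for a count-typed option, modeled on the option-priorities test elsewhere in this patch; the identifier names are illustrative:

	option A: count = 5;

	# A valid change handler takes the option name and the proposed new value,
	# optionally followed by a location string, and returns the value that the
	# option should actually take.
	function on_A_changed(ID: string, new_value: count, location: string): count
		{
		print fmt("%s changed from %d to %d at '%s'", ID, A, new_value, location);
		return new_value;
		}

	event zeek_init()
		{
		Option::set_change_handler("A", on_A_changed);
		Option::set("A", 7, "somewhere");
		}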
+ +option A = 5; +Option::set("B", 6); + +@TEST-START-NEXT + +option A = 5; +Option::set("A", "hi"); + +@TEST-START-NEXT + +const A = 5; +Option::set("A", 6); + +@TEST-START-NEXT: + +option A = 5; + +function option_changed(ID: string, new_value: bool): bool { +} + +Option::set_change_handler("A", option_changed); + +@TEST-START-NEXT: + +option A = 5; + +function option_changed(ID: string): bool { +} + +Option::set_change_handler("A", option_changed); + +@TEST-START-NEXT: + +option A : count = 5; + +function option_changed(ID: string, new_value: count): bool { +} + +Option::set_change_handler("A", option_changed); + +@TEST-START-NEXT: + +option A : count = 5; + +hook option_changed(ID: string, new_value: count) { +} + +Option::set_change_handler("A", option_changed); + +@TEST-START-NEXT: + +option A : count = 5; + +event option_changed(ID: string, new_value: count) { +} + +Option::set_change_handler("A", option_changed); + +@TEST-START-NEXT: + +function option_changed(ID: string, new_value: count) : count { +} + +Option::set_change_handler("A", option_changed); + + +@TEST-START-NEXT: + +const A : count = 5; + +function option_changed(ID: string, new_value: count) : count { +} + +Option::set_change_handler("A", option_changed); + +@TEST-START-NEXT: + +option A : count = 5; + +Option::set_change_handler("A", A); + +@TEST-START-NEXT: + +option A : count = 5; + +function option_changed(ID: string, new_value: count, location: count) : count { +} + +Option::set_change_handler("A", option_changed); + +@TEST-START-NEXT: + +option A : count = 5; + +function option_changed(ID: string, new_value: count, location: string, a: count) : count { +} + +Option::set_change_handler("A", option_changed); diff --git a/testing/btest/core/pcap/dumper.bro b/testing/btest/core/pcap/dumper.bro deleted file mode 100644 index 0f2bdb072e..0000000000 --- a/testing/btest/core/pcap/dumper.bro +++ /dev/null @@ -1,10 +0,0 @@ -# @TEST-REQUIRES: which hexdump -# @TEST-EXEC: bro -r $TRACES/workshop_2011_browse.trace -w dump -# @TEST-EXEC: hexdump -C $TRACES/workshop_2011_browse.trace >1 -# @TEST-EXEC: hexdump -C dump >2 -# @TEST-EXEC: diff 1 2 >output || true - -# Note that we're diff'ing the diff because there is an expected -# difference in the pcaps: namely, the snaplen setting stored in the -# global pcap header. -# @TEST-EXEC: btest-diff output diff --git a/testing/btest/core/pcap/dumper.zeek b/testing/btest/core/pcap/dumper.zeek new file mode 100644 index 0000000000..4602022b45 --- /dev/null +++ b/testing/btest/core/pcap/dumper.zeek @@ -0,0 +1,10 @@ +# @TEST-REQUIRES: which hexdump +# @TEST-EXEC: zeek -r $TRACES/workshop_2011_browse.trace -w dump +# @TEST-EXEC: hexdump -C $TRACES/workshop_2011_browse.trace >1 +# @TEST-EXEC: hexdump -C dump >2 +# @TEST-EXEC: diff 1 2 >output || true + +# Note that we're diff'ing the diff because there is an expected +# difference in the pcaps: namely, the snaplen setting stored in the +# global pcap header. +# @TEST-EXEC: btest-diff output diff --git a/testing/btest/core/pcap/dynamic-filter.bro b/testing/btest/core/pcap/dynamic-filter.bro deleted file mode 100644 index c1b48155c1..0000000000 --- a/testing/btest/core/pcap/dynamic-filter.bro +++ /dev/null @@ -1,32 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace %INPUT >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: btest-diff conn.log - -redef enum PcapFilterID += { A, B }; - -global cnt = 0; - -event new_packet(c: connection, p: pkt_hdr) - { - ++cnt; - - print cnt, c$id; - - if ( cnt == 1 ) - if ( ! 
Pcap::install_pcap_filter(A) ) - print "error 3"; - - if ( cnt == 2 ) - if ( ! Pcap::install_pcap_filter(B) ) - print "error 4"; - } - -event bro_init() - { - if ( ! Pcap::precompile_pcap_filter(A, "port 80") ) - print "error 1"; - - if ( ! Pcap::precompile_pcap_filter(B, "port 53") ) - print "error 2"; - } - diff --git a/testing/btest/core/pcap/dynamic-filter.zeek b/testing/btest/core/pcap/dynamic-filter.zeek new file mode 100644 index 0000000000..11edf87644 --- /dev/null +++ b/testing/btest/core/pcap/dynamic-filter.zeek @@ -0,0 +1,32 @@ +# @TEST-EXEC: zeek -C -r $TRACES/wikipedia.trace %INPUT >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: btest-diff conn.log + +redef enum PcapFilterID += { A, B }; + +global cnt = 0; + +event new_packet(c: connection, p: pkt_hdr) + { + ++cnt; + + print cnt, c$id; + + if ( cnt == 1 ) + if ( ! Pcap::install_pcap_filter(A) ) + print "error 3"; + + if ( cnt == 2 ) + if ( ! Pcap::install_pcap_filter(B) ) + print "error 4"; + } + +event zeek_init() + { + if ( ! Pcap::precompile_pcap_filter(A, "port 80") ) + print "error 1"; + + if ( ! Pcap::precompile_pcap_filter(B, "port 53") ) + print "error 2"; + } + diff --git a/testing/btest/core/pcap/filter-error.bro b/testing/btest/core/pcap/filter-error.bro deleted file mode 100644 index 10270ed53f..0000000000 --- a/testing/btest/core/pcap/filter-error.bro +++ /dev/null @@ -1,16 +0,0 @@ -# @TEST-EXEC-FAIL: bro -r $TRACES/workshop_2011_browse.trace -f "kaputt" >>output 2>&1 -# @TEST-EXEC-FAIL: test -e conn.log -# @TEST-EXEC: echo ---- >>output -# @TEST-EXEC: bro -r $TRACES/workshop_2011_browse.trace %INPUT >>output 2>&1 -# @TEST-EXEC: test -e conn.log -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output - -redef enum PcapFilterID += { A }; - -event bro_init() - { - if ( ! Pcap::precompile_pcap_filter(A, "kaputt, too") ) - print "error", Pcap::error(); - } - - diff --git a/testing/btest/core/pcap/filter-error.zeek b/testing/btest/core/pcap/filter-error.zeek new file mode 100644 index 0000000000..81f4c24cf9 --- /dev/null +++ b/testing/btest/core/pcap/filter-error.zeek @@ -0,0 +1,16 @@ +# @TEST-EXEC-FAIL: zeek -r $TRACES/workshop_2011_browse.trace -f "kaputt" >>output 2>&1 +# @TEST-EXEC-FAIL: test -e conn.log +# @TEST-EXEC: echo ---- >>output +# @TEST-EXEC: zeek -r $TRACES/workshop_2011_browse.trace %INPUT >>output 2>&1 +# @TEST-EXEC: test -e conn.log +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output + +redef enum PcapFilterID += { A }; + +event zeek_init() + { + if ( ! Pcap::precompile_pcap_filter(A, "kaputt, too") ) + print "error", Pcap::error(); + } + + diff --git a/testing/btest/core/pcap/input-error.bro b/testing/btest/core/pcap/input-error.bro deleted file mode 100644 index 44788b3391..0000000000 --- a/testing/btest/core/pcap/input-error.bro +++ /dev/null @@ -1,14 +0,0 @@ -# @TEST-EXEC-FAIL: bro -i NO_SUCH_INTERFACE 2>&1 >>output 2>&1 -# @TEST-EXEC: cat output | sed 's/(.*)//g' >output2 -# @TEST-EXEC-FAIL: bro -r NO_SUCH_TRACE 2>&1 >>output2 2>&1 -# @TEST-EXEC: btest-diff output2 - -redef enum PcapFilterID += { A }; - -event bro_init() - { - if ( ! 
Pcap::precompile_pcap_filter(A, "kaputt, too") ) - print "error", Pcap::error(); - } - - diff --git a/testing/btest/core/pcap/input-error.zeek b/testing/btest/core/pcap/input-error.zeek new file mode 100644 index 0000000000..8a67293a8b --- /dev/null +++ b/testing/btest/core/pcap/input-error.zeek @@ -0,0 +1,14 @@ +# @TEST-EXEC-FAIL: zeek -i NO_SUCH_INTERFACE 2>&1 >>output 2>&1 +# @TEST-EXEC: cat output | sed 's/(.*)//g' >output2 +# @TEST-EXEC-FAIL: zeek -r NO_SUCH_TRACE 2>&1 >>output2 2>&1 +# @TEST-EXEC: btest-diff output2 + +redef enum PcapFilterID += { A }; + +event zeek_init() + { + if ( ! Pcap::precompile_pcap_filter(A, "kaputt, too") ) + print "error", Pcap::error(); + } + + diff --git a/testing/btest/core/pcap/pseudo-realtime.bro b/testing/btest/core/pcap/pseudo-realtime.bro deleted file mode 100644 index 625706f321..0000000000 --- a/testing/btest/core/pcap/pseudo-realtime.bro +++ /dev/null @@ -1,42 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace %INPUT --pseudo-realtime >output -# @TEST-EXEC: btest-diff output - -global init = F; -global last_network = network_time(); -global last_current = current_time(); -global cnt = 0; -global an = 0secs; -global ac = 0secs; - -event new_packet(c: connection, p: pkt_hdr) - { - local tn = network_time(); - local tc = current_time(); - local dn = tn - last_network; - local dc = tc - last_current; - - last_network = tn; - last_current = tc; - ++cnt; - - if ( ! init ) - { - init = T; - return; - } - - an += dn; - ac += dc; - - # print fmt("num=%d agg_delta_network=%.1f agg_delta_real=%.1f", cnt, an, ac); - } - -event bro_done() - { - local d = (an - ac); - if ( d < 0 secs) - d = -d; - - print fmt("real time %s trace time", d < 1.0secs ? "matches" : "does NOT match"); - } - diff --git a/testing/btest/core/pcap/pseudo-realtime.zeek b/testing/btest/core/pcap/pseudo-realtime.zeek new file mode 100644 index 0000000000..994fb42a65 --- /dev/null +++ b/testing/btest/core/pcap/pseudo-realtime.zeek @@ -0,0 +1,42 @@ +# @TEST-EXEC: zeek -C -r $TRACES/wikipedia.trace %INPUT --pseudo-realtime >output +# @TEST-EXEC: btest-diff output + +global init = F; +global last_network = network_time(); +global last_current = current_time(); +global cnt = 0; +global an = 0secs; +global ac = 0secs; + +event new_packet(c: connection, p: pkt_hdr) + { + local tn = network_time(); + local tc = current_time(); + local dn = tn - last_network; + local dc = tc - last_current; + + last_network = tn; + last_current = tc; + ++cnt; + + if ( ! init ) + { + init = T; + return; + } + + an += dn; + ac += dc; + + # print fmt("num=%d agg_delta_network=%.1f agg_delta_real=%.1f", cnt, an, ac); + } + +event zeek_done() + { + local d = (an - ac); + if ( d < 0 secs) + d = -d; + + print fmt("real time %s trace time", d < 1.0secs ? 
"matches" : "does NOT match"); + } + diff --git a/testing/btest/core/pcap/read-trace-with-filter.bro b/testing/btest/core/pcap/read-trace-with-filter.bro deleted file mode 100644 index 5878bada64..0000000000 --- a/testing/btest/core/pcap/read-trace-with-filter.bro +++ /dev/null @@ -1,3 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace -f "port 50000" -# @TEST-EXEC: btest-diff conn.log -# @TEST-EXEC: btest-diff packet_filter.log diff --git a/testing/btest/core/pcap/read-trace-with-filter.zeek b/testing/btest/core/pcap/read-trace-with-filter.zeek new file mode 100644 index 0000000000..ba9db2c2a4 --- /dev/null +++ b/testing/btest/core/pcap/read-trace-with-filter.zeek @@ -0,0 +1,3 @@ +# @TEST-EXEC: zeek -C -r $TRACES/wikipedia.trace -f "port 50000" +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff packet_filter.log diff --git a/testing/btest/core/pppoe-over-qinq.bro b/testing/btest/core/pppoe-over-qinq.bro deleted file mode 100644 index cdfd4607ae..0000000000 --- a/testing/btest/core/pppoe-over-qinq.bro +++ /dev/null @@ -1,2 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/pppoe-over-qinq.pcap -# @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/core/pppoe-over-qinq.zeek b/testing/btest/core/pppoe-over-qinq.zeek new file mode 100644 index 0000000000..54cdcba1f7 --- /dev/null +++ b/testing/btest/core/pppoe-over-qinq.zeek @@ -0,0 +1,2 @@ +# @TEST-EXEC: zeek -C -r $TRACES/pppoe-over-qinq.pcap +# @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/core/pppoe.test b/testing/btest/core/pppoe.test index 35be84d657..74e3678858 100644 --- a/testing/btest/core/pppoe.test +++ b/testing/btest/core/pppoe.test @@ -1,2 +1,2 @@ -# @TEST-EXEC: bro -r $TRACES/pppoe.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/pppoe.trace %INPUT # @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/core/print-bpf-filters.bro b/testing/btest/core/print-bpf-filters.bro deleted file mode 100644 index 6e4a4d5c30..0000000000 --- a/testing/btest/core/print-bpf-filters.bro +++ /dev/null @@ -1,15 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/empty.trace >output -# @TEST-EXEC: cat packet_filter.log >>output -# @TEST-EXEC: bro -r $TRACES/empty.trace -f "port 42" >>output -# @TEST-EXEC: cat packet_filter.log >>output -# @TEST-EXEC: bro -r $TRACES/mixed-vlan-mpls.trace PacketFilter::restricted_filter="vlan" >>output -# @TEST-EXEC: cat packet_filter.log >>output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: btest-diff conn.log -# -# The order in the output of enable_auto_protocol_capture_filters isn't -# stable, for reasons not clear. We canonify it first. 
-# @TEST-EXEC: bro -r $TRACES/empty.trace PacketFilter::enable_auto_protocol_capture_filters=T -# @TEST-EXEC: cat packet_filter.log | bro-cut filter | sed 's#[()]##g' | tr ' ' '\n' | sort | uniq -c | awk '{print $1, $2}' >output2 -# @TEST-EXEC: btest-diff output2 - diff --git a/testing/btest/core/print-bpf-filters.zeek b/testing/btest/core/print-bpf-filters.zeek new file mode 100644 index 0000000000..fd86ce4f04 --- /dev/null +++ b/testing/btest/core/print-bpf-filters.zeek @@ -0,0 +1,15 @@ +# @TEST-EXEC: zeek -r $TRACES/empty.trace >output +# @TEST-EXEC: cat packet_filter.log >>output +# @TEST-EXEC: zeek -r $TRACES/empty.trace -f "port 42" >>output +# @TEST-EXEC: cat packet_filter.log >>output +# @TEST-EXEC: zeek -r $TRACES/mixed-vlan-mpls.trace PacketFilter::restricted_filter="vlan" >>output +# @TEST-EXEC: cat packet_filter.log >>output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: btest-diff conn.log +# +# The order in the output of enable_auto_protocol_capture_filters isn't +# stable, for reasons not clear. We canonify it first. +# @TEST-EXEC: zeek -r $TRACES/empty.trace PacketFilter::enable_auto_protocol_capture_filters=T +# @TEST-EXEC: cat packet_filter.log | zeek-cut filter | sed 's#[()]##g' | tr ' ' '\n' | sort | uniq -c | awk '{print $1, $2}' >output2 +# @TEST-EXEC: btest-diff output2 + diff --git a/testing/btest/core/q-in-q.bro b/testing/btest/core/q-in-q.bro deleted file mode 100644 index 7444e7b458..0000000000 --- a/testing/btest/core/q-in-q.bro +++ /dev/null @@ -1,2 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/q-in-q.trace -# @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/core/q-in-q.zeek b/testing/btest/core/q-in-q.zeek new file mode 100644 index 0000000000..e864fdf3b5 --- /dev/null +++ b/testing/btest/core/q-in-q.zeek @@ -0,0 +1,2 @@ +# @TEST-EXEC: zeek -r $TRACES/q-in-q.trace +# @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/core/radiotap.bro b/testing/btest/core/radiotap.bro deleted file mode 100644 index 27513990f0..0000000000 --- a/testing/btest/core/radiotap.bro +++ /dev/null @@ -1,2 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/radiotap.pcap -# @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/core/radiotap.zeek b/testing/btest/core/radiotap.zeek new file mode 100644 index 0000000000..48886297ff --- /dev/null +++ b/testing/btest/core/radiotap.zeek @@ -0,0 +1,2 @@ +# @TEST-EXEC: zeek -C -r $TRACES/radiotap.pcap +# @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/core/raw_packet.bro b/testing/btest/core/raw_packet.bro deleted file mode 100644 index cb1ee94b0f..0000000000 --- a/testing/btest/core/raw_packet.bro +++ /dev/null @@ -1,9 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/raw_packets.trace %INPUT >output -# @TEST-EXEC: bro -b -r $TRACES/icmp_dot1q.trace %INPUT >>output -# @TEST-EXEC: btest-diff output - -event raw_packet(p: raw_pkt_hdr) - { - print p; - } - diff --git a/testing/btest/core/raw_packet.zeek b/testing/btest/core/raw_packet.zeek new file mode 100644 index 0000000000..15fa7d133b --- /dev/null +++ b/testing/btest/core/raw_packet.zeek @@ -0,0 +1,9 @@ +# @TEST-EXEC: zeek -b -r $TRACES/raw_packets.trace %INPUT >output +# @TEST-EXEC: zeek -b -r $TRACES/icmp_dot1q.trace %INPUT >>output +# @TEST-EXEC: btest-diff output + +event raw_packet(p: raw_pkt_hdr) + { + print p; + } + diff --git a/testing/btest/core/reassembly.bro b/testing/btest/core/reassembly.bro deleted file mode 100644 index 30cfaa727e..0000000000 --- a/testing/btest/core/reassembly.bro +++ /dev/null @@ -1,26 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/ipv4/fragmented-1.pcap 
%INPUT >>output -# @TEST-EXEC: bro -C -r $TRACES/ipv4/fragmented-2.pcap %INPUT >>output -# @TEST-EXEC: bro -C -r $TRACES/ipv4/fragmented-3.pcap %INPUT >>output -# @TEST-EXEC: bro -C -r $TRACES/ipv4/fragmented-4.pcap %INPUT >>output -# @TEST-EXEC: bro -C -r $TRACES/tcp/reassembly.pcap %INPUT >>output -# @TEST-EXEC: btest-diff output - -event bro_init() - { - print "----------------------"; - } - -event flow_weird(name: string, src: addr, dst: addr) - { - print "flow weird", name, src, dst; - } - -event net_weird(name: string) - { - print "net_weird", name; - } - -event rexmit_inconsistency(c: connection, t1: string, t2: string, tcp_flags: string) - { - print "rexmit_inconsistency", c$id, t1, t2, tcp_flags; - } diff --git a/testing/btest/core/reassembly.zeek b/testing/btest/core/reassembly.zeek new file mode 100644 index 0000000000..db14364331 --- /dev/null +++ b/testing/btest/core/reassembly.zeek @@ -0,0 +1,26 @@ +# @TEST-EXEC: zeek -C -r $TRACES/ipv4/fragmented-1.pcap %INPUT >>output +# @TEST-EXEC: zeek -C -r $TRACES/ipv4/fragmented-2.pcap %INPUT >>output +# @TEST-EXEC: zeek -C -r $TRACES/ipv4/fragmented-3.pcap %INPUT >>output +# @TEST-EXEC: zeek -C -r $TRACES/ipv4/fragmented-4.pcap %INPUT >>output +# @TEST-EXEC: zeek -C -r $TRACES/tcp/reassembly.pcap %INPUT >>output +# @TEST-EXEC: btest-diff output + +event zeek_init() + { + print "----------------------"; + } + +event flow_weird(name: string, src: addr, dst: addr) + { + print "flow weird", name, src, dst; + } + +event net_weird(name: string) + { + print "net_weird", name; + } + +event rexmit_inconsistency(c: connection, t1: string, t2: string, tcp_flags: string) + { + print "rexmit_inconsistency", c$id, t1, t2, tcp_flags; + } diff --git a/testing/btest/core/recursive-event.bro b/testing/btest/core/recursive-event.bro deleted file mode 100644 index 245e994cd6..0000000000 --- a/testing/btest/core/recursive-event.bro +++ /dev/null @@ -1,32 +0,0 @@ -# @TEST-EXEC: bro %INPUT 2>&1 | grep -v termination | sort | uniq | wc -l | awk '{print $1}' >output -# @TEST-EXEC: btest-diff output - -# In old version, the event would keep triggering endlessely, with the network -# time not moving forward and Bro not terminating. -# -# Note that the output will not be 20 because we still execute two rounds -# of events every time we drain and also at startup several (currently 3) -# rounds of events drain with the same network_time. - -redef exit_only_after_terminate=T; - -global c = 0; - -event test() - { - c += 1; - - if ( c == 20 ) - { - terminate(); - return; - } - - print network_time(); - event test(); - } - -event bro_init() - { - event test(); - } diff --git a/testing/btest/core/recursive-event.zeek b/testing/btest/core/recursive-event.zeek new file mode 100644 index 0000000000..f82b4ed58b --- /dev/null +++ b/testing/btest/core/recursive-event.zeek @@ -0,0 +1,32 @@ +# @TEST-EXEC: zeek %INPUT 2>&1 | grep -v termination | sort | uniq | wc -l | awk '{print $1}' >output +# @TEST-EXEC: btest-diff output + +# In old version, the event would keep triggering endlessely, with the network +# time not moving forward and Zeek not terminating. +# +# Note that the output will not be 20 because we still execute two rounds +# of events every time we drain and also at startup several (currently 3) +# rounds of events drain with the same network_time. 
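Aside (not part of the patch): the number this test records is, in effect, the count of distinct network_time() values seen across the 19 prints (the 20th iteration terminates without printing), because the @TEST-EXEC pipeline sorts and uniques the output lines after dropping the termination message. For instance, if the drain behaviour described above produced timestamps along the lines of

	t0 t0 t0 t1 t1 t2 t2 ...

the pipeline would report one line per distinct t, which is why the expected value stays below 20 and is tied to how many event rounds share a timestamp.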
+ +redef exit_only_after_terminate=T; + +global c = 0; + +event test() + { + c += 1; + + if ( c == 20 ) + { + terminate(); + return; + } + + print network_time(); + event test(); + } + +event zeek_init() + { + event test(); + } diff --git a/testing/btest/core/reporter-error-in-handler.bro b/testing/btest/core/reporter-error-in-handler.bro deleted file mode 100644 index c4a21d5902..0000000000 --- a/testing/btest/core/reporter-error-in-handler.bro +++ /dev/null @@ -1,29 +0,0 @@ -# -# This test procudes a recursive error: the error handler is itself broken. Rather -# than looping indefinitly, the error inside the handler should reported to stderr. -# -# @TEST-EXEC: bro %INPUT >output 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output - -global a: table[count] of count; - -global c = 0; - -event reporter_error(t: time, msg: string, location: string) -{ - c += 1; - - if ( c > 1 ) - print "FAILED: 2nd error reported to script as well."; - - else - { - print "1st error printed on script level"; - print a[2]; - } -} - -event bro_init() -{ - print a[1]; -} diff --git a/testing/btest/core/reporter-error-in-handler.zeek b/testing/btest/core/reporter-error-in-handler.zeek new file mode 100644 index 0000000000..e7de8a1a75 --- /dev/null +++ b/testing/btest/core/reporter-error-in-handler.zeek @@ -0,0 +1,29 @@ +# +# This test procudes a recursive error: the error handler is itself broken. Rather +# than looping indefinitly, the error inside the handler should reported to stderr. +# +# @TEST-EXEC: zeek %INPUT >output 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output + +global a: table[count] of count; + +global c = 0; + +event reporter_error(t: time, msg: string, location: string) +{ + c += 1; + + if ( c > 1 ) + print "FAILED: 2nd error reported to script as well."; + + else + { + print "1st error printed on script level"; + print a[2]; + } +} + +event zeek_init() +{ + print a[1]; +} diff --git a/testing/btest/core/reporter-fmt-strings.bro b/testing/btest/core/reporter-fmt-strings.bro deleted file mode 100644 index 0e0be77844..0000000000 --- a/testing/btest/core/reporter-fmt-strings.bro +++ /dev/null @@ -1,10 +0,0 @@ -# The format string below should end up as a literal part of the reporter's -# error message to stderr and shouldn't be replaced internally. -# -# @TEST-EXEC-FAIL: bro %INPUT >output 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output - -event bro_init() -{ - event dont_interpret_this("%s"); -} diff --git a/testing/btest/core/reporter-fmt-strings.zeek b/testing/btest/core/reporter-fmt-strings.zeek new file mode 100644 index 0000000000..087b0e2244 --- /dev/null +++ b/testing/btest/core/reporter-fmt-strings.zeek @@ -0,0 +1,10 @@ +# The format string below should end up as a literal part of the reporter's +# error message to stderr and shouldn't be replaced internally. 
+# +# @TEST-EXEC-FAIL: zeek %INPUT >output 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output + +event zeek_init() +{ + event dont_interpret_this("%s"); +} diff --git a/testing/btest/core/reporter-parse-error.bro b/testing/btest/core/reporter-parse-error.bro deleted file mode 100644 index 25f33e2785..0000000000 --- a/testing/btest/core/reporter-parse-error.bro +++ /dev/null @@ -1,8 +0,0 @@ -# -# @TEST-EXEC-FAIL: bro %INPUT >output 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output - -event bro_init() -{ - print TESTFAILURE; -} diff --git a/testing/btest/core/reporter-parse-error.zeek b/testing/btest/core/reporter-parse-error.zeek new file mode 100644 index 0000000000..dfd9ed6d02 --- /dev/null +++ b/testing/btest/core/reporter-parse-error.zeek @@ -0,0 +1,8 @@ +# +# @TEST-EXEC-FAIL: zeek %INPUT >output 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output + +event zeek_init() +{ + print TESTFAILURE; +} diff --git a/testing/btest/core/reporter-runtime-error.bro b/testing/btest/core/reporter-runtime-error.bro deleted file mode 100644 index f8dd8c504c..0000000000 --- a/testing/btest/core/reporter-runtime-error.bro +++ /dev/null @@ -1,13 +0,0 @@ -# -# @TEST-EXEC-FAIL: bro %INPUT >output 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output - -global a: table[count] of count; - -event bro_init() -{ - print a[2]; -} - -print a[1]; - diff --git a/testing/btest/core/reporter-runtime-error.zeek b/testing/btest/core/reporter-runtime-error.zeek new file mode 100644 index 0000000000..63e0437e26 --- /dev/null +++ b/testing/btest/core/reporter-runtime-error.zeek @@ -0,0 +1,13 @@ +# +# @TEST-EXEC-FAIL: zeek %INPUT >output 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output + +global a: table[count] of count; + +event zeek_init() +{ + print a[2]; +} + +print a[1]; + diff --git a/testing/btest/core/reporter-shutdown-order-errors.bro b/testing/btest/core/reporter-shutdown-order-errors.bro deleted file mode 100644 index 6289d47c96..0000000000 --- a/testing/btest/core/reporter-shutdown-order-errors.bro +++ /dev/null @@ -1,9 +0,0 @@ -# @TEST-EXEC: touch reporter.log && chmod -w reporter.log -# @TEST-EXEC: bro %INPUT >out 2>&1 - -# Output doesn't really matter, but we just want to know that Bro shutdowns -# without crashing in such scenarios (reporter log not writable -# and also reporter errors being emitting during shutdown). - -redef Config::config_files += { "./config" }; - diff --git a/testing/btest/core/reporter-shutdown-order-errors.zeek b/testing/btest/core/reporter-shutdown-order-errors.zeek new file mode 100644 index 0000000000..f1478124b8 --- /dev/null +++ b/testing/btest/core/reporter-shutdown-order-errors.zeek @@ -0,0 +1,9 @@ +# @TEST-EXEC: touch reporter.log && chmod -w reporter.log +# @TEST-EXEC: zeek %INPUT >out 2>&1 + +# Output doesn't really matter, but we just want to know that Zeek shuts down +# without crashing in such scenarios (reporter log not writable +# and also reporter errors being emitted during shutdown).
+ +redef Config::config_files += { "./config" }; + diff --git a/testing/btest/core/reporter-type-mismatch.bro b/testing/btest/core/reporter-type-mismatch.bro deleted file mode 100644 index 0faa9b85e2..0000000000 --- a/testing/btest/core/reporter-type-mismatch.bro +++ /dev/null @@ -1,12 +0,0 @@ -# -# @TEST-EXEC-FAIL: bro %INPUT >output 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output - -event foo(a: string) -{ -} - -event bro_init() -{ - event foo(42); -} diff --git a/testing/btest/core/reporter-type-mismatch.zeek b/testing/btest/core/reporter-type-mismatch.zeek new file mode 100644 index 0000000000..0fc8d78f6f --- /dev/null +++ b/testing/btest/core/reporter-type-mismatch.zeek @@ -0,0 +1,12 @@ +# +# @TEST-EXEC-FAIL: zeek %INPUT >output 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output + +event foo(a: string) +{ +} + +event zeek_init() +{ + event foo(42); +} diff --git a/testing/btest/core/reporter-weird-sampling-disable.bro b/testing/btest/core/reporter-weird-sampling-disable.bro deleted file mode 100644 index 014e287dab..0000000000 --- a/testing/btest/core/reporter-weird-sampling-disable.bro +++ /dev/null @@ -1,32 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/http/bro.org.pcap %INPUT >output -# @TEST-EXEC: btest-diff output - -redef Weird::sampling_threshold = 1; -redef Weird::sampling_rate = 0; - -event net_weird(name: string) - { - print "net_weird", name; - } - -event gen_weirds(c: connection) - { - local num = 5; - - while ( num != 0 ) - { - Reporter::net_weird("my_net_weird"); - --num; - } - } - -global did_one_connection = F; - -event new_connection(c: connection) - { - if ( did_one_connection ) - return; - - did_one_connection = T; - event gen_weirds(c); - } diff --git a/testing/btest/core/reporter-weird-sampling-disable.zeek b/testing/btest/core/reporter-weird-sampling-disable.zeek new file mode 100644 index 0000000000..63b4503004 --- /dev/null +++ b/testing/btest/core/reporter-weird-sampling-disable.zeek @@ -0,0 +1,32 @@ +# @TEST-EXEC: zeek -b -r $TRACES/http/bro.org.pcap %INPUT >output +# @TEST-EXEC: btest-diff output + +redef Weird::sampling_threshold = 1; +redef Weird::sampling_rate = 0; + +event net_weird(name: string) + { + print "net_weird", name; + } + +event gen_weirds(c: connection) + { + local num = 5; + + while ( num != 0 ) + { + Reporter::net_weird("my_net_weird"); + --num; + } + } + +global did_one_connection = F; + +event new_connection(c: connection) + { + if ( did_one_connection ) + return; + + did_one_connection = T; + event gen_weirds(c); + } diff --git a/testing/btest/core/reporter-weird-sampling.bro b/testing/btest/core/reporter-weird-sampling.bro deleted file mode 100644 index d9d99681c4..0000000000 --- a/testing/btest/core/reporter-weird-sampling.bro +++ /dev/null @@ -1,55 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/http/bro.org.pcap %INPUT >output -# @TEST-EXEC: btest-diff output - -redef Weird::sampling_duration = 5sec; -redef Weird::sampling_threshold = 10; -redef Weird::sampling_rate = 10; -redef Weird::sampling_whitelist = set("whitelisted_net_weird", - "whitelisted_flow_weird", - "whitelisted_conn_weird"); - -event conn_weird(name: string, c: connection, addl: string) - { - print "conn_weird", name; - } - -event flow_weird(name: string, src: addr, dst: addr) - { - print "flow_weird", name; - } - -event net_weird(name: string) - { - print "net_weird", name; - } - -event gen_weirds(c: connection) - { - local num = 30; - - while ( num != 0 ) - { - Reporter::net_weird("my_net_weird"); - 
Reporter::flow_weird("my_flow_weird", c$id$orig_h, c$id$resp_h); - Reporter::conn_weird("my_conn_weird", c); - - Reporter::net_weird("whitelisted_net_weird"); - Reporter::flow_weird("whitelisted_flow_weird", c$id$orig_h, c$id$resp_h); - Reporter::conn_weird("whitelisted_conn_weird", c); - --num; - } - } - -global did_one_connection = F; - -event new_connection(c: connection) - { - if ( did_one_connection ) - return; - - did_one_connection = T; - event gen_weirds(c); # should permit 10 + 2 of each "my" weird - schedule 2sec { gen_weirds(c) }; # should permit 3 of each "my" weird - schedule 7sec { gen_weirds(c) }; # should permit 10 + 2 of each "my" weird - # Total of 27 "my" weirds of each type and 90 of each "whitelisted" type - } diff --git a/testing/btest/core/reporter-weird-sampling.zeek b/testing/btest/core/reporter-weird-sampling.zeek new file mode 100644 index 0000000000..c3a83a2c8f --- /dev/null +++ b/testing/btest/core/reporter-weird-sampling.zeek @@ -0,0 +1,55 @@ +# @TEST-EXEC: zeek -b -r $TRACES/http/bro.org.pcap %INPUT >output +# @TEST-EXEC: btest-diff output + +redef Weird::sampling_duration = 5sec; +redef Weird::sampling_threshold = 10; +redef Weird::sampling_rate = 10; +redef Weird::sampling_whitelist = set("whitelisted_net_weird", + "whitelisted_flow_weird", + "whitelisted_conn_weird"); + +event conn_weird(name: string, c: connection, addl: string) + { + print "conn_weird", name; + } + +event flow_weird(name: string, src: addr, dst: addr) + { + print "flow_weird", name; + } + +event net_weird(name: string) + { + print "net_weird", name; + } + +event gen_weirds(c: connection) + { + local num = 30; + + while ( num != 0 ) + { + Reporter::net_weird("my_net_weird"); + Reporter::flow_weird("my_flow_weird", c$id$orig_h, c$id$resp_h); + Reporter::conn_weird("my_conn_weird", c); + + Reporter::net_weird("whitelisted_net_weird"); + Reporter::flow_weird("whitelisted_flow_weird", c$id$orig_h, c$id$resp_h); + Reporter::conn_weird("whitelisted_conn_weird", c); + --num; + } + } + +global did_one_connection = F; + +event new_connection(c: connection) + { + if ( did_one_connection ) + return; + + did_one_connection = T; + event gen_weirds(c); # should permit 10 + 2 of each "my" weird + schedule 2sec { gen_weirds(c) }; # should permit 3 of each "my" weird + schedule 7sec { gen_weirds(c) }; # should permit 10 + 2 of each "my" weird + # Total of 27 "my" weirds of each type and 90 of each "whitelisted" type + } diff --git a/testing/btest/core/reporter.bro b/testing/btest/core/reporter.bro deleted file mode 100644 index aa660ef495..0000000000 --- a/testing/btest/core/reporter.bro +++ /dev/null @@ -1,55 +0,0 @@ -# -# @TEST-EXEC: bro %INPUT >output 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff logger-test.log - -event bro_init() -{ - Reporter::info("init test-info"); - Reporter::warning("init test-warning"); - Reporter::error("init test-error"); -} - -event bro_done() -{ - Reporter::info("done test-info"); - Reporter::warning("done test-warning"); - Reporter::error("done test-error"); -} - -global first = 1; - -event connection_established(c: connection) -{ - if ( ! 
first ) - return; - - print "established"; - - Reporter::info("processing test-info"); - Reporter::warning("processing test-warning"); - Reporter::error("processing test-error"); - first = 0; -} - -global f = open_log_file("logger-test"); - -event reporter_info(t: time, msg: string, location: string) - { - print f, fmt("reporter_info|%s|%s|%.6f", msg, location, t); - } - -event reporter_warning(t: time, msg: string, location: string) - { - print f, fmt("reporter_warning|%s|%s|%.6f", msg, location, t); - } - -event reporter_error(t: time, msg: string, location: string) - { - print f, fmt("reporter_error|%s|%s|%.6f", msg, location, t); - } - -Reporter::info("pre test-info"); -Reporter::warning("pre test-warning"); -Reporter::error("pre test-error"); - diff --git a/testing/btest/core/reporter.zeek b/testing/btest/core/reporter.zeek new file mode 100644 index 0000000000..8591096c2b --- /dev/null +++ b/testing/btest/core/reporter.zeek @@ -0,0 +1,55 @@ +# +# @TEST-EXEC: zeek %INPUT >output 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff logger-test.log + +event zeek_init() +{ + Reporter::info("init test-info"); + Reporter::warning("init test-warning"); + Reporter::error("init test-error"); +} + +event zeek_done() +{ + Reporter::info("done test-info"); + Reporter::warning("done test-warning"); + Reporter::error("done test-error"); +} + +global first = 1; + +event connection_established(c: connection) +{ + if ( ! first ) + return; + + print "established"; + + Reporter::info("processing test-info"); + Reporter::warning("processing test-warning"); + Reporter::error("processing test-error"); + first = 0; +} + +global f = open_log_file("logger-test"); + +event reporter_info(t: time, msg: string, location: string) + { + print f, fmt("reporter_info|%s|%s|%.6f", msg, location, t); + } + +event reporter_warning(t: time, msg: string, location: string) + { + print f, fmt("reporter_warning|%s|%s|%.6f", msg, location, t); + } + +event reporter_error(t: time, msg: string, location: string) + { + print f, fmt("reporter_error|%s|%s|%.6f", msg, location, t); + } + +Reporter::info("pre test-info"); +Reporter::warning("pre test-warning"); +Reporter::error("pre test-error"); + diff --git a/testing/btest/core/tcp/fin-retransmit.bro b/testing/btest/core/tcp/fin-retransmit.bro deleted file mode 100644 index 42bf062f5a..0000000000 --- a/testing/btest/core/tcp/fin-retransmit.bro +++ /dev/null @@ -1,8 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/tcp/fin_retransmission.pcap %INPUT >out -# @TEST-EXEC: btest-diff out - -event connection_state_remove(c: connection) - { - print c$orig; - print c$resp; - } diff --git a/testing/btest/core/tcp/fin-retransmit.zeek b/testing/btest/core/tcp/fin-retransmit.zeek new file mode 100644 index 0000000000..a24d253583 --- /dev/null +++ b/testing/btest/core/tcp/fin-retransmit.zeek @@ -0,0 +1,8 @@ +# @TEST-EXEC: zeek -b -r $TRACES/tcp/fin_retransmission.pcap %INPUT >out +# @TEST-EXEC: btest-diff out + +event connection_state_remove(c: connection) + { + print c$orig; + print c$resp; + } diff --git a/testing/btest/core/tcp/large-file-reassembly.bro b/testing/btest/core/tcp/large-file-reassembly.bro deleted file mode 100644 index 655d030d96..0000000000 --- a/testing/btest/core/tcp/large-file-reassembly.bro +++ /dev/null @@ -1,22 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/ftp/bigtransfer.pcap %INPUT >out -# @TEST-EXEC: btest-diff out -# @TEST-EXEC: btest-diff files.log -# @TEST-EXEC: 
btest-diff conn.log - -# The pcap has been truncated on purpose, so there's going to be large -# gaps that are there by design and shouldn't trigger the "skip -# deliveries" code paths because this test still needs to know about the -# payloads being delivered around critical boundaries (e.g. 32-bit TCP -# sequence wraparound and 32-bit data offsets). -redef tcp_excessive_data_without_further_acks=0; - -event file_chunk(f: fa_file, data: string, off: count) - { - print "file_chunk", |data|, off, data; - } - -event file_new(f: fa_file) - { - Files::add_analyzer(f, Files::ANALYZER_DATA_EVENT, - [$chunk_event=file_chunk]); - } diff --git a/testing/btest/core/tcp/large-file-reassembly.zeek b/testing/btest/core/tcp/large-file-reassembly.zeek new file mode 100644 index 0000000000..ed5d283561 --- /dev/null +++ b/testing/btest/core/tcp/large-file-reassembly.zeek @@ -0,0 +1,22 @@ +# @TEST-EXEC: zeek -r $TRACES/ftp/bigtransfer.pcap %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff files.log +# @TEST-EXEC: btest-diff conn.log + +# The pcap has been truncated on purpose, so there's going to be large +# gaps that are there by design and shouldn't trigger the "skip +# deliveries" code paths because this test still needs to know about the +# payloads being delivered around critical boundaries (e.g. 32-bit TCP +# sequence wraparound and 32-bit data offsets). +redef tcp_excessive_data_without_further_acks=0; + +event file_chunk(f: fa_file, data: string, off: count) + { + print "file_chunk", |data|, off, data; + } + +event file_new(f: fa_file) + { + Files::add_analyzer(f, Files::ANALYZER_DATA_EVENT, + [$chunk_event=file_chunk]); + } diff --git a/testing/btest/core/tcp/miss-end-data.bro b/testing/btest/core/tcp/miss-end-data.bro deleted file mode 100644 index 6cee7577d9..0000000000 --- a/testing/btest/core/tcp/miss-end-data.bro +++ /dev/null @@ -1,10 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/tcp/miss_end_data.pcap %INPUT >out -# @TEST-EXEC: btest-diff out -# @TEST-EXEC: btest-diff conn.log - -redef report_gaps_for_partial = T; - -event content_gap(c: connection, is_orig: bool, seq: count, length: count) - { - print "content_gap", c$id, is_orig, seq, length; - } diff --git a/testing/btest/core/tcp/miss-end-data.zeek b/testing/btest/core/tcp/miss-end-data.zeek new file mode 100644 index 0000000000..6c802810f1 --- /dev/null +++ b/testing/btest/core/tcp/miss-end-data.zeek @@ -0,0 +1,10 @@ +# @TEST-EXEC: zeek -r $TRACES/tcp/miss_end_data.pcap %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff conn.log + +redef report_gaps_for_partial = T; + +event content_gap(c: connection, is_orig: bool, seq: count, length: count) + { + print "content_gap", c$id, is_orig, seq, length; + } diff --git a/testing/btest/core/tcp/missing-syn.bro b/testing/btest/core/tcp/missing-syn.bro deleted file mode 100644 index f34767eee8..0000000000 --- a/testing/btest/core/tcp/missing-syn.bro +++ /dev/null @@ -1,2 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/tcp/missing-syn.pcap %INPUT -# @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/core/tcp/missing-syn.zeek b/testing/btest/core/tcp/missing-syn.zeek new file mode 100644 index 0000000000..3450941584 --- /dev/null +++ b/testing/btest/core/tcp/missing-syn.zeek @@ -0,0 +1,2 @@ +# @TEST-EXEC: zeek -C -r $TRACES/tcp/missing-syn.pcap %INPUT +# @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/core/tcp/quantum-insert.bro b/testing/btest/core/tcp/quantum-insert.bro deleted file mode 100644 index 8b4738c9e1..0000000000 --- 
a/testing/btest/core/tcp/quantum-insert.bro +++ /dev/null @@ -1,13 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/tcp/qi_internet_SYNACK_curl_jsonip.pcap %INPUT -# @TEST-EXEC: btest-diff .stdout - -# Quantum Insert like attack, overlapping TCP packet with different content -redef tcp_max_old_segments = 10; -event rexmit_inconsistency(c: connection, t1: string, t2: string, tcp_flags: string) - { - print "----- rexmit_inconsistency -----"; - print fmt("%.6f c: %s", network_time(), c$id); - print fmt("%.6f t1: %s", network_time(), t1); - print fmt("%.6f t2: %s", network_time(), t2); - print fmt("%.6f tcp_flags: %s", network_time(), tcp_flags); - } diff --git a/testing/btest/core/tcp/quantum-insert.zeek b/testing/btest/core/tcp/quantum-insert.zeek new file mode 100644 index 0000000000..4e94f488c3 --- /dev/null +++ b/testing/btest/core/tcp/quantum-insert.zeek @@ -0,0 +1,13 @@ +# @TEST-EXEC: zeek -b -r $TRACES/tcp/qi_internet_SYNACK_curl_jsonip.pcap %INPUT +# @TEST-EXEC: btest-diff .stdout + +# Quantum Insert like attack, overlapping TCP packet with different content +redef tcp_max_old_segments = 10; +event rexmit_inconsistency(c: connection, t1: string, t2: string, tcp_flags: string) + { + print "----- rexmit_inconsistency -----"; + print fmt("%.6f c: %s", network_time(), c$id); + print fmt("%.6f t1: %s", network_time(), t1); + print fmt("%.6f t2: %s", network_time(), t2); + print fmt("%.6f tcp_flags: %s", network_time(), tcp_flags); + } diff --git a/testing/btest/core/tcp/rst-after-syn.bro b/testing/btest/core/tcp/rst-after-syn.bro deleted file mode 100644 index 38976909d7..0000000000 --- a/testing/btest/core/tcp/rst-after-syn.bro +++ /dev/null @@ -1,12 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/tcp/rst-inject-rae.trace %INPUT -# @TEST-EXEC: btest-diff .stdout - -# Mostly just checking that c$resp$size isn't huge due to the injected -# RST packet being used to initialize sequence number in TCP analyzer. - -event connection_state_remove(c: connection) - { - print c$id; - print "orig:", c$orig; - print "resp:", c$resp; - } diff --git a/testing/btest/core/tcp/rst-after-syn.zeek b/testing/btest/core/tcp/rst-after-syn.zeek new file mode 100644 index 0000000000..97075993d9 --- /dev/null +++ b/testing/btest/core/tcp/rst-after-syn.zeek @@ -0,0 +1,12 @@ +# @TEST-EXEC: zeek -b -r $TRACES/tcp/rst-inject-rae.trace %INPUT +# @TEST-EXEC: btest-diff .stdout + +# Mostly just checking that c$resp$size isn't huge due to the injected +# RST packet being used to initialize sequence number in TCP analyzer. 
+ +event connection_state_remove(c: connection) + { + print c$id; + print "orig:", c$orig; + print "resp:", c$resp; + } diff --git a/testing/btest/core/tcp/rxmit-history.bro b/testing/btest/core/tcp/rxmit-history.bro deleted file mode 100644 index 6413d66041..0000000000 --- a/testing/btest/core/tcp/rxmit-history.bro +++ /dev/null @@ -1,5 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/tcp/retransmit-fast009.trace %INPUT && mv conn.log conn-1.log -# @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace %INPUT && mv conn.log conn-2.log -# @TEST-EXEC: btest-diff conn-1.log -# @TEST-EXEC: btest-diff conn-2.log - diff --git a/testing/btest/core/tcp/rxmit-history.zeek b/testing/btest/core/tcp/rxmit-history.zeek new file mode 100644 index 0000000000..b63e357633 --- /dev/null +++ b/testing/btest/core/tcp/rxmit-history.zeek @@ -0,0 +1,5 @@ +# @TEST-EXEC: zeek -C -r $TRACES/tcp/retransmit-fast009.trace %INPUT && mv conn.log conn-1.log +# @TEST-EXEC: zeek -C -r $TRACES/wikipedia.trace %INPUT && mv conn.log conn-2.log +# @TEST-EXEC: btest-diff conn-1.log +# @TEST-EXEC: btest-diff conn-2.log + diff --git a/testing/btest/core/tcp/truncated-header.bro b/testing/btest/core/tcp/truncated-header.bro deleted file mode 100644 index f3ae369b2e..0000000000 --- a/testing/btest/core/tcp/truncated-header.bro +++ /dev/null @@ -1,9 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/tcp/truncated-header.pcap %INPUT >out -# @TEST-EXEC: btest-diff out - -event tcp_packet(c: connection, is_orig: bool, flags: string, seq: count, ack: count, len: count, payload: string) - { - # Just having this handler used to crash Bro on this trace. - print network_time(), c$id; - } - diff --git a/testing/btest/core/tcp/truncated-header.zeek b/testing/btest/core/tcp/truncated-header.zeek new file mode 100644 index 0000000000..145f415754 --- /dev/null +++ b/testing/btest/core/tcp/truncated-header.zeek @@ -0,0 +1,9 @@ +# @TEST-EXEC: zeek -b -r $TRACES/tcp/truncated-header.pcap %INPUT >out +# @TEST-EXEC: btest-diff out + +event tcp_packet(c: connection, is_orig: bool, flags: string, seq: count, ack: count, len: count, payload: string) + { + # Just having this handler used to crash Zeek on this trace. + print network_time(), c$id; + } + diff --git a/testing/btest/core/truncation.test b/testing/btest/core/truncation.test index d819ca1f88..b602f13585 100644 --- a/testing/btest/core/truncation.test +++ b/testing/btest/core/truncation.test @@ -1,43 +1,43 @@ # Truncated IP packet's should not be analyzed, and generate truncated_IP weird -# @TEST-EXEC: bro -r $TRACES/trunc/ip4-trunc.pcap +# @TEST-EXEC: zeek -r $TRACES/trunc/ip4-trunc.pcap # @TEST-EXEC: mv weird.log output -# @TEST-EXEC: bro -r $TRACES/trunc/ip6-trunc.pcap +# @TEST-EXEC: zeek -r $TRACES/trunc/ip6-trunc.pcap # @TEST-EXEC: cat weird.log >> output -# @TEST-EXEC: bro -r $TRACES/trunc/ip6-ext-trunc.pcap +# @TEST-EXEC: zeek -r $TRACES/trunc/ip6-ext-trunc.pcap # @TEST-EXEC: cat weird.log >> output # If an ICMP packet's payload is truncated due to too small snaplen, -# the checksum calculation is bypassed (and Bro doesn't crash, of course). +# the checksum calculation is bypassed (and Zeek doesn't crash, of course). # @TEST-EXEC: rm -f weird.log -# @TEST-EXEC: bro -r $TRACES/trunc/icmp-payload-trunc.pcap +# @TEST-EXEC: zeek -r $TRACES/trunc/icmp-payload-trunc.pcap # @TEST-EXEC: test ! -e weird.log # If an ICMP packet has the ICMP header truncated due to too small snaplen, # an internally_truncated_header weird gets generated. 
-# @TEST-EXEC: bro -r $TRACES/trunc/icmp-header-trunc.pcap +# @TEST-EXEC: zeek -r $TRACES/trunc/icmp-header-trunc.pcap # @TEST-EXEC: cat weird.log >> output # Truncated packets where the captured length is less than the length required # for the packet header should also raise a Weird -# @TEST-EXEC: bro -r $TRACES/trunc/trunc-hdr.pcap +# @TEST-EXEC: zeek -r $TRACES/trunc/trunc-hdr.pcap # @TEST-EXEC: cat weird.log >> output # Truncated packet where the length of the IP header is larger than the total # packet length -# @TEST-EXEC: bro -C -r $TRACES/trunc/ipv4-truncated-broken-header.pcap +# @TEST-EXEC: zeek -C -r $TRACES/trunc/ipv4-truncated-broken-header.pcap # @TEST-EXEC: cat weird.log >> output # Truncated packet where the captured length is big enough for the ip header # struct, but not large enough to capture the full header length (with options) -# @TEST-EXEC: bro -C -r $TRACES/trunc/ipv4-internally-truncated-header.pcap +# @TEST-EXEC: zeek -C -r $TRACES/trunc/ipv4-internally-truncated-header.pcap # @TEST-EXEC: cat weird.log >> output # Truncated packet where the length of the IP header is larger than the total # packet length inside several tunnels -# @TEST-EXEC: bro -C -r $TRACES/trunc/mpls-6in6-6in6-4in6-trunc.pcap +# @TEST-EXEC: zeek -C -r $TRACES/trunc/mpls-6in6-6in6-4in6-trunc.pcap # @TEST-EXEC: cat weird.log >> output # @TEST-EXEC: btest-diff output diff --git a/testing/btest/core/tunnels/ayiya.test b/testing/btest/core/tunnels/ayiya.test index 043e06c621..d7a79e6eb2 100644 --- a/testing/btest/core/tunnels/ayiya.test +++ b/testing/btest/core/tunnels/ayiya.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tunnels/ayiya3.trace +# @TEST-EXEC: zeek -r $TRACES/tunnels/ayiya3.trace # @TEST-EXEC: btest-diff tunnel.log # @TEST-EXEC: btest-diff conn.log # @TEST-EXEC: btest-diff http.log diff --git a/testing/btest/core/tunnels/false-teredo.bro b/testing/btest/core/tunnels/false-teredo.bro deleted file mode 100644 index 5622e05204..0000000000 --- a/testing/btest/core/tunnels/false-teredo.bro +++ /dev/null @@ -1,47 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/tunnels/false-teredo.pcap %INPUT >output -# @TEST-EXEC: test ! -e weird.log -# @TEST-EXEC: test ! -e dpd.log - -# In the first case, there isn't any weird or protocol violation logged -# since the teredo analyzer recognizes that the DNS analyzer has confirmed -# the protocol and yields. - -# In the second case, there are weirds since the teredo analyzer decapsulates -# despite the presence of the confirmed DNS analyzer and the resulting -# inner packets are malformed (no surprise there). There's also no dpd.log -# since the teredo analyzer doesn't confirm until it's seen a valid teredo -# encapsulation in both directions and protocol violations aren't logged -# until there's been a confirmation. - -# In either case, the analyzer doesn't, by default, get disabled as a result -# of the protocol violations. 
- -function print_teredo(name: string, outer: connection, inner: teredo_hdr) - { - print fmt("%s: %s", name, outer$id); - print fmt(" ip6: %s", inner$hdr$ip6); - if ( inner?$auth ) - print fmt(" auth: %s", inner$auth); - if ( inner?$origin ) - print fmt(" origin: %s", inner$origin); - } - -event teredo_packet(outer: connection, inner: teredo_hdr) - { - print_teredo("packet", outer, inner); - } - -event teredo_authentication(outer: connection, inner: teredo_hdr) - { - print_teredo("auth", outer, inner); - } - -event teredo_origin_indication(outer: connection, inner: teredo_hdr) - { - print_teredo("origin", outer, inner); - } - -event teredo_bubble(outer: connection, inner: teredo_hdr) - { - print_teredo("bubble", outer, inner); - } diff --git a/testing/btest/core/tunnels/false-teredo.zeek b/testing/btest/core/tunnels/false-teredo.zeek new file mode 100644 index 0000000000..818b543d95 --- /dev/null +++ b/testing/btest/core/tunnels/false-teredo.zeek @@ -0,0 +1,47 @@ +# @TEST-EXEC: zeek -r $TRACES/tunnels/false-teredo.pcap %INPUT >output +# @TEST-EXEC: test ! -e weird.log +# @TEST-EXEC: test ! -e dpd.log + +# In the first case, there isn't any weird or protocol violation logged +# since the teredo analyzer recognizes that the DNS analyzer has confirmed +# the protocol and yields. + +# In the second case, there are weirds since the teredo analyzer decapsulates +# despite the presence of the confirmed DNS analyzer and the resulting +# inner packets are malformed (no surprise there). There's also no dpd.log +# since the teredo analyzer doesn't confirm until it's seen a valid teredo +# encapsulation in both directions and protocol violations aren't logged +# until there's been a confirmation. + +# In either case, the analyzer doesn't, by default, get disabled as a result +# of the protocol violations. 
+ +function print_teredo(name: string, outer: connection, inner: teredo_hdr) + { + print fmt("%s: %s", name, outer$id); + print fmt(" ip6: %s", inner$hdr$ip6); + if ( inner?$auth ) + print fmt(" auth: %s", inner$auth); + if ( inner?$origin ) + print fmt(" origin: %s", inner$origin); + } + +event teredo_packet(outer: connection, inner: teredo_hdr) + { + print_teredo("packet", outer, inner); + } + +event teredo_authentication(outer: connection, inner: teredo_hdr) + { + print_teredo("auth", outer, inner); + } + +event teredo_origin_indication(outer: connection, inner: teredo_hdr) + { + print_teredo("origin", outer, inner); + } + +event teredo_bubble(outer: connection, inner: teredo_hdr) + { + print_teredo("bubble", outer, inner); + } diff --git a/testing/btest/core/tunnels/gre-in-gre.test b/testing/btest/core/tunnels/gre-in-gre.test index ce85f54dbb..39a7bd774b 100644 --- a/testing/btest/core/tunnels/gre-in-gre.test +++ b/testing/btest/core/tunnels/gre-in-gre.test @@ -1,3 +1,3 @@ -# @TEST-EXEC: bro -r $TRACES/tunnels/gre-within-gre.pcap +# @TEST-EXEC: zeek -r $TRACES/tunnels/gre-within-gre.pcap # @TEST-EXEC: btest-diff conn.log # @TEST-EXEC: btest-diff tunnel.log diff --git a/testing/btest/core/tunnels/gre-pptp.test b/testing/btest/core/tunnels/gre-pptp.test index a5fa8c0d19..892f105fb2 100644 --- a/testing/btest/core/tunnels/gre-pptp.test +++ b/testing/btest/core/tunnels/gre-pptp.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tunnels/gre-pptp.pcap +# @TEST-EXEC: zeek -r $TRACES/tunnels/gre-pptp.pcap # @TEST-EXEC: btest-diff conn.log # @TEST-EXEC: btest-diff tunnel.log # @TEST-EXEC: btest-diff dns.log diff --git a/testing/btest/core/tunnels/gre.test b/testing/btest/core/tunnels/gre.test index 0ce9a0c8b8..395bcd38bd 100644 --- a/testing/btest/core/tunnels/gre.test +++ b/testing/btest/core/tunnels/gre.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tunnels/gre-sample.pcap +# @TEST-EXEC: zeek -r $TRACES/tunnels/gre-sample.pcap # @TEST-EXEC: btest-diff conn.log # @TEST-EXEC: btest-diff tunnel.log # @TEST-EXEC: btest-diff dns.log diff --git a/testing/btest/core/tunnels/gtp/different_dl_and_ul.test b/testing/btest/core/tunnels/gtp/different_dl_and_ul.test index 136853c463..aedd6781dd 100644 --- a/testing/btest/core/tunnels/gtp/different_dl_and_ul.test +++ b/testing/btest/core/tunnels/gtp/different_dl_and_ul.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -r $TRACES/tunnels/gtp/gtp2_different_udp_port.pcap +# @TEST-EXEC: zeek -C -r $TRACES/tunnels/gtp/gtp2_different_udp_port.pcap # @TEST-EXEC: btest-diff conn.log # @TEST-EXEC: btest-diff http.log # @TEST-EXEC: btest-diff tunnel.log diff --git a/testing/btest/core/tunnels/gtp/ext_header.test b/testing/btest/core/tunnels/gtp/ext_header.test index 6316acb184..251d8fb9d6 100644 --- a/testing/btest/core/tunnels/gtp/ext_header.test +++ b/testing/btest/core/tunnels/gtp/ext_header.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tunnels/gtp/gtp_ext_header.pcap %INPUT >out +# @TEST-EXEC: zeek -r $TRACES/tunnels/gtp/gtp_ext_header.pcap %INPUT >out # @TEST-EXEC: btest-diff out event gtpv1_message(c: connection, hdr: gtpv1_hdr) diff --git a/testing/btest/core/tunnels/gtp/false_gtp.test b/testing/btest/core/tunnels/gtp/false_gtp.test index 6e84be7323..b38291c8df 100644 --- a/testing/btest/core/tunnels/gtp/false_gtp.test +++ b/testing/btest/core/tunnels/gtp/false_gtp.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tunnels/gtp/gtp3_false_gtp.pcap +# @TEST-EXEC: zeek -r $TRACES/tunnels/gtp/gtp3_false_gtp.pcap # @TEST-EXEC: btest-diff conn.log # @TEST-EXEC: 
btest-diff dns.log # @TEST-EXEC: test ! -e tunnel.log diff --git a/testing/btest/core/tunnels/gtp/inner_ipv6.test b/testing/btest/core/tunnels/gtp/inner_ipv6.test index 97d8562ecc..865401b9df 100644 --- a/testing/btest/core/tunnels/gtp/inner_ipv6.test +++ b/testing/btest/core/tunnels/gtp/inner_ipv6.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tunnels/gtp/gtp7_ipv6.pcap +# @TEST-EXEC: zeek -r $TRACES/tunnels/gtp/gtp7_ipv6.pcap # @TEST-EXEC: btest-diff conn.log # @TEST-EXEC: btest-diff tunnel.log diff --git a/testing/btest/core/tunnels/gtp/inner_teredo.test b/testing/btest/core/tunnels/gtp/inner_teredo.test index 9161d31229..b6e83a36c3 100644 --- a/testing/btest/core/tunnels/gtp/inner_teredo.test +++ b/testing/btest/core/tunnels/gtp/inner_teredo.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tunnels/gtp/gtp8_teredo.pcap "Tunnel::delay_teredo_confirmation=F" +# @TEST-EXEC: zeek -r $TRACES/tunnels/gtp/gtp8_teredo.pcap "Tunnel::delay_teredo_confirmation=F" # @TEST-EXEC: btest-diff conn.log # @TEST-EXEC: btest-diff tunnel.log diff --git a/testing/btest/core/tunnels/gtp/non_recursive.test b/testing/btest/core/tunnels/gtp/non_recursive.test index 0b03c0d6ae..6f5e6f3c62 100644 --- a/testing/btest/core/tunnels/gtp/non_recursive.test +++ b/testing/btest/core/tunnels/gtp/non_recursive.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tunnels/gtp/gtp4_udp_2152_inside.pcap %INPUT >out +# @TEST-EXEC: zeek -r $TRACES/tunnels/gtp/gtp4_udp_2152_inside.pcap %INPUT >out # @TEST-EXEC: btest-diff out # In telecoms there is never a GTP tunnel within another GTP tunnel. diff --git a/testing/btest/core/tunnels/gtp/not_user_plane_data.test b/testing/btest/core/tunnels/gtp/not_user_plane_data.test index a6a3333360..4edab5ab44 100644 --- a/testing/btest/core/tunnels/gtp/not_user_plane_data.test +++ b/testing/btest/core/tunnels/gtp/not_user_plane_data.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tunnels/gtp/gtp10_not_0xff.pcap +# @TEST-EXEC: zeek -r $TRACES/tunnels/gtp/gtp10_not_0xff.pcap # @TEST-EXEC: btest-diff conn.log # @TEST-EXEC: test ! 
-e tunnel.log diff --git a/testing/btest/core/tunnels/gtp/opt_header.test b/testing/btest/core/tunnels/gtp/opt_header.test index 32329c7ca8..c1f3d89e03 100644 --- a/testing/btest/core/tunnels/gtp/opt_header.test +++ b/testing/btest/core/tunnels/gtp/opt_header.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tunnels/gtp/gtp6_gtp_0x32.pcap %INPUT >out +# @TEST-EXEC: zeek -r $TRACES/tunnels/gtp/gtp6_gtp_0x32.pcap %INPUT >out # @TEST-EXEC: btest-diff out # @TEST-EXEC: btest-diff conn.log # @TEST-EXEC: btest-diff tunnel.log diff --git a/testing/btest/core/tunnels/gtp/outer_ip_frag.test b/testing/btest/core/tunnels/gtp/outer_ip_frag.test index b2badb9c1b..310c377eed 100644 --- a/testing/btest/core/tunnels/gtp/outer_ip_frag.test +++ b/testing/btest/core/tunnels/gtp/outer_ip_frag.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -r $TRACES/tunnels/gtp/gtp1_gn_normal_incl_fragmentation.pcap +# @TEST-EXEC: zeek -C -r $TRACES/tunnels/gtp/gtp1_gn_normal_incl_fragmentation.pcap # @TEST-EXEC: btest-diff conn.log # @TEST-EXEC: btest-diff http.log # @TEST-EXEC: btest-diff tunnel.log diff --git a/testing/btest/core/tunnels/gtp/pdp_ctx_messages.test b/testing/btest/core/tunnels/gtp/pdp_ctx_messages.test index 7405c8d019..06912c1f9d 100644 --- a/testing/btest/core/tunnels/gtp/pdp_ctx_messages.test +++ b/testing/btest/core/tunnels/gtp/pdp_ctx_messages.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tunnels/gtp/gtp_control_prime.pcap -r $TRACES/tunnels/gtp/gtp_create_pdp_ctx.pcap %INPUT >out +# @TEST-EXEC: zeek -r $TRACES/tunnels/gtp/gtp_control_prime.pcap -r $TRACES/tunnels/gtp/gtp_create_pdp_ctx.pcap %INPUT >out # @TEST-EXEC: btest-diff out event gtpv1_message(c: connection, hdr: gtpv1_hdr) diff --git a/testing/btest/core/tunnels/gtp/unknown_or_too_short.test b/testing/btest/core/tunnels/gtp/unknown_or_too_short.test index e1b3d4ba20..0fe72b9ad8 100644 --- a/testing/btest/core/tunnels/gtp/unknown_or_too_short.test +++ b/testing/btest/core/tunnels/gtp/unknown_or_too_short.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -r $TRACES/tunnels/gtp/gtp9_unknown_or_too_short_payload.pcap +# @TEST-EXEC: zeek -C -r $TRACES/tunnels/gtp/gtp9_unknown_or_too_short_payload.pcap # @TEST-EXEC: btest-diff dpd.log # @TEST-EXEC: btest-diff tunnel.log diff --git a/testing/btest/core/tunnels/ip-in-ip-version.bro b/testing/btest/core/tunnels/ip-in-ip-version.bro deleted file mode 100644 index 35d633c8fe..0000000000 --- a/testing/btest/core/tunnels/ip-in-ip-version.bro +++ /dev/null @@ -1,14 +0,0 @@ -# Trace in we have mpls->ip6->ip6->ip4 where the ip4 packet -# has an invalid IP version. -# @TEST-EXEC: bro -C -r $TRACES/tunnels/mpls-6in6-6in6-4in6-invalid-version-4.pcap -# @TEST-EXEC: mv weird.log output - -# Trace in which we have mpls->ip6->ip6 where the ip6 packet -# has an invalid IP version. -# @TEST-EXEC: bro -C -r $TRACES/tunnels/mpls-6in6-6in6-invalid-version-6.pcap -# @TEST-EXEC: cat weird.log >> output - -# @TEST-EXEC: btest-diff output - -@load base/frameworks/notice/weird.bro - diff --git a/testing/btest/core/tunnels/ip-in-ip-version.zeek b/testing/btest/core/tunnels/ip-in-ip-version.zeek new file mode 100644 index 0000000000..49e8a5a3d0 --- /dev/null +++ b/testing/btest/core/tunnels/ip-in-ip-version.zeek @@ -0,0 +1,12 @@ +# Trace in which we have mpls->ip6->ip6->ip4 where the ip4 packet +# has an invalid IP version. +# @TEST-EXEC: zeek -C -r $TRACES/tunnels/mpls-6in6-6in6-4in6-invalid-version-4.pcap +# @TEST-EXEC: mv weird.log output + +# Trace in which we have mpls->ip6->ip6 where the ip6 packet +# has an invalid IP version.
+# @TEST-EXEC: zeek -C -r $TRACES/tunnels/mpls-6in6-6in6-invalid-version-6.pcap +# @TEST-EXEC: cat weird.log >> output + +# @TEST-EXEC: btest-diff output + diff --git a/testing/btest/core/tunnels/ip-in-ip.test b/testing/btest/core/tunnels/ip-in-ip.test index 38f4610445..f003865b2e 100644 --- a/testing/btest/core/tunnels/ip-in-ip.test +++ b/testing/btest/core/tunnels/ip-in-ip.test @@ -1,9 +1,9 @@ -# @TEST-EXEC: bro -b -r $TRACES/tunnels/6in6.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/tunnels/6in6in6.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/tunnels/6in4.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/tunnels/4in6.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/tunnels/4in4.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/tunnels/6in6-tunnel-change.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/tunnels/6in6.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/tunnels/6in6in6.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/tunnels/6in4.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/tunnels/4in6.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/tunnels/4in4.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/tunnels/6in6-tunnel-change.pcap %INPUT >>output 2>&1 # @TEST-EXEC: btest-diff output event new_connection(c: connection) diff --git a/testing/btest/core/tunnels/ip-tunnel-uid.test b/testing/btest/core/tunnels/ip-tunnel-uid.test index f86fd126c9..1f50d4baea 100644 --- a/testing/btest/core/tunnels/ip-tunnel-uid.test +++ b/testing/btest/core/tunnels/ip-tunnel-uid.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b -r $TRACES/tunnels/ping6-in-ipv4.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -b -r $TRACES/tunnels/ping6-in-ipv4.pcap %INPUT >>output 2>&1 # @TEST-EXEC: btest-diff output event new_connection(c: connection) diff --git a/testing/btest/core/tunnels/teredo-known-services.test b/testing/btest/core/tunnels/teredo-known-services.test index db42996eb2..dc5aad52fd 100644 --- a/testing/btest/core/tunnels/teredo-known-services.test +++ b/testing/btest/core/tunnels/teredo-known-services.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tunnels/false-teredo.pcap base/frameworks/dpd base/protocols/tunnels protocols/conn/known-services Tunnel::delay_teredo_confirmation=T "Site::local_nets+={192.168.1.0/24}" +# @TEST-EXEC: zeek -r $TRACES/tunnels/false-teredo.pcap base/frameworks/dpd base/protocols/tunnels protocols/conn/known-services Tunnel::delay_teredo_confirmation=T "Site::local_nets+={192.168.1.0/24}" # @TEST-EXEC: test ! 
-e known_services.log # The first case using Tunnel::delay_teredo_confirmation=T doesn't produce diff --git a/testing/btest/core/tunnels/teredo.bro b/testing/btest/core/tunnels/teredo.bro deleted file mode 100644 index c457decd98..0000000000 --- a/testing/btest/core/tunnels/teredo.bro +++ /dev/null @@ -1,35 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/tunnels/Teredo.pcap %INPUT >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: btest-diff tunnel.log -# @TEST-EXEC: btest-diff conn.log -# @TEST-EXEC: btest-diff http.log - -function print_teredo(name: string, outer: connection, inner: teredo_hdr) - { - print fmt("%s: %s", name, outer$id); - print fmt(" ip6: %s", inner$hdr$ip6); - if ( inner?$auth ) - print fmt(" auth: %s", inner$auth); - if ( inner?$origin ) - print fmt(" origin: %s", inner$origin); - } - -event teredo_packet(outer: connection, inner: teredo_hdr) - { - print_teredo("packet", outer, inner); - } - -event teredo_authentication(outer: connection, inner: teredo_hdr) - { - print_teredo("auth", outer, inner); - } - -event teredo_origin_indication(outer: connection, inner: teredo_hdr) - { - print_teredo("origin", outer, inner); - } - -event teredo_bubble(outer: connection, inner: teredo_hdr) - { - print_teredo("bubble", outer, inner); - } diff --git a/testing/btest/core/tunnels/teredo.zeek b/testing/btest/core/tunnels/teredo.zeek new file mode 100644 index 0000000000..0a884bc027 --- /dev/null +++ b/testing/btest/core/tunnels/teredo.zeek @@ -0,0 +1,35 @@ +# @TEST-EXEC: zeek -r $TRACES/tunnels/Teredo.pcap %INPUT >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: btest-diff tunnel.log +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff http.log + +function print_teredo(name: string, outer: connection, inner: teredo_hdr) + { + print fmt("%s: %s", name, outer$id); + print fmt(" ip6: %s", inner$hdr$ip6); + if ( inner?$auth ) + print fmt(" auth: %s", inner$auth); + if ( inner?$origin ) + print fmt(" origin: %s", inner$origin); + } + +event teredo_packet(outer: connection, inner: teredo_hdr) + { + print_teredo("packet", outer, inner); + } + +event teredo_authentication(outer: connection, inner: teredo_hdr) + { + print_teredo("auth", outer, inner); + } + +event teredo_origin_indication(outer: connection, inner: teredo_hdr) + { + print_teredo("origin", outer, inner); + } + +event teredo_bubble(outer: connection, inner: teredo_hdr) + { + print_teredo("bubble", outer, inner); + } diff --git a/testing/btest/core/tunnels/teredo_bubble_with_payload.test b/testing/btest/core/tunnels/teredo_bubble_with_payload.test index f45d8ca585..ef72ddf519 100644 --- a/testing/btest/core/tunnels/teredo_bubble_with_payload.test +++ b/testing/btest/core/tunnels/teredo_bubble_with_payload.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tunnels/teredo_bubble_with_payload.pcap %INPUT >output +# @TEST-EXEC: zeek -r $TRACES/tunnels/teredo_bubble_with_payload.pcap %INPUT >output # @TEST-EXEC: btest-diff output # @TEST-EXEC: btest-diff tunnel.log # @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/core/tunnels/vxlan.bro b/testing/btest/core/tunnels/vxlan.bro deleted file mode 100644 index 50a7b1a24a..0000000000 --- a/testing/btest/core/tunnels/vxlan.bro +++ /dev/null @@ -1,9 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/tunnels/vxlan.pcap %INPUT >out -# @TEST-EXEC: btest-diff out -# @TEST-EXEC: btest-diff conn.log -# @TEST-EXEC: btest-diff tunnel.log - -event vxlan_packet(c: connection, inner: pkt_hdr, vni: count) - { - print "vxlan_packet", c$id, inner, vni; - } diff --git 
a/testing/btest/core/tunnels/vxlan.zeek b/testing/btest/core/tunnels/vxlan.zeek new file mode 100644 index 0000000000..5b1b9defaa --- /dev/null +++ b/testing/btest/core/tunnels/vxlan.zeek @@ -0,0 +1,9 @@ +# @TEST-EXEC: zeek -r $TRACES/tunnels/vxlan.pcap %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff tunnel.log + +event vxlan_packet(c: connection, inner: pkt_hdr, vni: count) + { + print "vxlan_packet", c$id, inner, vni; + } diff --git a/testing/btest/core/vector-assignment.bro b/testing/btest/core/vector-assignment.bro deleted file mode 100644 index d1f02c124f..0000000000 --- a/testing/btest/core/vector-assignment.bro +++ /dev/null @@ -1,19 +0,0 @@ -# @TEST-EXEC: bro %INPUT - -# This regression test checks a special case in the vector code. In this case -# UnaryExpr will be called with a Type() of any. Tests succeeds if it does not -# crash Bro. - -type OptionCacheValue: record { - val: any; -}; - -function set_me(val: any) { - local a = OptionCacheValue($val=val); - print a; -} - -event bro_init() { - local b: vector of count = {1, 2, 3}; - set_me(b); -} diff --git a/testing/btest/core/vector-assignment.zeek b/testing/btest/core/vector-assignment.zeek new file mode 100644 index 0000000000..a66830f713 --- /dev/null +++ b/testing/btest/core/vector-assignment.zeek @@ -0,0 +1,19 @@ +# @TEST-EXEC: zeek %INPUT + +# This regression test checks a special case in the vector code. In this case +# UnaryExpr will be called with a Type() of any. Tests succeeds if it does not +# crash Zeek. + +type OptionCacheValue: record { + val: any; +}; + +function set_me(val: any) { + local a = OptionCacheValue($val=val); + print a; +} + +event zeek_init() { + local b: vector of count = {1, 2, 3}; + set_me(b); +} diff --git a/testing/btest/core/vlan-mpls.bro b/testing/btest/core/vlan-mpls.bro deleted file mode 100644 index b7a7a351cb..0000000000 --- a/testing/btest/core/vlan-mpls.bro +++ /dev/null @@ -1,2 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/mixed-vlan-mpls.trace -# @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/core/vlan-mpls.zeek b/testing/btest/core/vlan-mpls.zeek new file mode 100644 index 0000000000..9e345b762a --- /dev/null +++ b/testing/btest/core/vlan-mpls.zeek @@ -0,0 +1,2 @@ +# @TEST-EXEC: zeek -C -r $TRACES/mixed-vlan-mpls.trace +# @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/core/when-interpreter-exceptions.bro b/testing/btest/core/when-interpreter-exceptions.bro deleted file mode 100644 index f259a46bda..0000000000 --- a/testing/btest/core/when-interpreter-exceptions.bro +++ /dev/null @@ -1,123 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro "bro -b %INPUT >output 2>&1" -# @TEST-EXEC: btest-bg-wait 15 -# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath | $SCRIPTS/diff-remove-timestamps | $SCRIPTS/diff-sort" btest-diff bro/output - -# interpreter exceptions in "when" blocks shouldn't cause termination - -@load base/utils/exec -redef exit_only_after_terminate = T; - -type MyRecord: record { - a: bool &default=T; - notset: bool &optional; -}; - -global myrecord: MyRecord; - -global c = 0; - -function check_term_condition() - { - ++c; - - #print "check_term_condition", c; - - if ( c == 6 ) - terminate(); - } - -event termination_check() - { - #print "termination_check event"; - check_term_condition(); - } - -function f(do_exception: bool): bool - { - local cmd = Exec::Command($cmd=fmt("echo 'f(%s)'", - do_exception)); - - return when ( local result = Exec::run(cmd) ) - { - print result$stdout; - - if ( do_exception 
) - { - event termination_check(); - print myrecord$notset; - } - - return T; - } - - check_term_condition(); - return F; - } - -function g(do_exception: bool): bool - { - local stall = Exec::Command($cmd="sleep 30"); - - return when ( local result = Exec::run(stall) ) - { - print "shouldn't get here, g()", do_exception, result; - } - timeout 0 sec - { - print "timeout g()", do_exception; - - if ( do_exception ) - { - event termination_check(); - print myrecord$notset; - } - - return T; - } - - check_term_condition(); - return F; - } - -event bro_init() - { - local cmd = Exec::Command($cmd="echo 'bro_init()'"); - local stall = Exec::Command($cmd="sleep 30"); - - when ( local result = Exec::run(cmd) ) - { - print result$stdout; - event termination_check(); - print myrecord$notset; - } - - when ( local result2 = Exec::run(stall) ) - { - print "shouldn't get here", result2; - check_term_condition(); - } - timeout 0 sec - { - print "timeout"; - event termination_check(); - print myrecord$notset; - } - - when ( local b = f(T) ) - print "f() exception done (shouldn't be printed)", b; - - when ( local b2 = g(T) ) - print "g() exception done (shouldn't be printed)", b2; - - when ( local b3 = f(F) ) - { - print "f() done, no exception", b3; - check_term_condition(); - } - - when ( local b4 = g(F) ) - { - print "g() done, no exception", b4; - check_term_condition(); - } - } diff --git a/testing/btest/core/when-interpreter-exceptions.zeek b/testing/btest/core/when-interpreter-exceptions.zeek new file mode 100644 index 0000000000..1a713fd1af --- /dev/null +++ b/testing/btest/core/when-interpreter-exceptions.zeek @@ -0,0 +1,123 @@ +# @TEST-EXEC: btest-bg-run zeek "zeek -b %INPUT >output 2>&1" +# @TEST-EXEC: btest-bg-wait 15 +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath | $SCRIPTS/diff-remove-timestamps | $SCRIPTS/diff-sort" btest-diff zeek/output + +# interpreter exceptions in "when" blocks shouldn't cause termination + +@load base/utils/exec +redef exit_only_after_terminate = T; + +type MyRecord: record { + a: bool &default=T; + notset: bool &optional; +}; + +global myrecord: MyRecord; + +global c = 0; + +function check_term_condition() + { + ++c; + + #print "check_term_condition", c; + + if ( c == 6 ) + terminate(); + } + +event termination_check() + { + #print "termination_check event"; + check_term_condition(); + } + +function f(do_exception: bool): bool + { + local cmd = Exec::Command($cmd=fmt("echo 'f(%s)'", + do_exception)); + + return when ( local result = Exec::run(cmd) ) + { + print result$stdout; + + if ( do_exception ) + { + event termination_check(); + print myrecord$notset; + } + + return T; + } + + check_term_condition(); + return F; + } + +function g(do_exception: bool): bool + { + local stall = Exec::Command($cmd="sleep 30"); + + return when ( local result = Exec::run(stall) ) + { + print "shouldn't get here, g()", do_exception, result; + } + timeout 0 sec + { + print "timeout g()", do_exception; + + if ( do_exception ) + { + event termination_check(); + print myrecord$notset; + } + + return T; + } + + check_term_condition(); + return F; + } + +event zeek_init() + { + local cmd = Exec::Command($cmd="echo 'zeek_init()'"); + local stall = Exec::Command($cmd="sleep 30"); + + when ( local result = Exec::run(cmd) ) + { + print result$stdout; + event termination_check(); + print myrecord$notset; + } + + when ( local result2 = Exec::run(stall) ) + { + print "shouldn't get here", result2; + check_term_condition(); + } + timeout 0 sec + { + print "timeout"; + event 
termination_check(); + print myrecord$notset; + } + + when ( local b = f(T) ) + print "f() exception done (shouldn't be printed)", b; + + when ( local b2 = g(T) ) + print "g() exception done (shouldn't be printed)", b2; + + when ( local b3 = f(F) ) + { + print "f() done, no exception", b3; + check_term_condition(); + } + + when ( local b4 = g(F) ) + { + print "g() done, no exception", b4; + check_term_condition(); + } + } diff --git a/testing/btest/core/wlanmon.bro b/testing/btest/core/wlanmon.bro deleted file mode 100644 index b227baf7eb..0000000000 --- a/testing/btest/core/wlanmon.bro +++ /dev/null @@ -1,2 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/wlanmon.pcap -# @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/core/wlanmon.zeek b/testing/btest/core/wlanmon.zeek new file mode 100644 index 0000000000..e29613ae56 --- /dev/null +++ b/testing/btest/core/wlanmon.zeek @@ -0,0 +1,2 @@ +# @TEST-EXEC: zeek -C -r $TRACES/wlanmon.pcap +# @TEST-EXEC: btest-diff conn.log diff --git a/testing/btest/core/x509-generalizedtime.bro b/testing/btest/core/x509-generalizedtime.bro deleted file mode 100644 index b69ab31743..0000000000 --- a/testing/btest/core/x509-generalizedtime.bro +++ /dev/null @@ -1,10 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/tls/x509-generalizedtime.pcap %INPUT >>output 2>&1 -# @TEST-EXEC: bro -C -r $TRACES/tls/tls1.2.trace %INPUT >>output 2>&1 -# @TEST-EXEC: btest-diff output -event x509_certificate(f: fa_file, cert_ref: opaque of x509, cert: X509::Certificate) - { - print "----- x509_certificate ----"; - print fmt("serial: %s", cert$serial); - print fmt("not_valid_before: %T (epoch: %s)", cert$not_valid_before, cert$not_valid_before); - print fmt("not_valid_after : %T (epoch: %s)", cert$not_valid_after, cert$not_valid_after); - } diff --git a/testing/btest/core/x509-generalizedtime.zeek b/testing/btest/core/x509-generalizedtime.zeek new file mode 100644 index 0000000000..14e9edbf24 --- /dev/null +++ b/testing/btest/core/x509-generalizedtime.zeek @@ -0,0 +1,10 @@ +# @TEST-EXEC: zeek -C -r $TRACES/tls/x509-generalizedtime.pcap %INPUT >>output 2>&1 +# @TEST-EXEC: zeek -C -r $TRACES/tls/tls1.2.trace %INPUT >>output 2>&1 +# @TEST-EXEC: btest-diff output +event x509_certificate(f: fa_file, cert_ref: opaque of x509, cert: X509::Certificate) + { + print "----- x509_certificate ----"; + print fmt("serial: %s", cert$serial); + print fmt("not_valid_before: %T (epoch: %s)", cert$not_valid_before, cert$not_valid_before); + print fmt("not_valid_after : %T (epoch: %s)", cert$not_valid_after, cert$not_valid_after); + } diff --git a/testing/btest/coverage/bare-load-baseline.test b/testing/btest/coverage/bare-load-baseline.test index e518e703fb..94fdb04b04 100644 --- a/testing/btest/coverage/bare-load-baseline.test +++ b/testing/btest/coverage/bare-load-baseline.test @@ -1,13 +1,13 @@ # This test is meant to cover whether the set of scripts that get loaded by # default in bare mode matches a baseline of known defaults. The baseline -# should only need updating if something new is @load'd from init-bare.bro +# should only need updating if something new is @load'd from init-bare.zeek # (or from an @load'd descendent of it). # # As the output has absolute paths in it, we need to remove the common # prefix to make the test work everywhere. That's what the sed magic # below does. Don't ask. 
:-) -# @TEST-EXEC: bro -b misc/loaded-scripts +# @TEST-EXEC: zeek -b misc/loaded-scripts # @TEST-EXEC: test -e loaded_scripts.log # @TEST-EXEC: cat loaded_scripts.log | egrep -v '#' | awk 'NR>0{print $1}' | sed -e ':a' -e '$!N' -e 's/^\(.*\).*\n\1.*/\1/' -e 'ta' >prefix # @TEST-EXEC: (test -L $BUILD && basename $(readlink $BUILD) || basename $BUILD) >buildprefix diff --git a/testing/btest/coverage/bare-mode-errors.test b/testing/btest/coverage/bare-mode-errors.test index 2310b66b4b..fa4c15c120 100644 --- a/testing/btest/coverage/bare-mode-errors.test +++ b/testing/btest/coverage/bare-mode-errors.test @@ -1,9 +1,9 @@ -# Makes sure any given bro script in the scripts/ tree can be loaded in +# Makes sure any given zeek script in the scripts/ tree can be loaded in # bare mode without error. # # Commonly, this test may fail if one forgets to @load some base/ scripts -# when writing a new bro scripts. +# when writing a new zeek script. # # @TEST-EXEC: test -d $DIST/scripts -# @TEST-EXEC: for script in `find $DIST/scripts/ -name \*\.bro`; do bro -b --parse-only $script >>errors 2>&1; done +# @TEST-EXEC: for script in `find $DIST/scripts/ -name \*\.zeek`; do zeek -b --parse-only $script >>errors 2>&1; done # @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath | $SCRIPTS/diff-sort" btest-diff errors diff --git a/testing/btest/coverage/broxygen.sh b/testing/btest/coverage/broxygen.sh index 13bf24bce3..6bc43d9c90 100644 --- a/testing/btest/coverage/broxygen.sh +++ b/testing/btest/coverage/broxygen.sh @@ -1,12 +1,12 @@ -# This check piggy-backs on the test-all-policy.bro test, assuming that every +# This check piggy-backs on the test-all-policy.zeek test, assuming that every # loadable script is referenced there. The only additional check here is -# that the broxygen package should even load scripts that are commented -# out in test-all-policy.bro because the broxygen package is only loaded -# when generated documentation and will terminate has soon as bro_init -# is handled, even if a script will e.g. put Bro into listen mode or otherwise +# that the zeekygen package should even load scripts that are commented +# out in test-all-policy.zeek because the zeekygen package is only loaded +# when generating documentation and will terminate as soon as zeek_init +# is handled, even if a script will e.g. put Zeek into listen mode or otherwise # cause it to not terminate after scripts are parsed.
-# @TEST-EXEC: bash %INPUT $DIST/scripts/test-all-policy.bro $DIST/scripts/broxygen/__load__.bro +# @TEST-EXEC: bash %INPUT $DIST/scripts/test-all-policy.zeek $DIST/scripts/zeekygen/__load__.zeek error_count=0 @@ -22,10 +22,10 @@ if [ $# -ne 2 ]; then fi all_loads=$(egrep "#[[:space:]]*@load.*" $1 | sed 's/#[[:space:]]*@load[[:space:]]*//g') -broxygen_loads=$(egrep "@load.*" $2 | sed 's/@load[[:space:]]*//g') +zeekygen_loads=$(egrep "@load.*" $2 | sed 's/@load[[:space:]]*//g') for f in $all_loads; do - echo "$broxygen_loads" | grep -q $f || error_msg "$f not loaded in broxygen/__load__.bro" + echo "$zeekygen_loads" | grep -q $f || error_msg "$f not loaded in zeekygen/__load__.zeek" done if [ $error_count -gt 0 ]; then diff --git a/testing/btest/coverage/coverage-blacklist.bro b/testing/btest/coverage/coverage-blacklist.bro deleted file mode 100644 index 30a5f86efa..0000000000 --- a/testing/btest/coverage/coverage-blacklist.bro +++ /dev/null @@ -1,29 +0,0 @@ -# @TEST-EXEC: BRO_PROFILER_FILE=coverage bro -b %INPUT -# @TEST-EXEC: grep %INPUT coverage | sort -k2 >output -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output - -print "first"; - -if ( F ) - { # @no-test - print "hello"; - print "world"; - } - -print "cover me"; - -if ( T ) - { - print "always executed"; - } - -print "don't cover me"; # @no-test - -if ( 0 + 0 == 1 ) print "impossible"; # @no-test - -if ( 1 == 0 ) - { - print "also impossible, but included in code coverage analysis"; - } - -print "success"; diff --git a/testing/btest/coverage/coverage-blacklist.zeek b/testing/btest/coverage/coverage-blacklist.zeek new file mode 100644 index 0000000000..75ef0feb79 --- /dev/null +++ b/testing/btest/coverage/coverage-blacklist.zeek @@ -0,0 +1,29 @@ +# @TEST-EXEC: ZEEK_PROFILER_FILE=coverage zeek -b %INPUT +# @TEST-EXEC: grep %INPUT coverage | sort -k2 >output +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output + +print "first"; + +if ( F ) + { # @no-test + print "hello"; + print "world"; + } + +print "cover me"; + +if ( T ) + { + print "always executed"; + } + +print "don't cover me"; # @no-test + +if ( 0 + 0 == 1 ) print "impossible"; # @no-test + +if ( 1 == 0 ) + { + print "also impossible, but included in code coverage analysis"; + } + +print "success"; diff --git a/testing/btest/coverage/default-load-baseline.test b/testing/btest/coverage/default-load-baseline.test index 076f26b770..df13444ad7 100644 --- a/testing/btest/coverage/default-load-baseline.test +++ b/testing/btest/coverage/default-load-baseline.test @@ -7,7 +7,7 @@ # prefix to make the test work everywhere. That's what the sed magic # below does. Don't ask. :-) -# @TEST-EXEC: bro misc/loaded-scripts +# @TEST-EXEC: zeek misc/loaded-scripts # @TEST-EXEC: test -e loaded_scripts.log # @TEST-EXEC: cat loaded_scripts.log | egrep -v '#' | sed 's/ //g' | sed -e ':a' -e '$!N' -e 's/^\(.*\).*\n\1.*/\1/' -e 'ta' >prefix # @TEST-EXEC: (test -L $BUILD && basename $(readlink $BUILD) || basename $BUILD) >buildprefix diff --git a/testing/btest/coverage/find-bro-logs.test b/testing/btest/coverage/find-bro-logs.test index e7bcf0578f..61d2b13ada 100644 --- a/testing/btest/coverage/find-bro-logs.test +++ b/testing/btest/coverage/find-bro-logs.test @@ -22,13 +22,13 @@ import os, sys scriptdir = sys.argv[1] -# Return a list of all bro script files. +# Return a list of all zeek script files. 
def find_scripts(): scripts = [] for r, d, f in os.walk(scriptdir): for fname in f: - if fname.endswith(".bro"): + if fname.endswith(".zeek") or fname.endswith(".bro"): scripts.append(os.path.join(r, fname)) return scripts diff --git a/testing/btest/coverage/init-default.test b/testing/btest/coverage/init-default.test index 537b5ca77d..f3c1aec31e 100644 --- a/testing/btest/coverage/init-default.test +++ b/testing/btest/coverage/init-default.test @@ -1,19 +1,19 @@ -# Makes sure that all base/* scripts are loaded by default via init-default.bro; -# and that all scripts loaded there in there actually exist. +# Makes sure that all base/* scripts are loaded by default via +# init-default.zeek; and that all scripts loaded there actually exist. # -# This test will fail if a new bro script is added under the scripts/base/ -# directory and it is not also added as an @load in base/init-default.bro. -# In some cases, a script in base is loaded based on the bro configuration +# This test will fail if a new zeek script is added under the scripts/base/ +# directory and it is not also added as an @load in base/init-default.zeek. +# In some cases, a script in base is loaded based on the zeek configuration # (e.g. cluster operation), and in such cases, the missing_loads baseline # can be adjusted to tolerate that. #@TEST-EXEC: test -d $DIST/scripts/base -#@TEST-EXEC: test -e $DIST/scripts/base/init-default.bro -#@TEST-EXEC: ( cd $DIST/scripts/base && find . -name '*.bro' ) | sort >"all scripts found" -#@TEST-EXEC: bro misc/loaded-scripts +#@TEST-EXEC: test -e $DIST/scripts/base/init-default.zeek +#@TEST-EXEC: ( cd $DIST/scripts/base && find . -name '*.zeek' ) | sort >"all scripts found" +#@TEST-EXEC: zeek misc/loaded-scripts #@TEST-EXEC: (test -L $BUILD && basename $(readlink $BUILD) || basename $BUILD) >buildprefix -#@TEST-EXEC: cat loaded_scripts.log | egrep -v "/build/scripts/|$(cat buildprefix)/scripts/|/loaded-scripts.bro|#" | sed 's#/./#/#g' >loaded_scripts.log.tmp +#@TEST-EXEC: cat loaded_scripts.log | egrep -v "/build/scripts/|$(cat buildprefix)/scripts/|/loaded-scripts.zeek|#" | sed 's#/./#/#g' >loaded_scripts.log.tmp #@TEST-EXEC: cat loaded_scripts.log.tmp | sed 's/ //g' | sed -e ':a' -e '$!N' -e 's/^\(.*\).*\n\1.*/\1/' -e 'ta' >prefix -#@TEST-EXEC: cat loaded_scripts.log.tmp | sed 's/ //g' | sed "s#`cat prefix`#./#g" | sort >init-default.bro -#@TEST-EXEC: diff -u "all scripts found" init-default.bro | egrep "^-[^-]" > missing_loads +#@TEST-EXEC: cat loaded_scripts.log.tmp | sed 's/ //g' | sed "s#`cat prefix`#./#g" | sort >init-default.zeek +#@TEST-EXEC: diff -u "all scripts found" init-default.zeek | egrep "^-[^-]" > missing_loads #@TEST-EXEC: btest-diff missing_loads diff --git a/testing/btest/coverage/sphinx-broxygen-docs.sh b/testing/btest/coverage/sphinx-broxygen-docs.sh deleted file mode 100644 index ab194cb027..0000000000 --- a/testing/btest/coverage/sphinx-broxygen-docs.sh +++ /dev/null @@ -1,48 +0,0 @@ -# This script checks whether the reST docs generated by broxygen are stale. -# If this test fails when testing the master branch, then simply run: -# -# testing/scripts/gen-broxygen-docs.sh -# -# and then commit the changes. -# -# @TEST-EXEC: bash $SCRIPTS/gen-broxygen-docs.sh ./doc -# @TEST-EXEC: bash %INPUT - -if [ -n "$TRAVIS_PULL_REQUEST" ]; then - if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then - # Don't run this test on Travis for pull-requests, just let someone - # manually update zeek-docs for things when merging to master. 
- exit 0 - fi -fi - -function check_diff - { - local file=$1 - echo "Checking $file for differences" - diff -Nru $DIST/$file $file 1>&2 - - if [ $? -ne 0 ]; then - echo "============================" 1>&2 - echo "" 1>&2 - echo "$DIST/$file is outdated" 1>&2 - echo "" 1>&2 - echo "You can ignore this failure if testing changes that you will" 1>&2 - echo "submit in a pull-request." 1>&2 - echo "" 1>&2 - echo "If this fails in the master branch or when merging to master," 1>&2 - echo "re-run the following command:" 1>&2 - echo "" 1>&2 - echo " $SCRIPTS/gen-broxygen-docs.sh" 1>&2 - echo "" 1>&2 - echo "Then commit/push the changes in the zeek-docs repo" 1>&2 - echo "(the doc/ directory in the zeek repo)." 1>&2 - exit 1 - fi - } - -for file in $(find ./doc -name autogenerated-*); do - check_diff $file -done - -check_diff ./doc/scripts diff --git a/testing/btest/coverage/sphinx-zeekygen-docs.sh b/testing/btest/coverage/sphinx-zeekygen-docs.sh new file mode 100644 index 0000000000..b5e3d7262c --- /dev/null +++ b/testing/btest/coverage/sphinx-zeekygen-docs.sh @@ -0,0 +1,48 @@ +# This script checks whether the reST docs generated by zeekygen are stale. +# If this test fails when testing the master branch, then simply run: +# +# testing/scripts/update-zeekygen-docs.sh +# +# and then commit the changes. +# +# @TEST-EXEC: bash $SCRIPTS/update-zeekygen-docs.sh ./doc +# @TEST-EXEC: bash %INPUT + +if [ -n "$TRAVIS_PULL_REQUEST" ]; then + if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then + # Don't run this test on Travis for pull-requests, just let someone + # manually update zeek-docs for things when merging to master. + exit 0 + fi +fi + +function check_diff + { + local file=$1 + echo "Checking $file for differences" + diff -Nru $DIST/$file $file 1>&2 + + if [ $? -ne 0 ]; then + echo "============================" 1>&2 + echo "" 1>&2 + echo "$DIST/$file is outdated" 1>&2 + echo "" 1>&2 + echo "You can ignore this failure if testing changes that you will" 1>&2 + echo "submit in a pull-request." 1>&2 + echo "" 1>&2 + echo "If this fails in the master branch or when merging to master," 1>&2 + echo "re-run the following command:" 1>&2 + echo "" 1>&2 + echo " $SCRIPTS/update-zeekygen-docs.sh" 1>&2 + echo "" 1>&2 + echo "Then commit/push the changes in the zeek-docs repo" 1>&2 + echo "(the doc/ directory in the zeek repo)." 1>&2 + exit 1 + fi + } + +for file in $(find ./doc -name autogenerated-*); do + check_diff $file +done + +check_diff ./doc/scripts diff --git a/testing/btest/coverage/test-all-policy.test b/testing/btest/coverage/test-all-policy.test index 3a545a02af..46571d967e 100644 --- a/testing/btest/coverage/test-all-policy.test +++ b/testing/btest/coverage/test-all-policy.test @@ -1,12 +1,12 @@ # Makes sure that all policy/* scripts are loaded in -# scripts/test-all-policy.bro and that all scripts loaded there actually exist. +# scripts/test-all-policy.zeek and that all scripts loaded there actually exist. # -# This test will fail if new bro scripts are added to the scripts/policy/ -# directory. Correcting that just involves updating scripts/test-all-policy.bro -# to @load the new bro scripts. +# This test will fail if new zeek scripts are added to the scripts/policy/ +# directory. Correcting that just involves updating +# scripts/test-all-policy.zeek to @load the new zeek scripts. -@TEST-EXEC: test -e $DIST/scripts/test-all-policy.bro +@TEST-EXEC: test -e $DIST/scripts/test-all-policy.zeek @TEST-EXEC: test -d $DIST/scripts -@TEST-EXEC: ( cd $DIST/scripts/policy && find . 
-name '*.bro' ) | sort >"all scripts found" -@TEST-EXEC: cat $DIST/scripts/test-all-policy.bro | grep '@load' | sed 'sm^\( *# *\)\{0,\}@load *m./mg' | sort >test-all-policy.bro -@TEST-EXEC: diff -u "all scripts found" test-all-policy.bro 1>&2 +@TEST-EXEC: ( cd $DIST/scripts/policy && find . -name '*.zeek' ) | sort >"all scripts found" +@TEST-EXEC: cat $DIST/scripts/test-all-policy.zeek | grep '@load' | sed 'sm^\( *# *\)\{0,\}@load *m./mg' | sort >test-all-policy.zeek +@TEST-EXEC: diff -u "all scripts found" test-all-policy.zeek 1>&2 diff --git a/testing/btest/doc/broxygen/all_scripts.test b/testing/btest/doc/broxygen/all_scripts.test deleted file mode 100644 index 238ba3a4f3..0000000000 --- a/testing/btest/doc/broxygen/all_scripts.test +++ /dev/null @@ -1,14 +0,0 @@ -# This test is mostly just checking that there's no errors that result -# from loading all scripts and generated docs for each. - -# This must be serialized with communication tests because it does load -# listen.bro in order to document it. - -# @TEST-PORT: BROKER_PORT -# @TEST-EXEC: unset BRO_DISABLE_BROXYGEN; bro -X broxygen.config broxygen DumpEvents::include=/NOTHING_MATCHES/ Broker::default_port=$BROKER_PORT -# @TEST-EXEC: btest-diff .stdout -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr - -@TEST-START-FILE broxygen.config -script * scripts/ -@TEST-END-FILE diff --git a/testing/btest/doc/broxygen/command_line.bro b/testing/btest/doc/broxygen/command_line.bro deleted file mode 100644 index d009667b7e..0000000000 --- a/testing/btest/doc/broxygen/command_line.bro +++ /dev/null @@ -1,7 +0,0 @@ -# Shouldn't emit any warnings about not being able to document something -# that's supplied via command line script. - -# @TEST-EXEC: unset BRO_DISABLE_BROXYGEN; bro %INPUT -e 'redef myvar=10; print myvar' >output 2>&1 -# @TEST-EXEC: btest-diff output - -const myvar = 5 &redef; diff --git a/testing/btest/doc/broxygen/comment_retrieval_bifs.bro b/testing/btest/doc/broxygen/comment_retrieval_bifs.bro deleted file mode 100644 index f3c1be6b14..0000000000 --- a/testing/btest/doc/broxygen/comment_retrieval_bifs.bro +++ /dev/null @@ -1,111 +0,0 @@ -# @TEST-EXEC: unset BRO_DISABLE_BROXYGEN; bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -##! This is a test script. -##! With some summary comments. - -## Hello world. This is an option. -## With some more description here. -## And here. -const myvar = 7 &redef; ##< Maybe just one more. - -## This function prints a string line by line. -## -## lines: A string to print line by line, w/ lines delimited by newline chars. -global print_lines: function(lines: string, prefix: string &default=""); - -## And some more comments on the function implementation. -function print_lines(lines: string, prefix: string) - { - local v: vector of string; - local line_table = split(lines, /\n/); - - for ( i in line_table ) - v[i] = line_table[i]; - - for ( i in v ) - print fmt("%s%s", prefix, v[i]); - } - -function print_comments(name: string, func: function(name: string): string) - { - print fmt("%s:", name); - print_lines(func(name), " "); - } - -## This is an alias for count. -type mytype: count; - -## My record type. -type myrecord: record { - ## The first field. - ## Does something... - aaa: count; ##< Done w/ aaa. - ## The second field. - bbb: string; ##< Done w/ bbb. - ##< No really, done w/ bbb. - ## Third field. - ccc: int; ##< Done w/ ccc. - ## Fourth field. - ddd: interval; ##< Done w/ ddd. -}; - - -## My enum type; -type myenum: enum { - ## First enum value. 
- ## I know, the name isn't clever. - FIRST, ##< Done w/ first. - ## Second enum value. - SECOND, ##< Done w/ second. - ## Third enum value. - THIRD, ##< Done w/ third. - ##< Done w/ third again. - ## SIC. - ## It's a programming language. - FORTH ##< Using Reverse Polish Notation. - ##< Done w/ forth. -}; - -redef record myrecord += { - ## First redef'd field. - ## With two lines of comments. - eee: count &optional; ##< And two post-notation comments. - ##< Done w/ eee. - ## Second redef'd field. - fff: count &optional; ##< Done w/ fff. - ## Third redef'd field. - ggg: count &optional; ##< Done w/ ggg. -}; - -redef enum myenum += { - ## First redef'd enum val. - FIFTH, ##< Done w/ fifth. - ## Second redef'd enum val. - SIXTH, ##< Done w/ sixth. - ## Third redef'd enum val. - ## Lucky number seven. - SEVENTH, ##< Still works with comma. - ##< Done w/ seventh. -}; - -print_lines(get_script_comments(@DIR + "/" + @FILENAME)); -print_comments("myvar", get_identifier_comments); -print_comments("print_lines", get_identifier_comments); -print_comments("mytype", get_identifier_comments); -print_comments("myrecord", get_identifier_comments); -print_comments("myrecord$aaa", get_record_field_comments); -print_comments("myrecord$bbb", get_record_field_comments); -print_comments("myrecord$ccc", get_record_field_comments); -print_comments("myrecord$ddd", get_record_field_comments); -print_comments("myrecord$eee", get_record_field_comments); -print_comments("myrecord$fff", get_record_field_comments); -print_comments("myrecord$ggg", get_record_field_comments); -print_comments("myenum", get_identifier_comments); -print_comments("FIRST", get_identifier_comments); -print_comments("SECOND", get_identifier_comments); -print_comments("THIRD", get_identifier_comments); -print_comments("FORTH", get_identifier_comments); -print_comments("FIFTH", get_identifier_comments); -print_comments("SIXTH", get_identifier_comments); -print_comments("SEVENTH", get_identifier_comments); diff --git a/testing/btest/doc/broxygen/enums.bro b/testing/btest/doc/broxygen/enums.bro deleted file mode 100644 index 8fbdb11ab6..0000000000 --- a/testing/btest/doc/broxygen/enums.bro +++ /dev/null @@ -1,43 +0,0 @@ -# @TEST-EXEC: unset BRO_DISABLE_BROXYGEN; bro -b -X broxygen.config %INPUT -# @TEST-EXEC: btest-diff autogen-reST-enums.rst - -@TEST-START-FILE broxygen.config -identifier TestEnum* autogen-reST-enums.rst -@TEST-END-FILE - -## There's tons of ways an enum can look... 
-type TestEnum1: enum { - ## like this - ONE, - TWO, ##< or like this - ## multiple - ## comments - THREE, ##< and even - ##< more comments -}; - -## The final comma is optional -type TestEnum2: enum { - ## like this - A, - B, ##< or like this - ## multiple - ## comments - C ##< and even - ##< more comments -}; - -## redefs should also work -redef enum TestEnum1 += { - ## adding another - FOUR ##< value -}; - -## now with a comma -redef enum TestEnum1 += { - ## adding another - FIVE, ##< value -}; - -## this should reference the TestEnum1 type and not a generic "enum" type -const TestEnumVal = ONE &redef; diff --git a/testing/btest/doc/broxygen/example.bro b/testing/btest/doc/broxygen/example.bro deleted file mode 100644 index 22a6fc7418..0000000000 --- a/testing/btest/doc/broxygen/example.bro +++ /dev/null @@ -1,8 +0,0 @@ -# @TEST-EXEC: unset BRO_DISABLE_BROXYGEN; bro -X broxygen.config %INPUT -# @TEST-EXEC: btest-diff example.rst - -@TEST-START-FILE broxygen.config -script broxygen/example.bro example.rst -@TEST-END-FILE - -@load broxygen/example.bro diff --git a/testing/btest/doc/broxygen/func-params.bro b/testing/btest/doc/broxygen/func-params.bro deleted file mode 100644 index e53ca475f1..0000000000 --- a/testing/btest/doc/broxygen/func-params.bro +++ /dev/null @@ -1,24 +0,0 @@ -# @TEST-EXEC: unset BRO_DISABLE_BROXYGEN; bro -b -X broxygen.config %INPUT -# @TEST-EXEC: btest-diff autogen-reST-func-params.rst - -@TEST-START-FILE broxygen.config -identifier test_func_params* autogen-reST-func-params.rst -@TEST-END-FILE - -## This is a global function declaration. -## -## i: First param. -## j: Second param. -## -## Returns: A string. -global test_func_params_func: function(i: int, j: int): string; - -type test_func_params_rec: record { - ## This is a record field function. - ## - ## i: First param. - ## j: Second param. - ## - ## Returns: A string. 
- field_func: function(i: int, j: int): string; -}; diff --git a/testing/btest/doc/broxygen/identifier.bro b/testing/btest/doc/broxygen/identifier.bro deleted file mode 100644 index ae49d812a0..0000000000 --- a/testing/btest/doc/broxygen/identifier.bro +++ /dev/null @@ -1,9 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# @TEST-EXEC: unset BRO_DISABLE_BROXYGEN; bro -b -X broxygen.config %INPUT Broker::default_port=$BROKER_PORT -# @TEST-EXEC: btest-diff test.rst - -@TEST-START-FILE broxygen.config -identifier BroxygenExample::* test.rst -@TEST-END-FILE - -@load broxygen diff --git a/testing/btest/doc/broxygen/package.bro b/testing/btest/doc/broxygen/package.bro deleted file mode 100644 index 6a9957804a..0000000000 --- a/testing/btest/doc/broxygen/package.bro +++ /dev/null @@ -1,9 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# @TEST-EXEC: unset BRO_DISABLE_BROXYGEN; bro -b -X broxygen.config %INPUT Broker::default_port=$BROKER_PORT -# @TEST-EXEC: btest-diff test.rst - -@TEST-START-FILE broxygen.config -package broxygen test.rst -@TEST-END-FILE - -@load broxygen diff --git a/testing/btest/doc/broxygen/package_index.bro b/testing/btest/doc/broxygen/package_index.bro deleted file mode 100644 index 49c367aa48..0000000000 --- a/testing/btest/doc/broxygen/package_index.bro +++ /dev/null @@ -1,9 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# @TEST-EXEC: unset BRO_DISABLE_BROXYGEN; bro -b -X broxygen.config %INPUT Broker::default_port=$BROKER_PORT -# @TEST-EXEC: btest-diff test.rst - -@TEST-START-FILE broxygen.config -package_index broxygen test.rst -@TEST-END-FILE - -@load broxygen diff --git a/testing/btest/doc/broxygen/records.bro b/testing/btest/doc/broxygen/records.bro deleted file mode 100644 index fbaa957a9f..0000000000 --- a/testing/btest/doc/broxygen/records.bro +++ /dev/null @@ -1,26 +0,0 @@ -# @TEST-EXEC: unset BRO_DISABLE_BROXYGEN; bro -b -X broxygen.config %INPUT -# @TEST-EXEC: btest-diff autogen-reST-records.rst - -@TEST-START-FILE broxygen.config -identifier TestRecord* autogen-reST-records.rst -@TEST-END-FILE - -# undocumented record -type TestRecord1: record { - field1: bool; - field2: count; -}; - -## Here's the ways records and record fields can be documented. 
-type TestRecord2: record { - ## document ``A`` - A: count; - - B: bool; ##< document ``B`` - - ## and now ``C`` - C: TestRecord1; ##< is a declared type - - ## sets/tables should show the index types - D: set[count, bool]; -}; diff --git a/testing/btest/doc/broxygen/script_index.bro b/testing/btest/doc/broxygen/script_index.bro deleted file mode 100644 index ab257ad35d..0000000000 --- a/testing/btest/doc/broxygen/script_index.bro +++ /dev/null @@ -1,9 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# @TEST-EXEC: unset BRO_DISABLE_BROXYGEN; bro -b -X broxygen.config %INPUT Broker::default_port=$BROKER_PORT -# @TEST-EXEC: btest-diff test.rst - -@TEST-START-FILE broxygen.config -script_index broxygen/* test.rst -@TEST-END-FILE - -@load broxygen diff --git a/testing/btest/doc/broxygen/script_summary.bro b/testing/btest/doc/broxygen/script_summary.bro deleted file mode 100644 index a517a08072..0000000000 --- a/testing/btest/doc/broxygen/script_summary.bro +++ /dev/null @@ -1,9 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# @TEST-EXEC: unset BRO_DISABLE_BROXYGEN; bro -b -X broxygen.config %INPUT Broker::default_port=$BROKER_PORT -# @TEST-EXEC: btest-diff test.rst - -@TEST-START-FILE broxygen.config -script_summary broxygen/example.bro test.rst -@TEST-END-FILE - -@load broxygen diff --git a/testing/btest/doc/broxygen/type-aliases.bro b/testing/btest/doc/broxygen/type-aliases.bro deleted file mode 100644 index 0971327c2b..0000000000 --- a/testing/btest/doc/broxygen/type-aliases.bro +++ /dev/null @@ -1,34 +0,0 @@ -# @TEST-EXEC: unset BRO_DISABLE_BROXYGEN; bro -b -X broxygen.config %INPUT -# @TEST-EXEC: btest-diff autogen-reST-type-aliases.rst - -@TEST-START-FILE broxygen.config -identifier BroxygenTest::* autogen-reST-type-aliases.rst -@TEST-END-FILE - -module BroxygenTest; - -export { - ## This is just an alias for a builtin type ``bool``. - type TypeAlias: bool; - - ## This type should get its own comments, not associated w/ TypeAlias. - type NotTypeAlias: bool; - - ## This cross references ``bool`` in the description of its type - ## instead of ``TypeAlias`` just because it seems more useful -- - ## one doesn't have to click through the full type alias chain to - ## find out what the actual type is... - type OtherTypeAlias: TypeAlias; - - ## But this should reference a type of ``TypeAlias``. - global a: TypeAlias; - - ## And this should reference a type of ``OtherTypeAlias``. - global b: OtherTypeAlias; - - type MyRecord: record { - f1: TypeAlias; - f2: OtherTypeAlias; - f3: bool; - }; -} diff --git a/testing/btest/doc/broxygen/vectors.bro b/testing/btest/doc/broxygen/vectors.bro deleted file mode 100644 index 7c18225357..0000000000 --- a/testing/btest/doc/broxygen/vectors.bro +++ /dev/null @@ -1,20 +0,0 @@ -# @TEST-EXEC: unset BRO_DISABLE_BROXYGEN; bro -b -X broxygen.config %INPUT -# @TEST-EXEC: btest-diff autogen-reST-vectors.rst - -@TEST-START-FILE broxygen.config -identifier test_vector* autogen-reST-vectors.rst -@TEST-END-FILE - -type TestRecord: record { - field1: bool; - field2: count; -}; - -## Yield type is documented/cross-referenced for primitize types. -global test_vector0: vector of string; - -## Yield type is documented/cross-referenced for composite types. -global test_vector1: vector of TestRecord; - -## Just showing an even fancier yield type. 
-global test_vector2: vector of vector of TestRecord; diff --git a/testing/btest/doc/record-add.bro b/testing/btest/doc/record-add.bro deleted file mode 100644 index 284ea22959..0000000000 --- a/testing/btest/doc/record-add.bro +++ /dev/null @@ -1,36 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT - -# To support documentation of type aliases, Bro clones declared types -# (see add_type() in Var.cc) in order to keep track of type names and aliases. -# This test makes sure that the cloning is done in a way that's compatible -# with adding fields to a record type -- we want to be sure that cloning -# a type that contains record types will correctly see field additions to -# those contained-records. - -type my_record: record { - field1: bool; - field2: string; -}; - -type super_record: record { - rec: my_record; -}; -type my_table: table[count] of my_record; -type my_vector: vector of my_record; - -redef record my_record += { - field3: count &optional; -}; - -global a: my_record; -global b: super_record; -global c: my_table; -global d: my_vector; - -function test_func() - { - a?$field3; - b$rec?$field3; - c[0]$field3; - d[0]$field3; - } diff --git a/testing/btest/doc/record-add.zeek b/testing/btest/doc/record-add.zeek new file mode 100644 index 0000000000..1c764cae5f --- /dev/null +++ b/testing/btest/doc/record-add.zeek @@ -0,0 +1,36 @@ +# @TEST-EXEC: zeek -b %INPUT + +# To support documentation of type aliases, Zeek clones declared types +# (see add_type() in Var.cc) in order to keep track of type names and aliases. +# This test makes sure that the cloning is done in a way that's compatible +# with adding fields to a record type -- we want to be sure that cloning +# a type that contains record types will correctly see field additions to +# those contained-records. + +type my_record: record { + field1: bool; + field2: string; +}; + +type super_record: record { + rec: my_record; +}; +type my_table: table[count] of my_record; +type my_vector: vector of my_record; + +redef record my_record += { + field3: count &optional; +}; + +global a: my_record; +global b: super_record; +global c: my_table; +global d: my_vector; + +function test_func() + { + a?$field3; + b$rec?$field3; + c[0]$field3; + d[0]$field3; + } diff --git a/testing/btest/doc/record-attr-check.bro b/testing/btest/doc/record-attr-check.bro deleted file mode 100644 index c7dc74631d..0000000000 --- a/testing/btest/doc/record-attr-check.bro +++ /dev/null @@ -1,9 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT - -type Tag: enum { - SOMETHING -}; - -type R: record { - field1: set[Tag] &default=set(); -}; diff --git a/testing/btest/doc/record-attr-check.zeek b/testing/btest/doc/record-attr-check.zeek new file mode 100644 index 0000000000..e34b417e57 --- /dev/null +++ b/testing/btest/doc/record-attr-check.zeek @@ -0,0 +1,9 @@ +# @TEST-EXEC: zeek -b %INPUT + +type Tag: enum { + SOMETHING +}; + +type R: record { + field1: set[Tag] &default=set(); +}; diff --git a/testing/btest/doc/zeekygen/command_line.zeek b/testing/btest/doc/zeekygen/command_line.zeek new file mode 100644 index 0000000000..434122b0cd --- /dev/null +++ b/testing/btest/doc/zeekygen/command_line.zeek @@ -0,0 +1,7 @@ +# Shouldn't emit any warnings about not being able to document something +# that's supplied via command line script. 
+ +# @TEST-EXEC: unset ZEEK_DISABLE_ZEEKYGEN; unset BRO_DISABLE_BROXYGEN; zeek %INPUT -e 'redef myvar=10; print myvar' >output 2>&1 +# @TEST-EXEC: btest-diff output + +const myvar = 5 &redef; diff --git a/testing/btest/doc/zeekygen/comment_retrieval_bifs.zeek b/testing/btest/doc/zeekygen/comment_retrieval_bifs.zeek new file mode 100644 index 0000000000..8594aa2195 --- /dev/null +++ b/testing/btest/doc/zeekygen/comment_retrieval_bifs.zeek @@ -0,0 +1,107 @@ +# @TEST-EXEC: unset ZEEK_DISABLE_ZEEKYGEN; unset BRO_DISABLE_BROXYGEN; zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +##! This is a test script. +##! With some summary comments. + +## Hello world. This is an option. +## With some more description here. +## And here. +const myvar = 7 &redef; ##< Maybe just one more. + +## This function prints a string line by line. +## +## lines: A string to print line by line, w/ lines delimited by newline chars. +global print_lines: function(lines: string, prefix: string &default=""); + +## And some more comments on the function implementation. +function print_lines(lines: string, prefix: string) + { + local v = split_string(lines, /\n/); + + for ( i in v ) + print fmt("%s%s", prefix, v[i]); + } + +function print_comments(name: string, func: function(name: string): string) + { + print fmt("%s:", name); + print_lines(func(name), " "); + } + +## This is an alias for count. +type mytype: count; + +## My record type. +type myrecord: record { + ## The first field. + ## Does something... + aaa: count; ##< Done w/ aaa. + ## The second field. + bbb: string; ##< Done w/ bbb. + ##< No really, done w/ bbb. + ## Third field. + ccc: int; ##< Done w/ ccc. + ## Fourth field. + ddd: interval; ##< Done w/ ddd. +}; + + +## My enum type; +type myenum: enum { + ## First enum value. + ## I know, the name isn't clever. + FIRST, ##< Done w/ first. + ## Second enum value. + SECOND, ##< Done w/ second. + ## Third enum value. + THIRD, ##< Done w/ third. + ##< Done w/ third again. + ## SIC. + ## It's a programming language. + FORTH ##< Using Reverse Polish Notation. + ##< Done w/ forth. +}; + +redef record myrecord += { + ## First redef'd field. + ## With two lines of comments. + eee: count &optional; ##< And two post-notation comments. + ##< Done w/ eee. + ## Second redef'd field. + fff: count &optional; ##< Done w/ fff. + ## Third redef'd field. + ggg: count &optional; ##< Done w/ ggg. +}; + +redef enum myenum += { + ## First redef'd enum val. + FIFTH, ##< Done w/ fifth. + ## Second redef'd enum val. + SIXTH, ##< Done w/ sixth. + ## Third redef'd enum val. + ## Lucky number seven. + SEVENTH, ##< Still works with comma. + ##< Done w/ seventh. 
+}; + +print_lines(get_script_comments(@DIR + "/" + @FILENAME)); +print_comments("myvar", get_identifier_comments); +print_comments("print_lines", get_identifier_comments); +print_comments("mytype", get_identifier_comments); +print_comments("myrecord", get_identifier_comments); +print_comments("myrecord$aaa", get_record_field_comments); +print_comments("myrecord$bbb", get_record_field_comments); +print_comments("myrecord$ccc", get_record_field_comments); +print_comments("myrecord$ddd", get_record_field_comments); +print_comments("myrecord$eee", get_record_field_comments); +print_comments("myrecord$fff", get_record_field_comments); +print_comments("myrecord$ggg", get_record_field_comments); +print_comments("myenum", get_identifier_comments); +print_comments("FIRST", get_identifier_comments); +print_comments("SECOND", get_identifier_comments); +print_comments("THIRD", get_identifier_comments); +print_comments("FORTH", get_identifier_comments); +print_comments("FIFTH", get_identifier_comments); +print_comments("SIXTH", get_identifier_comments); +print_comments("SEVENTH", get_identifier_comments); diff --git a/testing/btest/doc/zeekygen/enums.zeek b/testing/btest/doc/zeekygen/enums.zeek new file mode 100644 index 0000000000..8e117854fc --- /dev/null +++ b/testing/btest/doc/zeekygen/enums.zeek @@ -0,0 +1,43 @@ +# @TEST-EXEC: unset ZEEK_DISABLE_ZEEKYGEN; unset BRO_DISABLE_BROXYGEN; zeek -b -X zeekygen.config %INPUT +# @TEST-EXEC: btest-diff autogen-reST-enums.rst + +@TEST-START-FILE zeekygen.config +identifier TestEnum* autogen-reST-enums.rst +@TEST-END-FILE + +## There's tons of ways an enum can look... +type TestEnum1: enum { + ## like this + ONE, + TWO, ##< or like this + ## multiple + ## comments + THREE, ##< and even + ##< more comments +}; + +## The final comma is optional +type TestEnum2: enum { + ## like this + A, + B, ##< or like this + ## multiple + ## comments + C ##< and even + ##< more comments +}; + +## redefs should also work +redef enum TestEnum1 += { + ## adding another + FOUR ##< value +}; + +## now with a comma +redef enum TestEnum1 += { + ## adding another + FIVE, ##< value +}; + +## this should reference the TestEnum1 type and not a generic "enum" type +const TestEnumVal = ONE &redef; diff --git a/testing/btest/doc/zeekygen/example.zeek b/testing/btest/doc/zeekygen/example.zeek new file mode 100644 index 0000000000..b1dfac934d --- /dev/null +++ b/testing/btest/doc/zeekygen/example.zeek @@ -0,0 +1,8 @@ +# @TEST-EXEC: unset ZEEK_DISABLE_ZEEKYGEN; unset BRO_DISABLE_BROXYGEN; zeek -X zeekygen.config %INPUT +# @TEST-EXEC: btest-diff example.rst + +@TEST-START-FILE zeekygen.config +script zeekygen/example.zeek example.rst +@TEST-END-FILE + +@load zeekygen/example diff --git a/testing/btest/doc/zeekygen/func-params.zeek b/testing/btest/doc/zeekygen/func-params.zeek new file mode 100644 index 0000000000..b425df3410 --- /dev/null +++ b/testing/btest/doc/zeekygen/func-params.zeek @@ -0,0 +1,24 @@ +# @TEST-EXEC: unset ZEEK_DISABLE_ZEEKYGEN; unset BRO_DISABLE_BROXYGEN; zeek -b -X zeekygen.config %INPUT +# @TEST-EXEC: btest-diff autogen-reST-func-params.rst + +@TEST-START-FILE zeekygen.config +identifier test_func_params* autogen-reST-func-params.rst +@TEST-END-FILE + +## This is a global function declaration. +## +## i: First param. +## j: Second param. +## +## Returns: A string. +global test_func_params_func: function(i: int, j: int): string; + +type test_func_params_rec: record { + ## This is a record field function. + ## + ## i: First param. + ## j: Second param. 
+ ## + ## Returns: A string. + field_func: function(i: int, j: int): string; +}; diff --git a/testing/btest/doc/zeekygen/identifier.zeek b/testing/btest/doc/zeekygen/identifier.zeek new file mode 100644 index 0000000000..ee0841e9e2 --- /dev/null +++ b/testing/btest/doc/zeekygen/identifier.zeek @@ -0,0 +1,9 @@ +# @TEST-PORT: BROKER_PORT +# @TEST-EXEC: unset ZEEK_DISABLE_ZEEKYGEN; unset BRO_DISABLE_BROXYGEN; zeek -b -X zeekygen.config %INPUT Broker::default_port=$BROKER_PORT +# @TEST-EXEC: btest-diff test.rst + +@TEST-START-FILE zeekygen.config +identifier ZeekygenExample::* test.rst +@TEST-END-FILE + +@load zeekygen diff --git a/testing/btest/doc/zeekygen/package.zeek b/testing/btest/doc/zeekygen/package.zeek new file mode 100644 index 0000000000..a2c66f2c91 --- /dev/null +++ b/testing/btest/doc/zeekygen/package.zeek @@ -0,0 +1,9 @@ +# @TEST-PORT: BROKER_PORT +# @TEST-EXEC: unset ZEEK_DISABLE_ZEEKYGEN; unset BRO_DISABLE_BROXYGEN; zeek -b -X zeekygen.config %INPUT Broker::default_port=$BROKER_PORT +# @TEST-EXEC: btest-diff test.rst + +@TEST-START-FILE zeekygen.config +package zeekygen test.rst +@TEST-END-FILE + +@load zeekygen diff --git a/testing/btest/doc/zeekygen/package_index.zeek b/testing/btest/doc/zeekygen/package_index.zeek new file mode 100644 index 0000000000..7831ea25ee --- /dev/null +++ b/testing/btest/doc/zeekygen/package_index.zeek @@ -0,0 +1,9 @@ +# @TEST-PORT: BROKER_PORT +# @TEST-EXEC: unset ZEEK_DISABLE_ZEEKYGEN; unset BRO_DISABLE_BROXYGEN; zeek -b -X zeekygen.config %INPUT Broker::default_port=$BROKER_PORT +# @TEST-EXEC: btest-diff test.rst + +@TEST-START-FILE zeekygen.config +package_index zeekygen test.rst +@TEST-END-FILE + +@load zeekygen diff --git a/testing/btest/doc/zeekygen/records.zeek b/testing/btest/doc/zeekygen/records.zeek new file mode 100644 index 0000000000..fd720fe22e --- /dev/null +++ b/testing/btest/doc/zeekygen/records.zeek @@ -0,0 +1,26 @@ +# @TEST-EXEC: unset ZEEK_DISABLE_ZEEKYGEN; unset BRO_DISABLE_BROXYGEN; zeek -b -X zeekygen.config %INPUT +# @TEST-EXEC: btest-diff autogen-reST-records.rst + +@TEST-START-FILE zeekygen.config +identifier TestRecord* autogen-reST-records.rst +@TEST-END-FILE + +# undocumented record +type TestRecord1: record { + field1: bool; + field2: count; +}; + +## Here's the ways records and record fields can be documented. 
+type TestRecord2: record { + ## document ``A`` + A: count; + + B: bool; ##< document ``B`` + + ## and now ``C`` + C: TestRecord1; ##< is a declared type + + ## sets/tables should show the index types + D: set[count, bool]; +}; diff --git a/testing/btest/doc/zeekygen/script_index.zeek b/testing/btest/doc/zeekygen/script_index.zeek new file mode 100644 index 0000000000..a58d82c1d9 --- /dev/null +++ b/testing/btest/doc/zeekygen/script_index.zeek @@ -0,0 +1,9 @@ +# @TEST-PORT: BROKER_PORT +# @TEST-EXEC: unset ZEEK_DISABLE_ZEEKYGEN; unset BRO_DISABLE_BROXYGEN; zeek -b -X zeekygen.config %INPUT Broker::default_port=$BROKER_PORT +# @TEST-EXEC: btest-diff test.rst + +@TEST-START-FILE zeekygen.config +script_index zeekygen/* test.rst +@TEST-END-FILE + +@load zeekygen diff --git a/testing/btest/doc/zeekygen/script_summary.zeek b/testing/btest/doc/zeekygen/script_summary.zeek new file mode 100644 index 0000000000..8eb5785cae --- /dev/null +++ b/testing/btest/doc/zeekygen/script_summary.zeek @@ -0,0 +1,9 @@ +# @TEST-PORT: BROKER_PORT +# @TEST-EXEC: unset ZEEK_DISABLE_ZEEKYGEN; unset BRO_DISABLE_BROXYGEN; zeek -b -X zeekygen.config %INPUT Broker::default_port=$BROKER_PORT +# @TEST-EXEC: btest-diff test.rst + +@TEST-START-FILE zeekygen.config +script_summary zeekygen/example.zeek test.rst +@TEST-END-FILE + +@load zeekygen diff --git a/testing/btest/doc/zeekygen/type-aliases.zeek b/testing/btest/doc/zeekygen/type-aliases.zeek new file mode 100644 index 0000000000..8388044e99 --- /dev/null +++ b/testing/btest/doc/zeekygen/type-aliases.zeek @@ -0,0 +1,34 @@ +# @TEST-EXEC: unset ZEEK_DISABLE_ZEEKYGEN; unset BRO_DISABLE_BROXYGEN; zeek -b -X zeekygen.config %INPUT +# @TEST-EXEC: btest-diff autogen-reST-type-aliases.rst + +@TEST-START-FILE zeekygen.config +identifier ZeekygenTest::* autogen-reST-type-aliases.rst +@TEST-END-FILE + +module ZeekygenTest; + +export { + ## This is just an alias for a builtin type ``bool``. + type TypeAlias: bool; + + ## This type should get its own comments, not associated w/ TypeAlias. + type NotTypeAlias: bool; + + ## This cross references ``bool`` in the description of its type + ## instead of ``TypeAlias`` just because it seems more useful -- + ## one doesn't have to click through the full type alias chain to + ## find out what the actual type is... + type OtherTypeAlias: TypeAlias; + + ## But this should reference a type of ``TypeAlias``. + global a: TypeAlias; + + ## And this should reference a type of ``OtherTypeAlias``. + global b: OtherTypeAlias; + + type MyRecord: record { + f1: TypeAlias; + f2: OtherTypeAlias; + f3: bool; + }; +} diff --git a/testing/btest/doc/zeekygen/vectors.zeek b/testing/btest/doc/zeekygen/vectors.zeek new file mode 100644 index 0000000000..70c877134e --- /dev/null +++ b/testing/btest/doc/zeekygen/vectors.zeek @@ -0,0 +1,20 @@ +# @TEST-EXEC: unset ZEEK_DISABLE_ZEEKYGEN; unset BRO_DISABLE_BROXYGEN; zeek -b -X zeekygen.config %INPUT +# @TEST-EXEC: btest-diff autogen-reST-vectors.rst + +@TEST-START-FILE zeekygen.config +identifier test_vector* autogen-reST-vectors.rst +@TEST-END-FILE + +type TestRecord: record { + field1: bool; + field2: count; +}; + +## Yield type is documented/cross-referenced for primitize types. +global test_vector0: vector of string; + +## Yield type is documented/cross-referenced for composite types. +global test_vector1: vector of TestRecord; + +## Just showing an even fancier yield type. 
+global test_vector2: vector of vector of TestRecord; diff --git a/testing/btest/language/addr.bro b/testing/btest/language/addr.bro deleted file mode 100644 index dd7e5e1dff..0000000000 --- a/testing/btest/language/addr.bro +++ /dev/null @@ -1,52 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - - -event bro_init() -{ - # IPv4 addresses - local a1: addr = 0.0.0.0; - local a2: addr = 10.0.0.11; - local a3: addr = 255.255.255.255; - local a4 = 192.1.2.3; - - test_case( "IPv4 address inequality", a1 != a2 ); - test_case( "IPv4 address equality", a1 == 0.0.0.0 ); - test_case( "IPv4 address comparison", a1 < a2 ); - test_case( "IPv4 address comparison", a3 > a2 ); - test_case( "size of IPv4 address", |a1| == 32 ); - test_case( "IPv4 address type inference", type_name(a4) == "addr" ); - - # IPv6 addresses - local b1: addr = [::]; - local b2: addr = [::255.255.255.255]; - local b3: addr = [::ffff:ffff]; - local b4: addr = [ffff::ffff]; - local b5: addr = [0000:0000:0000:0000:0000:0000:0000:0000]; - local b6: addr = [aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222]; - local b7: addr = [AAAA:BBBB:CCCC:DDDD:EEEE:FFFF:1111:2222]; - local b8 = [a::b]; - - test_case( "IPv6 address inequality", b1 != b2 ); - test_case( "IPv6 address equality", b1 == b5 ); - test_case( "IPv6 address equality", b2 == b3 ); - test_case( "IPv6 address comparison", b1 < b2 ); - test_case( "IPv6 address comparison", b4 > b2 ); - test_case( "IPv6 address not case-sensitive", b6 == b7 ); - test_case( "size of IPv6 address", |b1| == 128 ); - test_case( "IPv6 address type inference", type_name(b8) == "addr" ); - - test_case( "IPv4 and IPv6 address inequality", a1 != b1 ); - - # IPv4-mapped-IPv6 (internally treated as IPv4) - local c1: addr = [::ffff:1.2.3.4]; - - test_case( "IPv4-mapped-IPv6 equality to IPv4", c1 == 1.2.3.4 ); - test_case( "IPv4-mapped-IPv6 is IPv4", is_v4_addr(c1) == T ); -} - diff --git a/testing/btest/language/addr.zeek b/testing/btest/language/addr.zeek new file mode 100644 index 0000000000..dff331c3fd --- /dev/null +++ b/testing/btest/language/addr.zeek @@ -0,0 +1,53 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + +event zeek_init() +{ + # IPv4 addresses + local a1: addr = 0.0.0.0; + local a2: addr = 10.0.0.11; + local a3: addr = 255.255.255.255; + local a4 = 192.1.2.3; + + test_case( "IPv4 address inequality", a1 != a2 ); + test_case( "IPv4 address equality", a1 == 0.0.0.0 ); + test_case( "IPv4 address comparison", a1 < a2 ); + test_case( "IPv4 address comparison", a3 > a2 ); + test_case( "size of IPv4 address", |a1| == 32 ); + test_case( "IPv4 address type inference", type_name(a4) == "addr" ); + + # IPv6 addresses + local b1: addr = [::]; + local b2: addr = [::255.255.255.255]; + local b3: addr = [::ffff:ffff]; + local b4: addr = [ffff::ffff]; + local b5: addr = [0000:0000:0000:0000:0000:0000:0000:0000]; + local b6: addr = [aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222]; + local b7: addr = [AAAA:BBBB:CCCC:DDDD:EEEE:FFFF:1111:2222]; + local b8 = [a::b]; + local b9 = [2001:db8:0:0:0:FFFF:192.168.0.5]; + + test_case( "IPv6 address inequality", b1 != b2 ); + test_case( "IPv6 address equality", b1 == b5 ); + test_case( "IPv6 address equality", b2 == b3 ); + test_case( "IPv6 address comparison", b1 < b2 ); + test_case( "IPv6 address comparison", b4 > b2 ); + test_case( "IPv6 address not case-sensitive", b6 == b7 ); + test_case( "size of IPv6 address", |b1| == 128 ); + test_case( "IPv6 address type inference", type_name(b8) == "addr" ); + + test_case( "IPv4 and IPv6 address inequality", a1 != b1 ); + + # IPv4-mapped-IPv6 (internally treated as IPv4) + local c1: addr = [::ffff:1.2.3.4]; + + test_case( "IPv4-mapped-IPv6 equality to IPv4", c1 == 1.2.3.4 ); + test_case( "IPv4-mapped-IPv6 is IPv4", is_v4_addr(c1) == T ); +} + diff --git a/testing/btest/language/any.bro b/testing/btest/language/any.bro deleted file mode 100644 index fca23f6db8..0000000000 --- a/testing/btest/language/any.bro +++ /dev/null @@ -1,40 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - -function anyarg(arg1: any, arg1type: string) - { - test_case( arg1type, type_name(arg1) == arg1type ); - } - -event bro_init() -{ - local any1: any = 5; - local any2: any = "bar"; - local any3: any = /bar/; - - # Test using variable of type "any" - - anyarg( any1, "count" ); - anyarg( any2, "string" ); - anyarg( any3, "pattern" ); - - # Test of other types - - anyarg( T, "bool" ); - anyarg( "foo", "string" ); - anyarg( 15, "count" ); - anyarg( +15, "int" ); - anyarg( 15.0, "double" ); - anyarg( /foo/, "pattern" ); - anyarg( 127.0.0.1, "addr" ); - anyarg( [::1], "addr" ); - anyarg( 127.0.0.1/16, "subnet" ); - anyarg( [ffff::1]/64, "subnet" ); - anyarg( 123/tcp, "port" ); -} - diff --git a/testing/btest/language/any.zeek b/testing/btest/language/any.zeek new file mode 100644 index 0000000000..aebab284c2 --- /dev/null +++ b/testing/btest/language/any.zeek @@ -0,0 +1,40 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + +function anyarg(arg1: any, arg1type: string) + { + test_case( arg1type, type_name(arg1) == arg1type ); + } + +event zeek_init() +{ + local any1: any = 5; + local any2: any = "bar"; + local any3: any = /bar/; + + # Test using variable of type "any" + + anyarg( any1, "count" ); + anyarg( any2, "string" ); + anyarg( any3, "pattern" ); + + # Test of other types + + anyarg( T, "bool" ); + anyarg( "foo", "string" ); + anyarg( 15, "count" ); + anyarg( +15, "int" ); + anyarg( 15.0, "double" ); + anyarg( /foo/, "pattern" ); + anyarg( 127.0.0.1, "addr" ); + anyarg( [::1], "addr" ); + anyarg( 127.0.0.1/16, "subnet" ); + anyarg( [ffff::1]/64, "subnet" ); + anyarg( 123/tcp, "port" ); +} + diff --git a/testing/btest/language/at-deprecated.bro b/testing/btest/language/at-deprecated.bro deleted file mode 100644 index dd0f746658..0000000000 --- a/testing/btest/language/at-deprecated.bro +++ /dev/null @@ -1,16 +0,0 @@ -# @TEST-EXEC: bro -b foo -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr - -@TEST-START-FILE foo.bro -@deprecated -@load bar -@load baz -@TEST-END-FILE - -@TEST-START-FILE bar.bro -@deprecated "Use '@load qux.bro' instead" -@TEST-END-FILE - -@TEST-START-FILE baz.bro -@deprecated -@TEST-END-FILE diff --git a/testing/btest/language/at-deprecated.zeek b/testing/btest/language/at-deprecated.zeek new file mode 100644 index 0000000000..a035f6d24e --- /dev/null +++ b/testing/btest/language/at-deprecated.zeek @@ -0,0 +1,16 @@ +# @TEST-EXEC: zeek -b foo +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr + +@TEST-START-FILE foo.zeek +@deprecated +@load bar +@load baz +@TEST-END-FILE + +@TEST-START-FILE bar.zeek +@deprecated "Use '@load qux' instead" +@TEST-END-FILE + +@TEST-START-FILE baz.zeek +@deprecated +@TEST-END-FILE diff --git a/testing/btest/language/at-dir.bro b/testing/btest/language/at-dir.bro deleted file mode 100644 index b826e3a5da..0000000000 --- a/testing/btest/language/at-dir.bro +++ /dev/null @@ -1,10 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out -# @TEST-EXEC: bro -b ./pathtest.bro >out2 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out2 - -print @DIR; - -@TEST-START-FILE pathtest.bro -print @DIR; -@TEST-END-FILE diff --git a/testing/btest/language/at-dir.zeek b/testing/btest/language/at-dir.zeek new file mode 100644 index 0000000000..35f8894caf --- /dev/null +++ b/testing/btest/language/at-dir.zeek @@ -0,0 +1,10 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out +# @TEST-EXEC: zeek -b ./pathtest.zeek >out2 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out2 + +print @DIR; + +@TEST-START-FILE pathtest.zeek +print @DIR; +@TEST-END-FILE diff --git a/testing/btest/language/at-filename.bro b/testing/btest/language/at-filename.bro deleted file mode 100644 index 83e4e968f3..0000000000 --- a/testing/btest/language/at-filename.bro +++ /dev/null @@ -1,4 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -print @FILENAME; diff --git a/testing/btest/language/at-filename.zeek b/testing/btest/language/at-filename.zeek new file mode 100644 index 0000000000..aa8b924b7e --- /dev/null +++ b/testing/btest/language/at-filename.zeek @@ -0,0 +1,4 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +print @FILENAME; diff --git a/testing/btest/language/at-if-event.bro 
b/testing/btest/language/at-if-event.bro deleted file mode 100644 index 0dd9815908..0000000000 --- a/testing/btest/language/at-if-event.bro +++ /dev/null @@ -1,51 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out -# Check if @if can be used to alternative function/event definitions - -@if ( 1==1 ) -function test_case(msg: string) -@else -lalala -@endif - { - print msg; - } - -@if ( 1==1 ) -event bro_init() -@else -lalala -@endif - { - print "1"; - test_case("2"); - } - -@if ( 1==0 ) -lalala -@else -event bro_init() -@endif - { - print "3"; - } - -@if ( 1==1 ) -@if ( 1==1 ) -event bro_init() -@endif -@else -lalala -@endif - { - print "4"; - } - -@if ( 1==1 ) -event bro_init() &priority=10 -@else -lalala -@endif - { - print "0"; - } diff --git a/testing/btest/language/at-if-event.zeek b/testing/btest/language/at-if-event.zeek new file mode 100644 index 0000000000..bd6112f369 --- /dev/null +++ b/testing/btest/language/at-if-event.zeek @@ -0,0 +1,51 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out +# Check if @if can be used to alternative function/event definitions + +@if ( 1==1 ) +function test_case(msg: string) +@else +lalala +@endif + { + print msg; + } + +@if ( 1==1 ) +event zeek_init() +@else +lalala +@endif + { + print "1"; + test_case("2"); + } + +@if ( 1==0 ) +lalala +@else +event zeek_init() +@endif + { + print "3"; + } + +@if ( 1==1 ) +@if ( 1==1 ) +event zeek_init() +@endif +@else +lalala +@endif + { + print "4"; + } + +@if ( 1==1 ) +event zeek_init() &priority=10 +@else +lalala +@endif + { + print "0"; + } diff --git a/testing/btest/language/at-if-invalid.bro b/testing/btest/language/at-if-invalid.bro deleted file mode 100644 index 1be2b94304..0000000000 --- a/testing/btest/language/at-if-invalid.bro +++ /dev/null @@ -1,41 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -function foo(c: count): bool - { return c == 42 ? T : F; } - -global TRUE_CONDITION = T; - -event bro_init() - { - local xyz = 0; - local local_true_condition = T; - - @if ( F ) - xyz += 1; - @endif - - @if ( foo(0) ) - xyz += 1; - @endif - - @if ( T && foo(42) ) - xyz += 2; - @endif - - xyz = 0; - - @if ( F && foo(xyz) ) - xyz += 1; - @else - xyz += 2; - @endif - - xyz = 0; - - @if ( T && TRUE_CONDITION && local_true_condition ) - xyz += 1; - @else - xyz += 2; - @endif - } diff --git a/testing/btest/language/at-if-invalid.zeek b/testing/btest/language/at-if-invalid.zeek new file mode 100644 index 0000000000..8657e3affb --- /dev/null +++ b/testing/btest/language/at-if-invalid.zeek @@ -0,0 +1,41 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +function foo(c: count): bool + { return c == 42 ? 
T : F; } + +global TRUE_CONDITION = T; + +event zeek_init() + { + local xyz = 0; + local local_true_condition = T; + + @if ( F ) + xyz += 1; + @endif + + @if ( foo(0) ) + xyz += 1; + @endif + + @if ( T && foo(42) ) + xyz += 2; + @endif + + xyz = 0; + + @if ( F && foo(xyz) ) + xyz += 1; + @else + xyz += 2; + @endif + + xyz = 0; + + @if ( T && TRUE_CONDITION && local_true_condition ) + xyz += 1; + @else + xyz += 2; + @endif + } diff --git a/testing/btest/language/at-if.bro b/testing/btest/language/at-if.bro deleted file mode 100644 index dddf9a22a5..0000000000 --- a/testing/btest/language/at-if.bro +++ /dev/null @@ -1,57 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - -function foo(c: count): bool - { return c == 42 ? T : F; } - -global TRUE_CONDITION = T; - -event bro_init() -{ - local xyz = 0; - - # Test "if" without "else" - - @if ( F ) - xyz += 1; - @endif - - @if ( foo(0) ) - xyz += 1; - @endif - - @if ( T && foo(42) ) - xyz += 2; - @endif - - test_case( "@if", xyz == 2 ); - - # Test "if" with an "else" - - xyz = 0; - - @if ( F ) - xyz += 1; - @else - xyz += 2; - @endif - - test_case( "@if...@else", xyz == 2 ); - - xyz = 0; - - @if ( T && TRUE_CONDITION ) - xyz += 1; - @else - xyz += 2; - @endif - - test_case( "@if...@else", xyz == 1 ); - -} - diff --git a/testing/btest/language/at-if.zeek b/testing/btest/language/at-if.zeek new file mode 100644 index 0000000000..e6d7f58cae --- /dev/null +++ b/testing/btest/language/at-if.zeek @@ -0,0 +1,57 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + +function foo(c: count): bool + { return c == 42 ? T : F; } + +global TRUE_CONDITION = T; + +event zeek_init() +{ + local xyz = 0; + + # Test "if" without "else" + + @if ( F ) + xyz += 1; + @endif + + @if ( foo(0) ) + xyz += 1; + @endif + + @if ( T && foo(42) ) + xyz += 2; + @endif + + test_case( "@if", xyz == 2 ); + + # Test "if" with an "else" + + xyz = 0; + + @if ( F ) + xyz += 1; + @else + xyz += 2; + @endif + + test_case( "@if...@else", xyz == 2 ); + + xyz = 0; + + @if ( T && TRUE_CONDITION ) + xyz += 1; + @else + xyz += 2; + @endif + + test_case( "@if...@else", xyz == 1 ); + +} + diff --git a/testing/btest/language/at-ifdef.bro b/testing/btest/language/at-ifdef.bro deleted file mode 100644 index e7bb961833..0000000000 --- a/testing/btest/language/at-ifdef.bro +++ /dev/null @@ -1,50 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); - } - -global thisisdefined = 123; - -event bro_init() -{ - local xyz = 0; - - # Test "ifdef" without "else" - - @ifdef ( notdefined ) - xyz += 1; - @endif - - @ifdef ( thisisdefined ) - xyz += 2; - @endif - - test_case( "@ifdef", xyz == 2 ); - - # Test "ifdef" with an "else" - - xyz = 0; - - @ifdef ( doesnotexist ) - xyz += 1; - @else - xyz += 2; - @endif - - test_case( "@ifdef...@else", xyz == 2 ); - - xyz = 0; - - @ifdef ( thisisdefined ) - xyz += 1; - @else - xyz += 2; - @endif - - test_case( "@ifdef...@else", xyz == 1 ); - -} - diff --git a/testing/btest/language/at-ifdef.zeek b/testing/btest/language/at-ifdef.zeek new file mode 100644 index 0000000000..cbc26b5cfa --- /dev/null +++ b/testing/btest/language/at-ifdef.zeek @@ -0,0 +1,50 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + +global thisisdefined = 123; + +event zeek_init() +{ + local xyz = 0; + + # Test "ifdef" without "else" + + @ifdef ( notdefined ) + xyz += 1; + @endif + + @ifdef ( thisisdefined ) + xyz += 2; + @endif + + test_case( "@ifdef", xyz == 2 ); + + # Test "ifdef" with an "else" + + xyz = 0; + + @ifdef ( doesnotexist ) + xyz += 1; + @else + xyz += 2; + @endif + + test_case( "@ifdef...@else", xyz == 2 ); + + xyz = 0; + + @ifdef ( thisisdefined ) + xyz += 1; + @else + xyz += 2; + @endif + + test_case( "@ifdef...@else", xyz == 1 ); + +} + diff --git a/testing/btest/language/at-ifndef.bro b/testing/btest/language/at-ifndef.bro deleted file mode 100644 index 8bff0c456b..0000000000 --- a/testing/btest/language/at-ifndef.bro +++ /dev/null @@ -1,50 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - -global thisisdefined = 123; - -event bro_init() -{ - local xyz = 0; - - # Test "ifndef" without "else" - - @ifndef ( notdefined ) - xyz += 1; - @endif - - @ifndef ( thisisdefined ) - xyz += 2; - @endif - - test_case( "@ifndef", xyz == 1 ); - - # Test "ifndef" with an "else" - - xyz = 0; - - @ifndef ( doesnotexist ) - xyz += 1; - @else - xyz += 2; - @endif - - test_case( "@ifndef...@else", xyz == 1 ); - - xyz = 0; - - @ifndef ( thisisdefined ) - xyz += 1; - @else - xyz += 2; - @endif - - test_case( "@ifndef...@else", xyz == 2 ); - -} - diff --git a/testing/btest/language/at-ifndef.zeek b/testing/btest/language/at-ifndef.zeek new file mode 100644 index 0000000000..069b51bddc --- /dev/null +++ b/testing/btest/language/at-ifndef.zeek @@ -0,0 +1,50 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + +global thisisdefined = 123; + +event zeek_init() +{ + local xyz = 0; + + # Test "ifndef" without "else" + + @ifndef ( notdefined ) + xyz += 1; + @endif + + @ifndef ( thisisdefined ) + xyz += 2; + @endif + + test_case( "@ifndef", xyz == 1 ); + + # Test "ifndef" with an "else" + + xyz = 0; + + @ifndef ( doesnotexist ) + xyz += 1; + @else + xyz += 2; + @endif + + test_case( "@ifndef...@else", xyz == 1 ); + + xyz = 0; + + @ifndef ( thisisdefined ) + xyz += 1; + @else + xyz += 2; + @endif + + test_case( "@ifndef...@else", xyz == 2 ); + +} + diff --git a/testing/btest/language/at-load.bro b/testing/btest/language/at-load.bro deleted file mode 100644 index 7427cd639a..0000000000 --- a/testing/btest/language/at-load.bro +++ /dev/null @@ -1,43 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -# In this script, we try to access each object defined in a "@load"ed script - -@load secondtestfile - -event bro_init() -{ - test_case( "function", T ); - test_case( "global variable", num == 123 ); - test_case( "const", daysperyear == 365 ); - event testevent( "foo" ); -} - - -# @TEST-START-FILE secondtestfile - -# In this script, we define some objects to be used in another script - -# Note: this script is not listed on the bro command-line (instead, it -# is "@load"ed from the other script) - -global test_case: function(msg: string, expect: bool); - -global testevent: event(msg: string); - -global num: count = 123; - -const daysperyear: count = 365; - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - -event testevent(msg: string) - { - test_case( "event", T ); - } - -# @TEST-END-FILE - diff --git a/testing/btest/language/at-load.zeek b/testing/btest/language/at-load.zeek new file mode 100644 index 0000000000..45df73b05c --- /dev/null +++ b/testing/btest/language/at-load.zeek @@ -0,0 +1,43 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +# In this script, we try to access each object defined in a "@load"ed script + +@load secondtestfile + +event zeek_init() +{ + test_case( "function", T ); + test_case( "global variable", num == 123 ); + test_case( "const", daysperyear == 365 ); + event testevent( "foo" ); +} + + +# @TEST-START-FILE secondtestfile + +# In this script, we define some objects to be used in another script + +# Note: this script is not listed on the zeek command-line (instead, it +# is "@load"ed from the other script) + +global test_case: function(msg: string, expect: bool); + +global testevent: event(msg: string); + +global num: count = 123; + +const daysperyear: count = 365; + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + +event testevent(msg: string) + { + test_case( "event", T ); + } + +# @TEST-END-FILE + diff --git a/testing/btest/language/attr-default-coercion.bro b/testing/btest/language/attr-default-coercion.bro deleted file mode 100644 index 14590d0033..0000000000 --- a/testing/btest/language/attr-default-coercion.bro +++ /dev/null @@ -1,26 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -type my_table: table[string] of double; - -type my_record: record { - i: int &default = 1; - d: double &default = 3; -}; - -global t: my_table &default = 7; -global r = my_record(); - -function foo(i: int &default = 237, d: double &default = 101) - { - print i, d; - } - -event bro_init() - { - print t["nope"]; - print r; - foo(); - foo(-5); - foo(-37, -8.1); - } diff --git a/testing/btest/language/attr-default-coercion.zeek b/testing/btest/language/attr-default-coercion.zeek new file mode 100644 index 0000000000..01adee04e4 --- /dev/null +++ b/testing/btest/language/attr-default-coercion.zeek @@ -0,0 +1,26 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +type my_table: table[string] of double; + +type my_record: record { + i: int &default = 1; + d: double &default = 3; +}; + +global t: my_table &default = 7; +global r = my_record(); + +function foo(i: int &default = 237, d: double &default = 101) + { + print i, d; + } + +event zeek_init() + { + print t["nope"]; + print r; + foo(); + foo(-5); + foo(-37, -8.1); + } diff --git a/testing/btest/language/attr-default-global-set-error.bro b/testing/btest/language/attr-default-global-set-error.bro deleted file mode 100644 index 8ee80bccb2..0000000000 --- a/testing/btest/language/attr-default-global-set-error.bro +++ /dev/null @@ -1,4 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -global ss: set[string] &default=0; diff --git a/testing/btest/language/attr-default-global-set-error.zeek b/testing/btest/language/attr-default-global-set-error.zeek new file mode 100644 index 0000000000..515c71fc24 --- /dev/null +++ b/testing/btest/language/attr-default-global-set-error.zeek @@ -0,0 +1,4 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +global ss: set[string] &default=0; diff --git a/testing/btest/language/bool.bro b/testing/btest/language/bool.bro deleted file mode 100644 index 8a1404459c..0000000000 --- a/testing/btest/language/bool.bro +++ /dev/null @@ -1,29 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); - } - - -event bro_init() -{ - local b1: bool = T; - local b2: bool = F; - local b3: bool = T; - local b4 = T; - local b5 = F; - - test_case( "equality operator", b1 == b3 ); - test_case( "inequality operator", b1 != b2 ); - test_case( "logical or operator", b1 || b2 ); - test_case( "logical and operator", b1 && b3 ); - test_case( "negation operator", !b2 ); - test_case( "absolute value", |b1| == 1 ); - test_case( "absolute value", |b2| == 0 ); - test_case( "type inference", type_name(b4) == "bool" ); - test_case( "type inference", type_name(b5) == "bool" ); - -} - diff --git a/testing/btest/language/bool.zeek b/testing/btest/language/bool.zeek new file mode 100644 index 0000000000..e19f5a3714 --- /dev/null +++ b/testing/btest/language/bool.zeek @@ -0,0 +1,29 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + + +event zeek_init() +{ + local b1: bool = T; + local b2: bool = F; + local b3: bool = T; + local b4 = T; + local b5 = F; + + test_case( "equality operator", b1 == b3 ); + test_case( "inequality operator", b1 != b2 ); + test_case( "logical or operator", b1 || b2 ); + test_case( "logical and operator", b1 && b3 ); + test_case( "negation operator", !b2 ); + test_case( "absolute value", |b1| == 1 ); + test_case( "absolute value", |b2| == 0 ); + test_case( "type inference", type_name(b4) == "bool" ); + test_case( "type inference", type_name(b5) == "bool" ); + +} + diff --git a/testing/btest/language/common-mistakes.bro b/testing/btest/language/common-mistakes.bro deleted file mode 100644 index 361aae0ff4..0000000000 --- a/testing/btest/language/common-mistakes.bro +++ /dev/null @@ -1,95 +0,0 @@ -# These tests show off common scripting mistakes, which should be -# handled internally by way of throwing an exception to unwind out -# of the current event handler body. 
- -# @TEST-EXEC: bro -b 1.bro >1.out 2>&1 -# @TEST-EXEC: btest-diff 1.out - -# @TEST-EXEC: bro -b 2.bro >2.out 2>&1 -# @TEST-EXEC: btest-diff 2.out - -# @TEST-EXEC: bro -b 3.bro >3.out 2>&1 -# @TEST-EXEC: btest-diff 3.out - -@TEST-START-FILE 1.bro -type myrec: record { - f: string &optional; -}; - -function foo(mr: myrec) - { - print "foo start"; - # Unitialized field access: unwind out of current event handler body - print mr$f; - # Unreachable - print "foo done"; - } - -function bar() - { - print "bar start"; - foo(myrec()); - # Unreachable - print "bar done"; - } - -event bro_init() - { - bar(); - # Unreachable - print "bro_init done"; - } - -event bro_init() &priority=-10 - { - # Reachable - print "other bro_init"; - } -@TEST-END-FILE - -@TEST-START-FILE 2.bro -function foo() - { - print "in foo"; - local t: table[string] of string = table(); - - # Non-existing index access: (sub)expressions should not be evaluated - if ( t["nope"] == "nope" ) - # Unreachable - print "yes"; - else - # Unreachable - print "no"; - - # Unreachable - print "foo done"; - } - -event bro_init() - { - foo(); - # Unreachable - print "bro_init done"; - } - -@TEST-END-FILE - -@TEST-START-FILE 3.bro -function foo(v: vector of any) - { - print "in foo"; - # Vector append incompatible element type - v += "ok"; - # Unreachable - print "foo done"; - } - -event bro_init() - { - local v: vector of count; - v += 1; - foo(v); - # Unreachable - print "bro_init done", v; - } -@TEST-END-FILE diff --git a/testing/btest/language/common-mistakes.zeek b/testing/btest/language/common-mistakes.zeek new file mode 100644 index 0000000000..b829b5315b --- /dev/null +++ b/testing/btest/language/common-mistakes.zeek @@ -0,0 +1,95 @@ +# These tests show off common scripting mistakes, which should be +# handled internally by way of throwing an exception to unwind out +# of the current event handler body. 
+ +# @TEST-EXEC: zeek -b 1.zeek >1.out 2>&1 +# @TEST-EXEC: btest-diff 1.out + +# @TEST-EXEC: zeek -b 2.zeek >2.out 2>&1 +# @TEST-EXEC: btest-diff 2.out + +# @TEST-EXEC: zeek -b 3.zeek >3.out 2>&1 +# @TEST-EXEC: btest-diff 3.out + +@TEST-START-FILE 1.zeek +type myrec: record { + f: string &optional; +}; + +function foo(mr: myrec) + { + print "foo start"; + # Unitialized field access: unwind out of current event handler body + print mr$f; + # Unreachable + print "foo done"; + } + +function bar() + { + print "bar start"; + foo(myrec()); + # Unreachable + print "bar done"; + } + +event zeek_init() + { + bar(); + # Unreachable + print "zeek_init done"; + } + +event zeek_init() &priority=-10 + { + # Reachable + print "other zeek_init"; + } +@TEST-END-FILE + +@TEST-START-FILE 2.zeek +function foo() + { + print "in foo"; + local t: table[string] of string = table(); + + # Non-existing index access: (sub)expressions should not be evaluated + if ( t["nope"] == "nope" ) + # Unreachable + print "yes"; + else + # Unreachable + print "no"; + + # Unreachable + print "foo done"; + } + +event zeek_init() + { + foo(); + # Unreachable + print "zeek_init done"; + } + +@TEST-END-FILE + +@TEST-START-FILE 3.zeek +function foo(v: vector of any) + { + print "in foo"; + # Vector append incompatible element type + v += "ok"; + # Unreachable + print "foo done"; + } + +event zeek_init() + { + local v: vector of count; + v += 1; + foo(v); + # Unreachable + print "zeek_init done", v; + } +@TEST-END-FILE diff --git a/testing/btest/language/conditional-expression.bro b/testing/btest/language/conditional-expression.bro deleted file mode 100644 index ea0acf009f..0000000000 --- a/testing/btest/language/conditional-expression.bro +++ /dev/null @@ -1,66 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - -global ct: count; - -function f1(): bool - { - ct += 1; - return T; - } - -function f2(): bool - { - ct += 4; - return F; - } - - -event bro_init() -{ - local a: count; - local b: count; - local res: count; - local res2: bool; - - # Test that the correct operand is evaluated - - a = b = 0; - res = T ? ++a : ++b; - test_case( "true condition", a == 1 && b == 0 && res == 1); - - a = b = 0; - res = F ? ++a : ++b; - test_case( "false condition", a == 0 && b == 1 && res == 1); - - # Test again using function calls as operands - - ct = 0; - res2 = ct == 0 ? f1() : f2(); - test_case( "true condition", ct == 1 && res2 == T); - - ct = 0; - res2 = ct != 0 ? f1() : f2(); - test_case( "false condition", ct == 4 && res2 == F); - - # Test that the conditional operator is right-associative - - ct = 0; - T ? f1() : T ? f1() : f2(); - test_case( "associativity", ct == 1 ); - - ct = 0; - T ? f1() : (T ? f1() : f2()); - test_case( "associativity", ct == 1 ); - - ct = 0; - (T ? f1() : T) ? f1() : f2(); - test_case( "associativity", ct == 2 ); - -} - diff --git a/testing/btest/language/conditional-expression.zeek b/testing/btest/language/conditional-expression.zeek new file mode 100644 index 0000000000..43c5d12a83 --- /dev/null +++ b/testing/btest/language/conditional-expression.zeek @@ -0,0 +1,66 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + +global ct: count; + +function f1(): bool + { + ct += 1; + return T; + } + +function f2(): bool + { + ct += 4; + return F; + } + + +event zeek_init() +{ + local a: count; + local b: count; + local res: count; + local res2: bool; + + # Test that the correct operand is evaluated + + a = b = 0; + res = T ? ++a : ++b; + test_case( "true condition", a == 1 && b == 0 && res == 1); + + a = b = 0; + res = F ? ++a : ++b; + test_case( "false condition", a == 0 && b == 1 && res == 1); + + # Test again using function calls as operands + + ct = 0; + res2 = ct == 0 ? f1() : f2(); + test_case( "true condition", ct == 1 && res2 == T); + + ct = 0; + res2 = ct != 0 ? f1() : f2(); + test_case( "false condition", ct == 4 && res2 == F); + + # Test that the conditional operator is right-associative + + ct = 0; + T ? f1() : T ? f1() : f2(); + test_case( "associativity", ct == 1 ); + + ct = 0; + T ? f1() : (T ? f1() : f2()); + test_case( "associativity", ct == 1 ); + + ct = 0; + (T ? f1() : T) ? f1() : f2(); + test_case( "associativity", ct == 2 ); + +} + diff --git a/testing/btest/language/const.bro b/testing/btest/language/const.bro deleted file mode 100644 index ee938e8d45..0000000000 --- a/testing/btest/language/const.bro +++ /dev/null @@ -1,79 +0,0 @@ -# @TEST-EXEC: bro -b valid.bro 2>valid.stderr 1>valid.stdout -# @TEST-EXEC: btest-diff valid.stderr -# @TEST-EXEC: btest-diff valid.stdout - -# @TEST-EXEC-FAIL: bro -b invalid.bro 2>invalid.stderr 1>invalid.stdout -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff invalid.stderr -# @TEST-EXEC: btest-diff invalid.stdout - -@TEST-START-FILE valid.bro -# First some simple code that should be valid and error-free. - -function f(c: count) - { - print "enter f", c; - c = c + 100; - print "exit f", c; - } - -const foo = 0 &redef; -redef foo = 10; - -const bar = 9; - -event bro_init() - { - const baz = 7; - local i = foo; - i = i + bar + 2; - i = i + baz + 11; - ++i; - print i; - --i; - f(foo); - f(bar); - f(baz); - print "foo", foo; - print "bar", bar; - print "baz", baz; - } - -@TEST-END-FILE - -@TEST-START-FILE invalid.bro -# Now some const assignments that should generate errors at parse-time. - -const foo = 0 &redef; -redef foo = 10; - -const bar = 9; - -event bro_init() - { - const baz = 7; - local s = 0; - - print "nope"; - - foo = 100; - foo = bar; - foo = bar = baz; - foo = s; - ++foo; - s = foo = bar; - - if ( foo = 0 ) - print "nope"; - - bar = 1 + 1; - baz = s; - ++bar; - --baz; - - print "foo", foo; - print "bar", bar; - print "baz", baz; - print "foo=foo", foo = foo; - } - -@TEST-END-FILE diff --git a/testing/btest/language/const.zeek b/testing/btest/language/const.zeek new file mode 100644 index 0000000000..38aada2029 --- /dev/null +++ b/testing/btest/language/const.zeek @@ -0,0 +1,79 @@ +# @TEST-EXEC: zeek -b valid.zeek 2>valid.stderr 1>valid.stdout +# @TEST-EXEC: btest-diff valid.stderr +# @TEST-EXEC: btest-diff valid.stdout + +# @TEST-EXEC-FAIL: zeek -b invalid.zeek 2>invalid.stderr 1>invalid.stdout +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff invalid.stderr +# @TEST-EXEC: btest-diff invalid.stdout + +@TEST-START-FILE valid.zeek +# First some simple code that should be valid and error-free. 
+ +function f(c: count) + { + print "enter f", c; + c = c + 100; + print "exit f", c; + } + +const foo = 0 &redef; +redef foo = 10; + +const bar = 9; + +event zeek_init() + { + const baz = 7; + local i = foo; + i = i + bar + 2; + i = i + baz + 11; + ++i; + print i; + --i; + f(foo); + f(bar); + f(baz); + print "foo", foo; + print "bar", bar; + print "baz", baz; + } + +@TEST-END-FILE + +@TEST-START-FILE invalid.zeek +# Now some const assignments that should generate errors at parse-time. + +const foo = 0 &redef; +redef foo = 10; + +const bar = 9; + +event zeek_init() + { + const baz = 7; + local s = 0; + + print "nope"; + + foo = 100; + foo = bar; + foo = bar = baz; + foo = s; + ++foo; + s = foo = bar; + + if ( foo = 0 ) + print "nope"; + + bar = 1 + 1; + baz = s; + ++bar; + --baz; + + print "foo", foo; + print "bar", bar; + print "baz", baz; + print "foo=foo", foo = foo; + } + +@TEST-END-FILE diff --git a/testing/btest/language/container-ctor-scope.bro b/testing/btest/language/container-ctor-scope.bro deleted file mode 100644 index fd1939a459..0000000000 --- a/testing/btest/language/container-ctor-scope.bro +++ /dev/null @@ -1,38 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -# All various container contructors should work at both global and local scope. - -global gt1: table[port] of count = table( [1/tcp] = 1, [2/tcp] = 2, [3/tcp] = 3 ); -global gs1: set[port] = set( 1/tcp, 2/tcp, 3/tcp ); -global gv1: vector of port = vector( 1/tcp, 2/tcp, 3/tcp, 1/tcp ); - -global gt2: table[port] of count = { [1/tcp] = 1, [2/tcp] = 2, [3/tcp] = 3 }; -global gs2: set[port] = { 1/tcp, 2/tcp, 3/tcp }; -global gv2: vector of port = { 1/tcp, 2/tcp, 3/tcp, 1/tcp }; - -local t1: table[port] of count = table( [1/tcp] = 1, [2/tcp] = 2, [3/tcp] = 3 ); -local s1: set[port] = set( 1/tcp, 2/tcp, 3/tcp ); -local v1: vector of port = vector( 1/tcp, 2/tcp, 3/tcp, 1/tcp ); - -local t2: table[port] of count = { [1/tcp] = 1, [2/tcp] = 2, [3/tcp] = 3 }; -local s2: set[port] = { 1/tcp, 2/tcp, 3/tcp }; -local v2: vector of port = { 1/tcp, 2/tcp, 3/tcp, 1/tcp }; - -print gt1; -print gt2; - -print gs1; -print gs2; - -print gv1; -print gv2; - -print t1; -print t2; - -print s1; -print s2; - -print v1; -print v2; diff --git a/testing/btest/language/container-ctor-scope.zeek b/testing/btest/language/container-ctor-scope.zeek new file mode 100644 index 0000000000..f4f2da92ac --- /dev/null +++ b/testing/btest/language/container-ctor-scope.zeek @@ -0,0 +1,38 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +# All various container contructors should work at both global and local scope. 
+ +global gt1: table[port] of count = table( [1/tcp] = 1, [2/tcp] = 2, [3/tcp] = 3 ); +global gs1: set[port] = set( 1/tcp, 2/tcp, 3/tcp ); +global gv1: vector of port = vector( 1/tcp, 2/tcp, 3/tcp, 1/tcp ); + +global gt2: table[port] of count = { [1/tcp] = 1, [2/tcp] = 2, [3/tcp] = 3 }; +global gs2: set[port] = { 1/tcp, 2/tcp, 3/tcp }; +global gv2: vector of port = { 1/tcp, 2/tcp, 3/tcp, 1/tcp }; + +local t1: table[port] of count = table( [1/tcp] = 1, [2/tcp] = 2, [3/tcp] = 3 ); +local s1: set[port] = set( 1/tcp, 2/tcp, 3/tcp ); +local v1: vector of port = vector( 1/tcp, 2/tcp, 3/tcp, 1/tcp ); + +local t2: table[port] of count = { [1/tcp] = 1, [2/tcp] = 2, [3/tcp] = 3 }; +local s2: set[port] = { 1/tcp, 2/tcp, 3/tcp }; +local v2: vector of port = { 1/tcp, 2/tcp, 3/tcp, 1/tcp }; + +print gt1; +print gt2; + +print gs1; +print gs2; + +print gv1; +print gv2; + +print t1; +print t2; + +print s1; +print s2; + +print v1; +print v2; diff --git a/testing/btest/language/copy-all-opaques.zeek b/testing/btest/language/copy-all-opaques.zeek new file mode 100644 index 0000000000..6e590816e6 --- /dev/null +++ b/testing/btest/language/copy-all-opaques.zeek @@ -0,0 +1,93 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff .stderr + +event zeek_init() + { + print "============ Topk"; + local k1: opaque of topk = topk_init(4); + topk_add(k1, "a"); + topk_add(k1, "b"); + topk_add(k1, "b"); + topk_add(k1, "c"); + local k2 = copy(k1); + print topk_get_top(k1, 5); + topk_add(k1, "shoulnotshowup"); + print topk_get_top(k2, 5); + + + print "============ HLL"; + local c1 = hll_cardinality_init(0.01, 0.95); + hll_cardinality_add(c1, 2001); + hll_cardinality_add(c1, 2002); + hll_cardinality_add(c1, 2003); + + print hll_cardinality_estimate(c1); + local c2 = copy(c1); + hll_cardinality_add(c1, 2004); + print hll_cardinality_estimate(c2); + + local c3 = hll_cardinality_init(0.01, 0.95); + hll_cardinality_merge_into(c3, c2); + print hll_cardinality_estimate(c3); + + print "============ Bloom"; + local bf_cnt = bloomfilter_basic_init(0.1, 1000); + bloomfilter_add(bf_cnt, 42); + bloomfilter_add(bf_cnt, 84); + bloomfilter_add(bf_cnt, 168); + print bloomfilter_lookup(bf_cnt, 0); + print bloomfilter_lookup(bf_cnt, 42); + local bf_copy = copy(bf_cnt); + bloomfilter_add(bf_cnt, 0); + print bloomfilter_lookup(bf_copy, 0); + print bloomfilter_lookup(bf_copy, 42); + # check that typefication transfered. 
+ bloomfilter_add(bf_copy, 0.5); # causes stderr output + + print "============ Hashes"; + local md5a = md5_hash_init(); + md5_hash_update(md5a, "one"); + local md5b = copy(md5a); + md5_hash_update(md5a, "two"); + md5_hash_update(md5b, "two"); + print md5_hash_finish(md5a); + print md5_hash_finish(md5b); + + local sha1a = sha1_hash_init(); + sha1_hash_update(sha1a, "one"); + local sha1b = copy(sha1a); + sha1_hash_update(sha1a, "two"); + sha1_hash_update(sha1b, "two"); + print sha1_hash_finish(sha1a); + print sha1_hash_finish(sha1b); + + local sha256a = sha256_hash_init(); + sha256_hash_update(sha256a, "one"); + local sha256b = copy(sha256a); + sha256_hash_update(sha256a, "two"); + sha256_hash_update(sha256b, "two"); + print sha256_hash_finish(sha256a); + print sha256_hash_finish(sha256b); + + print "============ X509"; + local x509 = x509_from_der("\x30\x82\x03\x75\x30\x82\x02\x5D\xA0\x03\x02\x01\x02\x02\x0B\x04\x00\x00\x00\x00\x01\x15\x4B\x5A\xC3\x94\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x30\x57\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x42\x45\x31\x19\x30\x17\x06\x03\x55\x04\x0A\x13\x10\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x20\x6E\x76\x2D\x73\x61\x31\x10\x30\x0E\x06\x03\x55\x04\x0B\x13\x07\x52\x6F\x6F\x74\x20\x43\x41\x31\x1B\x30\x19\x06\x03\x55\x04\x03\x13\x12\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x1E\x17\x0D\x39\x38\x30\x39\x30\x31\x31\x32\x30\x30\x30\x30\x5A\x17\x0D\x32\x38\x30\x31\x32\x38\x31\x32\x30\x30\x30\x30\x5A\x30\x57\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x42\x45\x31\x19\x30\x17\x06\x03\x55\x04\x0A\x13\x10\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x20\x6E\x76\x2D\x73\x61\x31\x10\x30\x0E\x06\x03\x55\x04\x0B\x13\x07\x52\x6F\x6F\x74\x20\x43\x41\x31\x1B\x30\x19\x06\x03\x55\x04\x03\x13\x12\x47\x6C\x6F\x62\x61\x6C\x53\x69\x67\x6E\x20\x52\x6F\x6F\x74\x20\x43\x41\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xDA\x0E\xE6\x99\x8D\xCE\xA3\xE3\x4F\x8A\x7E\xFB\xF1\x8B\x83\x25\x6B\xEA\x48\x1F\xF1\x2A\xB0\xB9\x95\x11\x04\xBD\xF0\x63\xD1\xE2\x67\x66\xCF\x1C\xDD\xCF\x1B\x48\x2B\xEE\x8D\x89\x8E\x9A\xAF\x29\x80\x65\xAB\xE9\xC7\x2D\x12\xCB\xAB\x1C\x4C\x70\x07\xA1\x3D\x0A\x30\xCD\x15\x8D\x4F\xF8\xDD\xD4\x8C\x50\x15\x1C\xEF\x50\xEE\xC4\x2E\xF7\xFC\xE9\x52\xF2\x91\x7D\xE0\x6D\xD5\x35\x30\x8E\x5E\x43\x73\xF2\x41\xE9\xD5\x6A\xE3\xB2\x89\x3A\x56\x39\x38\x6F\x06\x3C\x88\x69\x5B\x2A\x4D\xC5\xA7\x54\xB8\x6C\x89\xCC\x9B\xF9\x3C\xCA\xE5\xFD\x89\xF5\x12\x3C\x92\x78\x96\xD6\xDC\x74\x6E\x93\x44\x61\xD1\x8D\xC7\x46\xB2\x75\x0E\x86\xE8\x19\x8A\xD5\x6D\x6C\xD5\x78\x16\x95\xA2\xE9\xC8\x0A\x38\xEB\xF2\x24\x13\x4F\x73\x54\x93\x13\x85\x3A\x1B\xBC\x1E\x34\xB5\x8B\x05\x8C\xB9\x77\x8B\xB1\xDB\x1F\x20\x91\xAB\x09\x53\x6E\x90\xCE\x7B\x37\x74\xB9\x70\x47\x91\x22\x51\x63\x16\x79\xAE\xB1\xAE\x41\x26\x08\xC8\x19\x2B\xD1\x46\xAA\x48\xD6\x64\x2A\xD7\x83\x34\xFF\x2C\x2A\xC1\x6C\x19\x43\x4A\x07\x85\xE7\xD3\x7C\xF6\x21\x68\xEF\xEA\xF2\x52\x9F\x7F\x93\x90\xCF\x02\x03\x01\x00\x01\xA3\x42\x30\x40\x30\x0E\x06\x03\x55\x1D\x0F\x01\x01\xFF\x04\x04\x03\x02\x01\x06\x30\x0F\x06\x03\x55\x1D\x13\x01\x01\xFF\x04\x05\x30\x03\x01\x01\xFF\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x60\x7B\x66\x1A\x45\x0D\x97\xCA\x89\x50\x2F\x7D\x04\xCD\x34\xA8\xFF\xFC\xFD\x4B\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05\x00\x03\x82\x01\x01\x00\xD6\x73\xE7\x7C\x4F\x76\xD0\x8D\xBF\xEC\xBA\xA2\xBE\x34\xC5\x28\x32\xB5\x7C\xFC\x6C\x9C\x2C\x2B\xBD\x09\x9E\x53\xBF\x6B\x5E\xAA\x11\x48\xB6\xE5\x08\xA3\xB3\xCA\x
3D\x61\x4D\xD3\x46\x09\xB3\x3E\xC3\xA0\xE3\x63\x55\x1B\xF2\xBA\xEF\xAD\x39\xE1\x43\xB9\x38\xA3\xE6\x2F\x8A\x26\x3B\xEF\xA0\x50\x56\xF9\xC6\x0A\xFD\x38\xCD\xC4\x0B\x70\x51\x94\x97\x98\x04\xDF\xC3\x5F\x94\xD5\x15\xC9\x14\x41\x9C\xC4\x5D\x75\x64\x15\x0D\xFF\x55\x30\xEC\x86\x8F\xFF\x0D\xEF\x2C\xB9\x63\x46\xF6\xAA\xFC\xDF\xBC\x69\xFD\x2E\x12\x48\x64\x9A\xE0\x95\xF0\xA6\xEF\x29\x8F\x01\xB1\x15\xB5\x0C\x1D\xA5\xFE\x69\x2C\x69\x24\x78\x1E\xB3\xA7\x1C\x71\x62\xEE\xCA\xC8\x97\xAC\x17\x5D\x8A\xC2\xF8\x47\x86\x6E\x2A\xC4\x56\x31\x95\xD0\x67\x89\x85\x2B\xF9\x6C\xA6\x5D\x46\x9D\x0C\xAA\x82\xE4\x99\x51\xDD\x70\xB7\xDB\x56\x3D\x61\xE4\x6A\xE1\x5C\xD6\xF6\xFE\x3D\xDE\x41\xCC\x07\xAE\x63\x52\xBF\x53\x53\xF4\x2B\xE9\xC7\xFD\xB6\xF7\x82\x5F\x85\xD2\x41\x18\xDB\x81\xB3\x04\x1C\xC5\x1F\xA4\x80\x6F\x15\x20\xC9\xDE\x0C\x88\x0A\x1D\xD6\x66\x55\xE2\xFC\x48\xC9\x29\x26\x69\xE0"); + local x5092 = copy(x509); + print x509_parse(x509); + print x509_parse(x5092); + + print "============ Entropy"; + local handle = entropy_test_init(); + entropy_test_add(handle, "dh3Hie02uh^s#Sdf9L3frd243h$d78r2G4cM6*Q05d(7rh46f!0|4-f"); + local handle2 = copy(handle); + print entropy_test_finish(handle); + print entropy_test_finish(handle2); + + print "============ Paraglob"; + local p = paraglob_init(vector("https://*.google.com/*", "*malware*", "*.gov*")); + local p2 = copy(p); + print paraglob_equals(p, p2); + # A get operation shouldn't change the paraglob + paraglob_match(p, "whitehouse.gov"); + print paraglob_equals(p, p2); + } diff --git a/testing/btest/language/copy-all-types.zeek b/testing/btest/language/copy-all-types.zeek new file mode 100644 index 0000000000..10bf182ef5 --- /dev/null +++ b/testing/btest/language/copy-all-types.zeek @@ -0,0 +1,187 @@ +# Note: opaque types in separate test +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +type MyEnum: enum { ENUMME }; + +type InnerTestRecord: record { + a: string; +}; + +type TestRecord: record { + s1: string; + s2: string; + i1: InnerTestRecord; + i2: InnerTestRecord &optional; + donotset: InnerTestRecord &optional; + def: count &default=5; +}; + +function join_count_set(ss: set[count], j: string): string + { + local output=""; + local i=0; + for ( s in ss ) + { + if ( i > 0 ) + output = cat(output, j); + + output = cat(output, s); + ++i; + } + return output; + } + +function do_format(i: any): any + { + local tpe = type_name(i); + + switch ( tpe ) + { + case "set[count]": + return join_count_set(i, ","); + case "table[string] of string": + local cast: table[string] of string = i; + local vout: vector of string = vector(); + for ( el in cast ) + { + vout += cat(el, "=", cast[el]); + } + return join_string_vec(vout, ";"); + } + return i; + } + +function check(o1: any, o2: any, equal: bool, expect_same: bool) + { + local expect_msg = (equal ? "ok" : "FAIL0"); + local same = same_object(o1, o2); + + if ( expect_same && ! same ) + expect_msg = "FAIL1"; + + if ( ! 
expect_same && same ) + expect_msg = "FAIL2"; + + print fmt("orig=%s (%s) clone=%s (%s) equal=%s same_object=%s (%s)", do_format(o1), type_name(o1), do_format(o2), type_name(o2), equal, same, expect_msg); + } + +function check_vector_equal(a: vector of count, b: vector of count): bool + { + if ( |a| != |b| ) + return F; + + for ( i in a ) + { + if ( a[i] != b[i] ) + return F; + } + + return T; + } + +function check_string_table_equal(a: table[string] of string, b: table[string] of string): bool + { + if ( |a| != |b| ) + return F; + + for ( i in a ) + { + if ( a[i] != b[i] ) + return F; + } + + return T; + } + +function compare_otr(a: TestRecord, b: TestRecord): bool + { + if ( a$s1 != b$s1 ) + return F; + if ( a$s2 != b$s2 ) + return F; + if ( a$i1$a != b$i1$a ) + return F; + if ( a$i2$a != b$i2$a ) + return F; + + if ( same_object(a$i1, b$i1) ) + return F; + if ( same_object(a$i2, b$i2) ) + return F; + + # check that we restroe that i1 & i2 point to same object + if ( ! same_object(a$i1, a$i2) ) + return F; + if ( ! same_object(b$i1, b$i2) ) + return F; + + if ( a$def != b$def ) + return F; + + return T; + } + + +event zeek_init() + { + local i1 = -42; + local i2 = copy(i1); + check(i1, i2, i1 == i2, T); + + local c1 : count = 42; + local c2 = copy(c1); + check(c1, c2, c1 == c2, T); + + local a1 = 127.0.0.1; + local a2 = copy(a1); + check(a1, a2, a1 == a2, T); + + local p1 = 42/tcp; + local p2 = copy(p1); + check(p1, p2, p1 == p2, T); + + local sn1 = 127.0.0.1/24; + local sn2 = copy(sn1); + check(sn1, sn2, sn1 == sn2, T); + + local s1 = "Foo"; + local s2 = copy(s1); + check(s1, s2, s1 == s2, F); + + local pat1 = /.*PATTERN.*/; + local pat2 = copy(pat1); + # patterns cannot be directoy compared + if ( same_object(pat1, pat2) ) + print "FAIL P1"; + if ( ! ( pat1 == "PATTERN" ) ) + print "FAIL P2"; + if ( ! 
( pat2 == "PATTERN" ) ) + print "FAIL P3"; + if ( pat2 == "PATERN" ) + print "FAIL P4"; + print fmt("orig=%s (%s) clone=%s (%s) same_object=%s", pat1, type_name(pat1), pat2, type_name(pat2), same_object(pat1, pat2)); + + local set1 = [1, 2, 3, 4, 5]; + local set2 = copy(set1); + check(set1, set2, set1 == set2, F); + + local v1 = vector(1, 2, 3, 4, 5); + local v2 = copy(v1); + check(v1, v2, check_vector_equal(v1, v2), F); + + local t1 : table[string] of string = table(); + t1["a"] = "va"; + t1["b"] = "vb"; + local t2 = copy(t1); + check(t1, t2, check_string_table_equal(t1, t2), F); + + local e1 = ENUMME; + local e2 = copy(ENUMME); + check(e1, e2, e1 == e2, T); + + local itr = InnerTestRecord($a="a"); + local otr1 = TestRecord($s1="s1", $s2="s2", $i1=itr, $i2=itr); + local otr2 = copy(otr1); + check(otr1, otr2, compare_otr(otr1, otr2), F); + } diff --git a/testing/btest/language/copy-cycle.zeek b/testing/btest/language/copy-cycle.zeek new file mode 100644 index 0000000000..347affeb40 --- /dev/null +++ b/testing/btest/language/copy-cycle.zeek @@ -0,0 +1,23 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +type B: record { + x: any &optional; + }; + +type A: record { + x: any &optional; + y: B; + }; + +event zeek_init() + { + local x: A; + x$x = x; + x$y$x = x; + local y = copy(x); + + print fmt("%s (expected: F)", same_object(x, y)); + print fmt("%s (expected: T)", same_object(y, y$x)); + print fmt("%s (expected: T)", same_object(y, y$y$x)); + } diff --git a/testing/btest/language/copy.bro b/testing/btest/language/copy.bro deleted file mode 100644 index 3ddbc15e23..0000000000 --- a/testing/btest/language/copy.bro +++ /dev/null @@ -1,30 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - - - -event bro_init() -{ - # "b" is not a copy of "a" - local a: set[string] = set("this", "test"); - local b: set[string] = a; - - delete a["this"]; - - test_case( "direct assignment", |b| == 1 && "this" !in b ); - - # "d" is a copy of "c" - local c: set[string] = set("this", "test"); - local d: set[string] = copy(c); - - delete c["this"]; - - test_case( "using copy", |d| == 2 && "this" in d); - -} - diff --git a/testing/btest/language/copy.zeek b/testing/btest/language/copy.zeek new file mode 100644 index 0000000000..638976295d --- /dev/null +++ b/testing/btest/language/copy.zeek @@ -0,0 +1,49 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + +event zeek_init() + { + # "b" is not a copy of "a" + local a: set[string] = set("this", "test"); + local b: set[string] = a; + + delete a["this"]; + + test_case( "direct assignment", |b| == 1 && "this" !in b ); + + # "d" is a copy of "c" + local c: set[string] = set("this", "test"); + local d: set[string] = copy(c); + + delete c["this"]; + + test_case( "using copy", |d| == 2 && "this" in d); + } + +type myrec: record { + a: count; +}; + +event zeek_init() + { + local v: vector of myrec; + local t: table[count] of myrec; + local mr = myrec($a = 42); + + t[0] = mr; + t[1] = mr; + local tc = copy(t); + print same_object(t, tc), same_object(tc[0], tc[1]); + + v[0] = mr; + v[1] = mr; + local vc = copy(v); + print same_object(v, vc), same_object(vc[0], vc[1]); + print tc[0], tc[1], vc[0], vc[1]; + } + diff --git a/testing/btest/language/count.bro b/testing/btest/language/count.bro deleted file mode 100644 index 39a3786dfb..0000000000 --- a/testing/btest/language/count.bro +++ /dev/null @@ -1,70 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - - -event bro_init() -{ - local c1: count = 0; - local c2: count = 5; - local c3: count = 0xFF; - local c4: count = 255; - local c5: count = 18446744073709551615; # maximum allowed value - local c6: count = 0xffffffffffffffff; # maximum allowed value - local c7: counter = 5; - local c8 = 1; - - # Type inference test - - test_case( "type inference", type_name(c8) == "count" ); - - # Counter alias test - - test_case( "counter alias", c2 == c7 ); - - # Test various constant representations - - test_case( "hexadecimal", c3 == c4 ); - - # Operator tests - - test_case( "inequality operator", c1 != c2 ); - test_case( "relational operator", c1 < c2 ); - test_case( "relational operator", c1 <= c2 ); - test_case( "relational operator", c2 > c1 ); - test_case( "relational operator", c2 >= c1 ); - test_case( "absolute value", |c1| == 0 ); - test_case( "absolute value", |c2| == 5 ); - test_case( "pre-increment operator", ++c2 == 6 ); - test_case( "pre-decrement operator", --c2 == 5 ); - test_case( "modulus operator", c2%2 == 1 ); - test_case( "division operator", c2/2 == 2 ); - c2 += 3; - test_case( "assignment operator", c2 == 8 ); - c2 -= 2; - test_case( "assignment operator", c2 == 6 ); - test_case( "bitwise and", c2 & 0x4 == 0x4 ); - test_case( "bitwise and", c4 & 0x4 == 0x4 ); - test_case( "bitwise and", c8 & 0x4 == 0x0 ); - test_case( "bitwise or", c2 | 0x4 == c2 ); - test_case( "bitwise or", c4 | 0x4 == c4 ); - test_case( "bitwise or", c8 | 0x4 == c7 ); - test_case( "bitwise xor", c7 ^ 0x4 == c8 ); - test_case( "bitwise xor", c4 ^ 0x4 == 251 ); - test_case( "bitwise xor", c8 ^ 0x4 == c7 ); - test_case( "bitwise complement", ~c6 == 0 ); - test_case( "bitwise complement", ~~c4 == c4 ); - - # Max. value tests - - local str1 = fmt("max count value = %d", c5); - test_case( str1, str1 == "max count value = 18446744073709551615" ); - local str2 = fmt("max count value = %d", c6); - test_case( str2, str2 == "max count value = 18446744073709551615" ); - -} - diff --git a/testing/btest/language/count.zeek b/testing/btest/language/count.zeek new file mode 100644 index 0000000000..a2d3fb0cc2 --- /dev/null +++ b/testing/btest/language/count.zeek @@ -0,0 +1,70 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + +event zeek_init() +{ + local c1: count = 0; + local c2: count = 5; + local c3: count = 0xFF; + local c4: count = 255; + local c5: count = 18446744073709551615; # maximum allowed value + local c6: count = 0xffffffffffffffff; # maximum allowed value + local c7: counter = 5; + local c8 = 1; + + # Type inference test + + test_case( "type inference", type_name(c8) == "count" ); + + # Counter alias test + + test_case( "counter alias", c2 == c7 ); + + # Test various constant representations + + test_case( "hexadecimal", c3 == c4 ); + + # Operator tests + + test_case( "inequality operator", c1 != c2 ); + test_case( "relational operator", c1 < c2 ); + test_case( "relational operator", c1 <= c2 ); + test_case( "relational operator", c2 > c1 ); + test_case( "relational operator", c2 >= c1 ); + test_case( "absolute value", |c1| == 0 ); + test_case( "absolute value", |c2| == 5 ); + test_case( "pre-increment operator", ++c2 == 6 ); + test_case( "pre-decrement operator", --c2 == 5 ); + test_case( "modulus operator", c2%2 == 1 ); + test_case( "division operator", c2/2 == 2 ); + c2 += 3; + test_case( "assignment operator", c2 == 8 ); + c2 -= 2; + test_case( "assignment operator", c2 == 6 ); + test_case( "bitwise and", c2 & 0x4 == 0x4 ); + test_case( "bitwise and", c4 & 0x4 == 0x4 ); + test_case( "bitwise and", c8 & 0x4 == 0x0 ); + test_case( "bitwise or", c2 | 0x4 == c2 ); + test_case( "bitwise or", c4 | 0x4 == c4 ); + test_case( "bitwise or", c8 | 0x4 == c7 ); + test_case( "bitwise xor", c7 ^ 0x4 == c8 ); + test_case( "bitwise xor", c4 ^ 0x4 == 251 ); + test_case( "bitwise xor", c8 ^ 0x4 == c7 ); + test_case( "bitwise complement", ~c6 == 0 ); + test_case( "bitwise complement", ~~c4 == c4 ); + + # Max. value tests + + local str1 = fmt("max count value = %d", c5); + test_case( str1, str1 == "max count value = 18446744073709551615" ); + local str2 = fmt("max count value = %d", c6); + test_case( str2, str2 == "max count value = 18446744073709551615" ); + +} + diff --git a/testing/btest/language/cross-product-init.bro b/testing/btest/language/cross-product-init.bro deleted file mode 100644 index 8cb9c48367..0000000000 --- a/testing/btest/language/cross-product-init.bro +++ /dev/null @@ -1,10 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: btest-diff output - -global my_subs = { 1.2.3.4/19, 5.6.7.8/21 }; - -global x: set[string, subnet] &redef; - -redef x += { [["foo", "bar"], my_subs] }; - -print x; diff --git a/testing/btest/language/cross-product-init.zeek b/testing/btest/language/cross-product-init.zeek new file mode 100644 index 0000000000..f5027cfd3c --- /dev/null +++ b/testing/btest/language/cross-product-init.zeek @@ -0,0 +1,10 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: btest-diff output + +global my_subs = { 1.2.3.4/19, 5.6.7.8/21 }; + +global x: set[string, subnet] &redef; + +redef x += { [["foo", "bar"], my_subs] }; + +print x; diff --git a/testing/btest/language/default-params.bro b/testing/btest/language/default-params.bro deleted file mode 100644 index c11adbf3b5..0000000000 --- a/testing/btest/language/default-params.bro +++ /dev/null @@ -1,65 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -### functions - -global foo_func: function(a: string &default="hello"); - -# &defaults transfer from the declaration automatically -function foo_func(a: string) - { - print "foo_func", a; - } - -function bar_func(a: string, b: string &default="hi", c: count &default=5) - { - print "bar_func", a, b, c; - } - -### events - 
-global foo_event: event(a: string &default="hello"); - -event foo_event(a: string) - { - print "foo_event", a; - } - -event bar_event(a: string, b: string &default="hi", c: count &default=5) - { - print "bar_event", a, b, c; - } - -### hooks - -global foo_hook: hook(a: string &default="hello"); - -hook foo_hook(a: string) - { - print "foo_hook", a; - } - -hook bar_hook(a: string, b: string &default="hi", c: count &default=5) - { - print "bar_hook", a, b, c; - } - -{} - -foo_func("test"); -foo_func(); -bar_func("hmm"); -bar_func("cool", "beans"); -bar_func("cool", "beans", 13); - -event foo_event("test"); -event foo_event(); -event bar_event("hmm"); -event bar_event("cool", "beans"); -event bar_event("cool", "beans", 13); - -hook foo_hook("test"); -hook foo_hook(); -hook bar_hook("hmm"); -hook bar_hook("cool", "beans"); -hook bar_hook("cool", "beans", 13); diff --git a/testing/btest/language/default-params.zeek b/testing/btest/language/default-params.zeek new file mode 100644 index 0000000000..c07bdee207 --- /dev/null +++ b/testing/btest/language/default-params.zeek @@ -0,0 +1,65 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +### functions + +global foo_func: function(a: string &default="hello"); + +# &defaults transfer from the declaration automatically +function foo_func(a: string) + { + print "foo_func", a; + } + +function bar_func(a: string, b: string &default="hi", c: count &default=5) + { + print "bar_func", a, b, c; + } + +### events + +global foo_event: event(a: string &default="hello"); + +event foo_event(a: string) + { + print "foo_event", a; + } + +event bar_event(a: string, b: string &default="hi", c: count &default=5) + { + print "bar_event", a, b, c; + } + +### hooks + +global foo_hook: hook(a: string &default="hello"); + +hook foo_hook(a: string) + { + print "foo_hook", a; + } + +hook bar_hook(a: string, b: string &default="hi", c: count &default=5) + { + print "bar_hook", a, b, c; + } + +{} + +foo_func("test"); +foo_func(); +bar_func("hmm"); +bar_func("cool", "beans"); +bar_func("cool", "beans", 13); + +event foo_event("test"); +event foo_event(); +event bar_event("hmm"); +event bar_event("cool", "beans"); +event bar_event("cool", "beans", 13); + +hook foo_hook("test"); +hook foo_hook(); +hook bar_hook("hmm"); +hook bar_hook("cool", "beans"); +hook bar_hook("cool", "beans", 13); diff --git a/testing/btest/language/delete-field-set.bro b/testing/btest/language/delete-field-set.bro deleted file mode 100644 index 1f1c5b0c27..0000000000 --- a/testing/btest/language/delete-field-set.bro +++ /dev/null @@ -1,16 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: btest-diff output - -type FooBar: record { - a: set[string] &default=set(); - b: table[string] of count &default=table(); - c: vector of string &default=vector(); -}; - -global test: FooBar; - -delete test$a; -delete test$b; -delete test$c; - -print test; diff --git a/testing/btest/language/delete-field-set.zeek b/testing/btest/language/delete-field-set.zeek new file mode 100644 index 0000000000..8f1482c6c2 --- /dev/null +++ b/testing/btest/language/delete-field-set.zeek @@ -0,0 +1,16 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: btest-diff output + +type FooBar: record { + a: set[string] &default=set(); + b: table[string] of count &default=table(); + c: vector of string &default=vector(); +}; + +global test: FooBar; + +delete test$a; +delete test$b; +delete test$c; + +print test; diff --git a/testing/btest/language/delete-field.bro 
b/testing/btest/language/delete-field.bro deleted file mode 100644 index 99136ff2b9..0000000000 --- a/testing/btest/language/delete-field.bro +++ /dev/null @@ -1,20 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: btest-diff output - -type X: record { - a: count &optional; - b: count &default=5; -}; - -function p(x: X) - { - print x?$a ? fmt("a: %d", x$a) : "a: not set"; - print x$b; - } - - -global x: X = [$a=20, $b=20]; -p(x); -delete x$a; -delete x$b; -p(x); diff --git a/testing/btest/language/delete-field.zeek b/testing/btest/language/delete-field.zeek new file mode 100644 index 0000000000..0e5d4e3809 --- /dev/null +++ b/testing/btest/language/delete-field.zeek @@ -0,0 +1,20 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: btest-diff output + +type X: record { + a: count &optional; + b: count &default=5; +}; + +function p(x: X) + { + print x?$a ? fmt("a: %d", x$a) : "a: not set"; + print x$b; + } + + +global x: X = [$a=20, $b=20]; +p(x); +delete x$a; +delete x$b; +p(x); diff --git a/testing/btest/language/deprecated.bro b/testing/btest/language/deprecated.bro deleted file mode 100644 index ec9c3c9e1e..0000000000 --- a/testing/btest/language/deprecated.bro +++ /dev/null @@ -1,80 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -type blah: string &deprecated; - -global my_event: event(arg: string) &deprecated; - -global my_hook: hook(arg: string) &deprecated; - -type my_record: record { - a: count &default = 1; - b: string &optional &deprecated; -}; - -type my_enum: enum { - RED, - GREEN &deprecated, - BLUE &deprecated -}; - -type my_other_enum: enum { - ZERO = 0, - ONE = 1 &deprecated, - TWO = 2 &deprecated -}; - -event bro_init() - { - print ZERO; - print ONE; - print TWO; - print RED; - print GREEN; - print BLUE; - - local l: blah = "testing"; - - local ls: string = " test"; - - event my_event("generate my_event please"); - schedule 1sec { my_event("schedule my_event please") }; - hook my_hook("generate my_hook please"); - - local mr = my_record($a = 3, $b = "yeah"); - mr = [$a = 4, $b = "ye"]; - mr = record($a = 5, $b = "y"); - - if ( ! 
mr?$b ) - mr$b = "nooooooo"; - - mr$a = 2; - mr$b = "noooo"; - } - -event my_event(arg: string) - { - print arg; - } - -hook my_hook(arg: string) - { - print arg; - } - -function hmm(b: blah) - { - print b; - } - -global dont_use_me: function() &deprecated; - -function dont_use_me() - { - dont_use_me(); - } - -function dont_use_me_either() &deprecated - { - dont_use_me_either(); - } diff --git a/testing/btest/language/deprecated.zeek b/testing/btest/language/deprecated.zeek new file mode 100644 index 0000000000..b10b5674d3 --- /dev/null +++ b/testing/btest/language/deprecated.zeek @@ -0,0 +1,165 @@ +# @TEST-EXEC: zeek -b no-warnings.zeek >no-warnings.out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff no-warnings.out + +# @TEST-EXEC: zeek -b warnings.zeek >warnings.out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff warnings.out + +@TEST-START-FILE no-warnings.zeek +type blah: string &deprecated; + +global my_event: event(arg: string) &deprecated; + +global my_hook: hook(arg: string) &deprecated; + +type my_record: record { + a: count &default = 1; + b: string &optional &deprecated; +}; + +type my_enum: enum { + RED, + GREEN &deprecated, + BLUE &deprecated +}; + +type my_other_enum: enum { + ZERO = 0, + ONE = 1 &deprecated, + TWO = 2 &deprecated, +}; + +event zeek_init() + { + print ZERO; + print ONE; + print TWO; + print RED; + print GREEN; + print BLUE; + + local l: blah = "testing"; + + local ls: string = " test"; + + event my_event("generate my_event please"); + schedule 1sec { my_event("schedule my_event please") }; + hook my_hook("generate my_hook please"); + + local mr = my_record($a = 3, $b = "yeah"); + mr = [$a = 4, $b = "ye"]; + mr = record($a = 5, $b = "y"); + + if ( ! mr?$b ) + mr$b = "nooooooo"; + + mr$a = 2; + mr$b = "noooo"; + } + +event my_event(arg: string) + { + print arg; + } + +hook my_hook(arg: string) + { + print arg; + } + +function hmm(b: blah) + { + print b; + } + +global dont_use_me: function() &deprecated; + +function dont_use_me() + { + dont_use_me(); + } + +function dont_use_me_either() &deprecated + { + dont_use_me_either(); + } +@TEST-END-FILE + +@TEST-START-FILE warnings.zeek +type blah: string &deprecated="type warning"; + +global my_event: event(arg: string) &deprecated="event warning"; + +global my_hook: hook(arg: string) &deprecated="hook warning"; + +type my_record: record { + a: count &default = 1; + b: string &optional &deprecated="record warning"; +}; + +type my_enum: enum { + RED, + GREEN &deprecated="green warning", + BLUE &deprecated="red warning" +}; + +type my_other_enum: enum { + ZERO = 0, + ONE = 1 &deprecated="one warning", + TWO = 2 &deprecated="two warning", +}; + +event zeek_init() + { + print ZERO; + print ONE; + print TWO; + print RED; + print GREEN; + print BLUE; + + local l: blah = "testing"; + + local ls: string = " test"; + + event my_event("generate my_event please"); + schedule 1sec { my_event("schedule my_event please") }; + hook my_hook("generate my_hook please"); + + local mr = my_record($a = 3, $b = "yeah"); + mr = [$a = 4, $b = "ye"]; + mr = record($a = 5, $b = "y"); + + if ( ! 
mr?$b ) + mr$b = "nooooooo"; + + mr$a = 2; + mr$b = "noooo"; + } + +event my_event(arg: string) + { + print arg; + } + +hook my_hook(arg: string) + { + print arg; + } + +function hmm(b: blah) + { + print b; + } + +global dont_use_me: function() &deprecated="global function warning"; + +function dont_use_me() + { + dont_use_me(); + } + +function dont_use_me_either() &deprecated="function warning" + { + dont_use_me_either(); + } +@TEST-END-FILE diff --git a/testing/btest/language/double.bro b/testing/btest/language/double.bro deleted file mode 100644 index f85b216828..0000000000 --- a/testing/btest/language/double.bro +++ /dev/null @@ -1,79 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - - -event bro_init() -{ - local d1: double = 3; - local d2: double = +3; - local d3: double = 3.; - local d4: double = 3.0; - local d5: double = +3.0; - local d6: double = 3e0; - local d7: double = 3E0; - local d8: double = 3e+0; - local d9: double = 3e-0; - local d10: double = 3.0e0; - local d11: double = +3.0e0; - local d12: double = +3.0e+0; - local d13: double = +3.0E+0; - local d14: double = +3.0E-0; - local d15: double = .03E+2; - local d16: double = .03E2; - local d17: double = 3.0001; - local d18: double = -3.0001; - local d19: double = 1.7976931348623157e308; # maximum allowed value - local d20 = 7.0; - local d21 = 7e0; - local d22 = 7e+1; - - # Type inference tests - - test_case( "type inference", type_name(d20) == "double" ); - test_case( "type inference", type_name(d21) == "double" ); - test_case( "type inference", type_name(d22) == "double" ); - - # Test various constant representations - - test_case( "double representations", d1 == d2 ); - test_case( "double representations", d1 == d3 ); - test_case( "double representations", d1 == d4 ); - test_case( "double representations", d1 == d5 ); - test_case( "double representations", d1 == d6 ); - test_case( "double representations", d1 == d7 ); - test_case( "double representations", d1 == d8 ); - test_case( "double representations", d1 == d9 ); - test_case( "double representations", d1 == d10 ); - test_case( "double representations", d1 == d11 ); - test_case( "double representations", d1 == d12 ); - test_case( "double representations", d1 == d13 ); - test_case( "double representations", d1 == d14 ); - test_case( "double representations", d1 == d15 ); - test_case( "double representations", d1 == d16 ); - - # Operator tests - - test_case( "inequality operator", d18 != d17 ); - test_case( "absolute value", |d18| == d17 ); - d4 += 2; - test_case( "assignment operator", d4 == 5.0 ); - d4 -= 3; - test_case( "assignment operator", d4 == 2.0 ); - test_case( "relational operator", d4 <= d3 ); - test_case( "relational operator", d4 < d3 ); - test_case( "relational operator", d17 >= d3 ); - test_case( "relational operator", d17 > d3 ); - test_case( "division operator", d3/2 == 1.5 ); - - # Max. value test - - local str1 = fmt("max double value = %.16e", d19); - test_case( str1, str1 == "max double value = 1.7976931348623157e+308" ); - -} - diff --git a/testing/btest/language/double.zeek b/testing/btest/language/double.zeek new file mode 100644 index 0000000000..56ce711da2 --- /dev/null +++ b/testing/btest/language/double.zeek @@ -0,0 +1,79 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + +event zeek_init() +{ + local d1: double = 3; + local d2: double = +3; + local d3: double = 3.; + local d4: double = 3.0; + local d5: double = +3.0; + local d6: double = 3e0; + local d7: double = 3E0; + local d8: double = 3e+0; + local d9: double = 3e-0; + local d10: double = 3.0e0; + local d11: double = +3.0e0; + local d12: double = +3.0e+0; + local d13: double = +3.0E+0; + local d14: double = +3.0E-0; + local d15: double = .03E+2; + local d16: double = .03E2; + local d17: double = 3.0001; + local d18: double = -3.0001; + local d19: double = 1.7976931348623157e308; # maximum allowed value + local d20 = 7.0; + local d21 = 7e0; + local d22 = 7e+1; + + # Type inference tests + + test_case( "type inference", type_name(d20) == "double" ); + test_case( "type inference", type_name(d21) == "double" ); + test_case( "type inference", type_name(d22) == "double" ); + + # Test various constant representations + + test_case( "double representations", d1 == d2 ); + test_case( "double representations", d1 == d3 ); + test_case( "double representations", d1 == d4 ); + test_case( "double representations", d1 == d5 ); + test_case( "double representations", d1 == d6 ); + test_case( "double representations", d1 == d7 ); + test_case( "double representations", d1 == d8 ); + test_case( "double representations", d1 == d9 ); + test_case( "double representations", d1 == d10 ); + test_case( "double representations", d1 == d11 ); + test_case( "double representations", d1 == d12 ); + test_case( "double representations", d1 == d13 ); + test_case( "double representations", d1 == d14 ); + test_case( "double representations", d1 == d15 ); + test_case( "double representations", d1 == d16 ); + + # Operator tests + + test_case( "inequality operator", d18 != d17 ); + test_case( "absolute value", |d18| == d17 ); + d4 += 2; + test_case( "assignment operator", d4 == 5.0 ); + d4 -= 3; + test_case( "assignment operator", d4 == 2.0 ); + test_case( "relational operator", d4 <= d3 ); + test_case( "relational operator", d4 < d3 ); + test_case( "relational operator", d17 >= d3 ); + test_case( "relational operator", d17 > d3 ); + test_case( "division operator", d3/2 == 1.5 ); + + # Max. 
value test + + local str1 = fmt("max double value = %.16e", d19); + test_case( str1, str1 == "max double value = 1.7976931348623157e+308" ); + +} + diff --git a/testing/btest/language/enum-desc.bro b/testing/btest/language/enum-desc.bro deleted file mode 100644 index 86466e2fc2..0000000000 --- a/testing/btest/language/enum-desc.bro +++ /dev/null @@ -1,15 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -type test_enum1: enum { ONE }; - -module TEST; - -type test_enum2: enum { TWO }; - -print ONE; -print fmt("%s", ONE); - - -print TWO; -print fmt("%s", TWO); diff --git a/testing/btest/language/enum-desc.zeek b/testing/btest/language/enum-desc.zeek new file mode 100644 index 0000000000..c296b76a13 --- /dev/null +++ b/testing/btest/language/enum-desc.zeek @@ -0,0 +1,15 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +type test_enum1: enum { ONE }; + +module TEST; + +type test_enum2: enum { TWO }; + +print ONE; +print fmt("%s", ONE); + + +print TWO; +print fmt("%s", TWO); diff --git a/testing/btest/language/enum-scope.bro b/testing/btest/language/enum-scope.bro deleted file mode 100644 index 82e7c7fd7c..0000000000 --- a/testing/btest/language/enum-scope.bro +++ /dev/null @@ -1,10 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: btest-diff output - -type foo: enum { a, b } &redef; - -module test; - -redef enum foo += { c }; - -print c; diff --git a/testing/btest/language/enum-scope.zeek b/testing/btest/language/enum-scope.zeek new file mode 100644 index 0000000000..8c2e20c9b2 --- /dev/null +++ b/testing/btest/language/enum-scope.zeek @@ -0,0 +1,10 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: btest-diff output + +type foo: enum { a, b } &redef; + +module test; + +redef enum foo += { c }; + +print c; diff --git a/testing/btest/language/enum.bro b/testing/btest/language/enum.bro deleted file mode 100644 index 6de7d345da..0000000000 --- a/testing/btest/language/enum.bro +++ /dev/null @@ -1,32 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - - -# enum with optional comma at end of definition -type color: enum { Red, White, Blue, }; - -# enum without optional comma -type city: enum { Rome, Paris }; - - -event bro_init() -{ - local e1: color = Blue; - local e2: color = White; - local e3: color = Blue; - local e4: city = Rome; - - test_case( "enum equality comparison", e1 != e2 ); - test_case( "enum equality comparison", e1 == e3 ); - test_case( "enum equality comparison", e1 != e4 ); - - # type inference - local x = Blue; - test_case( "type inference", x == e1 ); -} - diff --git a/testing/btest/language/enum.zeek b/testing/btest/language/enum.zeek new file mode 100644 index 0000000000..71c354971f --- /dev/null +++ b/testing/btest/language/enum.zeek @@ -0,0 +1,32 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + +# enum with optional comma at end of definition +type color: enum { Red, White, Blue, }; + +# enum without optional comma +type city: enum { Rome, Paris }; + + +event zeek_init() +{ + local e1: color = Blue; + local e2: color = White; + local e3: color = Blue; + local e4: city = Rome; + + test_case( "enum equality comparison", e1 != e2 ); + test_case( "enum equality comparison", e1 == e3 ); + test_case( "enum equality comparison", e1 != e4 ); + + # type inference + local x = Blue; + test_case( "type inference", x == e1 ); +} + diff --git a/testing/btest/language/eof-parse-errors.bro b/testing/btest/language/eof-parse-errors.bro deleted file mode 100644 index a2c6edc66d..0000000000 --- a/testing/btest/language/eof-parse-errors.bro +++ /dev/null @@ -1,21 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b a.bro >output1 2>&1 -# @TEST-EXEC-FAIL: bro -b a.bro b.bro >output2 2>&1 -# @TEST-EXEC: btest-diff output1 -# @TEST-EXEC: btest-diff output2 - -@TEST-START-FILE a.bro -module A; - -event bro_init() - { - print "a"; -@TEST-END-FILE - -@TEST-START-FILE b.bro -module B; - -event bro_init() - { - print "b"; - } -@TEST-END-FILE diff --git a/testing/btest/language/eof-parse-errors.zeek b/testing/btest/language/eof-parse-errors.zeek new file mode 100644 index 0000000000..54fe96df19 --- /dev/null +++ b/testing/btest/language/eof-parse-errors.zeek @@ -0,0 +1,21 @@ +# @TEST-EXEC-FAIL: zeek -b a.zeek >output1 2>&1 +# @TEST-EXEC-FAIL: zeek -b a.zeek b.zeek >output2 2>&1 +# @TEST-EXEC: btest-diff output1 +# @TEST-EXEC: btest-diff output2 + +@TEST-START-FILE a.zeek +module A; + +event zeek_init() + { + print "a"; +@TEST-END-FILE + +@TEST-START-FILE b.zeek +module B; + +event zeek_init() + { + print "b"; + } +@TEST-END-FILE diff --git a/testing/btest/language/event-local-var.bro b/testing/btest/language/event-local-var.bro deleted file mode 100644 index d4dd9d19a5..0000000000 --- a/testing/btest/language/event-local-var.bro +++ /dev/null @@ -1,16 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT 2> out -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - - -event e1(num: count) - { - print fmt("event 1: %s", num); - } - -event bro_init() -{ - # Test assigning a local event variable to an event - local v: event(num: count); - v = e1; - schedule 1sec { v(6) }; # This should fail -} diff --git a/testing/btest/language/event-local-var.zeek b/testing/btest/language/event-local-var.zeek new file mode 100644 index 0000000000..4d7364cc39 --- /dev/null +++ b/testing/btest/language/event-local-var.zeek @@ -0,0 +1,16 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT 2> out +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + + +event e1(num: count) + { + print fmt("event 1: %s", num); + } + +event zeek_init() +{ + # Test assigning a local event variable to an event + local v: event(num: count); + v = e1; + schedule 1sec { v(6) }; # This should fail +} diff --git a/testing/btest/language/event.bro b/testing/btest/language/event.bro deleted file mode 100644 index d4eef24731..0000000000 --- a/testing/btest/language/event.bro +++ /dev/null @@ -1,53 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - - -event e1() - { - print "event statement"; - return; - print "Error: this should not happen"; - } - -event e2(s: string) - { - print fmt("schedule statement %s", s); - } - -event e3(test: string) - { - print "event part1"; - } - -event e4(num: count) - { - print fmt("assign event variable (%s)", num); - } - -# Note: the name of this event is 
intentionally the same as one above -event e3(test: string) - { - print "event part2"; - } - -global e5: event(num: count); - -event bro_init() -{ - # Test calling an event with "event" statement - event e1(); - - # Test calling an event with "schedule" statement - schedule 1 sec { e2("in bro_init") }; - schedule 3 sec { e2("another in bro_init") }; - - # Test calling an event that has two separate definitions - event e3("foo"); - - # Test assigning an event variable to an event - e5 = e4; - event e5(6); -} - -# scheduling in outside of an event handler shouldn't crash. -schedule 2sec { e2("in global") }; diff --git a/testing/btest/language/event.zeek b/testing/btest/language/event.zeek new file mode 100644 index 0000000000..39bb36c192 --- /dev/null +++ b/testing/btest/language/event.zeek @@ -0,0 +1,53 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + + +event e1() + { + print "event statement"; + return; + print "Error: this should not happen"; + } + +event e2(s: string) + { + print fmt("schedule statement %s", s); + } + +event e3(test: string) + { + print "event part1"; + } + +event e4(num: count) + { + print fmt("assign event variable (%s)", num); + } + +# Note: the name of this event is intentionally the same as one above +event e3(test: string) + { + print "event part2"; + } + +global e5: event(num: count); + +event zeek_init() +{ + # Test calling an event with "event" statement + event e1(); + + # Test calling an event with "schedule" statement + schedule 1 sec { e2("in zeek_init") }; + schedule 3 sec { e2("another in zeek_init") }; + + # Test calling an event that has two separate definitions + event e3("foo"); + + # Test assigning an event variable to an event + e5 = e4; + event e5(6); +} + +# scheduling in outside of an event handler shouldn't crash. 
+schedule 2sec { e2("in global") }; diff --git a/testing/btest/language/expire-expr-error.bro b/testing/btest/language/expire-expr-error.bro deleted file mode 100644 index 7c9a3aa318..0000000000 --- a/testing/btest/language/expire-expr-error.bro +++ /dev/null @@ -1,27 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: cp .stderr output -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output - -redef exit_only_after_terminate = T; - -global x: table[string] of interval; -global data: table[int] of string &create_expire=x["kaputt"]; - -global runs = 0; -event do_it() - { - print fmt("Run %s", runs); - - ++runs; - if ( runs < 4 ) - schedule 1sec { do_it() }; - else - terminate(); - } - - -event bro_init() &priority=-10 - { - data[0] = "some data"; - schedule 1sec { do_it() }; - } diff --git a/testing/btest/language/expire-expr-error.zeek b/testing/btest/language/expire-expr-error.zeek new file mode 100644 index 0000000000..5e6f0b4e6f --- /dev/null +++ b/testing/btest/language/expire-expr-error.zeek @@ -0,0 +1,27 @@ +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: cp .stderr output +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output + +redef exit_only_after_terminate = T; + +global x: table[string] of interval; +global data: table[int] of string &create_expire=x["kaputt"]; + +global runs = 0; +event do_it() + { + print fmt("Run %s", runs); + + ++runs; + if ( runs < 4 ) + schedule 1sec { do_it() }; + else + terminate(); + } + + +event zeek_init() &priority=-10 + { + data[0] = "some data"; + schedule 1sec { do_it() }; + } diff --git a/testing/btest/language/expire-func-undef.bro b/testing/btest/language/expire-func-undef.bro deleted file mode 100644 index eb864d2390..0000000000 --- a/testing/btest/language/expire-func-undef.bro +++ /dev/null @@ -1,40 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/rotation.trace -b %INPUT >output 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output - -module segfault; - -export { - - global scan_summary: - function(t: table[addr] of set[addr], orig: addr): interval; - - global distinct_peers: table[addr] of set[addr] - &read_expire = 7 secs &expire_func=scan_summary &redef; - -} - - -event new_connection(c: connection) -{ - - local orig = c$id$orig_h ; - local resp = c$id$resp_h ; - - - if (orig !in distinct_peers) - distinct_peers[orig]=set(); - - if (resp !in distinct_peers[orig]) - add distinct_peers[orig][resp]; - -} - -event bro_done() -{ - - for (o in distinct_peers) - { - print fmt("orig: %s: peers: %s", o, distinct_peers[o]); - } - -} diff --git a/testing/btest/language/expire-func-undef.zeek b/testing/btest/language/expire-func-undef.zeek new file mode 100644 index 0000000000..9198edc6c4 --- /dev/null +++ b/testing/btest/language/expire-func-undef.zeek @@ -0,0 +1,40 @@ +# @TEST-EXEC: zeek -r $TRACES/rotation.trace -b %INPUT >output 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output + +module segfault; + +export { + + global scan_summary: + function(t: table[addr] of set[addr], orig: addr): interval; + + global distinct_peers: table[addr] of set[addr] + &read_expire = 7 secs &expire_func=scan_summary &redef; + +} + + +event new_connection(c: connection) +{ + + local orig = c$id$orig_h ; + local resp = c$id$resp_h ; + + + if (orig !in distinct_peers) + distinct_peers[orig]=set(); + + if (resp !in distinct_peers[orig]) + add distinct_peers[orig][resp]; + +} + +event zeek_done() +{ + + for (o in distinct_peers) + { + print fmt("orig: %s: 
peers: %s", o, distinct_peers[o]); + } + +} diff --git a/testing/btest/language/expire-redef.bro b/testing/btest/language/expire-redef.bro deleted file mode 100644 index 5cbb00f313..0000000000 --- a/testing/btest/language/expire-redef.bro +++ /dev/null @@ -1,37 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -redef exit_only_after_terminate = T; - -const exp_val = -1sec &redef; - -global expired: function(tbl: table[int] of string, idx: int): interval; -global data: table[int] of string &write_expire=exp_val &expire_func=expired; - -redef table_expire_interval = 1sec; -redef exp_val = 6sec; - -global runs = 0; -event do_it() - { - ++runs; - print fmt("Run %s", runs); - - if ( runs < 2 ) - schedule 4sec { do_it() }; - else - terminate(); - } - - -function expired(tbl: table[int] of string, idx: int): interval - { - print fmt("Expired: %s --> %s", idx, tbl[idx]); - return 0sec; - } - -event bro_init() &priority=-10 - { - data[0] = "some data"; - schedule 4sec { do_it() }; - } diff --git a/testing/btest/language/expire-redef.zeek b/testing/btest/language/expire-redef.zeek new file mode 100644 index 0000000000..3958ef8342 --- /dev/null +++ b/testing/btest/language/expire-redef.zeek @@ -0,0 +1,37 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +redef exit_only_after_terminate = T; + +const exp_val = -1sec &redef; + +global expired: function(tbl: table[int] of string, idx: int): interval; +global data: table[int] of string &write_expire=exp_val &expire_func=expired; + +redef table_expire_interval = 1sec; +redef exp_val = 6sec; + +global runs = 0; +event do_it() + { + ++runs; + print fmt("Run %s", runs); + + if ( runs < 2 ) + schedule 4sec { do_it() }; + else + terminate(); + } + + +function expired(tbl: table[int] of string, idx: int): interval + { + print fmt("Expired: %s --> %s", idx, tbl[idx]); + return 0sec; + } + +event zeek_init() &priority=-10 + { + data[0] = "some data"; + schedule 4sec { do_it() }; + } diff --git a/testing/btest/language/expire-type-error.bro b/testing/btest/language/expire-type-error.bro deleted file mode 100644 index d6d807e22f..0000000000 --- a/testing/btest/language/expire-type-error.bro +++ /dev/null @@ -1,6 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -global data: table[int] of string &write_expire="kaputt"; - - diff --git a/testing/btest/language/expire-type-error.zeek b/testing/btest/language/expire-type-error.zeek new file mode 100644 index 0000000000..2424ca0394 --- /dev/null +++ b/testing/btest/language/expire-type-error.zeek @@ -0,0 +1,6 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +global data: table[int] of string &write_expire="kaputt"; + + diff --git a/testing/btest/language/expire_func.test b/testing/btest/language/expire_func.test index 653a4d9a86..016ebe9d88 100644 --- a/testing/btest/language/expire_func.test +++ b/testing/btest/language/expire_func.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -r $TRACES/var-services-std-ports.trace %INPUT >output +# @TEST-EXEC: zeek -C -r $TRACES/var-services-std-ports.trace %INPUT >output # @TEST-EXEC: btest-diff output function inform_me(s: set[string], idx: string): interval @@ -9,7 +9,7 @@ function inform_me(s: set[string], idx: string): interval global s: set[string] &create_expire=1secs &expire_func=inform_me; -event bro_init() +event zeek_init() { add s["i"]; add s["am"]; diff 
--git a/testing/btest/language/expire_func_mod.bro b/testing/btest/language/expire_func_mod.bro deleted file mode 100644 index 4790a9650e..0000000000 --- a/testing/btest/language/expire_func_mod.bro +++ /dev/null @@ -1,41 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -module Test; - -redef exit_only_after_terminate = T; -redef table_expire_interval = .1 secs ; - -export { - global table_expire_func: function(t: table[string] of count, - s: string): interval; - - global t: table[string] of count - &write_expire=0 secs - &expire_func=table_expire_func; -} - -event die() - { - terminate(); - } - -function table_expire_func(t: table[string] of count, s: string): interval - { - t[s] += 1 ; - - print fmt("inside table_expire_func: %s, %s", s, t[s]); - - if ( t[s] < 10 ) - return .1 secs ; - - schedule .1sec { die() }; - return 0 secs; - } - -event bro_init() - { - local s="ashish"; - t[s] = 1 ; - print fmt("starting: %s, %s", s, t[s]); - } diff --git a/testing/btest/language/expire_func_mod.zeek b/testing/btest/language/expire_func_mod.zeek new file mode 100644 index 0000000000..4e64edc968 --- /dev/null +++ b/testing/btest/language/expire_func_mod.zeek @@ -0,0 +1,41 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +module Test; + +redef exit_only_after_terminate = T; +redef table_expire_interval = .1 secs ; + +export { + global table_expire_func: function(t: table[string] of count, + s: string): interval; + + global t: table[string] of count + &write_expire=0 secs + &expire_func=table_expire_func; +} + +event die() + { + terminate(); + } + +function table_expire_func(t: table[string] of count, s: string): interval + { + t[s] += 1 ; + + print fmt("inside table_expire_func: %s, %s", s, t[s]); + + if ( t[s] < 10 ) + return .1 secs ; + + schedule .1sec { die() }; + return 0 secs; + } + +event zeek_init() + { + local s="ashish"; + t[s] = 1 ; + print fmt("starting: %s, %s", s, t[s]); + } diff --git a/testing/btest/language/expire_multiple.test b/testing/btest/language/expire_multiple.test index 1e4aaa0975..38c552a0e1 100644 --- a/testing/btest/language/expire_multiple.test +++ b/testing/btest/language/expire_multiple.test @@ -1,4 +1,4 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >output 2>&1 +# @TEST-EXEC-FAIL: zeek -b %INPUT >output 2>&1 # @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output global s: set[string] &create_expire=1secs &read_expire=1secs; diff --git a/testing/btest/language/expire_subnet.test b/testing/btest/language/expire_subnet.test index 12d5e56b5a..9b95f39763 100644 --- a/testing/btest/language/expire_subnet.test +++ b/testing/btest/language/expire_subnet.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -r $TRACES/var-services-std-ports.trace %INPUT >output +# @TEST-EXEC: zeek -C -r $TRACES/var-services-std-ports.trace %INPUT >output # @TEST-EXEC: btest-diff output redef table_expire_interval = 1sec; @@ -55,7 +55,7 @@ function execute_test() ### Events ### -event bro_init() +event zeek_init() { step = 0; diff --git a/testing/btest/language/file.bro b/testing/btest/language/file.bro deleted file mode 100644 index 47430b6813..0000000000 --- a/testing/btest/language/file.bro +++ /dev/null @@ -1,19 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff out1 -# @TEST-EXEC: btest-diff out2 - - -event bro_init() -{ - local f1: file = open( "out1" ); - print f1, 20; - print f1, 12; - close(f1); - - # Type inference test - - local f2 = open( "out2" ); - print f2, "test", 123, 456; - close(f2); -} - diff --git 
a/testing/btest/language/file.zeek b/testing/btest/language/file.zeek new file mode 100644 index 0000000000..a3691b87da --- /dev/null +++ b/testing/btest/language/file.zeek @@ -0,0 +1,19 @@ +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff out1 +# @TEST-EXEC: btest-diff out2 + + +event zeek_init() +{ + local f1: file = open( "out1" ); + print f1, 20; + print f1, 12; + close(f1); + + # Type inference test + + local f2 = open( "out2" ); + print f2, "test", 123, 456; + close(f2); +} + diff --git a/testing/btest/language/for.bro b/testing/btest/language/for.bro deleted file mode 100644 index 5f0c211597..0000000000 --- a/testing/btest/language/for.bro +++ /dev/null @@ -1,57 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - - - -event bro_init() -{ - local vv: vector of string = vector( "a", "b", "c" ); - local ct: count = 0; - - # Test a "for" loop without "break" or "next" - - ct = 0; - for ( i in vv ) ++ct; - test_case("for loop", ct == 3 ); - - # Test the "break" statement - - ct = 0; - for ( i in vv ) - { - ++ct; - break; - test_case("Error: this should not happen", F); - } - test_case("for loop with break", ct == 1 ); - - # Test the "next" statement - - ct = 0; - for ( i in vv ) - { - ++ct; - next; - test_case("Error: this should not happen", F); - } - test_case("for loop with next", ct == 3 ); - - # Test keys that are tuples - - local t: table[count, count] of string = table(); - t[1, 2] = "hi"; - - local s1: string = ""; - - for ( [i, j] in t ) - s1 = fmt("%d %d %s", i, j, t[i,j]); - - test_case("keys that are tuples", s1 == "1 2 hi"); - - # Tests for key value for loop are in key-value-for.bro -} diff --git a/testing/btest/language/for.zeek b/testing/btest/language/for.zeek new file mode 100644 index 0000000000..6918e78818 --- /dev/null +++ b/testing/btest/language/for.zeek @@ -0,0 +1,57 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + + +event zeek_init() +{ + local vv: vector of string = vector( "a", "b", "c" ); + local ct: count = 0; + + # Test a "for" loop without "break" or "next" + + ct = 0; + for ( i in vv ) ++ct; + test_case("for loop", ct == 3 ); + + # Test the "break" statement + + ct = 0; + for ( i in vv ) + { + ++ct; + break; + test_case("Error: this should not happen", F); + } + test_case("for loop with break", ct == 1 ); + + # Test the "next" statement + + ct = 0; + for ( i in vv ) + { + ++ct; + next; + test_case("Error: this should not happen", F); + } + test_case("for loop with next", ct == 3 ); + + # Test keys that are tuples + + local t: table[count, count] of string = table(); + t[1, 2] = "hi"; + + local s1: string = ""; + + for ( [i, j] in t ) + s1 = fmt("%d %d %s", i, j, t[i,j]); + + test_case("keys that are tuples", s1 == "1 2 hi"); + + # Note: Tests for key value "for" loop are in key-value-for.zeek +} diff --git a/testing/btest/language/func-assignment.bro b/testing/btest/language/func-assignment.bro deleted file mode 100644 index 576d7f3270..0000000000 --- a/testing/btest/language/func-assignment.bro +++ /dev/null @@ -1,39 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function double_string(s: string): string - { - return string_cat(s, " ", s); - } - -function triple_string(str: string): string - { - return string_cat(str, " ", str, " ", str); - } - -type sample_function: record { - s: string; - f: function(str: string): string; -}; - -event bro_init() - { - local test_sf: sample_function; - test_sf$s = "Brogrammers, like bowties, are cool."; - - test_sf$f = triple_string; - print test_sf$f(test_sf$s); - - test_sf$f = double_string; - print test_sf$f(test_sf$s); - - # Works as expected - test_sf$f = function(str: string): string - { return to_upper(str); }; - print test_sf$f(test_sf$s); - - # Func arg names shouldn't factor in to the type check. - test_sf$f = function(s: string): string - { return to_upper(s); }; - print test_sf$f(test_sf$s); - } diff --git a/testing/btest/language/func-assignment.zeek b/testing/btest/language/func-assignment.zeek new file mode 100644 index 0000000000..febf57e61c --- /dev/null +++ b/testing/btest/language/func-assignment.zeek @@ -0,0 +1,39 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function double_string(s: string): string + { + return string_cat(s, " ", s); + } + +function triple_string(str: string): string + { + return string_cat(str, " ", str, " ", str); + } + +type sample_function: record { + s: string; + f: function(str: string): string; +}; + +event zeek_init() + { + local test_sf: sample_function; + test_sf$s = "Brogrammers, like bowties, are cool."; + + test_sf$f = triple_string; + print test_sf$f(test_sf$s); + + test_sf$f = double_string; + print test_sf$f(test_sf$s); + + # Works as expected + test_sf$f = function(str: string): string + { return to_upper(str); }; + print test_sf$f(test_sf$s); + + # Func arg names shouldn't factor in to the type check. + test_sf$f = function(s: string): string + { return to_upper(s); }; + print test_sf$f(test_sf$s); + } diff --git a/testing/btest/language/function.bro b/testing/btest/language/function.bro deleted file mode 100644 index ab60c4fa62..0000000000 --- a/testing/btest/language/function.bro +++ /dev/null @@ -1,73 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); - } - - -function f1() - { - test_case("no args without return value", T ); - } - -function f2() - { - test_case("no args no return value, empty return", T ); - return; - } - -function f3(): bool - { - return T; - } - -function f4(test: string) - { - test_case("args without return value", T ); - } - -function f5(test: string): bool - { - return T; - } - -function f6(test: string, num: count): bool - { - local val: int = -num; - if ( test == "bar" && num == 3 && val < 0 ) return T; - return F; - } - -function f7(test: string): bool - { - return F; - } - -event bro_init() -{ - f1(); - f2(); - test_case("no args with return value", f3() ); - f4("foo"); - test_case("args with return value", f5("foo") ); - test_case("multiple args with return value", f6("bar", 3) ); - - local f10 = function() { test_case("anonymous function without args or return value", T ); }; - f10(); - - local f11 = function(): bool { return T; }; - test_case("anonymous function with return value", f11() ); - - local f12 = function(val: int): bool { if (val > 0) return T; else return F; }; - test_case("anonymous function with args and return value", f12(2) ); - - # Test that a function variable can later be assigned to a function - local f13: function(test: string): bool; - f13 = f5; - test_case("assign function variable", f13("foo") ); - f13 = f7; - test_case("reassign function variable", !f13("bar") ); -} - diff --git a/testing/btest/language/function.zeek b/testing/btest/language/function.zeek new file mode 100644 index 0000000000..ff967b897f --- /dev/null +++ b/testing/btest/language/function.zeek @@ -0,0 +1,73 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + +function f1() + { + test_case("no args without return value", T ); + } + +function f2() + { + test_case("no args no return value, empty return", T ); + return; + } + +function f3(): bool + { + return T; + } + +function f4(test: string) + { + test_case("args without return value", T ); + } + +function f5(test: string): bool + { + return T; + } + +function f6(test: string, num: count): bool + { + local val: int = -num; + if ( test == "bar" && num == 3 && val < 0 ) return T; + return F; + } + +function f7(test: string): bool + { + return F; + } + +event zeek_init() +{ + f1(); + f2(); + test_case("no args with return value", f3() ); + f4("foo"); + test_case("args with return value", f5("foo") ); + test_case("multiple args with return value", f6("bar", 3) ); + + local f10 = function() { test_case("anonymous function without args or return value", T ); }; + f10(); + + local f11 = function(): bool { return T; }; + test_case("anonymous function with return value", f11() ); + + local f12 = function(val: int): bool { if (val > 0) return T; else return F; }; + test_case("anonymous function with args and return value", f12(2) ); + + # Test that a function variable can later be assigned to a function + local f13: function(test: string): bool; + f13 = f5; + test_case("assign function variable", f13("foo") ); + f13 = f7; + test_case("reassign function variable", !f13("bar") ); +} + diff --git a/testing/btest/language/hook.bro b/testing/btest/language/hook.bro deleted file mode 100644 index 3edfd9556c..0000000000 --- a/testing/btest/language/hook.bro +++ /dev/null @@ -1,118 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -type rec: record { - a: count; - b: string; -}; - -global myhook: hook(r: rec); -global myhook2: hook(s: string); -# a hook doesn't have to take any arguments -global myhook4: hook(); -global myhook5: hook(s: string); -global myhook6: hook(s: string); - -hook myhook(r: rec) &priority=5 - { - print "myhook, &priority=5", r; - # break statement short-circuits the hook handling chain. - break; - print "ERROR: break statement should return from hook handler body"; - } - -hook myhook(r: rec) - { - # This handler shouldn't execute ever because of the handler at priority=5 - # exiting the body from a "break" statement. - print "myhook, &priority=0", rec; - } - -hook myhook(r: rec) &priority=10 - { - print "myhook, &priority=10", r; - # modifications to the record argument will be seen by remaining handlers. - r$a = 37; - r$b = "goobye world"; - # returning from the handler early, is fine, remaining handlers still run. - return; - print "ERROR: return statement should return from hook handler body"; - } - -hook myhook(r: rec) &priority=9 - { - print "myhook return F"; - # return value is ignored, remaining handlers still run, final return - # value is whether any hook body returned via break statement - return F; - print "ERROR: return statement should return from hook handler body"; - } - -hook myhook(r: rec) &priority=8 - { - print "myhook return T"; - # return value is ignored, remaining handlers still run, final return - # value is whether any hook body returned via break statement - return T; - print "ERROR: return statement should return from hook handler body"; - } - -# hook function doesn't need a declaration, we can go straight to defining -# a handler body. 
-hook myhook3(i: count) - { - print "myhook3", i; - } - -hook myhook4() &priority=1 - { - print "myhook4", 1; - } - -hook myhook4() &priority=2 - { - print "myhook4", 2; - } - -hook myhook5(s: string) - { - print "myhook5", s; - } - -hook myhook6(s: string) - { - print "myhook6", s; - break; - } - -function printMe(s: string): bool - { - print s; - return T; - } - -event bro_init() - { - print hook myhook([$a=1156, $b="hello world"]); - - # A hook with no handlers is fine, it's just a no-op. - print hook myhook2("nope"); - - print hook myhook3(8); - print hook myhook4(); - if ( hook myhook4() ) - { - print "myhook4 all handlers ran"; - } - - # A hook can be treated like other data types and doesn't have to be - # invoked directly by name. - local h = myhook; - print hook h([$a=2, $b="it works"]); - - if ( hook myhook5("test") && printMe("second part ran") ) - print "myhook5 ran"; - - if ( ( hook myhook6("test") ) && printMe("second part ran") ) - print "myhook6 ran"; - } diff --git a/testing/btest/language/hook.zeek b/testing/btest/language/hook.zeek new file mode 100644 index 0000000000..01b43e5807 --- /dev/null +++ b/testing/btest/language/hook.zeek @@ -0,0 +1,118 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +type rec: record { + a: count; + b: string; +}; + +global myhook: hook(r: rec); +global myhook2: hook(s: string); +# a hook doesn't have to take any arguments +global myhook4: hook(); +global myhook5: hook(s: string); +global myhook6: hook(s: string); + +hook myhook(r: rec) &priority=5 + { + print "myhook, &priority=5", r; + # break statement short-circuits the hook handling chain. + break; + print "ERROR: break statement should return from hook handler body"; + } + +hook myhook(r: rec) + { + # This handler shouldn't execute ever because of the handler at priority=5 + # exiting the body from a "break" statement. + print "myhook, &priority=0", rec; + } + +hook myhook(r: rec) &priority=10 + { + print "myhook, &priority=10", r; + # modifications to the record argument will be seen by remaining handlers. + r$a = 37; + r$b = "goobye world"; + # returning from the handler early, is fine, remaining handlers still run. + return; + print "ERROR: return statement should return from hook handler body"; + } + +hook myhook(r: rec) &priority=9 + { + print "myhook return F"; + # return value is ignored, remaining handlers still run, final return + # value is whether any hook body returned via break statement + return F; + print "ERROR: return statement should return from hook handler body"; + } + +hook myhook(r: rec) &priority=8 + { + print "myhook return T"; + # return value is ignored, remaining handlers still run, final return + # value is whether any hook body returned via break statement + return T; + print "ERROR: return statement should return from hook handler body"; + } + +# hook function doesn't need a declaration, we can go straight to defining +# a handler body. +hook myhook3(i: count) + { + print "myhook3", i; + } + +hook myhook4() &priority=1 + { + print "myhook4", 1; + } + +hook myhook4() &priority=2 + { + print "myhook4", 2; + } + +hook myhook5(s: string) + { + print "myhook5", s; + } + +hook myhook6(s: string) + { + print "myhook6", s; + break; + } + +function printMe(s: string): bool + { + print s; + return T; + } + +event zeek_init() + { + print hook myhook([$a=1156, $b="hello world"]); + + # A hook with no handlers is fine, it's just a no-op. 
+ print hook myhook2("nope"); + + print hook myhook3(8); + print hook myhook4(); + if ( hook myhook4() ) + { + print "myhook4 all handlers ran"; + } + + # A hook can be treated like other data types and doesn't have to be + # invoked directly by name. + local h = myhook; + print hook h([$a=2, $b="it works"]); + + if ( hook myhook5("test") && printMe("second part ran") ) + print "myhook5 ran"; + + if ( ( hook myhook6("test") ) && printMe("second part ran") ) + print "myhook6 ran"; + } diff --git a/testing/btest/language/hook_calls.bro b/testing/btest/language/hook_calls.bro deleted file mode 100644 index 41ef6f52ae..0000000000 --- a/testing/btest/language/hook_calls.bro +++ /dev/null @@ -1,82 +0,0 @@ -# @TEST-EXEC: bro -b valid.bro >valid.out -# @TEST-EXEC: btest-diff valid.out -# @TEST-EXEC-FAIL: bro -b invalid.bro > invalid.out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff invalid.out - -# hook functions must be called using the "hook" keyword as an operator... - -@TEST-START-FILE valid.bro -hook myhook(i: count) - { - print "myhook()", i; - if ( i == 0 ) break; - } - -hook myhook(i: count) &priority=-1 - { - print "other myhook()", i; - } - -function indirect(): hook(i: count) - { - print "indirect()"; - return myhook; - } - -function really_indirect(): function(): hook(i: count) - { - print "really_indirect()"; - return indirect; - } - -global t: table[count] of hook(i: count) = { - [0] = myhook, -}; - -event bro_init() - { - hook myhook(3); - print hook myhook(3); - print hook myhook(0); - print "-----------"; - hook indirect()(3); - print hook indirect()(3); - print "-----------"; - hook really_indirect()()(3); - print hook really_indirect()()(3); - print "-----------"; - local h = t[0]; - hook h(3); - print hook h(3); - if ( hook h(3) ) - print "yes"; - if ( ! hook h(0) ) - print "double yes"; - print "-----------"; - hook t[0](3); - print hook t[0](3); - } - -@TEST-END-FILE - -@TEST-START-FILE invalid.bro -hook myhook(i: count) - { - print "myhook()", i; - if ( i == 0 ) break; - } - -event bro_init() - { - myhook(3); - print myhook(3); - print myhook(0); - hook 2+2; - print hook 2+2; - local h = myhook; - h(3); - if ( h(3) ) - print "hmm"; - print "done"; - } -@TEST-END-FILE diff --git a/testing/btest/language/hook_calls.zeek b/testing/btest/language/hook_calls.zeek new file mode 100644 index 0000000000..eee92f1e2a --- /dev/null +++ b/testing/btest/language/hook_calls.zeek @@ -0,0 +1,82 @@ +# @TEST-EXEC: zeek -b valid.zeek >valid.out +# @TEST-EXEC: btest-diff valid.out +# @TEST-EXEC-FAIL: zeek -b invalid.zeek > invalid.out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff invalid.out + +# hook functions must be called using the "hook" keyword as an operator... 
+ +@TEST-START-FILE valid.zeek +hook myhook(i: count) + { + print "myhook()", i; + if ( i == 0 ) break; + } + +hook myhook(i: count) &priority=-1 + { + print "other myhook()", i; + } + +function indirect(): hook(i: count) + { + print "indirect()"; + return myhook; + } + +function really_indirect(): function(): hook(i: count) + { + print "really_indirect()"; + return indirect; + } + +global t: table[count] of hook(i: count) = { + [0] = myhook, +}; + +event zeek_init() + { + hook myhook(3); + print hook myhook(3); + print hook myhook(0); + print "-----------"; + hook indirect()(3); + print hook indirect()(3); + print "-----------"; + hook really_indirect()()(3); + print hook really_indirect()()(3); + print "-----------"; + local h = t[0]; + hook h(3); + print hook h(3); + if ( hook h(3) ) + print "yes"; + if ( ! hook h(0) ) + print "double yes"; + print "-----------"; + hook t[0](3); + print hook t[0](3); + } + +@TEST-END-FILE + +@TEST-START-FILE invalid.zeek +hook myhook(i: count) + { + print "myhook()", i; + if ( i == 0 ) break; + } + +event zeek_init() + { + myhook(3); + print myhook(3); + print myhook(0); + hook 2+2; + print hook 2+2; + local h = myhook; + h(3); + if ( h(3) ) + print "hmm"; + print "done"; + } +@TEST-END-FILE diff --git a/testing/btest/language/if.bro b/testing/btest/language/if.bro deleted file mode 100644 index 785030a012..0000000000 --- a/testing/btest/language/if.bro +++ /dev/null @@ -1,71 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - - - -event bro_init() -{ - # Test "if" without "else" - - if ( T ) test_case( "if T", T); - - if ( F ) test_case( "Error: this should not happen", F); - - # Test "if" with only an "else" - - if ( T ) test_case( "if T else", T); - else test_case( "Error: this should not happen", F); - - if ( F ) test_case( "Error: this should not happen", F); - else test_case( "if F else", T); - - # Test "if" with only an "else if" - - if ( T ) test_case( "if T else if F", T); - else if ( F ) test_case( "Error: this should not happen", F); - - if ( F ) test_case( "Error: this should not happen", F); - else if ( T ) test_case( "if F else if T", T); - - if ( T ) test_case( "if T else if T", T); - else if ( T ) test_case( "Error: this should not happen", F); - - if ( F ) test_case( "Error: this should not happen", F); - else if ( F ) test_case( "Error: this should not happen", F); - - # Test "if" with both "else if" and "else" - - if ( T ) test_case( "if T else if F else", T); - else if ( F ) test_case( "Error: this should not happen", F); - else test_case( "Error: this should not happen", F); - - if ( F ) test_case( "Error: this should not happen", F); - else if ( T ) test_case( "if F else if T else", T); - else test_case( "Error: this should not happen", F); - - if ( T ) test_case( "if T else if T else", T); - else if ( T ) test_case( "Error: this should not happen", F); - else test_case( "Error: this should not happen", F); - - if ( F ) test_case( "Error: this should not happen", F); - else if ( F ) test_case( "Error: this should not happen", F); - else test_case( "if F else if F else", T); - - # Test "if" with multiple "else if" and an "else" - - if ( F ) test_case( "Error: this should not happen", F); - else if ( F ) test_case( "Error: this should not happen", F); - else if ( T ) test_case( "if F else if F else if T else", T); - else test_case( "Error: this should not happen", F); - - if ( F ) test_case( "Error: 
this should not happen", F); - else if ( F ) test_case( "Error: this should not happen", F); - else if ( F ) test_case( "Error: this should not happen", F); - else test_case( "if F else if F else if F else", T); -} - diff --git a/testing/btest/language/if.zeek b/testing/btest/language/if.zeek new file mode 100644 index 0000000000..1f6f1116e1 --- /dev/null +++ b/testing/btest/language/if.zeek @@ -0,0 +1,71 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + + + +event zeek_init() +{ + # Test "if" without "else" + + if ( T ) test_case( "if T", T); + + if ( F ) test_case( "Error: this should not happen", F); + + # Test "if" with only an "else" + + if ( T ) test_case( "if T else", T); + else test_case( "Error: this should not happen", F); + + if ( F ) test_case( "Error: this should not happen", F); + else test_case( "if F else", T); + + # Test "if" with only an "else if" + + if ( T ) test_case( "if T else if F", T); + else if ( F ) test_case( "Error: this should not happen", F); + + if ( F ) test_case( "Error: this should not happen", F); + else if ( T ) test_case( "if F else if T", T); + + if ( T ) test_case( "if T else if T", T); + else if ( T ) test_case( "Error: this should not happen", F); + + if ( F ) test_case( "Error: this should not happen", F); + else if ( F ) test_case( "Error: this should not happen", F); + + # Test "if" with both "else if" and "else" + + if ( T ) test_case( "if T else if F else", T); + else if ( F ) test_case( "Error: this should not happen", F); + else test_case( "Error: this should not happen", F); + + if ( F ) test_case( "Error: this should not happen", F); + else if ( T ) test_case( "if F else if T else", T); + else test_case( "Error: this should not happen", F); + + if ( T ) test_case( "if T else if T else", T); + else if ( T ) test_case( "Error: this should not happen", F); + else test_case( "Error: this should not happen", F); + + if ( F ) test_case( "Error: this should not happen", F); + else if ( F ) test_case( "Error: this should not happen", F); + else test_case( "if F else if F else", T); + + # Test "if" with multiple "else if" and an "else" + + if ( F ) test_case( "Error: this should not happen", F); + else if ( F ) test_case( "Error: this should not happen", F); + else if ( T ) test_case( "if F else if F else if T else", T); + else test_case( "Error: this should not happen", F); + + if ( F ) test_case( "Error: this should not happen", F); + else if ( F ) test_case( "Error: this should not happen", F); + else if ( F ) test_case( "Error: this should not happen", F); + else test_case( "if F else if F else if F else", T); +} + diff --git a/testing/btest/language/incr-vec-expr.test b/testing/btest/language/incr-vec-expr.test index c9945061a2..1bd3e54129 100644 --- a/testing/btest/language/incr-vec-expr.test +++ b/testing/btest/language/incr-vec-expr.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b %INPUT >out +# @TEST-EXEC: zeek -b %INPUT >out # @TEST-EXEC: btest-diff out type rec: record { diff --git a/testing/btest/language/index-assignment-invalid.bro b/testing/btest/language/index-assignment-invalid.bro deleted file mode 100644 index 68458eb149..0000000000 --- a/testing/btest/language/index-assignment-invalid.bro +++ /dev/null @@ -1,40 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: grep "error" output >output2 -# @TEST-EXEC: for i in 1 2 3 4 5; do cat output2 | cut -d'|' -f$i >>out; done -# @TEST-EXEC: 
TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -@load base/utils/queue - -global q: Queue::Queue = Queue::init(); - -type myrec: record { - a: bool &default=T; - b: string &default="hi"; - c: string &optional; -}; - -function bar(c: count) - { - local rval: vector of string = vector(); - Queue::get_vector(q, rval); - print rval; - Queue::get_vector(q, rval); - print rval; - } - -function foo(s: string, c: count) - { - bar(c + 42); - } - -event bro_init() - { - Queue::put(q, "hello"); - Queue::put(q, "goodbye"); - Queue::put(q, "test"); - Queue::put(q, myrec()); - Queue::put(q, "asdf"); - Queue::put(q, 3); - Queue::put(q, "jkl;"); - foo("hi", 13); - } diff --git a/testing/btest/language/index-assignment-invalid.zeek b/testing/btest/language/index-assignment-invalid.zeek new file mode 100644 index 0000000000..a42c81320b --- /dev/null +++ b/testing/btest/language/index-assignment-invalid.zeek @@ -0,0 +1,40 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: grep "error" output >output2 +# @TEST-EXEC: for i in 1 2 3 4 5; do cat output2 | cut -d'|' -f$i >>out; done +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +@load base/utils/queue + +global q: Queue::Queue = Queue::init(); + +type myrec: record { + a: bool &default=T; + b: string &default="hi"; + c: string &optional; +}; + +function bar(c: count) + { + local rval: vector of string = vector(); + Queue::get_vector(q, rval); + print rval; + Queue::get_vector(q, rval); + print rval; + } + +function foo(s: string, c: count) + { + bar(c + 42); + } + +event zeek_init() + { + Queue::put(q, "hello"); + Queue::put(q, "goodbye"); + Queue::put(q, "test"); + Queue::put(q, myrec()); + Queue::put(q, "asdf"); + Queue::put(q, 3); + Queue::put(q, "jkl;"); + foo("hi", 13); + } diff --git a/testing/btest/language/init-in-anon-function.bro b/testing/btest/language/init-in-anon-function.bro deleted file mode 100644 index 45f5f09f09..0000000000 --- a/testing/btest/language/init-in-anon-function.bro +++ /dev/null @@ -1,16 +0,0 @@ -# @TEST-EXEC: bro -r ${TRACES}/wikipedia.trace %INPUT >out -# @TEST-EXEC: btest-diff http.log - -module Foo; - -event bro_init() { - - Log::remove_default_filter(HTTP::LOG); - - local filter: Log::Filter = [$name = "http", - $pred = function(rec: HTTP::Info): bool { - rec$id$orig_h = remask_addr(rec$id$orig_h, 0.0.0.0, 112); - return T; - }]; - Log::add_filter(HTTP::LOG, filter); -} diff --git a/testing/btest/language/init-in-anon-function.zeek b/testing/btest/language/init-in-anon-function.zeek new file mode 100644 index 0000000000..f5808c1d99 --- /dev/null +++ b/testing/btest/language/init-in-anon-function.zeek @@ -0,0 +1,16 @@ +# @TEST-EXEC: zeek -r ${TRACES}/wikipedia.trace %INPUT >out +# @TEST-EXEC: btest-diff http.log + +module Foo; + +event zeek_init() { + + Log::remove_default_filter(HTTP::LOG); + + local filter: Log::Filter = [$name = "http", + $pred = function(rec: HTTP::Info): bool { + rec$id$orig_h = remask_addr(rec$id$orig_h, 0.0.0.0, 112); + return T; + }]; + Log::add_filter(HTTP::LOG, filter); +} diff --git a/testing/btest/language/int.bro b/testing/btest/language/int.bro deleted file mode 100644 index f511d82bbb..0000000000 --- a/testing/btest/language/int.bro +++ /dev/null @@ -1,70 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); - } - - -event bro_init() -{ - local i1: int = 3; - local i2: int = +3; - local i3: int = -3; - local i4: int = +0; - local i5: int = -0; - local i6: int = 12; - local i7: int = +0xc; - local i8: int = 0xC; - local i9: int = -0xC; - local i10: int = -12; - local i11: int = 9223372036854775807; # max. allowed value - local i12: int = -9223372036854775808; # min. allowed value - local i13: int = 0x7fffffffffffffff; # max. allowed value - local i14: int = -0x8000000000000000; # min. allowed value - local i15 = +3; - - # Type inference test - - test_case( "type inference", type_name(i15) == "int" ); - - # Test various constant representations - - test_case( "optional '+' sign", i1 == i2 ); - test_case( "negative vs. positive", i1 != i3 ); - test_case( "negative vs. positive", i4 == i5 ); - test_case( "hexadecimal", i6 == i7 ); - test_case( "hexadecimal", i6 == i8 ); - test_case( "hexadecimal", i9 == i10 ); - - # Operator tests - - test_case( "relational operator", i2 > i3 ); - test_case( "relational operator", i2 >= i3 ); - test_case( "relational operator", i3 < i2 ); - test_case( "relational operator", i3 <= i2 ); - test_case( "absolute value", |i4| == 0 ); - test_case( "absolute value", |i3| == 3 ); - test_case( "pre-increment operator", ++i2 == 4 ); - test_case( "pre-decrement operator", --i2 == 3 ); - test_case( "modulus operator", i2%2 == 1 ); - test_case( "division operator", i2/2 == 1 ); - i2 += 4; - test_case( "assignment operator", i2 == 7 ); - i2 -= 2; - test_case( "assignment operator", i2 == 5 ); - - # Max/min value tests - - local str1 = fmt("max int value = %d", i11); - test_case( str1, str1 == "max int value = 9223372036854775807" ); - local str2 = fmt("min int value = %d", i12); - test_case( str2, str2 == "min int value = -9223372036854775808" ); - local str3 = fmt("max int value = %d", i13); - test_case( str3, str3 == "max int value = 9223372036854775807" ); - local str4 = fmt("min int value = %d", i14); - test_case( str4, str4 == "min int value = -9223372036854775808" ); - -} - diff --git a/testing/btest/language/int.zeek b/testing/btest/language/int.zeek new file mode 100644 index 0000000000..c9344dd007 --- /dev/null +++ b/testing/btest/language/int.zeek @@ -0,0 +1,70 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + + +event zeek_init() +{ + local i1: int = 3; + local i2: int = +3; + local i3: int = -3; + local i4: int = +0; + local i5: int = -0; + local i6: int = 12; + local i7: int = +0xc; + local i8: int = 0xC; + local i9: int = -0xC; + local i10: int = -12; + local i11: int = 9223372036854775807; # max. allowed value + local i12: int = -9223372036854775808; # min. allowed value + local i13: int = 0x7fffffffffffffff; # max. allowed value + local i14: int = -0x8000000000000000; # min. allowed value + local i15 = +3; + + # Type inference test + + test_case( "type inference", type_name(i15) == "int" ); + + # Test various constant representations + + test_case( "optional '+' sign", i1 == i2 ); + test_case( "negative vs. positive", i1 != i3 ); + test_case( "negative vs. 
positive", i4 == i5 ); + test_case( "hexadecimal", i6 == i7 ); + test_case( "hexadecimal", i6 == i8 ); + test_case( "hexadecimal", i9 == i10 ); + + # Operator tests + + test_case( "relational operator", i2 > i3 ); + test_case( "relational operator", i2 >= i3 ); + test_case( "relational operator", i3 < i2 ); + test_case( "relational operator", i3 <= i2 ); + test_case( "absolute value", |i4| == 0 ); + test_case( "absolute value", |i3| == 3 ); + test_case( "pre-increment operator", ++i2 == 4 ); + test_case( "pre-decrement operator", --i2 == 3 ); + test_case( "modulus operator", i2%2 == 1 ); + test_case( "division operator", i2/2 == 1 ); + i2 += 4; + test_case( "assignment operator", i2 == 7 ); + i2 -= 2; + test_case( "assignment operator", i2 == 5 ); + + # Max/min value tests + + local str1 = fmt("max int value = %d", i11); + test_case( str1, str1 == "max int value = 9223372036854775807" ); + local str2 = fmt("min int value = %d", i12); + test_case( str2, str2 == "min int value = -9223372036854775808" ); + local str3 = fmt("max int value = %d", i13); + test_case( str3, str3 == "max int value = 9223372036854775807" ); + local str4 = fmt("min int value = %d", i14); + test_case( str4, str4 == "min int value = -9223372036854775808" ); + +} + diff --git a/testing/btest/language/interval.bro b/testing/btest/language/interval.bro deleted file mode 100644 index 0bb912c4d9..0000000000 --- a/testing/btest/language/interval.bro +++ /dev/null @@ -1,93 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - -function approx_equal(x: double, y: double): bool - { - # return T if x and y are approximately equal, and F otherwise - return |(x - y)/x| < 1e-6 ? 
T : F; - } - -event bro_init() -{ - # Constants without space and no letter "s" - - local in11: interval = 2usec; - local in12: interval = 2msec; - local in13: interval = 120sec; - local in14: interval = 2min; - local in15: interval = -2hr; - local in16: interval = 2.5day; - - # Constants with space and no letter "s" - - local in21: interval = 2 usec; - local in22: interval = 2 msec; - local in23: interval = 120 sec; - local in24: interval = 2 min; - local in25: interval = -2 hr; - local in26: interval = 2.5 day; - - # Constants with space and letter "s" - - local in31: interval = 2 usecs; - local in32: interval = 2 msecs; - local in33: interval = 1.2e2 secs; - local in34: interval = 2 mins; - local in35: interval = -2 hrs; - local in36: interval = 2.5 days; - - # Type inference - - local in41 = 2 usec; - local in42 = 2.1usec; - local in43 = 3usecs; - - # Type inference tests - - test_case( "type inference", type_name(in41) == "interval" ); - test_case( "type inference", type_name(in42) == "interval" ); - test_case( "type inference", type_name(in43) == "interval" ); - - # Test various constant representations - - test_case( "optional space", in11 == in21 ); - test_case( "plural/singular interval are same", in11 == in31 ); - - # Operator tests - - test_case( "different units with same numeric value", in11 != in12 ); - test_case( "compare different time units", in13 == in34 ); - test_case( "compare different time units", in13 <= in34 ); - test_case( "compare different time units", in13 >= in34 ); - test_case( "compare different time units", in13 < in36 ); - test_case( "compare different time units", in13 <= in36 ); - test_case( "compare different time units", in13 > in35 ); - test_case( "compare different time units", in13 >= in35 ); - test_case( "add different time units", in13 + in14 == 4min ); - test_case( "subtract different time units", in24 - in23 == 0sec ); - test_case( "absolute value", |in25| == 2.0*3600 ); - test_case( "absolute value", |in36| == 2.5*86400 ); - test_case( "absolute value", |5sec - 9sec| == 4.0 ); - in34 += 2hr; - test_case( "assignment operator", in34 == 122min ); - in34 -= 2hr; - test_case( "assignment operator", in34 == 2min ); - test_case( "multiplication operator", in33*2 == 4min ); - test_case( "division operator", in35/2 == -1hr ); - test_case( "division operator", approx_equal(in32/in31, 1e3) ); - - # Test relative size of each interval unit - - test_case( "relative size of units", approx_equal(1msec/1usec, 1000) ); - test_case( "relative size of units", approx_equal(1sec/1msec, 1000) ); - test_case( "relative size of units", approx_equal(1min/1sec, 60) ); - test_case( "relative size of units", approx_equal(1hr/1min, 60) ); - test_case( "relative size of units", approx_equal(1day/1hr, 24) ); - -} - diff --git a/testing/btest/language/interval.zeek b/testing/btest/language/interval.zeek new file mode 100644 index 0000000000..994eb4c769 --- /dev/null +++ b/testing/btest/language/interval.zeek @@ -0,0 +1,93 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + +function approx_equal(x: double, y: double): bool + { + # return T if x and y are approximately equal, and F otherwise + return |(x - y)/x| < 1e-6 ? 
T : F; + } + +event zeek_init() +{ + # Constants without space and no letter "s" + + local in11: interval = 2usec; + local in12: interval = 2msec; + local in13: interval = 120sec; + local in14: interval = 2min; + local in15: interval = -2hr; + local in16: interval = 2.5day; + + # Constants with space and no letter "s" + + local in21: interval = 2 usec; + local in22: interval = 2 msec; + local in23: interval = 120 sec; + local in24: interval = 2 min; + local in25: interval = -2 hr; + local in26: interval = 2.5 day; + + # Constants with space and letter "s" + + local in31: interval = 2 usecs; + local in32: interval = 2 msecs; + local in33: interval = 1.2e2 secs; + local in34: interval = 2 mins; + local in35: interval = -2 hrs; + local in36: interval = 2.5 days; + + # Type inference + + local in41 = 2 usec; + local in42 = 2.1usec; + local in43 = 3usecs; + + # Type inference tests + + test_case( "type inference", type_name(in41) == "interval" ); + test_case( "type inference", type_name(in42) == "interval" ); + test_case( "type inference", type_name(in43) == "interval" ); + + # Test various constant representations + + test_case( "optional space", in11 == in21 ); + test_case( "plural/singular interval are same", in11 == in31 ); + + # Operator tests + + test_case( "different units with same numeric value", in11 != in12 ); + test_case( "compare different time units", in13 == in34 ); + test_case( "compare different time units", in13 <= in34 ); + test_case( "compare different time units", in13 >= in34 ); + test_case( "compare different time units", in13 < in36 ); + test_case( "compare different time units", in13 <= in36 ); + test_case( "compare different time units", in13 > in35 ); + test_case( "compare different time units", in13 >= in35 ); + test_case( "add different time units", in13 + in14 == 4min ); + test_case( "subtract different time units", in24 - in23 == 0sec ); + test_case( "absolute value", |in25| == 2.0*3600 ); + test_case( "absolute value", |in36| == 2.5*86400 ); + test_case( "absolute value", |5sec - 9sec| == 4.0 ); + in34 += 2hr; + test_case( "assignment operator", in34 == 122min ); + in34 -= 2hr; + test_case( "assignment operator", in34 == 2min ); + test_case( "multiplication operator", in33*2 == 4min ); + test_case( "division operator", in35/2 == -1hr ); + test_case( "division operator", approx_equal(in32/in31, 1e3) ); + + # Test relative size of each interval unit + + test_case( "relative size of units", approx_equal(1msec/1usec, 1000) ); + test_case( "relative size of units", approx_equal(1sec/1msec, 1000) ); + test_case( "relative size of units", approx_equal(1min/1sec, 60) ); + test_case( "relative size of units", approx_equal(1hr/1min, 60) ); + test_case( "relative size of units", approx_equal(1day/1hr, 24) ); + +} + diff --git a/testing/btest/language/invalid_index.bro b/testing/btest/language/invalid_index.bro deleted file mode 100644 index 23fdb50d06..0000000000 --- a/testing/btest/language/invalid_index.bro +++ /dev/null @@ -1,22 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -global foo: vector of count = { 42 }; -global foo2: table[count] of count = { [0] = 13 }; - -event bro_init() - { - print "foo[0]", foo[0]; - print "foo[1]", foo[1]; - } - -event bro_init() - { - print "foo2[0]", foo2[0]; - print "foo2[1]", foo2[1]; - } - -event bro_done() - { - print "done"; - } diff --git a/testing/btest/language/invalid_index.zeek b/testing/btest/language/invalid_index.zeek new file mode 100644 
index 0000000000..80f294c68b --- /dev/null +++ b/testing/btest/language/invalid_index.zeek @@ -0,0 +1,22 @@ +# @TEST-EXEC: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +global foo: vector of count = { 42 }; +global foo2: table[count] of count = { [0] = 13 }; + +event zeek_init() + { + print "foo[0]", foo[0]; + print "foo[1]", foo[1]; + } + +event zeek_init() + { + print "foo2[0]", foo2[0]; + print "foo2[1]", foo2[1]; + } + +event zeek_done() + { + print "done"; + } diff --git a/testing/btest/language/ipv6-literals.bro b/testing/btest/language/ipv6-literals.bro deleted file mode 100644 index bf888b29e1..0000000000 --- a/testing/btest/language/ipv6-literals.bro +++ /dev/null @@ -1,32 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -local v: vector of addr = vector(); - -v += [::1]; -v += [::ffff]; -v += [::ffff:ffff]; -v += [::0a0a:ffff]; -v += [1::1]; -v += [1::a]; -v += [1::1:1]; -v += [1::1:a]; -v += [a::a]; -v += [a::1]; -v += [a::a:a]; -v += [a::a:1]; -v += [a:a::a]; -v += [aaaa:0::ffff]; -v += [::ffff:192.168.1.100]; -v += [ffff::192.168.1.100]; -v += [::192.168.1.100]; -v += [::ffff:0:192.168.1.100]; -v += [805B:2D9D:DC28::FC57:212.200.31.255]; -v += [0xaaaa::bbbb]; -v += [aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222]; -v += [aaaa:bbbb:cccc:dddd:eeee:ffff:1:2222]; -v += [aaaa:bbbb:cccc:dddd:eeee:ffff:0:2222]; -v += [aaaa:bbbb:cccc:dddd:eeee:0:0:2222]; - -for (i in v) - print v[i]; diff --git a/testing/btest/language/ipv6-literals.zeek b/testing/btest/language/ipv6-literals.zeek new file mode 100644 index 0000000000..e64185d92a --- /dev/null +++ b/testing/btest/language/ipv6-literals.zeek @@ -0,0 +1,32 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +local v: vector of addr = vector(); + +v += [::1]; +v += [::ffff]; +v += [::ffff:ffff]; +v += [::0a0a:ffff]; +v += [1::1]; +v += [1::a]; +v += [1::1:1]; +v += [1::1:a]; +v += [a::a]; +v += [a::1]; +v += [a::a:a]; +v += [a::a:1]; +v += [a:a::a]; +v += [aaaa:0::ffff]; +v += [::ffff:192.168.1.100]; +v += [ffff::192.168.1.100]; +v += [::192.168.1.100]; +v += [::ffff:0:192.168.1.100]; +v += [805B:2D9D:DC28::FC57:212.200.31.255]; +v += [0xaaaa::bbbb]; +v += [aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222]; +v += [aaaa:bbbb:cccc:dddd:eeee:ffff:1:2222]; +v += [aaaa:bbbb:cccc:dddd:eeee:ffff:0:2222]; +v += [aaaa:bbbb:cccc:dddd:eeee:0:0:2222]; + +for (i in v) + print v[i]; diff --git a/testing/btest/language/key-value-for.bro b/testing/btest/language/key-value-for.bro deleted file mode 100644 index 97591dcacf..0000000000 --- a/testing/btest/language/key-value-for.bro +++ /dev/null @@ -1,22 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - - -event bro_init() - { - # Test single keys - - local t: table[count] of string = table(); - t[1] = "hello"; - t[55] = "goodbye"; - for (key, value in t) - print key, value; - - # Test multiple keys - - local tkk: table[string, string] of count = table(); - tkk["hello", "world"] = 1; - tkk["goodbye", "world"] = 55; - for ([k1, k2], val in tkk) - print k1, k2, val; - } diff --git a/testing/btest/language/key-value-for.zeek b/testing/btest/language/key-value-for.zeek new file mode 100644 index 0000000000..6d3dfc5f7f --- /dev/null +++ b/testing/btest/language/key-value-for.zeek @@ -0,0 +1,22 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + + +event zeek_init() + { + # Test single keys + + local t: table[count] of string = table(); + t[1] = "hello"; + t[55] = 
"goodbye"; + for (key, value in t) + print key, value; + + # Test multiple keys + + local tkk: table[string, string] of count = table(); + tkk["hello", "world"] = 1; + tkk["goodbye", "world"] = 55; + for ([k1, k2], val in tkk) + print k1, k2, val; + } diff --git a/testing/btest/language/module.bro b/testing/btest/language/module.bro deleted file mode 100644 index 3278697a8d..0000000000 --- a/testing/btest/language/module.bro +++ /dev/null @@ -1,41 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT secondtestfile >out -# @TEST-EXEC: btest-diff out - -# In this source file, we define a module and export some objects - -module thisisatest; - -export { - global test_case: function(msg: string, expect: bool); - - global testevent: event(msg: string); - - global num: count = 123; - - const daysperyear: count = 365; -} - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - -event testevent(msg: string) - { - test_case( "event", T ); - } - - -# @TEST-START-FILE secondtestfile - -# In this source file, we try to access each exported object from the module - -event bro_init() -{ - thisisatest::test_case( "function", T ); - thisisatest::test_case( "global variable", thisisatest::num == 123 ); - thisisatest::test_case( "const", thisisatest::daysperyear == 365 ); - event thisisatest::testevent( "foo" ); -} - -# @TEST-END-FILE diff --git a/testing/btest/language/module.zeek b/testing/btest/language/module.zeek new file mode 100644 index 0000000000..e714ff22c2 --- /dev/null +++ b/testing/btest/language/module.zeek @@ -0,0 +1,41 @@ +# @TEST-EXEC: zeek -b %INPUT secondtestfile >out +# @TEST-EXEC: btest-diff out + +# In this source file, we define a module and export some objects + +module thisisatest; + +export { + global test_case: function(msg: string, expect: bool); + + global testevent: event(msg: string); + + global num: count = 123; + + const daysperyear: count = 365; +} + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + +event testevent(msg: string) + { + test_case( "event", T ); + } + + +# @TEST-START-FILE secondtestfile + +# In this source file, we try to access each exported object from the module + +event zeek_init() +{ + thisisatest::test_case( "function", T ); + thisisatest::test_case( "global variable", thisisatest::num == 123 ); + thisisatest::test_case( "const", thisisatest::daysperyear == 365 ); + event thisisatest::testevent( "foo" ); +} + +# @TEST-END-FILE diff --git a/testing/btest/language/named-record-ctors.bro b/testing/btest/language/named-record-ctors.bro deleted file mode 100644 index 40a79d86b3..0000000000 --- a/testing/btest/language/named-record-ctors.bro +++ /dev/null @@ -1,39 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -@load frameworks/software/vulnerable - -type MyRec: record { - min: count &optional; - max: count; -}; - -type Bar: record { - aaa: count; - bbb: string &optional; - ccc: string &optional; - ddd: string &default="default"; -}; - -const java_1_6_vuln = Software::VulnerableVersionRange( - $max = Software::Version($major = 1, $minor = 6, $minor2 = 0, $minor3 = 44) -); - -const java_1_7_vuln = Software::VulnerableVersionRange( - $min = Software::Version($major = 1, $minor = 7), - $max = Software::Version($major = 1, $minor = 7, $minor2 = 0, $minor3 = 20) -); - -redef Software::vulnerable_versions += { - ["Java"] = set(java_1_6_vuln, java_1_7_vuln) -}; - -local myrec: MyRec = MyRec($max=2); -print myrec; -myrec = MyRec($min=7, $max=42); -print myrec; - -local data = Bar($aaa=1, $bbb="test"); -print data; - -print Software::vulnerable_versions; diff --git a/testing/btest/language/named-record-ctors.zeek b/testing/btest/language/named-record-ctors.zeek new file mode 100644 index 0000000000..af2b175266 --- /dev/null +++ b/testing/btest/language/named-record-ctors.zeek @@ -0,0 +1,39 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +@load frameworks/software/vulnerable + +type MyRec: record { + min: count &optional; + max: count; +}; + +type Bar: record { + aaa: count; + bbb: string &optional; + ccc: string &optional; + ddd: string &default="default"; +}; + +const java_1_6_vuln = Software::VulnerableVersionRange( + $max = Software::Version($major = 1, $minor = 6, $minor2 = 0, $minor3 = 44) +); + +const java_1_7_vuln = Software::VulnerableVersionRange( + $min = Software::Version($major = 1, $minor = 7), + $max = Software::Version($major = 1, $minor = 7, $minor2 = 0, $minor3 = 20) +); + +redef Software::vulnerable_versions += { + ["Java"] = set(java_1_6_vuln, java_1_7_vuln) +}; + +local myrec: MyRec = MyRec($max=2); +print myrec; +myrec = MyRec($min=7, $max=42); +print myrec; + +local data = Bar($aaa=1, $bbb="test"); +print data; + +print Software::vulnerable_versions; diff --git a/testing/btest/language/named-set-ctors.bro b/testing/btest/language/named-set-ctors.bro deleted file mode 100644 index 083937c42e..0000000000 --- a/testing/btest/language/named-set-ctors.bro +++ /dev/null @@ -1,19 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -type MyRec: record { - min: count &optional; - max: count; -}; - -type FooSet: set[count]; -type FooSetRec: set[MyRec]; -type FooSetComp: set[string, count]; - -global myset: FooSet = FooSet(1, 5, 3); -global mysetrec: FooSetRec = FooSetRec([$max=5], [$max=2]); -global mysetcomp: FooSetComp = FooSetComp(["test", 1], ["cool", 2]); - -print myset; -print mysetrec; -print mysetcomp; diff --git a/testing/btest/language/named-set-ctors.zeek 
b/testing/btest/language/named-set-ctors.zeek new file mode 100644 index 0000000000..707c8f6fe5 --- /dev/null +++ b/testing/btest/language/named-set-ctors.zeek @@ -0,0 +1,19 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +type MyRec: record { + min: count &optional; + max: count; +}; + +type FooSet: set[count]; +type FooSetRec: set[MyRec]; +type FooSetComp: set[string, count]; + +global myset: FooSet = FooSet(1, 5, 3); +global mysetrec: FooSetRec = FooSetRec([$max=5], [$max=2]); +global mysetcomp: FooSetComp = FooSetComp(["test", 1], ["cool", 2]); + +print myset; +print mysetrec; +print mysetcomp; diff --git a/testing/btest/language/named-table-ctors.bro b/testing/btest/language/named-table-ctors.bro deleted file mode 100644 index 1fad56e30f..0000000000 --- a/testing/btest/language/named-table-ctors.bro +++ /dev/null @@ -1,30 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -type MyRec: record { - min: count &optional; - max: count; -}; - -type FooTable: table[count] of string; -type FooTableRec: table[MyRec] of string; -type FooTableComp: table[string, count] of string; -type FooTableY: table[string] of double; - -global mytable: FooTable = FooTable([1] = "one", [5] = "five", [3] = "three"); -global mytablerec: FooTableRec = FooTableRec([[$max=5]] = "max5", [[$max=2]] = "max2"); -global mytablecomp: FooTableComp = FooTableComp(["test", 1] = "test1", ["cool", -2] = "cool2"); -global mytabley: FooTableY = FooTableY(["one"] = 1, ["two"] = 2, ["three"] = 3) &default=0; - -event bro_init() - { - print mytable; - print mytablerec; - print mytablecomp; - print mytabley; - print mytabley["test"]; - - local loctable = FooTable([42] = "forty-two", [37] = "thirty-seven"); - print loctable; - } diff --git a/testing/btest/language/named-table-ctors.zeek b/testing/btest/language/named-table-ctors.zeek new file mode 100644 index 0000000000..957ea351da --- /dev/null +++ b/testing/btest/language/named-table-ctors.zeek @@ -0,0 +1,30 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +type MyRec: record { + min: count &optional; + max: count; +}; + +type FooTable: table[count] of string; +type FooTableRec: table[MyRec] of string; +type FooTableComp: table[string, count] of string; +type FooTableY: table[string] of double; + +global mytable: FooTable = FooTable([1] = "one", [5] = "five", [3] = "three"); +global mytablerec: FooTableRec = FooTableRec([[$max=5]] = "max5", [[$max=2]] = "max2"); +global mytablecomp: FooTableComp = FooTableComp(["test", 1] = "test1", ["cool", +2] = "cool2"); +global mytabley: FooTableY = FooTableY(["one"] = 1, ["two"] = 2, ["three"] = 3) &default=0; + +event zeek_init() + { + print mytable; + print mytablerec; + print mytablecomp; + print mytabley; + print mytabley["test"]; + + local loctable = FooTable([42] = "forty-two", [37] = "thirty-seven"); + print loctable; + } diff --git a/testing/btest/language/named-vector-ctors.bro b/testing/btest/language/named-vector-ctors.bro deleted file mode 100644 index 1e0e1e9e55..0000000000 --- a/testing/btest/language/named-vector-ctors.bro +++ /dev/null @@ -1,19 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -type MyRec: record { - min: count &optional; - max: count; -}; - -type FooVector: vector of string; -type FooVectorD: vector of double; -type FooVectorRec: vector of MyRec; - -global myvec: FooVector = FooVector("one", "two", "three"); -global myvecd: FooVectorD = FooVectorD(1, 2, 3); -global myvecrec: FooVectorRec = FooVectorRec([$max=1], 
[$max=2], [$max=3]); - -print myvec; -print myvecd; -print myvecrec; diff --git a/testing/btest/language/named-vector-ctors.zeek b/testing/btest/language/named-vector-ctors.zeek new file mode 100644 index 0000000000..775422810b --- /dev/null +++ b/testing/btest/language/named-vector-ctors.zeek @@ -0,0 +1,19 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +type MyRec: record { + min: count &optional; + max: count; +}; + +type FooVector: vector of string; +type FooVectorD: vector of double; +type FooVectorRec: vector of MyRec; + +global myvec: FooVector = FooVector("one", "two", "three"); +global myvecd: FooVectorD = FooVectorD(1, 2, 3); +global myvecrec: FooVectorRec = FooVectorRec([$max=1], [$max=2], [$max=3]); + +print myvec; +print myvecd; +print myvecrec; diff --git a/testing/btest/language/nested-sets.bro b/testing/btest/language/nested-sets.bro deleted file mode 100644 index e33e1ac842..0000000000 --- a/testing/btest/language/nested-sets.bro +++ /dev/null @@ -1,19 +0,0 @@ -# @TEST-EXEC: for i in `seq 21`; do echo 0 >> random.seed; done -# @TEST-EXEC: test `bro -b -G random.seed %INPUT` = "pass" - -type r: record { - b: set[count]; -}; - -global foo: set[r]; -global bar = set(1,3,5); - -add foo[record($b=bar)]; - -bar = set(5,3,1); -delete foo[record($b=bar)]; - -if ( |foo| > 0 ) - print "fail"; -else - print "pass"; diff --git a/testing/btest/language/nested-sets.zeek b/testing/btest/language/nested-sets.zeek new file mode 100644 index 0000000000..8c4f987075 --- /dev/null +++ b/testing/btest/language/nested-sets.zeek @@ -0,0 +1,19 @@ +# @TEST-EXEC: for i in `seq 21`; do echo 0 >> random.seed; done +# @TEST-EXEC: test `zeek -b -G random.seed %INPUT` = "pass" + +type r: record { + b: set[count]; +}; + +global foo: set[r]; +global bar = set(1,3,5); + +add foo[record($b=bar)]; + +bar = set(5,3,1); +delete foo[record($b=bar)]; + +if ( |foo| > 0 ) + print "fail"; +else + print "pass"; diff --git a/testing/btest/language/next-test.bro b/testing/btest/language/next-test.bro deleted file mode 100644 index d46ad187c4..0000000000 --- a/testing/btest/language/next-test.bro +++ /dev/null @@ -1,36 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: btest-diff output - -# This script tests "next" being called during the last iteration of a -# for loop - -event bro_done() - { - - local number_set: set[count]; - local i: count; - - add number_set[0]; - add number_set[1]; - - - for ( i in number_set ) - { - print fmt ("%d", i); - if ( i == 0 ) - next; - print fmt ("%d", i); - } - print fmt ("MIDDLE"); - - - for ( i in number_set ) - { - print fmt ("%d", i); - if ( i == 1 ) - next; - print fmt ("%d", i); - } - print fmt ("THE END"); - - } diff --git a/testing/btest/language/next-test.zeek b/testing/btest/language/next-test.zeek new file mode 100644 index 0000000000..3746c4cb09 --- /dev/null +++ b/testing/btest/language/next-test.zeek @@ -0,0 +1,36 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: btest-diff output + +# This script tests "next" being called during the last iteration of a +# for loop + +event zeek_done() + { + + local number_set: set[count]; + local i: count; + + add number_set[0]; + add number_set[1]; + + + for ( i in number_set ) + { + print fmt ("%d", i); + if ( i == 0 ) + next; + print fmt ("%d", i); + } + print fmt ("MIDDLE"); + + + for ( i in number_set ) + { + print fmt ("%d", i); + if ( i == 1 ) + next; + print fmt ("%d", i); + } + print fmt ("THE END"); + + } diff --git a/testing/btest/language/no-module.bro 
b/testing/btest/language/no-module.bro deleted file mode 100644 index fff55d3854..0000000000 --- a/testing/btest/language/no-module.bro +++ /dev/null @@ -1,35 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT secondtestfile >out -# @TEST-EXEC: btest-diff out - -# This is the same test as "module.bro", but here we omit the module definition - - -global num: count = 123; - -const daysperyear: count = 365; - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - -event testevent(msg: string) - { - test_case( "event", T ); - } - - -# @TEST-START-FILE secondtestfile - -# In this script, we try to access each object defined in the other script - -event bro_init() -{ - test_case( "function", T ); - test_case( "global variable", num == 123 ); - test_case( "fully qualified global variable", GLOBAL::num == 123 ); # test for BIT-1758 : GLOBAL scope ID discovery bug - test_case( "const", daysperyear == 365 ); - event testevent( "foo" ); -} - -# @TEST-END-FILE diff --git a/testing/btest/language/no-module.zeek b/testing/btest/language/no-module.zeek new file mode 100644 index 0000000000..f78c9da6c0 --- /dev/null +++ b/testing/btest/language/no-module.zeek @@ -0,0 +1,35 @@ +# @TEST-EXEC: zeek -b %INPUT secondtestfile >out +# @TEST-EXEC: btest-diff out + +# This is the same test as "module.zeek", but here we omit the module definition + + +global num: count = 123; + +const daysperyear: count = 365; + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + +event testevent(msg: string) + { + test_case( "event", T ); + } + + +# @TEST-START-FILE secondtestfile + +# In this script, we try to access each object defined in the other script + +event zeek_init() +{ + test_case( "function", T ); + test_case( "global variable", num == 123 ); + test_case( "fully qualified global variable", GLOBAL::num == 123 ); # test for BIT-1758 : GLOBAL scope ID discovery bug + test_case( "const", daysperyear == 365 ); + event testevent( "foo" ); +} + +# @TEST-END-FILE diff --git a/testing/btest/language/null-statement.bro b/testing/btest/language/null-statement.bro deleted file mode 100644 index 20c70f4876..0000000000 --- a/testing/btest/language/null-statement.bro +++ /dev/null @@ -1,34 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - - -function f1(test: string) - { - ; # null statement in function - } - -event bro_init() -{ - local s1: set[string] = set( "this", "test" ); - - ; # null statement in event - - for ( i in s1 ) - ; # null statement in for loop - - if ( |s1| > 0 ) ; # null statement in if statement - - f1("foo"); - - { ; } # null compound statement - - if ( |s1| == 0 ) - { - print "Error: this should not happen"; - } - else - ; # null statement in else - - print "done"; -} - diff --git a/testing/btest/language/null-statement.zeek b/testing/btest/language/null-statement.zeek new file mode 100644 index 0000000000..72ceedf293 --- /dev/null +++ b/testing/btest/language/null-statement.zeek @@ -0,0 +1,34 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + + +function f1(test: string) + { + ; # null statement in function + } + +event zeek_init() +{ + local s1: set[string] = set( "this", "test" ); + + ; # null statement in event + + for ( i in s1 ) + ; # null statement in for loop + + if ( |s1| > 0 ) ; # null statement in if statement + + f1("foo"); + + { ; } # null compound statement + + if ( |s1| == 0 ) + { + print "Error: this should not happen"; + } + else + ; # null statement 
in else + + print "done"; +} + diff --git a/testing/btest/language/outer_param_binding.bro b/testing/btest/language/outer_param_binding.bro deleted file mode 100644 index fb37fd4712..0000000000 --- a/testing/btest/language/outer_param_binding.bro +++ /dev/null @@ -1,27 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -type Foo: record { - x: function(a: string) : string; -}; - -function bar(b: string, c: string) - { - local f: Foo; - local d = 8; - f = [$x=function(a: string) : string - { - local x = 0; - print x; - print c, d; - return cat(a, " ", b); - } - ]; - - print f$x("2"); - } - -event bro_init() - { - bar("1", "20"); - } diff --git a/testing/btest/language/outer_param_binding.zeek b/testing/btest/language/outer_param_binding.zeek new file mode 100644 index 0000000000..d3587a7cce --- /dev/null +++ b/testing/btest/language/outer_param_binding.zeek @@ -0,0 +1,27 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +type Foo: record { + x: function(a: string) : string; +}; + +function bar(b: string, c: string) + { + local f: Foo; + local d = 8; + f = [$x=function(a: string) : string + { + local x = 0; + print x; + print c, d; + return cat(a, " ", b); + } + ]; + + print f$x("2"); + } + +event zeek_init() + { + bar("1", "20"); + } diff --git a/testing/btest/language/paraglob-serialization.zeek b/testing/btest/language/paraglob-serialization.zeek new file mode 100644 index 0000000000..c9ffe9df4c --- /dev/null +++ b/testing/btest/language/paraglob-serialization.zeek @@ -0,0 +1,102 @@ +# @TEST-PORT: BROKER_PORT +# +# @TEST-EXEC: btest-bg-run recv "zeek -B broker -b ../recv.zeek >recv.out" +# @TEST-EXEC: btest-bg-run send "zeek -B broker -b ../send.zeek >send.out" +# +# @TEST-EXEC: btest-bg-wait 45 +# @TEST-EXEC: btest-diff recv/recv.out +# @TEST-EXEC: btest-diff send/send.out + +@TEST-START-FILE send.zeek + +redef exit_only_after_terminate = T; + +global event_count = 0; +global p: opaque of paraglob = paraglob_init(vector("hello", "*ello", "*")); + +global ping: event(msg: opaque of paraglob, c: count); + +event zeek_init() + { + print "Starting send."; + print paraglob_match(p, "hello"); + Broker::subscribe("bro/event/my_topic"); + Broker::peer("127.0.0.1", to_port(getenv("BROKER_PORT"))); + print "is_remote should be F, and is", is_remote_event(); + } + +function send_event() + { + ++event_count; + local e = Broker::make_event(ping, p, event_count); + Broker::publish("bro/event/my_topic", e); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("sender added peer: endpoint=%s msg=%s", + endpoint$network$address, msg); + send_event(); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("sender lost peer: endpoint=%s msg=%s", + endpoint$network$address, msg); + terminate(); + } + +event pong(msg: opaque of paraglob, n: count) + { + print "is_remote should be T, and is", is_remote_event(); + print fmt("sender got pong number: %s", n); + send_event(); + } + +@TEST-END-FILE + + +@TEST-START-FILE recv.zeek + +redef exit_only_after_terminate = T; + +const events_to_recv = 3; + +global handler: event(msg: string, c: count); +global auto_handler: event(msg: string, c: count); + +global pong: event(msg: opaque of paraglob, c: count); + +event zeek_init() + { + Broker::subscribe("bro/event/my_topic"); + Broker::listen("127.0.0.1", 
to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("receiver added peer: endpoint=%s msg=%s", endpoint$network$address, msg); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + print fmt("receiver lost peer: endpoint=%s msg=%s", endpoint$network$address, msg); + } + +event ping(msg: opaque of paraglob, n: count) + { + print "is_remote should be T, and is", is_remote_event(); + if ( n > events_to_recv ) + { + print get_broker_stats(); + terminate(); + return; + } + print fmt("receiver got ping number: %s", n); + print paraglob_match(msg, "hello"); + + local e = Broker::make_event(pong, msg, n); + Broker::publish("bro/event/my_topic", e); + } + +@TEST-END-FILE diff --git a/testing/btest/language/paraglob.zeek b/testing/btest/language/paraglob.zeek new file mode 100644 index 0000000000..b6f61a8ef0 --- /dev/null +++ b/testing/btest/language/paraglob.zeek @@ -0,0 +1,40 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_init () +{ + local v1 = vector("*", "d?g", "*og", "d?", "d[!wl]g"); + local v2 = vector("once", "!o*", "once"); + local v3 = vector("https://*.google.com/*", "*malware*", "*.gov*"); + local v4 = vector("z*ro"); + + local p1 = paraglob_init(v1); + local p2: opaque of paraglob = paraglob_init(v2); + local p3 = paraglob_init(v3); + local p4 = paraglob_init(v4); + local p_eq = paraglob_init(v1); + + # paraglob_init should not modify v1 + print (v1 == vector("*", "d?g", "*og", "d?", "d[!wl]g")); + # p_eq and p1 should be the same paraglobs + print paraglob_equals(p_eq, p1); + print paraglob_equals(p1, p2); + + print paraglob_match(p1, "dog"); + + print paraglob_match(p2, "once"); + print paraglob_match(p2, "nothing"); + print paraglob_match(p3, "www.strange-malware-domain.gov"); + print paraglob_match(p4, "zero\0zero"); + + # This looks like a lot, but really should complete quickly. + # Paraglob should stop addition of duplicate patterns. + local i = 1000000; + while (i > 0) { + i = i - 1; + v3 += v3[1]; + } + + local large_glob: opaque of paraglob = paraglob_init(v3); + print paraglob_match(large_glob, "www.strange-malware-domain.gov"); +} diff --git a/testing/btest/language/pattern.bro b/testing/btest/language/pattern.bro deleted file mode 100644 index e427b70e80..0000000000 --- a/testing/btest/language/pattern.bro +++ /dev/null @@ -1,68 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); - } - - -event bro_init() -{ - local p1: pattern = /foo|bar/; - local p2: pattern = /oob/; - local p3: pattern = /^oob/; - local p4 = /foo/; - - # Type inference tests - - test_case( "type inference", type_name(p4) == "pattern" ); - - # Operator tests - - test_case( "equality operator", "foo" == p1 ); - test_case( "equality operator (order of operands)", p1 == "foo" ); - - test_case( "inequality operator", "foobar" != p1 ); - test_case( "inequality operator (order of operands)", p1 != "foobar" ); - - test_case( "in operator", p1 in "foobar" ); - test_case( "in operator", p2 in "foobar" ); - test_case( "!in operator", p3 !in "foobar" ); - - test_case( "& operator", p1 & p2 in "baroob" ); - test_case( "& operator", p2 & p1 in "baroob" ); - - test_case( "| operator", p1 | p2 in "lazybarlazy" ); - test_case( "| operator", p3 | p4 in "xoob" ); - - test_case( "/i pattern modifier", /fOO/i in "xFoObar" ); - test_case( "/i pattern modifier", /fOO/i == "Foo" ); - - test_case( "/i double-quote escape", /"fOO"/i in "xFoObar" ); - test_case( "/i double-quote escape", /"fOO"/i in "xfOObar" ); - - test_case( "case-sensitive pattern", /fOO/ in "xFoObar" ); - test_case( "case-sensitive pattern", /fOO/ == "Foo" ); - test_case( "case-sensitive pattern", /fOO/ == "fOO" ); - - test_case( "/i pattern disjunction", /bar/i | /bez/ == "bez" ); - test_case( "/i pattern disjunction", /bar/i | /bez/ == "bEz" ); - test_case( "/i pattern disjunction", /bar/i | /bez/ == "bar" ); - test_case( "/i pattern disjunction", /bar/i | /bez/ == "bAr" ); - - test_case( "/i pattern concatenation", /bar/i & /bez/ == "barbez" ); - test_case( "/i pattern concatenation", /bar/i & /bez/ == "barbEz" ); - test_case( "/i pattern concatenation", /BAR/i & /bez/ == "barbEz" ); - test_case( "/i pattern concatenation", /bar/i & /bez/ == "bArbez" ); - test_case( "/i pattern concatenation", /BAR/i & /bez/ == "bArbez" ); - test_case( "/i pattern concatenation", /bar/i & /bez/ == "bArbEz" ); - - test_case( "/i pattern character class", /ba[0a-c99S-Z0]/i & /bEz/ == "bArbEz" ); - test_case( "/i pattern character class", /ba[0a-c99M-S0]/i & /bEz/ == "bArbEz" ); - - test_case( "(?i:...) pattern construct", /foo|(?i:bar)/ in "xBAry" ); - test_case( "(?i:...) pattern construct", /foo|(?i:bar)/ in "xFOoy" ); - test_case( "(?i:...) pattern construct", /foo|(?i:bar)/ | /foo/i in "xFOoy" ); - -} diff --git a/testing/btest/language/pattern.zeek b/testing/btest/language/pattern.zeek new file mode 100644 index 0000000000..05a84e713c --- /dev/null +++ b/testing/btest/language/pattern.zeek @@ -0,0 +1,68 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + +event zeek_init() +{ + local p1: pattern = /foo|bar/; + local p2: pattern = /oob/; + local p3: pattern = /^oob/; + local p4 = /foo/; + + # Type inference tests + + test_case( "type inference", type_name(p4) == "pattern" ); + + # Operator tests + + test_case( "equality operator", "foo" == p1 ); + test_case( "equality operator (order of operands)", p1 == "foo" ); + + test_case( "inequality operator", "foobar" != p1 ); + test_case( "inequality operator (order of operands)", p1 != "foobar" ); + + test_case( "in operator", p1 in "foobar" ); + test_case( "in operator", p2 in "foobar" ); + test_case( "!in operator", p3 !in "foobar" ); + + test_case( "& operator", p1 & p2 in "baroob" ); + test_case( "& operator", p2 & p1 in "baroob" ); + + test_case( "| operator", p1 | p2 in "lazybarlazy" ); + test_case( "| operator", p3 | p4 in "xoob" ); + + test_case( "/i pattern modifier", /fOO/i in "xFoObar" ); + test_case( "/i pattern modifier", /fOO/i == "Foo" ); + + test_case( "/i double-quote escape", /"fOO"/i in "xFoObar" ); + test_case( "/i double-quote escape", /"fOO"/i in "xfOObar" ); + + test_case( "case-sensitive pattern", /fOO/ in "xFoObar" ); + test_case( "case-sensitive pattern", /fOO/ == "Foo" ); + test_case( "case-sensitive pattern", /fOO/ == "fOO" ); + + test_case( "/i pattern disjunction", /bar/i | /bez/ == "bez" ); + test_case( "/i pattern disjunction", /bar/i | /bez/ == "bEz" ); + test_case( "/i pattern disjunction", /bar/i | /bez/ == "bar" ); + test_case( "/i pattern disjunction", /bar/i | /bez/ == "bAr" ); + + test_case( "/i pattern concatenation", /bar/i & /bez/ == "barbez" ); + test_case( "/i pattern concatenation", /bar/i & /bez/ == "barbEz" ); + test_case( "/i pattern concatenation", /BAR/i & /bez/ == "barbEz" ); + test_case( "/i pattern concatenation", /bar/i & /bez/ == "bArbez" ); + test_case( "/i pattern concatenation", /BAR/i & /bez/ == "bArbez" ); + test_case( "/i pattern concatenation", /bar/i & /bez/ == "bArbEz" ); + + test_case( "/i pattern character class", /ba[0a-c99S-Z0]/i & /bEz/ == "bArbEz" ); + test_case( "/i pattern character class", /ba[0a-c99M-S0]/i & /bEz/ == "bArbEz" ); + + test_case( "(?i:...) pattern construct", /foo|(?i:bar)/ in "xBAry" ); + test_case( "(?i:...) pattern construct", /foo|(?i:bar)/ in "xFOoy" ); + test_case( "(?i:...) pattern construct", /foo|(?i:bar)/ | /foo/i in "xFOoy" ); + +} diff --git a/testing/btest/language/port.bro b/testing/btest/language/port.bro deleted file mode 100644 index a9c7fd33e7..0000000000 --- a/testing/btest/language/port.bro +++ /dev/null @@ -1,40 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); - } - - -event bro_init() -{ - local p1: port = 1/icmp; - local p2: port = 2/udp; - local p3: port = 3/tcp; - local p4: port = 4/unknown; - local p5 = 123/tcp; - - # maximum allowed values for each port type - local p6: port = 255/icmp; - local p7: port = 65535/udp; - local p8: port = 65535/tcp; - local p9: port = 255/unknown; - - # Type inference test - - test_case( "type inference", type_name(p5) == "port" ); - - # Operator tests - - test_case( "protocol ordering", p1 > p2 ); - test_case( "protocol ordering", p2 > p3 ); - test_case( "protocol ordering", p3 > p4 ); - test_case( "protocol ordering", p8 < p7 ); - test_case( "protocol ordering", p9 < p6 ); - test_case( "different protocol but same numeric value", p7 != p8 ); - test_case( "different protocol but same numeric value", p6 != p9 ); - test_case( "equality operator", 65535/tcp == p8 ); - -} - diff --git a/testing/btest/language/port.zeek b/testing/btest/language/port.zeek new file mode 100644 index 0000000000..03a6617eed --- /dev/null +++ b/testing/btest/language/port.zeek @@ -0,0 +1,40 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + + +event zeek_init() +{ + local p1: port = 1/icmp; + local p2: port = 2/udp; + local p3: port = 3/tcp; + local p4: port = 4/unknown; + local p5 = 123/tcp; + + # maximum allowed values for each port type + local p6: port = 255/icmp; + local p7: port = 65535/udp; + local p8: port = 65535/tcp; + local p9: port = 255/unknown; + + # Type inference test + + test_case( "type inference", type_name(p5) == "port" ); + + # Operator tests + + test_case( "protocol ordering", p1 > p2 ); + test_case( "protocol ordering", p2 > p3 ); + test_case( "protocol ordering", p3 > p4 ); + test_case( "protocol ordering", p8 < p7 ); + test_case( "protocol ordering", p9 < p6 ); + test_case( "different protocol but same numeric value", p7 != p8 ); + test_case( "different protocol but same numeric value", p6 != p9 ); + test_case( "equality operator", 65535/tcp == p8 ); + +} + diff --git a/testing/btest/language/precedence.bro b/testing/btest/language/precedence.bro deleted file mode 100644 index 27fc1e024a..0000000000 --- a/testing/btest/language/precedence.bro +++ /dev/null @@ -1,110 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); - } - -# This is an incomplete set of tests to demonstrate the order of precedence -# of bro script operators - -event bro_init() -{ - local n1: int; - local n2: int; - local n3: int; - - # Tests that show "++" has higher precedence than "*" - - n1 = n2 = 5; - n1 = ++n1 * 3; - n2 = (++n2) * 3; - test_case( "++ and *", n1 == 18 ); - test_case( "++ and *", n2 == 18 ); - - n1 = 5; - n1 = 3 * ++n1; - test_case( "* and ++", n1 == 18 ); - - # Tests that show "*" has same precedence as "%" - - n1 = 3 * 5 % 2; - n2 = (3 * 5) % 2; - n3 = 3 * (5 % 2); - test_case( "* and %", n1 == 1 ); - test_case( "* and %", n2 == 1 ); - test_case( "* and %", n3 == 3 ); - - n1 = 7 % 3 * 2; - n2 = (7 % 3) * 2; - n3 = 7 % (3 * 2); - test_case( "% and *", n1 == 2 ); - test_case( "% and *", n2 == 2 ); - test_case( "% and *", n3 == 1 ); - - # Tests that show "*" has higher precedence than "+" - - n1 = 1 + 2 * 3; - n2 = 1 + (2 * 3); - n3 = (1 + 2) * 3; - test_case( "+ and *", n1 == 7 ); - test_case( "+ and *", n2 == 7 ); - test_case( "+ and *", n3 == 9 ); - - # Tests that show "+" has higher precedence than "<" - - test_case( "< and +", 5 < 3 + 7 ); - test_case( "< and +", 5 < (3 + 7) ); - - test_case( "+ and <", 7 + 3 > 5 ); - test_case( "+ and <", (7 + 3) > 5 ); - - # Tests that show "+" has higher precedence than "+=" - - n1 = n2 = n3 = 0; - n1 += 1 + 2; - n2 += (1 + 2); - (n3 += 1) + 2; - test_case( "+= and +", n1 == 3 ); - test_case( "+= and +", n2 == 3 ); - test_case( "+= and +", n3 == 1 ); - - local r1: bool; - local r2: bool; - local r3: bool; - - # Tests that show "&&" has higher precedence than "||" - - r1 = F && F || T; - r2 = (F && F) || T; - r3 = F && (F || T); - test_case( "&& and ||", r1 ); - test_case( "&& and ||", r2 ); - test_case( "&& and ||", !r3 ); - - r1 = T || F && F; - r2 = T || (F && F); - r3 = (T || F) && F; - test_case( "|| and &&", r1 ); - test_case( "|| and &&", r2 ); - test_case( "|| and &&", !r3 ); - - # Tests that show "||" has higher precedence than conditional operator - - r1 = T || T ? F : F; - r2 = (T || T) ? F : F; - r3 = T || (T ? F : F); - test_case( "|| and conditional operator", !r1 ); - test_case( "|| and conditional operator", !r2 ); - test_case( "|| and conditional operator", r3 ); - - r1 = T ? F : F || T; - r2 = T ? F : (F || T); - r3 = (T ? F : F) || T; - test_case( "conditional operator and ||", !r1 ); - test_case( "conditional operator and ||", !r2 ); - test_case( "conditional operator and ||", r3 ); - -} - diff --git a/testing/btest/language/precedence.zeek b/testing/btest/language/precedence.zeek new file mode 100644 index 0000000000..1af4bb6569 --- /dev/null +++ b/testing/btest/language/precedence.zeek @@ -0,0 +1,110 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + +# This is an incomplete set of tests to demonstrate the order of precedence +# of zeek script operators + +event zeek_init() +{ + local n1: int; + local n2: int; + local n3: int; + + # Tests that show "++" has higher precedence than "*" + + n1 = n2 = 5; + n1 = ++n1 * 3; + n2 = (++n2) * 3; + test_case( "++ and *", n1 == 18 ); + test_case( "++ and *", n2 == 18 ); + + n1 = 5; + n1 = 3 * ++n1; + test_case( "* and ++", n1 == 18 ); + + # Tests that show "*" has same precedence as "%" + + n1 = 3 * 5 % 2; + n2 = (3 * 5) % 2; + n3 = 3 * (5 % 2); + test_case( "* and %", n1 == 1 ); + test_case( "* and %", n2 == 1 ); + test_case( "* and %", n3 == 3 ); + + n1 = 7 % 3 * 2; + n2 = (7 % 3) * 2; + n3 = 7 % (3 * 2); + test_case( "% and *", n1 == 2 ); + test_case( "% and *", n2 == 2 ); + test_case( "% and *", n3 == 1 ); + + # Tests that show "*" has higher precedence than "+" + + n1 = 1 + 2 * 3; + n2 = 1 + (2 * 3); + n3 = (1 + 2) * 3; + test_case( "+ and *", n1 == 7 ); + test_case( "+ and *", n2 == 7 ); + test_case( "+ and *", n3 == 9 ); + + # Tests that show "+" has higher precedence than "<" + + test_case( "< and +", 5 < 3 + 7 ); + test_case( "< and +", 5 < (3 + 7) ); + + test_case( "+ and <", 7 + 3 > 5 ); + test_case( "+ and <", (7 + 3) > 5 ); + + # Tests that show "+" has higher precedence than "+=" + + n1 = n2 = n3 = 0; + n1 += 1 + 2; + n2 += (1 + 2); + (n3 += 1) + 2; + test_case( "+= and +", n1 == 3 ); + test_case( "+= and +", n2 == 3 ); + test_case( "+= and +", n3 == 1 ); + + local r1: bool; + local r2: bool; + local r3: bool; + + # Tests that show "&&" has higher precedence than "||" + + r1 = F && F || T; + r2 = (F && F) || T; + r3 = F && (F || T); + test_case( "&& and ||", r1 ); + test_case( "&& and ||", r2 ); + test_case( "&& and ||", !r3 ); + + r1 = T || F && F; + r2 = T || (F && F); + r3 = (T || F) && F; + test_case( "|| and &&", r1 ); + test_case( "|| and &&", r2 ); + test_case( "|| and &&", !r3 ); + + # Tests that show "||" has higher precedence than conditional operator + + r1 = T || T ? F : F; + r2 = (T || T) ? F : F; + r3 = T || (T ? F : F); + test_case( "|| and conditional operator", !r1 ); + test_case( "|| and conditional operator", !r2 ); + test_case( "|| and conditional operator", r3 ); + + r1 = T ? F : F || T; + r2 = T ? F : (F || T); + r3 = (T ? F : F) || T; + test_case( "conditional operator and ||", !r1 ); + test_case( "conditional operator and ||", !r2 ); + test_case( "conditional operator and ||", r3 ); + +} + diff --git a/testing/btest/language/raw_output_attr.test b/testing/btest/language/raw_output_attr.test index 8bcd479fbf..ccf616405e 100644 --- a/testing/btest/language/raw_output_attr.test +++ b/testing/btest/language/raw_output_attr.test @@ -1,14 +1,14 @@ # Files with the &raw_output attribute shouldn't interpret NUL characters # in strings that are `print`ed to it. -# @TEST-EXEC: bro -b %INPUT +# @TEST-EXEC: zeek -b %INPUT # @TEST-EXEC: tr '\000' 'X' output # @TEST-EXEC: btest-diff output # @TEST-EXEC: cmp myfile hookfile # first check local variable of file type w/ &raw_output -event bro_init() +event zeek_init() { local myfile: file; myfile = open("myfile"); diff --git a/testing/btest/language/rec-comp-init.bro b/testing/btest/language/rec-comp-init.bro deleted file mode 100644 index c65ef69097..0000000000 --- a/testing/btest/language/rec-comp-init.bro +++ /dev/null @@ -1,14 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: btest-diff output - -# Make sure composit types in records are initialized. 
- -type Foo: record { - a: set[count]; - b: table[count] of string; - c: vector of string; -}; - -global f: Foo; - -print f; diff --git a/testing/btest/language/rec-comp-init.zeek b/testing/btest/language/rec-comp-init.zeek new file mode 100644 index 0000000000..022f9fd50e --- /dev/null +++ b/testing/btest/language/rec-comp-init.zeek @@ -0,0 +1,14 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: btest-diff output + +# Make sure composit types in records are initialized. + +type Foo: record { + a: set[count]; + b: table[count] of string; + c: vector of string; +}; + +global f: Foo; + +print f; diff --git a/testing/btest/language/rec-nested-opt.bro b/testing/btest/language/rec-nested-opt.bro deleted file mode 100644 index 3b4a478f6b..0000000000 --- a/testing/btest/language/rec-nested-opt.bro +++ /dev/null @@ -1,24 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: btest-diff output - -type Version: record { - major: count &optional; ##< Major version number - minor: count &optional; ##< Minor version number - addl: string &optional; ##< Additional version string (e.g. "beta42") -} &log; - -type Info: record { - name: string; - version: Version; - host: addr; - ts: time; -}; - - -# Important thing to note here is that $minor2 is not include in the $version field. -global matched_software: table[string] of Info = { - ["Wget/1.9+cvs-stable (Red Hat modified)"] = - [$name="Wget", $version=[$major=1,$minor=9,$addl="+cvs"], $host=0.0.0.0, $ts=network_time()], -}; - -print matched_software; diff --git a/testing/btest/language/rec-nested-opt.zeek b/testing/btest/language/rec-nested-opt.zeek new file mode 100644 index 0000000000..be03a4532c --- /dev/null +++ b/testing/btest/language/rec-nested-opt.zeek @@ -0,0 +1,24 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: btest-diff output + +type Version: record { + major: count &optional; ##< Major version number + minor: count &optional; ##< Minor version number + addl: string &optional; ##< Additional version string (e.g. "beta42") +} &log; + +type Info: record { + name: string; + version: Version; + host: addr; + ts: time; +}; + + +# Important thing to note here is that $minor2 is not include in the $version field. 
+global matched_software: table[string] of Info = { + ["Wget/1.9+cvs-stable (Red Hat modified)"] = + [$name="Wget", $version=[$major=1,$minor=9,$addl="+cvs"], $host=0.0.0.0, $ts=network_time()], +}; + +print matched_software; diff --git a/testing/btest/language/rec-of-tbl.bro b/testing/btest/language/rec-of-tbl.bro deleted file mode 100644 index 8d2c9ab0e0..0000000000 --- a/testing/btest/language/rec-of-tbl.bro +++ /dev/null @@ -1,16 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: btest-diff output - -type x: record { - a: table[int] of count; -}; - -global y: x; - -global yy: table[int] of count; - -y$a = yy; - -y$a[+5] = 3; - -print y; diff --git a/testing/btest/language/rec-of-tbl.zeek b/testing/btest/language/rec-of-tbl.zeek new file mode 100644 index 0000000000..6285680c47 --- /dev/null +++ b/testing/btest/language/rec-of-tbl.zeek @@ -0,0 +1,16 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: btest-diff output + +type x: record { + a: table[int] of count; +}; + +global y: x; + +global yy: table[int] of count; + +y$a = yy; + +y$a[+5] = 3; + +print y; diff --git a/testing/btest/language/rec-table-default.bro b/testing/btest/language/rec-table-default.bro deleted file mode 100644 index 27e0043dc3..0000000000 --- a/testing/btest/language/rec-table-default.bro +++ /dev/null @@ -1,18 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: btest-diff output - -type X: record { - a: table[string] of bool &default=table( ["foo"] = T ); - b: table[string] of bool &default=table(); - c: set[string] &default=set("A", "B", "C"); - d: set[string] &default=set(); -}; - -global x: X; -global y: table[string] of bool &default=T; - -print x$a; -print x$b; -print x$c; -print x$d; - diff --git a/testing/btest/language/rec-table-default.zeek b/testing/btest/language/rec-table-default.zeek new file mode 100644 index 0000000000..3f14e3ab59 --- /dev/null +++ b/testing/btest/language/rec-table-default.zeek @@ -0,0 +1,18 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: btest-diff output + +type X: record { + a: table[string] of bool &default=table( ["foo"] = T ); + b: table[string] of bool &default=table(); + c: set[string] &default=set("A", "B", "C"); + d: set[string] &default=set(); +}; + +global x: X; +global y: table[string] of bool &default=T; + +print x$a; +print x$b; +print x$c; +print x$d; + diff --git a/testing/btest/language/record-bad-ctor.bro b/testing/btest/language/record-bad-ctor.bro deleted file mode 100644 index 6b7ae4ff19..0000000000 --- a/testing/btest/language/record-bad-ctor.bro +++ /dev/null @@ -1,8 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -# At least shouldn't crash Bro, just report the invalid record ctor. - -global asdfasdf; -const blah = [$ports=asdfasdf]; -print blah; diff --git a/testing/btest/language/record-bad-ctor.zeek b/testing/btest/language/record-bad-ctor.zeek new file mode 100644 index 0000000000..40bafa47de --- /dev/null +++ b/testing/btest/language/record-bad-ctor.zeek @@ -0,0 +1,8 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +# At least shouldn't crash Zeek, just report the invalid record ctor. 
+ +global asdfasdf; +const blah = [$ports=asdfasdf]; +print blah; diff --git a/testing/btest/language/record-bad-ctor2.bro b/testing/btest/language/record-bad-ctor2.bro deleted file mode 100644 index 7941c38860..0000000000 --- a/testing/btest/language/record-bad-ctor2.bro +++ /dev/null @@ -1,16 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -# Record ctor's expression list shouldn't accept "expressions that -# eval in to record". The expression list should only be comprised of -# record-field-assignment expressions. - -type myrec: record { - cmd: string; - stdin: string &default=""; - read_files: string &optional; -}; - -local bad = myrec([$cmd="echo hi"]); - -print bad; diff --git a/testing/btest/language/record-bad-ctor2.zeek b/testing/btest/language/record-bad-ctor2.zeek new file mode 100644 index 0000000000..02f4f472d6 --- /dev/null +++ b/testing/btest/language/record-bad-ctor2.zeek @@ -0,0 +1,16 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +# Record ctor's expression list shouldn't accept "expressions that +# eval in to record". The expression list should only be comprised of +# record-field-assignment expressions. + +type myrec: record { + cmd: string; + stdin: string &default=""; + read_files: string &optional; +}; + +local bad = myrec([$cmd="echo hi"]); + +print bad; diff --git a/testing/btest/language/record-ceorce-orphan.bro b/testing/btest/language/record-ceorce-orphan.bro deleted file mode 100644 index 126b99d5ff..0000000000 --- a/testing/btest/language/record-ceorce-orphan.bro +++ /dev/null @@ -1,22 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -type myrec: record { - a: string; - b: count; - c: interval &optional; -}; - -function myfunc(rec: myrec) - { - print rec; - } - -event bro_init() - { - # Orhpaned fields in a record coercion reflect a programming error, like a typo, so should - # be reported at parse-time to prevent unexpected run-time behavior. - local rec: myrec = [$a="test", $b=42, $wtf=1sec]; - print rec; - myfunc([$a="test", $b=42, $wtf=1sec]); - } diff --git a/testing/btest/language/record-ceorce-orphan.zeek b/testing/btest/language/record-ceorce-orphan.zeek new file mode 100644 index 0000000000..8279da4afb --- /dev/null +++ b/testing/btest/language/record-ceorce-orphan.zeek @@ -0,0 +1,22 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +type myrec: record { + a: string; + b: count; + c: interval &optional; +}; + +function myfunc(rec: myrec) + { + print rec; + } + +event zeek_init() + { + # Orhpaned fields in a record coercion reflect a programming error, like a typo, so should + # be reported at parse-time to prevent unexpected run-time behavior. + local rec: myrec = [$a="test", $b=42, $wtf=1sec]; + print rec; + myfunc([$a="test", $b=42, $wtf=1sec]); + } diff --git a/testing/btest/language/record-coerce-clash.bro b/testing/btest/language/record-coerce-clash.bro deleted file mode 100644 index a0bd6f21ad..0000000000 --- a/testing/btest/language/record-coerce-clash.bro +++ /dev/null @@ -1,15 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out -# Record coercion attempt should report mismatched field types. 
-global wrong = "80/tcp"; - -type myrec: record { - cid: conn_id; -}; - -event bro_init() - { - local mr: myrec; - mr = [$cid = [$orig_h=1.2.3.4,$orig_p=0/tcp,$resp_h=0.0.0.0,$resp_p=wrong]]; - get_port_transport_proto(mr$cid$resp_p); - } diff --git a/testing/btest/language/record-coerce-clash.zeek b/testing/btest/language/record-coerce-clash.zeek new file mode 100644 index 0000000000..3b4dcb393e --- /dev/null +++ b/testing/btest/language/record-coerce-clash.zeek @@ -0,0 +1,15 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out +# Record coercion attempt should report mismatched field types. +global wrong = "80/tcp"; + +type myrec: record { + cid: conn_id; +}; + +event zeek_init() + { + local mr: myrec; + mr = [$cid = [$orig_h=1.2.3.4,$orig_p=0/tcp,$resp_h=0.0.0.0,$resp_p=wrong]]; + get_port_transport_proto(mr$cid$resp_p); + } diff --git a/testing/btest/language/record-default-coercion.bro b/testing/btest/language/record-default-coercion.bro deleted file mode 100644 index 9d8babf571..0000000000 --- a/testing/btest/language/record-default-coercion.bro +++ /dev/null @@ -1,48 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -type MyRecord: record { - a: count &default=13; - c: count; - v: vector of string &default=vector(); -}; - -type Foo: record { - foo: count; - quux: count &default=9876; -}; - -type Bar: record { - bar: count; - foo: Foo &default=[$foo=1234]; -}; - -function print_bar(b: Bar) - { - print b; - print b$foo; - print b$foo$quux; - } - -global bar: Bar = [$bar=4321]; -global bar2: Bar = [$bar=4231, $foo=[$foo=1000]]; -global bar3: Bar = [$bar=4321, $foo=[$foo=10, $quux=42]]; - -print_bar(bar); -print_bar(bar2); -print_bar(bar3); - -local bar4: Bar = [$bar=100]; -local bar5: Bar = [$bar=100, $foo=[$foo=1001]]; -local bar6: Bar = [$bar=100, $foo=[$foo=11, $quux=7]]; - -print_bar(bar4); -print_bar(bar5); -print_bar(bar6); - -local r: MyRecord = [$c=13]; -print r; -print |r$v|; -r$v += "test"; -print r; -print |r$v|; diff --git a/testing/btest/language/record-default-coercion.zeek b/testing/btest/language/record-default-coercion.zeek new file mode 100644 index 0000000000..83e48044a3 --- /dev/null +++ b/testing/btest/language/record-default-coercion.zeek @@ -0,0 +1,48 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +type MyRecord: record { + a: count &default=13; + c: count; + v: vector of string &default=vector(); +}; + +type Foo: record { + foo: count; + quux: count &default=9876; +}; + +type Bar: record { + bar: count; + foo: Foo &default=[$foo=1234]; +}; + +function print_bar(b: Bar) + { + print b; + print b$foo; + print b$foo$quux; + } + +global bar: Bar = [$bar=4321]; +global bar2: Bar = [$bar=4231, $foo=[$foo=1000]]; +global bar3: Bar = [$bar=4321, $foo=[$foo=10, $quux=42]]; + +print_bar(bar); +print_bar(bar2); +print_bar(bar3); + +local bar4: Bar = [$bar=100]; +local bar5: Bar = [$bar=100, $foo=[$foo=1001]]; +local bar6: Bar = [$bar=100, $foo=[$foo=11, $quux=7]]; + +print_bar(bar4); +print_bar(bar5); +print_bar(bar6); + +local r: MyRecord = [$c=13]; +print r; +print |r$v|; +r$v += "test"; +print r; +print |r$v|; diff --git a/testing/btest/language/record-default-set-mismatch.bro b/testing/btest/language/record-default-set-mismatch.bro deleted file mode 100644 index fcf10c1281..0000000000 --- a/testing/btest/language/record-default-set-mismatch.bro +++ /dev/null @@ -1,9 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT 2>out -# @TEST-EXEC: 
TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -type Foo: record { - a: set[string] &default=set(1,2,3); -}; - -global f: Foo; -print f; diff --git a/testing/btest/language/record-default-set-mismatch.zeek b/testing/btest/language/record-default-set-mismatch.zeek new file mode 100644 index 0000000000..8de2459ebd --- /dev/null +++ b/testing/btest/language/record-default-set-mismatch.zeek @@ -0,0 +1,9 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT 2>out +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +type Foo: record { + a: set[string] &default=set(1,2,3); +}; + +global f: Foo; +print f; diff --git a/testing/btest/language/record-extension.bro b/testing/btest/language/record-extension.bro deleted file mode 100644 index 02b4c3bbe7..0000000000 --- a/testing/btest/language/record-extension.bro +++ /dev/null @@ -1,21 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -type Foo: record { - a: count; - b: count &optional; - myset: set[count] &default=set(); -}; - -redef record Foo += { - c: count &default=42; - d: string &optional; - anotherset: set[count] &default=set(); -}; - -global f1: Foo = [$a=21]; -global f2: Foo = [$a=21, $d="XXX"]; - -print f1; -print f2; - diff --git a/testing/btest/language/record-extension.zeek b/testing/btest/language/record-extension.zeek new file mode 100644 index 0000000000..6dbf2be290 --- /dev/null +++ b/testing/btest/language/record-extension.zeek @@ -0,0 +1,21 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +type Foo: record { + a: count; + b: count &optional; + myset: set[count] &default=set(); +}; + +redef record Foo += { + c: count &default=42; + d: string &optional; + anotherset: set[count] &default=set(); +}; + +global f1: Foo = [$a=21]; +global f2: Foo = [$a=21, $d="XXX"]; + +print f1; +print f2; + diff --git a/testing/btest/language/record-function-recursion.bro b/testing/btest/language/record-function-recursion.bro deleted file mode 100644 index 90832bfa69..0000000000 --- a/testing/btest/language/record-function-recursion.bro +++ /dev/null @@ -1,20 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT 2>&1 >out -# @TEST-EXEC: btest-diff out - -type Outer: record { - id: count &optional; -}; - -type Inner: record { - create: function(input: Outer) : string; -}; - -redef record Outer += { - inner: Inner &optional; -}; - -event bro_init() { - local o = Outer(); - print o; - print type_name(o); -} diff --git a/testing/btest/language/record-function-recursion.zeek b/testing/btest/language/record-function-recursion.zeek new file mode 100644 index 0000000000..e5168a6e3e --- /dev/null +++ b/testing/btest/language/record-function-recursion.zeek @@ -0,0 +1,20 @@ +# @TEST-EXEC: zeek -b %INPUT 2>&1 >out +# @TEST-EXEC: btest-diff out + +type Outer: record { + id: count &optional; +}; + +type Inner: record { + create: function(input: Outer) : string; +}; + +redef record Outer += { + inner: Inner &optional; +}; + +event zeek_init() { + local o = Outer(); + print o; + print type_name(o); +} diff --git a/testing/btest/language/record-index-complex-fields.bro b/testing/btest/language/record-index-complex-fields.bro deleted file mode 100644 index ae45648728..0000000000 --- a/testing/btest/language/record-index-complex-fields.bro +++ /dev/null @@ -1,38 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -# This test checks whether records with complex fields (tables, sets, vectors) -# can be used as table/set indices. 
- -type MetaData: record { - a: count; - tags_v: vector of count; - tags_t: table[string] of count; - tags_s: set[string]; -}; - -global ip_data: table[addr] of set[MetaData] = table(); - -global t1_t: table[string] of count = { ["one"] = 1, ["two"] = 2 }; -global t2_t: table[string] of count = { ["four"] = 4, ["five"] = 5 }; - -global t1_v: vector of count = vector(); -global t2_v: vector of count = vector(); -t1_v[0] = 0; -t1_v[1] = 1; -t2_v[2] = 2; -t2_v[3] = 3; - -local m: MetaData = [$a=4, $tags_v=t1_v, $tags_t=t1_t, $tags_s=set("a", "b")]; -local n: MetaData = [$a=13, $tags_v=t2_v, $tags_t=t2_t, $tags_s=set("c", "d")]; - -if ( 1.2.3.4 !in ip_data ) - ip_data[1.2.3.4] = set(m); -else - add ip_data[1.2.3.4][m]; - -print ip_data; - -add ip_data[1.2.3.4][n]; - -print ip_data[1.2.3.4]; diff --git a/testing/btest/language/record-index-complex-fields.zeek b/testing/btest/language/record-index-complex-fields.zeek new file mode 100644 index 0000000000..eedf777ff6 --- /dev/null +++ b/testing/btest/language/record-index-complex-fields.zeek @@ -0,0 +1,38 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +# This test checks whether records with complex fields (tables, sets, vectors) +# can be used as table/set indices. + +type MetaData: record { + a: count; + tags_v: vector of count; + tags_t: table[string] of count; + tags_s: set[string]; +}; + +global ip_data: table[addr] of set[MetaData] = table(); + +global t1_t: table[string] of count = { ["one"] = 1, ["two"] = 2 }; +global t2_t: table[string] of count = { ["four"] = 4, ["five"] = 5 }; + +global t1_v: vector of count = vector(); +global t2_v: vector of count = vector(); +t1_v[0] = 0; +t1_v[1] = 1; +t2_v[2] = 2; +t2_v[3] = 3; + +local m: MetaData = [$a=4, $tags_v=t1_v, $tags_t=t1_t, $tags_s=set("a", "b")]; +local n: MetaData = [$a=13, $tags_v=t2_v, $tags_t=t2_t, $tags_s=set("c", "d")]; + +if ( 1.2.3.4 !in ip_data ) + ip_data[1.2.3.4] = set(m); +else + add ip_data[1.2.3.4][m]; + +print ip_data; + +add ip_data[1.2.3.4][n]; + +print ip_data[1.2.3.4]; diff --git a/testing/btest/language/record-recursive-coercion.bro b/testing/btest/language/record-recursive-coercion.bro deleted file mode 100644 index 0eb24a70d9..0000000000 --- a/testing/btest/language/record-recursive-coercion.bro +++ /dev/null @@ -1,40 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -type Version: record { - major: count &optional; - minor: count &optional; - minor2: count &optional; - addl: string &optional; -}; - -type Info: record { - name: string; - version: Version; -}; - -global matched_software: table[string] of Info = { - ["OpenSSH_4.4"] = [$name="OpenSSH", $version=[$major=4,$minor=4]], -}; - -type Foo: record { - i: interval &default=1hr; - s: string &optional; -}; - -type FooContainer: record { - c: count; - f: Foo &optional; -}; - -function foo_func(fc: FooContainer) - { - print fc; - } - -event bro_init() - { - for ( sw in matched_software ) - print matched_software[sw]$version; - foo_func([$c=1, $f=[$i=2hrs]]); - } diff --git a/testing/btest/language/record-recursive-coercion.zeek b/testing/btest/language/record-recursive-coercion.zeek new file mode 100644 index 0000000000..614bd3d92c --- /dev/null +++ b/testing/btest/language/record-recursive-coercion.zeek @@ -0,0 +1,40 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +type Version: record { + major: count &optional; + minor: count &optional; + minor2: count &optional; + addl: string &optional; +}; + +type Info: record { + name: 
string; + version: Version; +}; + +global matched_software: table[string] of Info = { + ["OpenSSH_4.4"] = [$name="OpenSSH", $version=[$major=4,$minor=4]], +}; + +type Foo: record { + i: interval &default=1hr; + s: string &optional; +}; + +type FooContainer: record { + c: count; + f: Foo &optional; +}; + +function foo_func(fc: FooContainer) + { + print fc; + } + +event zeek_init() + { + for ( sw in matched_software ) + print matched_software[sw]$version; + foo_func([$c=1, $f=[$i=2hrs]]); + } diff --git a/testing/btest/language/record-redef-after-init.bro b/testing/btest/language/record-redef-after-init.bro deleted file mode 100644 index 693d8bac76..0000000000 --- a/testing/btest/language/record-redef-after-init.bro +++ /dev/null @@ -1,52 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -type myrec: record { - a: string; -}; - -const mr = myrec($a = "init") &redef; - -redef mr = myrec($a = "redef"); - -# Many fields may help ensure out-of-bounds reference failures -redef record myrec += { - d: string &optional; - e: string &optional; - f: string &optional; - g: string &optional; - h: string &optional; - i: string &optional; - j: string &optional; - k: string &optional; - l: string &optional; - m: string &optional; - n: string &optional; - o: string &optional; - p: string &optional; - q: string &default="OPTQ"; -}; - -print mr; # original 'myrec' type with updated a value -print myrec($a = "runtime"); # check we get new defaults - -local mr2 = myrec($a = "local"); -print mr2; - -mr2 = mr; # Copying should do the right thing -print mr2; - -local mr3: myrec = mr; # Initializing should do the right thing -print mr3; - -if ( mr?$q ) # the test that did not work properly - { - print mr$q; # accessed invalid memory location - } -mr$p = "newp"; # Assignment updates mr as much as needed -print mr$p; -print mr; -print mr$q; -mr$q = "our value"; -print mr$q; -print mr; diff --git a/testing/btest/language/record-redef-after-init.zeek b/testing/btest/language/record-redef-after-init.zeek new file mode 100644 index 0000000000..2ec28c1367 --- /dev/null +++ b/testing/btest/language/record-redef-after-init.zeek @@ -0,0 +1,52 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +type myrec: record { + a: string; +}; + +const mr = myrec($a = "init") &redef; + +redef mr = myrec($a = "redef"); + +# Many fields may help ensure out-of-bounds reference failures +redef record myrec += { + d: string &optional; + e: string &optional; + f: string &optional; + g: string &optional; + h: string &optional; + i: string &optional; + j: string &optional; + k: string &optional; + l: string &optional; + m: string &optional; + n: string &optional; + o: string &optional; + p: string &optional; + q: string &default="OPTQ"; +}; + +print mr; # original 'myrec' type with updated a value +print myrec($a = "runtime"); # check we get new defaults + +local mr2 = myrec($a = "local"); +print mr2; + +mr2 = mr; # Copying should do the right thing +print mr2; + +local mr3: myrec = mr; # Initializing should do the right thing +print mr3; + +if ( mr?$q ) # the test that did not work properly + { + print mr$q; # accessed invalid memory location + } +mr$p = "newp"; # Assignment updates mr as much as needed +print mr$p; +print mr; +print mr$q; +mr$q = "our value"; +print mr$q; +print mr; diff --git a/testing/btest/language/record-ref-assign.bro b/testing/btest/language/record-ref-assign.bro deleted file mode 100644 index a9539ab716..0000000000 --- a/testing/btest/language/record-ref-assign.bro 
+++ /dev/null @@ -1,12 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -type State: record { - host: string &default="NOT SET"; -}; - -global session: State; -global s: State; -s = session; -s$host = "XXX"; -print s$host, session$host; diff --git a/testing/btest/language/record-ref-assign.zeek b/testing/btest/language/record-ref-assign.zeek new file mode 100644 index 0000000000..993d7223e3 --- /dev/null +++ b/testing/btest/language/record-ref-assign.zeek @@ -0,0 +1,12 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +type State: record { + host: string &default="NOT SET"; +}; + +global session: State; +global s: State; +s = session; +s$host = "XXX"; +print s$host, session$host; diff --git a/testing/btest/language/record-type-checking.bro b/testing/btest/language/record-type-checking.bro deleted file mode 100644 index d58937d577..0000000000 --- a/testing/btest/language/record-type-checking.bro +++ /dev/null @@ -1,47 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -type MyRec: record { - a: port &default = 1/tcp; -}; - -# global, type deduction, named ctor -global grdn = MyRec($a = 0); # type clash in init - -# global, type explicit, named ctor -global gren: MyRec = MyRec($a = 1); # type clash in init - -# global, type deduction, anon ctor -global grda = [$a = 2]; # fine -event bro_init() - { - grda = MyRec($a = 2); # type clash in assignment - } - -# global, type explicit, anon ctor -global grea: MyRec = [$a = 3]; # type clash - -# local, type deduction, named ctor -event bro_init() - { - local lrdn = MyRec($a = 1000); # type clash - } - -# local, type explicit, named ctor -event bro_init() - { - local lren: MyRec = MyRec($a = 1001); # type clash - } - -# local, type deduction, anon ctor -event bro_init() - { - local lrda = [$a = 1002]; # fine - lrda = MyRec($a = 1002); # type clash - } - -# local, type explicit, anon ctor -event bro_init() - { - local lrea: MyRec = [$a = 1003]; # type clash - } diff --git a/testing/btest/language/record-type-checking.zeek b/testing/btest/language/record-type-checking.zeek new file mode 100644 index 0000000000..b341414564 --- /dev/null +++ b/testing/btest/language/record-type-checking.zeek @@ -0,0 +1,47 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +type MyRec: record { + a: port &default = 1/tcp; +}; + +# global, type deduction, named ctor +global grdn = MyRec($a = 0); # type clash in init + +# global, type explicit, named ctor +global gren: MyRec = MyRec($a = 1); # type clash in init + +# global, type deduction, anon ctor +global grda = [$a = 2]; # fine +event zeek_init() + { + grda = MyRec($a = 2); # type clash in assignment + } + +# global, type explicit, anon ctor +global grea: MyRec = [$a = 3]; # type clash + +# local, type deduction, named ctor +event zeek_init() + { + local lrdn = MyRec($a = 1000); # type clash + } + +# local, type explicit, named ctor +event zeek_init() + { + local lren: MyRec = MyRec($a = 1001); # type clash + } + +# local, type deduction, anon ctor +event zeek_init() + { + local lrda = [$a = 1002]; # fine + lrda = MyRec($a = 1002); # type clash + } + +# local, type explicit, anon ctor +event zeek_init() + { + local lrea: MyRec = [$a = 1003]; # type clash + } diff --git a/testing/btest/language/redef-same-prefixtable-idx.bro b/testing/btest/language/redef-same-prefixtable-idx.bro deleted file mode 100644 
index 13cf27cc0f..0000000000 --- a/testing/btest/language/redef-same-prefixtable-idx.bro +++ /dev/null @@ -1,17 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -const my_table: table[subnet] of subnet &redef; - -redef my_table[3.0.0.0/8] = 1.0.0.0/8; -redef my_table[3.0.0.0/8] = 2.0.0.0/8; - -# The above is basically a shorthand for: -# redef my_table += { [3.0.0.0/8] = 1.0.0.0/8 }; -# redef my_table += { [3.0.0.0/8] = 2.0.0.0/8 }; - -event bro_init() - { - print my_table; - print my_table[3.0.0.0/8]; - } diff --git a/testing/btest/language/redef-same-prefixtable-idx.zeek b/testing/btest/language/redef-same-prefixtable-idx.zeek new file mode 100644 index 0000000000..c96af48f3e --- /dev/null +++ b/testing/btest/language/redef-same-prefixtable-idx.zeek @@ -0,0 +1,17 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +const my_table: table[subnet] of subnet &redef; + +redef my_table[3.0.0.0/8] = 1.0.0.0/8; +redef my_table[3.0.0.0/8] = 2.0.0.0/8; + +# The above is basically a shorthand for: +# redef my_table += { [3.0.0.0/8] = 1.0.0.0/8 }; +# redef my_table += { [3.0.0.0/8] = 2.0.0.0/8 }; + +event zeek_init() + { + print my_table; + print my_table[3.0.0.0/8]; + } diff --git a/testing/btest/language/redef-vector.bro b/testing/btest/language/redef-vector.bro deleted file mode 100644 index 26dc2109ba..0000000000 --- a/testing/btest/language/redef-vector.bro +++ /dev/null @@ -1,18 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -const foo: vector of string &redef; -redef foo += { "testing", "blah", "foo", "foo", "testing" }; - -const bar: vector of string = vector() &redef; -redef bar += { "one", "two", "three" }; - -const baz: vector of string = vector("a", "b", "c") &redef; -redef baz += { "one", "two", "three" }; -redef baz += { "a", "b", "c" }; -const d = "d"; -redef baz += { "a" + "b" + "c", d }; - -print foo; -print bar; -print baz; diff --git a/testing/btest/language/redef-vector.zeek b/testing/btest/language/redef-vector.zeek new file mode 100644 index 0000000000..bf35467424 --- /dev/null +++ b/testing/btest/language/redef-vector.zeek @@ -0,0 +1,18 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +const foo: vector of string &redef; +redef foo += { "testing", "blah", "foo", "foo", "testing" }; + +const bar: vector of string = vector() &redef; +redef bar += { "one", "two", "three" }; + +const baz: vector of string = vector("a", "b", "c") &redef; +redef baz += { "one", "two", "three" }; +redef baz += { "a", "b", "c" }; +const d = "d"; +redef baz += { "a" + "b" + "c", d }; + +print foo; +print bar; +print baz; diff --git a/testing/btest/language/returnwhen.bro b/testing/btest/language/returnwhen.bro deleted file mode 100644 index 593841eb7e..0000000000 --- a/testing/btest/language/returnwhen.bro +++ /dev/null @@ -1,79 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 15 -# @TEST-EXEC: btest-diff bro/.stdout - -redef exit_only_after_terminate = T; - -global my_set: set[string] = set(); -global flag: string = "flag"; -global done: bool = F; - -function dummyfunc(s: string): string - { - return "dummy " + s; - } - -function async_func(s: string): string - { - print dummyfunc("from async_func() " + s); - - return when ( flag in my_set ) - { - return flag + " in my_set"; - } - timeout 3sec - { - return "timeout"; - } - } - -event set_flag() - { - add my_set[flag]; - } - -event do_another() - { - delete my_set[flag]; - - local local_dummy = dummyfunc; - - local anon = 
function(s: string): string { return s + "!"; }; - - if ( ! done ) - schedule 1sec { set_flag() }; - - when ( local result = async_func("from do_another()") ) - { - print "async_func() return result in do_another()", result; - print local_dummy("from do_another() when block"); - print anon("hi"); - if ( result == "timeout" ) - terminate(); - else - { - done = T; - schedule 10msec { do_another() }; - } - } - } - -event bro_init() - { - local local_dummy = dummyfunc; - - local anon = function(s: string): string { return s + "!"; }; - - schedule 1sec { set_flag() }; - - when ( local result = async_func("from bro_init()") ) - { - print "async_func() return result in bro_init()", result; - print local_dummy("from bro_init() when block"); - print anon("hi"); - if ( result == "timeout" ) terminate(); - schedule 10msec { do_another() }; - } - } - - diff --git a/testing/btest/language/returnwhen.zeek b/testing/btest/language/returnwhen.zeek new file mode 100644 index 0000000000..8eddd4a30b --- /dev/null +++ b/testing/btest/language/returnwhen.zeek @@ -0,0 +1,79 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 15 +# @TEST-EXEC: btest-diff zeek/.stdout + +redef exit_only_after_terminate = T; + +global my_set: set[string] = set(); +global flag: string = "flag"; +global done: bool = F; + +function dummyfunc(s: string): string + { + return "dummy " + s; + } + +function async_func(s: string): string + { + print dummyfunc("from async_func() " + s); + + return when ( flag in my_set ) + { + return flag + " in my_set"; + } + timeout 3sec + { + return "timeout"; + } + } + +event set_flag() + { + add my_set[flag]; + } + +event do_another() + { + delete my_set[flag]; + + local local_dummy = dummyfunc; + + local anon = function(s: string): string { return s + "!"; }; + + if ( ! 
done ) + schedule 1sec { set_flag() }; + + when ( local result = async_func("from do_another()") ) + { + print "async_func() return result in do_another()", result; + print local_dummy("from do_another() when block"); + print anon("hi"); + if ( result == "timeout" ) + terminate(); + else + { + done = T; + schedule 10msec { do_another() }; + } + } + } + +event zeek_init() + { + local local_dummy = dummyfunc; + + local anon = function(s: string): string { return s + "!"; }; + + schedule 1sec { set_flag() }; + + when ( local result = async_func("from zeek_init()") ) + { + print "async_func() return result in zeek_init()", result; + print local_dummy("from zeek_init() when block"); + print anon("hi"); + if ( result == "timeout" ) terminate(); + schedule 10msec { do_another() }; + } + } + + diff --git a/testing/btest/language/set-opt-record-index.bro b/testing/btest/language/set-opt-record-index.bro deleted file mode 100644 index d42de8b041..0000000000 --- a/testing/btest/language/set-opt-record-index.bro +++ /dev/null @@ -1,55 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: btest-diff output - -# Make sure a set can be indexed with a record that has optional fields - -type FOO: record { - a: count; - b: count &optional; -}; - -event bro_init() - { - local set_of_foo: set[FOO] = set(); - - local f: FOO; - f$a = 1; - - add set_of_foo[f]; - add set_of_foo[[$a=3]]; - - local f3: FOO; # = [$a=4, $b=5]; - f3$a = 4; - f3$b = 5; - - add set_of_foo[f3]; - - add set_of_foo[[$a=4, $b=5]]; - - print set_of_foo; - - print ""; - - for ( i in set_of_foo ) - print i; - - print ""; - - local f2: FOO; - f2$a = 2; - - print f in set_of_foo; - print f2 in set_of_foo; - - print ""; - - f3$a = 4; - print f3 in set_of_foo; - - f3$b = 4; - print f3 in set_of_foo; - - f3$b = 5; - print f3 in set_of_foo; - - } diff --git a/testing/btest/language/set-opt-record-index.zeek b/testing/btest/language/set-opt-record-index.zeek new file mode 100644 index 0000000000..0015c20621 --- /dev/null +++ b/testing/btest/language/set-opt-record-index.zeek @@ -0,0 +1,55 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: btest-diff output + +# Make sure a set can be indexed with a record that has optional fields + +type FOO: record { + a: count; + b: count &optional; +}; + +event zeek_init() + { + local set_of_foo: set[FOO] = set(); + + local f: FOO; + f$a = 1; + + add set_of_foo[f]; + add set_of_foo[[$a=3]]; + + local f3: FOO; # = [$a=4, $b=5]; + f3$a = 4; + f3$b = 5; + + add set_of_foo[f3]; + + add set_of_foo[[$a=4, $b=5]]; + + print set_of_foo; + + print ""; + + for ( i in set_of_foo ) + print i; + + print ""; + + local f2: FOO; + f2$a = 2; + + print f in set_of_foo; + print f2 in set_of_foo; + + print ""; + + f3$a = 4; + print f3 in set_of_foo; + + f3$b = 4; + print f3 in set_of_foo; + + f3$b = 5; + print f3 in set_of_foo; + + } diff --git a/testing/btest/language/set-type-checking.bro b/testing/btest/language/set-type-checking.bro deleted file mode 100644 index 3c82a29730..0000000000 --- a/testing/btest/language/set-type-checking.bro +++ /dev/null @@ -1,60 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -type MySet: set[port]; - -# global, type deduction, named ctor -global gdn = MySet(0); # type clash in init - -# global, type explicit, named ctor -global gen: MySet = MySet(1); # type clash in init - -# global, type deduction, anon ctor -global gda = set(2); # fine -event bro_init() - { - gda = MySet(2); # type clash in 
assignment - } - -# global, type explicit, anon ctor -global gea: MySet = set(3); # type clash - -# local, type deduction, named ctor -event bro_init() - { - local ldn = MySet(1000); # type clash - } - -# local, type explicit, named ctor -event bro_init() - { - local len: MySet = MySet(1001); # type clash - } - -# local, type deduction, anon ctor -event bro_init() - { - local lda = set(1002); # fine - lda = MySet(1002); # type clash - } - -# local, type explicit, anon ctor -event bro_init() - { - local lea: MySet = set(1003); # type clash - } - -type MyRecord: record { - user: string; - host: string; - host_port: count &default=22; - path: string; -}; - -global set_of_records: set[MyRecord]; - -event bro_init() - { - # Set ctor w/ anonymous record ctor should coerce. - set_of_records = set([$user="testuser", $host="testhost", $path="testpath"]); - } diff --git a/testing/btest/language/set-type-checking.zeek b/testing/btest/language/set-type-checking.zeek new file mode 100644 index 0000000000..49674ce870 --- /dev/null +++ b/testing/btest/language/set-type-checking.zeek @@ -0,0 +1,60 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +type MySet: set[port]; + +# global, type deduction, named ctor +global gdn = MySet(0); # type clash in init + +# global, type explicit, named ctor +global gen: MySet = MySet(1); # type clash in init + +# global, type deduction, anon ctor +global gda = set(2); # fine +event zeek_init() + { + gda = MySet(2); # type clash in assignment + } + +# global, type explicit, anon ctor +global gea: MySet = set(3); # type clash + +# local, type deduction, named ctor +event zeek_init() + { + local ldn = MySet(1000); # type clash + } + +# local, type explicit, named ctor +event zeek_init() + { + local len: MySet = MySet(1001); # type clash + } + +# local, type deduction, anon ctor +event zeek_init() + { + local lda = set(1002); # fine + lda = MySet(1002); # type clash + } + +# local, type explicit, anon ctor +event zeek_init() + { + local lea: MySet = set(1003); # type clash + } + +type MyRecord: record { + user: string; + host: string; + host_port: count &default=22; + path: string; +}; + +global set_of_records: set[MyRecord]; + +event zeek_init() + { + # Set ctor w/ anonymous record ctor should coerce. + set_of_records = set([$user="testuser", $host="testhost", $path="testpath"]); + } diff --git a/testing/btest/language/set.bro b/testing/btest/language/set.bro deleted file mode 100644 index 56cd649b49..0000000000 --- a/testing/btest/language/set.bro +++ /dev/null @@ -1,185 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); - } - - -# Note: only global sets can be initialized with curly braces -global sg1: set[string] = { "curly", "braces" }; -global sg2: set[port, string, bool] = { [10/udp, "curly", F], - [11/udp, "braces", T] }; -global sg3 = { "more", "curly", "braces" }; - -event bro_init() -{ - local s1: set[string] = set( "test", "example" ); - local s2: set[string] = set(); - local s3: set[string]; - local s4 = set( "type inference" ); - local s5: set[port, string, bool] = set( [1/tcp, "test", T], - [2/tcp, "example", F] ); - local s6: set[port, string, bool] = set(); - local s7: set[port, string, bool]; - local s8 = set( [8/tcp, "type inference", T] ); - - # Type inference tests - - test_case( "type inference", type_name(s4) == "set[string]" ); - test_case( "type inference", type_name(s8) == "set[port,string,bool]" ); - test_case( "type inference", type_name(sg3) == "set[string]" ); - - # Test the size of each set - - test_case( "cardinality", |s1| == 2 ); - test_case( "cardinality", |s2| == 0 ); - test_case( "cardinality", |s3| == 0 ); - test_case( "cardinality", |s4| == 1 ); - test_case( "cardinality", |s5| == 2 ); - test_case( "cardinality", |s6| == 0 ); - test_case( "cardinality", |s7| == 0 ); - test_case( "cardinality", |s8| == 1 ); - test_case( "cardinality", |sg1| == 2 ); - test_case( "cardinality", |sg2| == 2 ); - test_case( "cardinality", |sg3| == 3 ); - - # Test iterating over each set - - local ct: count; - ct = 0; - for ( c in s1 ) - { - if ( type_name(c) != "string" ) - print "Error: wrong set element type"; - ++ct; - } - test_case( "iterate over set", ct == 2 ); - - ct = 0; - for ( c in s2 ) - { - ++ct; - } - test_case( "iterate over set", ct == 0 ); - - ct = 0; - for ( [c1,c2,c3] in s5 ) - { - ++ct; - } - test_case( "iterate over set", ct == 2 ); - - ct = 0; - for ( [c1,c2,c3] in sg2 ) - { - ++ct; - } - test_case( "iterate over set", ct == 2 ); - - # Test adding elements to each set (Note: cannot add elements to sets - # of multiple types) - - add s1["added"]; - add s1["added"]; # element already exists (nothing happens) - test_case( "add element", |s1| == 3 ); - test_case( "in operator", "added" in s1 ); - - add s2["another"]; - test_case( "add element", |s2| == 1 ); - add s2["test"]; - test_case( "add element", |s2| == 2 ); - test_case( "in operator", "another" in s2 ); - test_case( "in operator", "test" in s2 ); - - add s3["foo"]; - test_case( "add element", |s3| == 1 ); - test_case( "in operator", "foo" in s3 ); - - add s4["local"]; - test_case( "add element", |s4| == 2 ); - test_case( "in operator", "local" in s4 ); - - add sg1["global"]; - test_case( "add element", |sg1| == 3 ); - test_case( "in operator", "global" in sg1 ); - - add sg3["more global"]; - test_case( "add element", |sg3| == 4 ); - test_case( "in operator", "more global" in sg3 ); - - # Test removing elements from each set (Note: cannot remove elements - # from sets of multiple types) - - delete s1["test"]; - delete s1["foobar"]; # element does not exist (nothing happens) - test_case( "remove element", |s1| == 2 ); - test_case( "!in operator", "test" !in s1 ); - - delete s2["test"]; - test_case( "remove element", |s2| == 1 ); - test_case( "!in operator", "test" !in s2 ); - - delete s3["foo"]; - test_case( "remove element", |s3| == 0 ); - test_case( "!in operator", "foo" !in s3 ); - - delete s4["type inference"]; - test_case( "remove element", |s4| == 1 ); - test_case( "!in operator", "type inference" !in s4 ); - - delete sg1["braces"]; - test_case( "remove element", |sg1| == 2 ); - test_case( "!in 
operator", "braces" !in sg1 ); - - delete sg3["curly"]; - test_case( "remove element", |sg3| == 3 ); - test_case( "!in operator", "curly" !in sg3 ); - - - local a = set(1,5,7,9,8,14); - local b = set(1,7,9,2); - - local a_plus_b = set(1,2,5,7,9,8,14); - local a_also_b = set(1,7,9); - local a_sans_b = set(5,8,14); - local b_sans_a = set(2); - - local a_or_b = a | b; - local a_and_b = a & b; - - test_case( "union", a_or_b == a_plus_b ); - test_case( "intersection", a_and_b == a_plus_b ); - test_case( "difference", a - b == a_sans_b ); - test_case( "difference", b - a == b_sans_a ); - - test_case( "union/inter.", |b & set(1,7,9,2)| == |b | set(1,7,2,9)| ); - test_case( "relational", |b & a_or_b| == |b| && |b| < |a_or_b| ); - test_case( "relational", b < a_or_b && a < a_or_b && a_or_b > a_and_b ); - - test_case( "subset", b < a ); - test_case( "subset", a < b ); - test_case( "subset", b < (a | set(2)) ); - test_case( "superset", b > a ); - test_case( "superset", b > (a | set(2)) ); - test_case( "superset", b | set(8, 14, 5) > (a | set(2)) ); - test_case( "superset", b | set(8, 14, 99, 5) > (a | set(2)) ); - - test_case( "non-ordering", (a <= b) || (a >= b) ); - test_case( "non-ordering", (a <= a_or_b) && (a_or_b >= b) ); - - test_case( "superset", (b | set(14, 5)) > a - set(8) ); - test_case( "superset", (b | set(14)) > a - set(8) ); - test_case( "superset", (b | set(14)) > a - set(8,5) ); - test_case( "superset", b >= a - set(5,8,14) ); - test_case( "superset", b > a - set(5,8,14) ); - test_case( "superset", (b - set(2)) > a - set(5,8,14) ); - test_case( "equality", a == a | set(5) ); - test_case( "equality", a == a | set(5,11) ); - test_case( "non-equality", a != a | set(5,11) ); - test_case( "equality", a == a | set(5,11) ); - - test_case( "magnitude", |a_and_b| == |a_or_b|); -} - diff --git a/testing/btest/language/set.zeek b/testing/btest/language/set.zeek new file mode 100644 index 0000000000..1c3ab85ef2 --- /dev/null +++ b/testing/btest/language/set.zeek @@ -0,0 +1,185 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + +# Note: only global sets can be initialized with curly braces +global sg1: set[string] = { "curly", "braces" }; +global sg2: set[port, string, bool] = { [10/udp, "curly", F], + [11/udp, "braces", T] }; +global sg3 = { "more", "curly", "braces" }; + +event zeek_init() +{ + local s1: set[string] = set( "test", "example" ); + local s2: set[string] = set(); + local s3: set[string]; + local s4 = set( "type inference" ); + local s5: set[port, string, bool] = set( [1/tcp, "test", T], + [2/tcp, "example", F] ); + local s6: set[port, string, bool] = set(); + local s7: set[port, string, bool]; + local s8 = set( [8/tcp, "type inference", T] ); + + # Type inference tests + + test_case( "type inference", type_name(s4) == "set[string]" ); + test_case( "type inference", type_name(s8) == "set[port,string,bool]" ); + test_case( "type inference", type_name(sg3) == "set[string]" ); + + # Test the size of each set + + test_case( "cardinality", |s1| == 2 ); + test_case( "cardinality", |s2| == 0 ); + test_case( "cardinality", |s3| == 0 ); + test_case( "cardinality", |s4| == 1 ); + test_case( "cardinality", |s5| == 2 ); + test_case( "cardinality", |s6| == 0 ); + test_case( "cardinality", |s7| == 0 ); + test_case( "cardinality", |s8| == 1 ); + test_case( "cardinality", |sg1| == 2 ); + test_case( "cardinality", |sg2| == 2 ); + test_case( "cardinality", |sg3| == 3 ); + + # Test iterating over each set + + local ct: count; + ct = 0; + for ( c in s1 ) + { + if ( type_name(c) != "string" ) + print "Error: wrong set element type"; + ++ct; + } + test_case( "iterate over set", ct == 2 ); + + ct = 0; + for ( c in s2 ) + { + ++ct; + } + test_case( "iterate over set", ct == 0 ); + + ct = 0; + for ( [c1,c2,c3] in s5 ) + { + ++ct; + } + test_case( "iterate over set", ct == 2 ); + + ct = 0; + for ( [c1,c2,c3] in sg2 ) + { + ++ct; + } + test_case( "iterate over set", ct == 2 ); + + # Test adding elements to each set (Note: cannot add elements to sets + # of multiple types) + + add s1["added"]; + add s1["added"]; # element already exists (nothing happens) + test_case( "add element", |s1| == 3 ); + test_case( "in operator", "added" in s1 ); + + add s2["another"]; + test_case( "add element", |s2| == 1 ); + add s2["test"]; + test_case( "add element", |s2| == 2 ); + test_case( "in operator", "another" in s2 ); + test_case( "in operator", "test" in s2 ); + + add s3["foo"]; + test_case( "add element", |s3| == 1 ); + test_case( "in operator", "foo" in s3 ); + + add s4["local"]; + test_case( "add element", |s4| == 2 ); + test_case( "in operator", "local" in s4 ); + + add sg1["global"]; + test_case( "add element", |sg1| == 3 ); + test_case( "in operator", "global" in sg1 ); + + add sg3["more global"]; + test_case( "add element", |sg3| == 4 ); + test_case( "in operator", "more global" in sg3 ); + + # Test removing elements from each set (Note: cannot remove elements + # from sets of multiple types) + + delete s1["test"]; + delete s1["foobar"]; # element does not exist (nothing happens) + test_case( "remove element", |s1| == 2 ); + test_case( "!in operator", "test" !in s1 ); + + delete s2["test"]; + test_case( "remove element", |s2| == 1 ); + test_case( "!in operator", "test" !in s2 ); + + delete s3["foo"]; + test_case( "remove element", |s3| == 0 ); + test_case( "!in operator", "foo" !in s3 ); + + delete s4["type inference"]; + test_case( "remove element", |s4| == 1 ); + test_case( "!in operator", "type inference" !in s4 ); + + delete sg1["braces"]; + test_case( "remove element", |sg1| == 2 ); + test_case( "!in 
operator", "braces" !in sg1 ); + + delete sg3["curly"]; + test_case( "remove element", |sg3| == 3 ); + test_case( "!in operator", "curly" !in sg3 ); + + + local a = set(1,5,7,9,8,14); + local b = set(1,7,9,2); + + local a_plus_b = set(1,2,5,7,9,8,14); + local a_also_b = set(1,7,9); + local a_sans_b = set(5,8,14); + local b_sans_a = set(2); + + local a_or_b = a | b; + local a_and_b = a & b; + + test_case( "union", a_or_b == a_plus_b ); + test_case( "intersection", a_and_b == a_plus_b ); + test_case( "difference", a - b == a_sans_b ); + test_case( "difference", b - a == b_sans_a ); + + test_case( "union/inter.", |b & set(1,7,9,2)| == |b | set(1,7,2,9)| ); + test_case( "relational", |b & a_or_b| == |b| && |b| < |a_or_b| ); + test_case( "relational", b < a_or_b && a < a_or_b && a_or_b > a_and_b ); + + test_case( "subset", b < a ); + test_case( "subset", a < b ); + test_case( "subset", b < (a | set(2)) ); + test_case( "superset", b > a ); + test_case( "superset", b > (a | set(2)) ); + test_case( "superset", b | set(8, 14, 5) > (a | set(2)) ); + test_case( "superset", b | set(8, 14, 99, 5) > (a | set(2)) ); + + test_case( "non-ordering", (a <= b) || (a >= b) ); + test_case( "non-ordering", (a <= a_or_b) && (a_or_b >= b) ); + + test_case( "superset", (b | set(14, 5)) > a - set(8) ); + test_case( "superset", (b | set(14)) > a - set(8) ); + test_case( "superset", (b | set(14)) > a - set(8,5) ); + test_case( "superset", b >= a - set(5,8,14) ); + test_case( "superset", b > a - set(5,8,14) ); + test_case( "superset", (b - set(2)) > a - set(5,8,14) ); + test_case( "equality", a == a | set(5) ); + test_case( "equality", a == a | set(5,11) ); + test_case( "non-equality", a != a | set(5,11) ); + test_case( "equality", a == a | set(5,11) ); + + test_case( "magnitude", |a_and_b| == |a_or_b|); +} + diff --git a/testing/btest/language/short-circuit.bro b/testing/btest/language/short-circuit.bro deleted file mode 100644 index 598ac8da35..0000000000 --- a/testing/btest/language/short-circuit.bro +++ /dev/null @@ -1,48 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - -global ct: count; - -function t_func(): bool - { - ct += 1; - return T; - } - -function f_func(): bool - { - ct += 2; - return F; - } - - -event bro_init() -{ - local res: bool; - - # both functions should be called - ct = 0; - res = t_func() && f_func(); - test_case("&& operator (eval. both operands)", res == F && ct == 3 ); - - # only first function should be called - ct = 0; - res = f_func() && t_func(); - test_case("&& operator (eval. 1st operand)", res == F && ct == 2 ); - - # only first function should be called - ct = 0; - res = t_func() || f_func(); - test_case("|| operator (eval. 1st operand)", res == T && ct == 1 ); - - # both functions should be called - ct = 0; - res = f_func() || t_func(); - test_case("|| operator (eval. both operands)", res == T && ct == 3 ); -} - diff --git a/testing/btest/language/short-circuit.zeek b/testing/btest/language/short-circuit.zeek new file mode 100644 index 0000000000..45d1046ab3 --- /dev/null +++ b/testing/btest/language/short-circuit.zeek @@ -0,0 +1,48 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + +global ct: count; + +function t_func(): bool + { + ct += 1; + return T; + } + +function f_func(): bool + { + ct += 2; + return F; + } + + +event zeek_init() +{ + local res: bool; + + # both functions should be called + ct = 0; + res = t_func() && f_func(); + test_case("&& operator (eval. both operands)", res == F && ct == 3 ); + + # only first function should be called + ct = 0; + res = f_func() && t_func(); + test_case("&& operator (eval. 1st operand)", res == F && ct == 2 ); + + # only first function should be called + ct = 0; + res = t_func() || f_func(); + test_case("|| operator (eval. 1st operand)", res == T && ct == 1 ); + + # both functions should be called + ct = 0; + res = f_func() || t_func(); + test_case("|| operator (eval. both operands)", res == T && ct == 3 ); +} + diff --git a/testing/btest/language/sizeof.bro b/testing/btest/language/sizeof.bro deleted file mode 100644 index 396984780a..0000000000 --- a/testing/btest/language/sizeof.bro +++ /dev/null @@ -1,121 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: btest-diff output - -# Demo policy for the sizeof operator "|x|". -# ------------------------------------------ -# -# This script creates various types and values and shows the result of the -# sizeof operator on these values. -# -# For any types not covered in this script, the sizeof operator's semantics -# are not defined and its application returns a count of 0. At the moment -# the only type where this should happen is string patterns. - -type example_enum: enum { ENUM1, ENUM2, ENUM3 }; - -type example_record: record { - i: int &optional; - j: int &optional; - k: int &optional; -}; - -global a: addr = 1.2.3.4; -global a6: addr = [::1]; -global b: bool = T; -global c: count = 10; -global d: double = -1.23; -global f: file = open_log_file("sizeof_demo"); -global i: int = -10; -global iv: interval = -5sec; -global p: port = 80/tcp; -global r: example_record = [ $i = +10 ]; -global si: set[int]; -global s: string = "Hello"; -global sn: subnet = 192.168.0.0/24; -global t: table[string] of string; -global ti: time = current_time(); -global v: vector of string; - -# Additional initialization -# -print f, "12345678901234567890"; - -add si[1]; -add si[10]; -add si[100]; - -t["foo"] = "Hello"; -t["bar"] = "World"; - -v[0] = "Hello"; -v[4] = "World"; - -# Print out the sizes of the various vals: -#----------------------------------------- - -# Size of addr: returns number of bits required to represent the address -# which is 32 for IPv4 or 128 for IPv6 -print fmt("IPv4 Address %s: %d", a, |a|); -print fmt("IPv6 Address %s: %d", a6, |a6|); - -# Size of boolean: returns 1 or 0. -print fmt("Boolean %s: %d", b, |b|); - -# Size of count: identity. -print fmt("Count %s: %d", c, |c|); - -# Size of integral arithmetic expression should coerce to int before absolute -# value operation to help prevent common unsigned int overflow situations. -print fmt("Expr: %d", |5 - 9|); - -# Size of double: returns absolute value. -print fmt("Double %s: %f", d, |d|); - -# Size of enum: returns numeric value of enum constant. -print fmt("Enum %s: %d", ENUM3, |ENUM3|); - -# Size of file: returns current file size. -# Note that this is a double so that file sizes >> 4GB -# can be expressed. -print fmt("File %f", |f|); - -# Size of function: returns number of arguments. -print fmt("Function add_interface: %d", |add_interface|); - -# Size of integer: returns absolute value. 
-print fmt("Integer %s: %d", i, |i|); - -# Size of interval: returns double representation of the interval -print fmt("Interval %s: %f", iv, |iv|); - -# Size of port: returns port number as a count. -print fmt("Port %s: %d", p, |p|); - -# Size of record: returns number of fields (assigned + unassigned) -print fmt("Record %s: %d", r, |r|); - -# Size of set: returns number of elements in set. -# Don't print the set, as its order depends on the seeding of the hash -# fnction, and it's not worth the trouble to normalize it. -print fmt("Set: %d", |si|); - -# Size of string: returns string length. -print fmt("String '%s': %d", s, |s|); - -# Size of subnet: returns size of net as a double -# (so that 2^32 can be expressed too). -print fmt("Subnet %s: %f", sn, |sn|); - -# Size of table: returns number of elements in table -print fmt("Table %d", |t|); - -# Size of time: returns double representation of the time -# print fmt("Time %s: %f", ti, |ti|); - -# Size of vector: returns largest assigned index. -# Note that this is not the number of assigned values. -# The following prints "5": -# -print fmt("Vector %s: %d", v, |v|); - -close(f); diff --git a/testing/btest/language/sizeof.zeek b/testing/btest/language/sizeof.zeek new file mode 100644 index 0000000000..fc510afb70 --- /dev/null +++ b/testing/btest/language/sizeof.zeek @@ -0,0 +1,121 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: btest-diff output + +# Demo policy for the sizeof operator "|x|". +# ------------------------------------------ +# +# This script creates various types and values and shows the result of the +# sizeof operator on these values. +# +# For any types not covered in this script, the sizeof operator's semantics +# are not defined and its application returns a count of 0. At the moment +# the only type where this should happen is string patterns. + +type example_enum: enum { ENUM1, ENUM2, ENUM3 }; + +type example_record: record { + i: int &optional; + j: int &optional; + k: int &optional; +}; + +global a: addr = 1.2.3.4; +global a6: addr = [::1]; +global b: bool = T; +global c: count = 10; +global d: double = -1.23; +global f: file = open_log_file("sizeof_demo"); +global i: int = -10; +global iv: interval = -5sec; +global p: port = 80/tcp; +global r: example_record = [ $i = +10 ]; +global si: set[int]; +global s: string = "Hello"; +global sn: subnet = 192.168.0.0/24; +global t: table[string] of string; +global ti: time = current_time(); +global v: vector of string; + +# Additional initialization +# +print f, "12345678901234567890"; + +add si[1]; +add si[10]; +add si[100]; + +t["foo"] = "Hello"; +t["bar"] = "World"; + +v[0] = "Hello"; +v[4] = "World"; + +# Print out the sizes of the various vals: +#----------------------------------------- + +# Size of addr: returns number of bits required to represent the address +# which is 32 for IPv4 or 128 for IPv6 +print fmt("IPv4 Address %s: %d", a, |a|); +print fmt("IPv6 Address %s: %d", a6, |a6|); + +# Size of boolean: returns 1 or 0. +print fmt("Boolean %s: %d", b, |b|); + +# Size of count: identity. +print fmt("Count %s: %d", c, |c|); + +# Size of integral arithmetic expression should coerce to int before absolute +# value operation to help prevent common unsigned int overflow situations. +print fmt("Expr: %d", |5 - 9|); + +# Size of double: returns absolute value. +print fmt("Double %s: %f", d, |d|); + +# Size of enum: returns numeric value of enum constant. +print fmt("Enum %s: %d", ENUM3, |ENUM3|); + +# Size of file: returns current file size. 
+# Note that this is a double so that file sizes >> 4GB +# can be expressed. +print fmt("File %f", |f|); + +# Size of function: returns number of arguments. +print fmt("Function add_interface: %d", |add_interface|); + +# Size of integer: returns absolute value. +print fmt("Integer %s: %d", i, |i|); + +# Size of interval: returns double representation of the interval +print fmt("Interval %s: %f", iv, |iv|); + +# Size of port: returns port number as a count. +print fmt("Port %s: %d", p, |p|); + +# Size of record: returns number of fields (assigned + unassigned) +print fmt("Record %s: %d", r, |r|); + +# Size of set: returns number of elements in set. +# Don't print the set, as its order depends on the seeding of the hash +# fnction, and it's not worth the trouble to normalize it. +print fmt("Set: %d", |si|); + +# Size of string: returns string length. +print fmt("String '%s': %d", s, |s|); + +# Size of subnet: returns size of net as a double +# (so that 2^32 can be expressed too). +print fmt("Subnet %s: %f", sn, |sn|); + +# Size of table: returns number of elements in table +print fmt("Table %d", |t|); + +# Size of time: returns double representation of the time +# print fmt("Time %s: %f", ti, |ti|); + +# Size of vector: returns largest assigned index. +# Note that this is not the number of assigned values. +# The following prints "5": +# +print fmt("Vector %s: %d", v, |v|); + +close(f); diff --git a/testing/btest/language/smith-waterman-test.bro b/testing/btest/language/smith-waterman-test.bro deleted file mode 100644 index 2113d88e24..0000000000 --- a/testing/btest/language/smith-waterman-test.bro +++ /dev/null @@ -1,88 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: btest-diff output - -global params: sw_params = [ $min_strlen = 2, $sw_variant = 0 ]; - -global min: vector of count; -global mode: vector of count; -global c: count = 0; - -# Alignment pairs: -global s1: string_vec; -global s2: string_vec; - -# Single alignment, no matches: -s1[++c] = "abcdefgh"; -s2[c] = "ijklmnop"; -min[c] = 2;; -mode[c] = 0; - -# Simple single match, beginning: -s1[++c] = "AAAabcefghij"; -s2[c] = "lmnopAAAqrst"; -min[c] = 2;; -mode[c] = 0; - -# Simple single match, middle: -s1[++c] = "abcAAAefghij"; -s2[c] = "lmnopAAAqrst"; -min[c] = 2;; -mode[c] = 0; - -# Simple single match, end: -s1[++c] = "abcefghijAAA"; -s2[c] = "lmnopAAAqrst"; -min[c] = 2;; -mode[c] = 0; - -# Repeated alignment: -s1[++c] = "xxxAAAyyy"; -s2[c] = "AAAaAAAbAAA"; -min[c] = 2;; -mode[c] = 1; - -# Repeated alignment, swapped input: -s1[++c] = "AAAaAAAbAAA"; -s2[c] = "xxxAAAyyy"; -min[c] = 2;; -mode[c] = 1; - -# Repeated alignment, split: -s1[++c] = "xxCDyABzCDyABzz"; -s2[c] = "ABCD"; -min[c] = 2;; -mode[c] = 1; - -# Repeated alignment, split, swapped: -s1[++c] = "ABCD"; -s2[c] = "xxCDyABzCDyABzz"; -min[c] = 2;; -mode[c] = 1; - -# Used to cause problems -s1[++c] = "Cache-control: no-cache^M^JAccept:"; -s2[c] = "Accept-: deflate^M^JAccept-: Accept-"; -min[c] = 6; -mode[c] = 1; - -# Repeated occurrences in shorter string -s1[++c] = "xxAAxxAAxx"; -s2[c] = "yyyyyAAyyyyy"; -min[c] = 2; -mode[c] = 1; - -for ( i in s1 ) - { - local ss: sw_substring_vec; - - params$min_strlen = min[i]; - params$sw_variant = mode[i]; - ss = str_smith_waterman(s1[i], s2[i], params); - - print fmt("%s - %s:", s1[i], s2[i]); - - for ( j in ss ) - print fmt("tok %d: %s (%d/%d, %s)", - j, ss[j]$str, ss[j]$aligns[1]$index, - ss[j]$aligns[2]$index, ss[j]$new); - } diff --git a/testing/btest/language/smith-waterman-test.zeek 
b/testing/btest/language/smith-waterman-test.zeek new file mode 100644 index 0000000000..1eff86ef83 --- /dev/null +++ b/testing/btest/language/smith-waterman-test.zeek @@ -0,0 +1,88 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: btest-diff output + +global params: sw_params = [ $min_strlen = 2, $sw_variant = 0 ]; + +global min: vector of count; +global mode: vector of count; +global c: count = 0; + +# Alignment pairs: +global s1: string_vec; +global s2: string_vec; + +# Single alignment, no matches: +s1[++c] = "abcdefgh"; +s2[c] = "ijklmnop"; +min[c] = 2;; +mode[c] = 0; + +# Simple single match, beginning: +s1[++c] = "AAAabcefghij"; +s2[c] = "lmnopAAAqrst"; +min[c] = 2;; +mode[c] = 0; + +# Simple single match, middle: +s1[++c] = "abcAAAefghij"; +s2[c] = "lmnopAAAqrst"; +min[c] = 2;; +mode[c] = 0; + +# Simple single match, end: +s1[++c] = "abcefghijAAA"; +s2[c] = "lmnopAAAqrst"; +min[c] = 2;; +mode[c] = 0; + +# Repeated alignment: +s1[++c] = "xxxAAAyyy"; +s2[c] = "AAAaAAAbAAA"; +min[c] = 2;; +mode[c] = 1; + +# Repeated alignment, swapped input: +s1[++c] = "AAAaAAAbAAA"; +s2[c] = "xxxAAAyyy"; +min[c] = 2;; +mode[c] = 1; + +# Repeated alignment, split: +s1[++c] = "xxCDyABzCDyABzz"; +s2[c] = "ABCD"; +min[c] = 2;; +mode[c] = 1; + +# Repeated alignment, split, swapped: +s1[++c] = "ABCD"; +s2[c] = "xxCDyABzCDyABzz"; +min[c] = 2;; +mode[c] = 1; + +# Used to cause problems +s1[++c] = "Cache-control: no-cache^M^JAccept:"; +s2[c] = "Accept-: deflate^M^JAccept-: Accept-"; +min[c] = 6; +mode[c] = 1; + +# Repeated occurrences in shorter string +s1[++c] = "xxAAxxAAxx"; +s2[c] = "yyyyyAAyyyyy"; +min[c] = 2; +mode[c] = 1; + +for ( i in s1 ) + { + local ss: sw_substring_vec; + + params$min_strlen = min[i]; + params$sw_variant = mode[i]; + ss = str_smith_waterman(s1[i], s2[i], params); + + print fmt("%s - %s:", s1[i], s2[i]); + + for ( j in ss ) + print fmt("tok %d: %s (%d/%d, %s)", + j, ss[j]$str, ss[j]$aligns[1]$index, + ss[j]$aligns[2]$index, ss[j]$new); + } diff --git a/testing/btest/language/string-indexing.bro b/testing/btest/language/string-indexing.bro deleted file mode 100644 index e109eeba80..0000000000 --- a/testing/btest/language/string-indexing.bro +++ /dev/null @@ -1,102 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -local word = "HelpA"; -local s = "0123456789"; -local indices = vector(-100, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 100); - -print s[1]; -print s[1:2]; -print s[1:6]; -print s[0:20]; -print s[-2]; -print s[-3:-1]; -print s[-1:-10]; -print s[-1:0]; -print s[-1:5]; -print s[20:23]; -print s[-20:23]; -print s[0:5][2]; -print s[0:5][1:3][0]; - -s = "012345"; - -for ( i in indices ) - print fmt("word[%s] = %s", indices[i], word[indices[i]]); - -for ( i in indices ) - print fmt("word[:%s] = %s", indices[i], word[:indices[i]]); - -for ( i in indices ) - print fmt("word[%s:] = %s", indices[i], word[indices[i]:]); - -print word[:]; - -print ""; - -print "A"; -print s[1:-1]; -print s[1:-2]; -print s[1:-3]; -print s[1:-4]; -print s[1:-5]; -print s[1:-6]; -print s[1:-7]; -print s[1:-8]; -print s[1:-9]; - -print ""; - -print "B"; -print s[-1:-1]; -print s[-1:-2]; -print s[-1:-3]; -print s[-1:-4]; - -print ""; - -print "C"; -print s[-100:-99]; -print s[-100:-2]; -print s[-100:0]; -print s[-100:2]; -print s[-100:100]; - -print ""; - -print "D";; -print s[-2:-99]; -print s[-2:-3]; -print s[-2:-1]; -print s[-2:0]; -print s[-2:2]; -print s[-2:100]; - -print ""; - -print "E";; -print s[0:-100]; -print s[0:-1]; -print s[0:0]; -print s[0:2]; -print 
s[0:100]; - -print ""; - -print "F";; -print s[2:-100]; -print s[2:-1]; -print s[2:0]; -print s[2:1]; -print s[2:4]; -print s[2:100]; - -print ""; - -print "F";; -print s[100:-100]; -print s[100:-1]; -print s[100:0]; -print s[100:1]; -print s[100:4]; -print s[100:100]; diff --git a/testing/btest/language/string-indexing.zeek b/testing/btest/language/string-indexing.zeek new file mode 100644 index 0000000000..6cce3ab713 --- /dev/null +++ b/testing/btest/language/string-indexing.zeek @@ -0,0 +1,102 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +local word = "HelpA"; +local s = "0123456789"; +local indices = vector(-100, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 100); + +print s[1]; +print s[1:2]; +print s[1:6]; +print s[0:20]; +print s[-2]; +print s[-3:-1]; +print s[-1:-10]; +print s[-1:0]; +print s[-1:5]; +print s[20:23]; +print s[-20:23]; +print s[0:5][2]; +print s[0:5][1:3][0]; + +s = "012345"; + +for ( i in indices ) + print fmt("word[%s] = %s", indices[i], word[indices[i]]); + +for ( i in indices ) + print fmt("word[:%s] = %s", indices[i], word[:indices[i]]); + +for ( i in indices ) + print fmt("word[%s:] = %s", indices[i], word[indices[i]:]); + +print word[:]; + +print ""; + +print "A"; +print s[1:-1]; +print s[1:-2]; +print s[1:-3]; +print s[1:-4]; +print s[1:-5]; +print s[1:-6]; +print s[1:-7]; +print s[1:-8]; +print s[1:-9]; + +print ""; + +print "B"; +print s[-1:-1]; +print s[-1:-2]; +print s[-1:-3]; +print s[-1:-4]; + +print ""; + +print "C"; +print s[-100:-99]; +print s[-100:-2]; +print s[-100:0]; +print s[-100:2]; +print s[-100:100]; + +print ""; + +print "D";; +print s[-2:-99]; +print s[-2:-3]; +print s[-2:-1]; +print s[-2:0]; +print s[-2:2]; +print s[-2:100]; + +print ""; + +print "E";; +print s[0:-100]; +print s[0:-1]; +print s[0:0]; +print s[0:2]; +print s[0:100]; + +print ""; + +print "F";; +print s[2:-100]; +print s[2:-1]; +print s[2:0]; +print s[2:1]; +print s[2:4]; +print s[2:100]; + +print ""; + +print "F";; +print s[100:-100]; +print s[100:-1]; +print s[100:0]; +print s[100:1]; +print s[100:4]; +print s[100:100]; diff --git a/testing/btest/language/string.bro b/testing/btest/language/string.bro deleted file mode 100644 index abaa556b26..0000000000 --- a/testing/btest/language/string.bro +++ /dev/null @@ -1,74 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); - } - - -event bro_init() -{ - local s1: string = "a\ty"; # tab - local s2: string = "a\nb"; # newline - local s3: string = "a\"b"; # double quote - local s4: string = "a\\b"; # backslash - local s5: string = "a\x9y"; # 1-digit hex value (tab character) - local s6: string = "a\x0ab"; # 2-digit hex value (newline character) - local s7: string = "a\x22b"; # 2-digit hex value (double quote) - local s8: string = "a\x00b"; # 2-digit hex value (null character) - local s9: string = "a\011y"; # 3-digit octal value (tab character) - local s10: string = "a\12b"; # 2-digit octal value (newline character) - local s11: string = "a\0b"; # 1-digit octal value (null character) - - local s20: string = ""; - local s21: string = "x"; - local s22: string = s21 + s11; - local s23: string = "test"; - local s24: string = "this is a very long string" + - "which continues on the next line" + - "the end"; - local s25: string = "on"; - local s26 = "x"; - - # Type inference test - - test_case( "type inference", type_name(s26) == "string" ); - - # Escape sequence tests - - test_case( "tab escape sequence", |s1| == 3 ); - test_case( "newline escape sequence", |s2| == 3 ); - test_case( "double quote escape sequence", |s3| == 3 ); - test_case( "backslash escape sequence", |s4| == 3 ); - test_case( "1-digit hex escape sequence", |s5| == 3 ); - test_case( "2-digit hex escape sequence", |s6| == 3 ); - test_case( "2-digit hex escape sequence", |s7| == 3 ); - test_case( "2-digit hex escape sequence", |s8| == 3 ); - test_case( "3-digit octal escape sequence", |s9| == 3 ); - test_case( "2-digit octal escape sequence", |s10| == 3 ); - test_case( "1-digit octal escape sequence", |s11| == 3 ); - test_case( "tab escape sequence", s1 == s5 ); - test_case( "tab escape sequence", s5 == s9 ); - test_case( "newline escape sequence", s2 == s6 ); - test_case( "newline escape sequence", s6 == s10 ); - test_case( "double quote escape sequence", s3 == s7 ); - test_case( "null escape sequence", s8 == s11 ); - - # Operator tests - - test_case( "empty string", |s20| == 0 ); - test_case( "nonempty string", |s21| == 1 ); - test_case( "string comparison", s21 > s11 ); - test_case( "string comparison", s21 >= s11 ); - test_case( "string comparison", s11 < s21 ); - test_case( "string comparison", s11 <= s21 ); - test_case( "string concatenation", |s22| == 4 ); - s23 += s21; - test_case( "string concatenation", s23 == "testx" ); - test_case( "multi-line string initialization", |s24| == 65 ); - test_case( "in operator", s25 in s24 ); - test_case( "!in operator", s25 !in s23 ); - -} - diff --git a/testing/btest/language/string.zeek b/testing/btest/language/string.zeek new file mode 100644 index 0000000000..8f9350a16d --- /dev/null +++ b/testing/btest/language/string.zeek @@ -0,0 +1,74 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + +event zeek_init() +{ + local s1: string = "a\ty"; # tab + local s2: string = "a\nb"; # newline + local s3: string = "a\"b"; # double quote + local s4: string = "a\\b"; # backslash + local s5: string = "a\x9y"; # 1-digit hex value (tab character) + local s6: string = "a\x0ab"; # 2-digit hex value (newline character) + local s7: string = "a\x22b"; # 2-digit hex value (double quote) + local s8: string = "a\x00b"; # 2-digit hex value (null character) + local s9: string = "a\011y"; # 3-digit octal value (tab character) + local s10: string = "a\12b"; # 2-digit octal value (newline character) + local s11: string = "a\0b"; # 1-digit octal value (null character) + + local s20: string = ""; + local s21: string = "x"; + local s22: string = s21 + s11; + local s23: string = "test"; + local s24: string = "this is a very long string" + + "which continues on the next line" + + "the end"; + local s25: string = "on"; + local s26 = "x"; + + # Type inference test + + test_case( "type inference", type_name(s26) == "string" ); + + # Escape sequence tests + + test_case( "tab escape sequence", |s1| == 3 ); + test_case( "newline escape sequence", |s2| == 3 ); + test_case( "double quote escape sequence", |s3| == 3 ); + test_case( "backslash escape sequence", |s4| == 3 ); + test_case( "1-digit hex escape sequence", |s5| == 3 ); + test_case( "2-digit hex escape sequence", |s6| == 3 ); + test_case( "2-digit hex escape sequence", |s7| == 3 ); + test_case( "2-digit hex escape sequence", |s8| == 3 ); + test_case( "3-digit octal escape sequence", |s9| == 3 ); + test_case( "2-digit octal escape sequence", |s10| == 3 ); + test_case( "1-digit octal escape sequence", |s11| == 3 ); + test_case( "tab escape sequence", s1 == s5 ); + test_case( "tab escape sequence", s5 == s9 ); + test_case( "newline escape sequence", s2 == s6 ); + test_case( "newline escape sequence", s6 == s10 ); + test_case( "double quote escape sequence", s3 == s7 ); + test_case( "null escape sequence", s8 == s11 ); + + # Operator tests + + test_case( "empty string", |s20| == 0 ); + test_case( "nonempty string", |s21| == 1 ); + test_case( "string comparison", s21 > s11 ); + test_case( "string comparison", s21 >= s11 ); + test_case( "string comparison", s11 < s21 ); + test_case( "string comparison", s11 <= s21 ); + test_case( "string concatenation", |s22| == 4 ); + s23 += s21; + test_case( "string concatenation", s23 == "testx" ); + test_case( "multi-line string initialization", |s24| == 65 ); + test_case( "in operator", s25 in s24 ); + test_case( "!in operator", s25 !in s23 ); + +} + diff --git a/testing/btest/language/strings.bro b/testing/btest/language/strings.bro deleted file mode 100644 index f601797978..0000000000 --- a/testing/btest/language/strings.bro +++ /dev/null @@ -1,48 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: btest-diff output - -# Demo policy for string functions -# - -event bro_init() -{ - local s1: string = "broisaveryneatids"; - - print fmt("Input string: %s", s1); - print fmt(); - print fmt("String splitting"); - print fmt("----------------"); - - local idx1: index_vec; - - idx1[0] = 0; # We really need initializers for vectors ... 
- idx1[1] = 3; - idx1[2] = 5; - idx1[3] = 6; - idx1[4] = 10; - idx1[5] = 14; - - print fmt("Splitting '%s' at %d points...", s1, |idx1|); - local res_split: string_vec = str_split(s1, idx1); - - for ( i in res_split ) - print res_split[i]; - - print fmt(); - print fmt("Substrings"); - print fmt("----------"); - print fmt("3@0: %s", sub_bytes(s1, 0, 3)); - print fmt("5@2: %s", sub_bytes(s1, 2, 5)); - print fmt("7@4: %s", sub_bytes(s1, 4, 7)); - print fmt("10@10: %s", sub_bytes(s1, 10, 10)); - print fmt(); - - - print fmt("Finding strings"); - print fmt("---------------"); - print fmt("isa: %d", strstr(s1, "isa")); - print fmt("very: %d", strstr(s1, "very")); - print fmt("ids: %d", strstr(s1, "ids")); - print fmt("nono: %d", strstr(s1, "nono")); -} - diff --git a/testing/btest/language/strings.zeek b/testing/btest/language/strings.zeek new file mode 100644 index 0000000000..a5d8cbf69b --- /dev/null +++ b/testing/btest/language/strings.zeek @@ -0,0 +1,48 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: btest-diff output + +# Demo policy for string functions +# + +event zeek_init() +{ + local s1: string = "broisaveryneatids"; + + print fmt("Input string: %s", s1); + print fmt(); + print fmt("String splitting"); + print fmt("----------------"); + + local idx1: index_vec; + + idx1[0] = 0; # We really need initializers for vectors ... + idx1[1] = 3; + idx1[2] = 5; + idx1[3] = 6; + idx1[4] = 10; + idx1[5] = 14; + + print fmt("Splitting '%s' at %d points...", s1, |idx1|); + local res_split: string_vec = str_split(s1, idx1); + + for ( i in res_split ) + print res_split[i]; + + print fmt(); + print fmt("Substrings"); + print fmt("----------"); + print fmt("3@0: %s", sub_bytes(s1, 0, 3)); + print fmt("5@2: %s", sub_bytes(s1, 2, 5)); + print fmt("7@4: %s", sub_bytes(s1, 4, 7)); + print fmt("10@10: %s", sub_bytes(s1, 10, 10)); + print fmt(); + + + print fmt("Finding strings"); + print fmt("---------------"); + print fmt("isa: %d", strstr(s1, "isa")); + print fmt("very: %d", strstr(s1, "very")); + print fmt("ids: %d", strstr(s1, "ids")); + print fmt("nono: %d", strstr(s1, "nono")); +} + diff --git a/testing/btest/language/subnet-errors.bro b/testing/btest/language/subnet-errors.bro deleted file mode 100644 index fa98dcec48..0000000000 --- a/testing/btest/language/subnet-errors.bro +++ /dev/null @@ -1,26 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -event bro_init() - { - local i = 32; - print 1.2.3.4/i; - ++i; - print 1.2.3.4/i; - print "init 1"; - } - -event bro_init() - { - local i = 128; - print [::]/i; - ++i; - print [::]/i; - print "init 1"; - } - -event bro_init() &priority=-10 - { - print "init last"; - } - diff --git a/testing/btest/language/subnet-errors.zeek b/testing/btest/language/subnet-errors.zeek new file mode 100644 index 0000000000..875817c433 --- /dev/null +++ b/testing/btest/language/subnet-errors.zeek @@ -0,0 +1,26 @@ +# @TEST-EXEC: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +event zeek_init() + { + local i = 32; + print 1.2.3.4/i; + ++i; + print 1.2.3.4/i; + print "init 1"; + } + +event zeek_init() + { + local i = 128; + print [::]/i; + ++i; + print [::]/i; + print "init 1"; + } + +event zeek_init() &priority=-10 + { + print "init last"; + } + diff --git a/testing/btest/language/subnet.bro b/testing/btest/language/subnet.bro deleted file mode 100644 index b3b50e085f..0000000000 --- a/testing/btest/language/subnet.bro +++ 
/dev/null @@ -1,64 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - - -event bro_init() -{ - # IPv4 addr - local a1: addr = 192.1.2.3; - - # IPv4 subnets - local s1: subnet = 0.0.0.0/0; - local s2: subnet = 192.0.0.0/8; - local s3: subnet = 255.255.255.255/32; - local s4 = 10.0.0.0/16; - - test_case( "IPv4 subnet equality", a1/8 == s2 ); - test_case( "IPv4 subnet inequality", a1/4 != s2 ); - test_case( "IPv4 subnet in operator", a1 in s2 ); - test_case( "IPv4 subnet !in operator", a1 !in s3 ); - test_case( "IPv4 subnet type inference", type_name(s4) == "subnet" ); - - # IPv6 addrs - local b1: addr = [ffff::]; - local b2: addr = [ffff::1]; - local b3: addr = [ffff:1::1]; - - # IPv6 subnets - local t1: subnet = [::]/0; - local t2: subnet = [ffff::]/64; - local t3 = [a::]/32; - - test_case( "IPv6 subnet equality", b1/64 == t2 ); - test_case( "IPv6 subnet inequality", b3/64 != t2 ); - test_case( "IPv6 subnet in operator", b2 in t2 ); - test_case( "IPv6 subnet !in operator", b3 !in t2 ); - test_case( "IPv6 subnet type inference", type_name(t3) == "subnet" ); - - test_case( "IPv4 and IPv6 subnet inequality", s1 != t1 ); - test_case( "IPv4 address and IPv6 subnet", a1 !in t2 ); - - # IPv4-mapped-IPv6 subnets - local u1: subnet = [::ffff:0:0]/96; - - test_case( "IPv4 in IPv4-mapped-IPv6 subnet", 1.2.3.4 in u1 ); - test_case( "IPv6 !in IPv4-mapped-IPv6 subnet", [fe80::1] !in u1 ); - test_case( "IPv4-mapped-IPv6 in IPv4-mapped-IPv6 subnet", - [::ffff:1.2.3.4] in u1 ); - test_case( "IPv4-mapped-IPv6 subnet equality", - [::ffff:1.2.3.4]/112 == 1.2.0.0/16 ); - test_case( "subnet literal const whitespace", - [::ffff:1.2.3.4] / 112 == 1.2.0.0 / 16 ); - test_case( "subnet literal const whitespace", - [::ffff:1.2.3.4]/ 128 == 1.2.3.4/ 32 ); - test_case( "subnet literal const whitespace", - [::ffff:1.2.3.4] /96 == 1.2.3.4 /0 ); - test_case( "subnet literal const whitespace", - [::ffff:1.2.3.4] / 92 == [::fffe:1.2.3.4] / 92 ); -} - diff --git a/testing/btest/language/subnet.zeek b/testing/btest/language/subnet.zeek new file mode 100644 index 0000000000..db61460df9 --- /dev/null +++ b/testing/btest/language/subnet.zeek @@ -0,0 +1,64 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + +event zeek_init() +{ + # IPv4 addr + local a1: addr = 192.1.2.3; + + # IPv4 subnets + local s1: subnet = 0.0.0.0/0; + local s2: subnet = 192.0.0.0/8; + local s3: subnet = 255.255.255.255/32; + local s4 = 10.0.0.0/16; + + test_case( "IPv4 subnet equality", a1/8 == s2 ); + test_case( "IPv4 subnet inequality", a1/4 != s2 ); + test_case( "IPv4 subnet in operator", a1 in s2 ); + test_case( "IPv4 subnet !in operator", a1 !in s3 ); + test_case( "IPv4 subnet type inference", type_name(s4) == "subnet" ); + + # IPv6 addrs + local b1: addr = [ffff::]; + local b2: addr = [ffff::1]; + local b3: addr = [ffff:1::1]; + + # IPv6 subnets + local t1: subnet = [::]/0; + local t2: subnet = [ffff::]/64; + local t3 = [a::]/32; + + test_case( "IPv6 subnet equality", b1/64 == t2 ); + test_case( "IPv6 subnet inequality", b3/64 != t2 ); + test_case( "IPv6 subnet in operator", b2 in t2 ); + test_case( "IPv6 subnet !in operator", b3 !in t2 ); + test_case( "IPv6 subnet type inference", type_name(t3) == "subnet" ); + + test_case( "IPv4 and IPv6 subnet inequality", s1 != t1 ); + test_case( "IPv4 address and IPv6 subnet", a1 !in t2 ); + + # IPv4-mapped-IPv6 subnets + local u1: subnet = [::ffff:0:0]/96; + + test_case( "IPv4 in IPv4-mapped-IPv6 subnet", 1.2.3.4 in u1 ); + test_case( "IPv6 !in IPv4-mapped-IPv6 subnet", [fe80::1] !in u1 ); + test_case( "IPv4-mapped-IPv6 in IPv4-mapped-IPv6 subnet", + [::ffff:1.2.3.4] in u1 ); + test_case( "IPv4-mapped-IPv6 subnet equality", + [::ffff:1.2.3.4]/112 == 1.2.0.0/16 ); + test_case( "subnet literal const whitespace", + [::ffff:1.2.3.4] / 112 == 1.2.0.0 / 16 ); + test_case( "subnet literal const whitespace", + [::ffff:1.2.3.4]/ 128 == 1.2.3.4/ 32 ); + test_case( "subnet literal const whitespace", + [::ffff:1.2.3.4] /96 == 1.2.3.4 /0 ); + test_case( "subnet literal const whitespace", + [::ffff:1.2.3.4] / 92 == [::fffe:1.2.3.4] / 92 ); +} + diff --git a/testing/btest/language/switch-error-mixed.bro b/testing/btest/language/switch-error-mixed.bro deleted file mode 100644 index 78c7a2091f..0000000000 --- a/testing/btest/language/switch-error-mixed.bro +++ /dev/null @@ -1,13 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -function switch_one(v: count): string - { - switch (v) { - case 42: - return "42!"; - case type count: - return "Count!"; - } - } - diff --git a/testing/btest/language/switch-error-mixed.zeek b/testing/btest/language/switch-error-mixed.zeek new file mode 100644 index 0000000000..4eb68f38d7 --- /dev/null +++ b/testing/btest/language/switch-error-mixed.zeek @@ -0,0 +1,13 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +function switch_one(v: count): string + { + switch (v) { + case 42: + return "42!"; + case type count: + return "Count!"; + } + } + diff --git a/testing/btest/language/switch-incomplete.bro b/testing/btest/language/switch-incomplete.bro deleted file mode 100644 index 7ee800b274..0000000000 --- a/testing/btest/language/switch-incomplete.bro +++ /dev/null @@ -1,12 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -event bro_init() - { - switch ( 1 ) { - case 1: - print 1; - # error: neither break/fallthrough/return here. 
- } - } - diff --git a/testing/btest/language/switch-incomplete.zeek b/testing/btest/language/switch-incomplete.zeek new file mode 100644 index 0000000000..62f55f63d2 --- /dev/null +++ b/testing/btest/language/switch-incomplete.zeek @@ -0,0 +1,12 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +event zeek_init() + { + switch ( 1 ) { + case 1: + print 1; + # error: neither break/fallthrough/return here. + } + } + diff --git a/testing/btest/language/switch-statement.bro b/testing/btest/language/switch-statement.bro deleted file mode 100644 index 152b14f87d..0000000000 --- a/testing/btest/language/switch-statement.bro +++ /dev/null @@ -1,293 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -type MyEnum: enum { - RED, - GREEN, - BLUE, - PINK, -}; - -function switch_bool(v: bool): string - { - switch (v) { - case T: - return "true"; - case F: - return "false"; - } - return "n/a"; - } - -function switch_int(v: int): string - { - switch (v) { - case +1: - return "one"; - case +2: - return "two"; - case -3: - return "minus three"; - } - return "n/a"; - } - -function switch_enum(v: MyEnum): string - { - switch (v) { - case RED: - return "red"; - case GREEN: - return "green"; - case BLUE: - return "blue"; - } - return "n/a"; - } - -function switch_count(v: count): string - { - switch (v) { - case 1: - return "1"; - case 2: - return "2"; - case 3: - return "3"; - } - return "n/a"; - } - -function switch_port(v: port): string - { - switch (v) { - case 22/tcp: - return "ssh"; - case 53/udp: - return "dns"; - case 0/icmp: - return "echo"; - } - return "n/a"; - } - -function switch_double(v: double): string - { - switch (v) { - case 1.1: - return "1.1"; - case 2.2: - return "2.2"; - case 3.3: - return "3.3"; - } - return "n/a"; - } - -function switch_interval(v: interval): string - { - switch (v) { - case 1sec: - return "1sec"; - case 2day: - return "2day"; - case 3min: - return "3min"; - } - return "n/a"; - } - -function switch_string(v: string): string - { - switch (v) { - case "one": - return "first"; - case "two": - return "second"; - case "three": - return "third"; - } - return "n/a"; - } - -function switch_addr(v: addr): string - { - switch (v) { - case 1.2.3.4: - return "ipv4"; - case [fe80::1]: - return "ipv6"; - case 0.0.0.0: - return "unspec"; - } - return "n/a"; - } - -function switch_subnet(v: subnet): string - { - switch (v) { - case 1.2.3.0/24: - return "1.2.3.0/24"; - case [fe80::0]/96: - return "[fe80::0]"; - case 192.168.0.0/16: - return "192.168.0.0/16"; - } - return "n/a"; - } - -function switch_empty(v: count): string - { - switch ( v ) { - } - return "n/a"; - } - -function switch_fallthrough(v: count): string - { - local rval = ""; - switch ( v ) { - case 1: - rval += "test"; - fallthrough; - case 2: - rval += "testing"; - fallthrough; - case 3: - rval += "tested"; - break; - } - return rval + "return"; - } - -function switch_default(v: count): string - { - local rval = ""; - switch ( v ) { - case 1: - rval += "1"; - fallthrough; - case 2: - rval += "2"; - break; - case 3: - rval += "3"; - fallthrough; - default: - rval += "d"; - break; - } - return rval + "r"; - } - -function switch_default_placement(v: count): string - { - local rval = ""; - switch ( v ) { - case 1: - rval += "1"; - fallthrough; - default: - rval += "d"; - fallthrough; - case 2: - rval += "2"; - break; - case 3: - rval += "3"; - break; - } - return rval + "r"; - } - -function switch_case_list(v: count): string 
- { - switch ( v ) { - case 1, 2: - return "1,2"; - case 3, 4, 5: - return "3,4,5"; - case 6, 7, 8, 9: - return "6,7,8,9"; - } - return "n/a"; - } - -function test_switch(actual: string, expect: string) - { - if ( actual != expect ) - print fmt("%s != %s", actual, expect); - } - -event bro_init() - { - test_switch( switch_bool(T) , "true" ); - test_switch( switch_bool(F) , "false" ); - test_switch( switch_int(+1) , "one" ); - test_switch( switch_int(+2) , "two" ); - test_switch( switch_int(-3) , "minus three" ); - test_switch( switch_int(40) , "n/a" ); - test_switch( switch_enum(RED) , "red" ); - test_switch( switch_enum(BLUE) , "blue" ); - test_switch( switch_enum(GREEN) , "green" ); - test_switch( switch_enum(PINK) , "n/a" ); - test_switch( switch_count(1) , "1" ); - test_switch( switch_count(2) , "2" ); - test_switch( switch_count(3) , "3" ); - test_switch( switch_count(100) , "n/a" ); - test_switch( switch_port(22/tcp) , "ssh" ); - test_switch( switch_port(53/udp) , "dns" ); - test_switch( switch_port(0/icmp) , "echo" ); - test_switch( switch_port(1000/tcp) , "n/a" ); - test_switch( switch_double(1.1) , "1.1" ); - test_switch( switch_double(2.2) , "2.2" ); - test_switch( switch_double(3.3) , "3.3" ); - test_switch( switch_interval(1sec) , "1sec" ); - test_switch( switch_interval(2day) , "2day" ); - test_switch( switch_interval(3min) , "3min" ); - test_switch( switch_string("one") , "first" ); - test_switch( switch_string("two") , "second" ); - test_switch( switch_string("three") , "third" ); - test_switch( switch_addr(1.2.3.4) , "ipv4" ); - test_switch( switch_addr([fe80::1]) , "ipv6" ); - test_switch( switch_addr(0.0.0.0) , "unspec" ); - test_switch( switch_subnet(1.2.3.4/24) , "1.2.3.0/24" ); - test_switch( switch_subnet([fe80::1]/96) , "[fe80::0]" ); - test_switch( switch_subnet(192.168.1.100/16) , "192.168.0.0/16" ); - test_switch( switch_empty(2) , "n/a" ); - test_switch( switch_fallthrough(1) , "testtestingtestedreturn" ); - test_switch( switch_fallthrough(2) , "testingtestedreturn" ); - test_switch( switch_fallthrough(3) , "testedreturn" ); - test_switch( switch_default(1) , "12r" ); - test_switch( switch_default(2) , "2r" ); - test_switch( switch_default(3) , "3dr" ); - test_switch( switch_default(4) , "dr" ); - test_switch( switch_default_placement(1) , "1d2r" ); - test_switch( switch_default_placement(2) , "2r" ); - test_switch( switch_default_placement(3) , "3r" ); - test_switch( switch_default_placement(4) , "d2r" ); - - local v = vector(0,1,2,3,4,5,6,7,9,10); - local expect: string; - - for ( i in v ) - { - switch ( v[i] ) { - case 1, 2: - expect = "1,2"; - break; - case 3, 4, 5: - expect = "3,4,5"; - break; - case 6, 7, 8, 9: - expect = "6,7,8,9"; - break; - default: - expect = "n/a"; - break; - } - test_switch( switch_case_list(v[i]) , expect ); - } - - print "done"; - } diff --git a/testing/btest/language/switch-statement.zeek b/testing/btest/language/switch-statement.zeek new file mode 100644 index 0000000000..2f4bf56118 --- /dev/null +++ b/testing/btest/language/switch-statement.zeek @@ -0,0 +1,293 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +type MyEnum: enum { + RED, + GREEN, + BLUE, + PINK, +}; + +function switch_bool(v: bool): string + { + switch (v) { + case T: + return "true"; + case F: + return "false"; + } + return "n/a"; + } + +function switch_int(v: int): string + { + switch (v) { + case +1: + return "one"; + case +2: + return "two"; + case -3: + return "minus three"; + } + return "n/a"; + } + +function switch_enum(v: MyEnum): 
string + { + switch (v) { + case RED: + return "red"; + case GREEN: + return "green"; + case BLUE: + return "blue"; + } + return "n/a"; + } + +function switch_count(v: count): string + { + switch (v) { + case 1: + return "1"; + case 2: + return "2"; + case 3: + return "3"; + } + return "n/a"; + } + +function switch_port(v: port): string + { + switch (v) { + case 22/tcp: + return "ssh"; + case 53/udp: + return "dns"; + case 0/icmp: + return "echo"; + } + return "n/a"; + } + +function switch_double(v: double): string + { + switch (v) { + case 1.1: + return "1.1"; + case 2.2: + return "2.2"; + case 3.3: + return "3.3"; + } + return "n/a"; + } + +function switch_interval(v: interval): string + { + switch (v) { + case 1sec: + return "1sec"; + case 2day: + return "2day"; + case 3min: + return "3min"; + } + return "n/a"; + } + +function switch_string(v: string): string + { + switch (v) { + case "one": + return "first"; + case "two": + return "second"; + case "three": + return "third"; + } + return "n/a"; + } + +function switch_addr(v: addr): string + { + switch (v) { + case 1.2.3.4: + return "ipv4"; + case [fe80::1]: + return "ipv6"; + case 0.0.0.0: + return "unspec"; + } + return "n/a"; + } + +function switch_subnet(v: subnet): string + { + switch (v) { + case 1.2.3.0/24: + return "1.2.3.0/24"; + case [fe80::0]/96: + return "[fe80::0]"; + case 192.168.0.0/16: + return "192.168.0.0/16"; + } + return "n/a"; + } + +function switch_empty(v: count): string + { + switch ( v ) { + } + return "n/a"; + } + +function switch_fallthrough(v: count): string + { + local rval = ""; + switch ( v ) { + case 1: + rval += "test"; + fallthrough; + case 2: + rval += "testing"; + fallthrough; + case 3: + rval += "tested"; + break; + } + return rval + "return"; + } + +function switch_default(v: count): string + { + local rval = ""; + switch ( v ) { + case 1: + rval += "1"; + fallthrough; + case 2: + rval += "2"; + break; + case 3: + rval += "3"; + fallthrough; + default: + rval += "d"; + break; + } + return rval + "r"; + } + +function switch_default_placement(v: count): string + { + local rval = ""; + switch ( v ) { + case 1: + rval += "1"; + fallthrough; + default: + rval += "d"; + fallthrough; + case 2: + rval += "2"; + break; + case 3: + rval += "3"; + break; + } + return rval + "r"; + } + +function switch_case_list(v: count): string + { + switch ( v ) { + case 1, 2: + return "1,2"; + case 3, 4, 5: + return "3,4,5"; + case 6, 7, 8, 9: + return "6,7,8,9"; + } + return "n/a"; + } + +function test_switch(actual: string, expect: string) + { + if ( actual != expect ) + print fmt("%s != %s", actual, expect); + } + +event zeek_init() + { + test_switch( switch_bool(T) , "true" ); + test_switch( switch_bool(F) , "false" ); + test_switch( switch_int(+1) , "one" ); + test_switch( switch_int(+2) , "two" ); + test_switch( switch_int(-3) , "minus three" ); + test_switch( switch_int(40) , "n/a" ); + test_switch( switch_enum(RED) , "red" ); + test_switch( switch_enum(BLUE) , "blue" ); + test_switch( switch_enum(GREEN) , "green" ); + test_switch( switch_enum(PINK) , "n/a" ); + test_switch( switch_count(1) , "1" ); + test_switch( switch_count(2) , "2" ); + test_switch( switch_count(3) , "3" ); + test_switch( switch_count(100) , "n/a" ); + test_switch( switch_port(22/tcp) , "ssh" ); + test_switch( switch_port(53/udp) , "dns" ); + test_switch( switch_port(0/icmp) , "echo" ); + test_switch( switch_port(1000/tcp) , "n/a" ); + test_switch( switch_double(1.1) , "1.1" ); + test_switch( switch_double(2.2) , "2.2" ); + test_switch( 
switch_double(3.3) , "3.3" ); + test_switch( switch_interval(1sec) , "1sec" ); + test_switch( switch_interval(2day) , "2day" ); + test_switch( switch_interval(3min) , "3min" ); + test_switch( switch_string("one") , "first" ); + test_switch( switch_string("two") , "second" ); + test_switch( switch_string("three") , "third" ); + test_switch( switch_addr(1.2.3.4) , "ipv4" ); + test_switch( switch_addr([fe80::1]) , "ipv6" ); + test_switch( switch_addr(0.0.0.0) , "unspec" ); + test_switch( switch_subnet(1.2.3.4/24) , "1.2.3.0/24" ); + test_switch( switch_subnet([fe80::1]/96) , "[fe80::0]" ); + test_switch( switch_subnet(192.168.1.100/16) , "192.168.0.0/16" ); + test_switch( switch_empty(2) , "n/a" ); + test_switch( switch_fallthrough(1) , "testtestingtestedreturn" ); + test_switch( switch_fallthrough(2) , "testingtestedreturn" ); + test_switch( switch_fallthrough(3) , "testedreturn" ); + test_switch( switch_default(1) , "12r" ); + test_switch( switch_default(2) , "2r" ); + test_switch( switch_default(3) , "3dr" ); + test_switch( switch_default(4) , "dr" ); + test_switch( switch_default_placement(1) , "1d2r" ); + test_switch( switch_default_placement(2) , "2r" ); + test_switch( switch_default_placement(3) , "3r" ); + test_switch( switch_default_placement(4) , "d2r" ); + + local v = vector(0,1,2,3,4,5,6,7,9,10); + local expect: string; + + for ( i in v ) + { + switch ( v[i] ) { + case 1, 2: + expect = "1,2"; + break; + case 3, 4, 5: + expect = "3,4,5"; + break; + case 6, 7, 8, 9: + expect = "6,7,8,9"; + break; + default: + expect = "n/a"; + break; + } + test_switch( switch_case_list(v[i]) , expect ); + } + + print "done"; + } diff --git a/testing/btest/language/switch-types-error-duplicate.bro b/testing/btest/language/switch-types-error-duplicate.bro deleted file mode 100644 index 846d228be3..0000000000 --- a/testing/btest/language/switch-types-error-duplicate.bro +++ /dev/null @@ -1,18 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -function switch_one(v: any): string - { - switch (v) { - case type string: - return "String!"; - case type count: - return "Count!"; - case type bool, type count: - return "Bool or address!"; - default: - return "Somethign else!"; - } - - } - diff --git a/testing/btest/language/switch-types-error-duplicate.zeek b/testing/btest/language/switch-types-error-duplicate.zeek new file mode 100644 index 0000000000..3b40e2fcfe --- /dev/null +++ b/testing/btest/language/switch-types-error-duplicate.zeek @@ -0,0 +1,18 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +function switch_one(v: any): string + { + switch (v) { + case type string: + return "String!"; + case type count: + return "Count!"; + case type bool, type count: + return "Bool or address!"; + default: + return "Somethign else!"; + } + + } + diff --git a/testing/btest/language/switch-types-error-unsupported.bro b/testing/btest/language/switch-types-error-unsupported.bro deleted file mode 100644 index d8b8d039df..0000000000 --- a/testing/btest/language/switch-types-error-unsupported.bro +++ /dev/null @@ -1,17 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -function switch_one(v: string): string - { - switch (v) { - case type string: - return "String!"; - case type count: - return "Count!"; - case type bool, type addr: - return "Bool or address!"; - default: - 
return "Somethign else!"; - } - } - diff --git a/testing/btest/language/switch-types-error-unsupported.zeek b/testing/btest/language/switch-types-error-unsupported.zeek new file mode 100644 index 0000000000..3045336f22 --- /dev/null +++ b/testing/btest/language/switch-types-error-unsupported.zeek @@ -0,0 +1,17 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +function switch_one(v: string): string + { + switch (v) { + case type string: + return "String!"; + case type count: + return "Count!"; + case type bool, type addr: + return "Bool or address!"; + default: + return "Somethign else!"; + } + } + diff --git a/testing/btest/language/switch-types-vars.bro b/testing/btest/language/switch-types-vars.bro deleted file mode 100644 index 1b0ca5591b..0000000000 --- a/testing/btest/language/switch-types-vars.bro +++ /dev/null @@ -1,48 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function switch_one(v: any) - { - switch (v) { - case type string as s: - print "string!", s; - break; - - case type count as c: - print "count!", c; - break; - - case type int: - print "int!"; - break; - - case type double, type port: - print "double or port"; - break; - - case type bool as b, type addr as a: - print "Bool or address?"; - - if ( v is bool ) - print " bool", b; - - if ( v is addr ) - print " addr", a; - - break; - default: - print "Somethign else!"; - break; - } - } - -event bro_init() - { - switch_one("My StrIng"); - switch_one(42); - switch_one(1.2.3.4); - switch_one(T); - switch_one(-13); - switch_one(42/udp); - switch_one(3.1415926); - } diff --git a/testing/btest/language/switch-types-vars.zeek b/testing/btest/language/switch-types-vars.zeek new file mode 100644 index 0000000000..c92a16e5e6 --- /dev/null +++ b/testing/btest/language/switch-types-vars.zeek @@ -0,0 +1,48 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function switch_one(v: any) + { + switch (v) { + case type string as s: + print "string!", s; + break; + + case type count as c: + print "count!", c; + break; + + case type int: + print "int!"; + break; + + case type double, type port: + print "double or port"; + break; + + case type bool as b, type addr as a: + print "Bool or address?"; + + if ( v is bool ) + print " bool", b; + + if ( v is addr ) + print " addr", a; + + break; + default: + print "Somethign else!"; + break; + } + } + +event zeek_init() + { + switch_one("My StrIng"); + switch_one(42); + switch_one(1.2.3.4); + switch_one(T); + switch_one(-13); + switch_one(42/udp); + switch_one(3.1415926); + } diff --git a/testing/btest/language/switch-types.bro b/testing/btest/language/switch-types.bro deleted file mode 100644 index 468ba93922..0000000000 --- a/testing/btest/language/switch-types.bro +++ /dev/null @@ -1,43 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function switch_one(v: any): string - { - switch (v) { - case type string: - return "String!"; - case type count: - return "Count!"; - case type bool, type addr: - return "Bool or address!"; - default: - return "Somethign else!"; - } - } - -function switch_one_no_default(v: any): string - { - switch (v) { - case type string: - return "String!"; - case type count: - return "Count!"; - case type bool, type addr: - return "Bool or address!"; - } - - return "n/a"; - } - - -event bro_init() - { - print switch_one("string"); - print switch_one(42); - print switch_one(T); - print switch_one(1947/tcp); - print ""; - print 
switch_one_no_default(1.2.3.4); - print switch_one_no_default(1947/tcp); - - } diff --git a/testing/btest/language/switch-types.zeek b/testing/btest/language/switch-types.zeek new file mode 100644 index 0000000000..031a311774 --- /dev/null +++ b/testing/btest/language/switch-types.zeek @@ -0,0 +1,43 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function switch_one(v: any): string + { + switch (v) { + case type string: + return "String!"; + case type count: + return "Count!"; + case type bool, type addr: + return "Bool or address!"; + default: + return "Somethign else!"; + } + } + +function switch_one_no_default(v: any): string + { + switch (v) { + case type string: + return "String!"; + case type count: + return "Count!"; + case type bool, type addr: + return "Bool or address!"; + } + + return "n/a"; + } + + +event zeek_init() + { + print switch_one("string"); + print switch_one(42); + print switch_one(T); + print switch_one(1947/tcp); + print ""; + print switch_one_no_default(1.2.3.4); + print switch_one_no_default(1947/tcp); + + } diff --git a/testing/btest/language/table-default-record.bro b/testing/btest/language/table-default-record.bro deleted file mode 100644 index 3894f3ac09..0000000000 --- a/testing/btest/language/table-default-record.bro +++ /dev/null @@ -1,24 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -type Foo: record { - x: count &default=0; -}; - -global foo: table[count] of Foo = {} &default=[]; - -# returns the &default value as usual -print(foo[0]$x); -print(foo[1]$x); - -# these are essentially no-ops since a copy of the &default value is returned -# by the lookup -foo[0]$x = 0; -foo[1]$x = 1; - -# the &default value isn't modified -print(foo[0]$x); -print(foo[1]$x); - -# table membership isn't modified -print(foo); diff --git a/testing/btest/language/table-default-record.zeek b/testing/btest/language/table-default-record.zeek new file mode 100644 index 0000000000..c7f561d19f --- /dev/null +++ b/testing/btest/language/table-default-record.zeek @@ -0,0 +1,24 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +type Foo: record { + x: count &default=0; +}; + +global foo: table[count] of Foo = {} &default=[]; + +# returns the &default value as usual +print(foo[0]$x); +print(foo[1]$x); + +# these are essentially no-ops since a copy of the &default value is returned +# by the lookup +foo[0]$x = 0; +foo[1]$x = 1; + +# the &default value isn't modified +print(foo[0]$x); +print(foo[1]$x); + +# table membership isn't modified +print(foo); diff --git a/testing/btest/language/table-init-attrs.bro b/testing/btest/language/table-init-attrs.bro deleted file mode 100644 index 76d98b9fed..0000000000 --- a/testing/btest/language/table-init-attrs.bro +++ /dev/null @@ -1,115 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -# set()/table() constructors are allowed to have attributes. When initializing -# an identifier, those attributes should also apply to it. - -const my_set_ctor_init: set[string] = set("test1") &redef; - -redef my_set_ctor_init += { - "test2", - "test3", -}; - -redef my_set_ctor_init += set("test4"); - -const my_table_ctor_init: table[count] of string = table([1] = "test1") &redef &default="nope"; - -redef my_table_ctor_init += { - [2] = "test2", - [3] = "test3", -}; - -# initializer list versions work the same way. 
- -const my_set_init: set[string] = { "test1" } &redef; - -redef my_set_init += { - "test2", - "test3", -}; - -redef my_set_init += set("test4"); - -const my_table_init: table[count] of string = { [1] = "test1" } &redef &default="nope"; - -redef my_table_init += { - [2] = "test2", - [3] = "test3", -}; - -redef my_table_init += table([4] = "test4"); - -# For tables that yield tables, we can apply attributes to the both other and -# inner tables... - -global inception_table: table[count] of table[count] of string = table( - [0] = table([13] = "bar") &default="forty-two" -) &default=table() &default="we need to go deeper"; - -global inception_table2: table[count] of table[count] of string = { - [0] = table([13] = "bar") &default="forty-two", -} &default=table() &default="we need to go deeper"; - -event bro_init() - { - print "my_set_ctor_init"; - print my_set_ctor_init; - print ""; - print "my_table_ctor_init"; - print my_table_ctor_init; - print my_table_ctor_init[5]; - print ""; - print "my_set_init"; - print my_set_init; - print ""; - print "my_table_init"; - print my_table_init; - print my_table_init[5]; - print ""; - print "inception"; - print inception_table; - print inception_table[0]; - print inception_table[0][13]; - print inception_table[0][42]; - print inception_table[1]; - print inception_table[1][2]; - print inception_table2; - print inception_table2[0]; - print inception_table2[0][13]; - print inception_table2[0][42]; - print inception_table2[1]; - print inception_table2[1][2]; - print ""; - - # just checking attributes on locals works, too - print "local table t1"; - local t1: table[count] of string = table([1] = "foo") &default="nope"; - print t1; - print t1[1]; - print t1[2]; - print ""; - - print "local table t2"; - local t2: table[count] of string = {[1] = "foo"} &default="nope"; - print t2; - print t2[1]; - print t2[2]; - print ""; - - # and for empty initializers... - print "local table t3"; - local t3: table[count] of string = table() &default="nope"; - print t3; - print t3[1]; - print t3[2]; - print ""; - - print "local table t4"; - local t4: table[count] of string = {} &default="nope"; - print t4; - print t4[1]; - print t4[2]; - print ""; - - } diff --git a/testing/btest/language/table-init-attrs.zeek b/testing/btest/language/table-init-attrs.zeek new file mode 100644 index 0000000000..5f1e742479 --- /dev/null +++ b/testing/btest/language/table-init-attrs.zeek @@ -0,0 +1,115 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +# set()/table() constructors are allowed to have attributes. When initializing +# an identifier, those attributes should also apply to it. + +const my_set_ctor_init: set[string] = set("test1") &redef; + +redef my_set_ctor_init += { + "test2", + "test3", +}; + +redef my_set_ctor_init += set("test4"); + +const my_table_ctor_init: table[count] of string = table([1] = "test1") &redef &default="nope"; + +redef my_table_ctor_init += { + [2] = "test2", + [3] = "test3", +}; + +# initializer list versions work the same way. + +const my_set_init: set[string] = { "test1" } &redef; + +redef my_set_init += { + "test2", + "test3", +}; + +redef my_set_init += set("test4"); + +const my_table_init: table[count] of string = { [1] = "test1" } &redef &default="nope"; + +redef my_table_init += { + [2] = "test2", + [3] = "test3", +}; + +redef my_table_init += table([4] = "test4"); + +# For tables that yield tables, we can apply attributes to the both other and +# inner tables... 
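#
# (A small, hypothetical sketch of that rule, separate from this test and its
#  baseline: the trailing &default=table() supplies the outer table's default
#  value, and that inner table() constructor carries its own &default.)
#
#     global nested: table[count] of table[count] of string =
#         table( [1] = table([10] = "x") &default="inner miss" )
#         &default=table() &default="outer miss";
#
#     # nested[1][99] falls back to "inner miss"; nested[2][0] to "outer miss".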
+ +global inception_table: table[count] of table[count] of string = table( + [0] = table([13] = "bar") &default="forty-two" +) &default=table() &default="we need to go deeper"; + +global inception_table2: table[count] of table[count] of string = { + [0] = table([13] = "bar") &default="forty-two", +} &default=table() &default="we need to go deeper"; + +event zeek_init() + { + print "my_set_ctor_init"; + print my_set_ctor_init; + print ""; + print "my_table_ctor_init"; + print my_table_ctor_init; + print my_table_ctor_init[5]; + print ""; + print "my_set_init"; + print my_set_init; + print ""; + print "my_table_init"; + print my_table_init; + print my_table_init[5]; + print ""; + print "inception"; + print inception_table; + print inception_table[0]; + print inception_table[0][13]; + print inception_table[0][42]; + print inception_table[1]; + print inception_table[1][2]; + print inception_table2; + print inception_table2[0]; + print inception_table2[0][13]; + print inception_table2[0][42]; + print inception_table2[1]; + print inception_table2[1][2]; + print ""; + + # just checking attributes on locals works, too + print "local table t1"; + local t1: table[count] of string = table([1] = "foo") &default="nope"; + print t1; + print t1[1]; + print t1[2]; + print ""; + + print "local table t2"; + local t2: table[count] of string = {[1] = "foo"} &default="nope"; + print t2; + print t2[1]; + print t2[2]; + print ""; + + # and for empty initializers... + print "local table t3"; + local t3: table[count] of string = table() &default="nope"; + print t3; + print t3[1]; + print t3[2]; + print ""; + + print "local table t4"; + local t4: table[count] of string = {} &default="nope"; + print t4; + print t4[1]; + print t4[2]; + print ""; + + } diff --git a/testing/btest/language/table-init-container-ctors.bro b/testing/btest/language/table-init-container-ctors.bro deleted file mode 100644 index 1f9e18d848..0000000000 --- a/testing/btest/language/table-init-container-ctors.bro +++ /dev/null @@ -1,95 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -# The various container constructor expressions should work in table -# initialization lists (as yields). - -type set_yield: set[string, count]; -type vector_yield: vector of count; -type table_yield: table[string, count] of count; -type record_yield: record { - a: count; - b: string; -}; - -global lone_set_ctor: set_yield = set(["foo", 1], ["bar", 2]); -global lone_vector_ctor: vector_yield = vector(1, 2); -global lone_table_ctor: table_yield = table(["foo", 1] = 1, ["bar", 2] = 2); -global lone_record_ctor: record_yield = record($a=1, $b="foo"); - -global table_of_set: table[count] of set_yield = { - [13] = lone_set_ctor, - [5] = set(["bah", 3], ["baz", 4]), -}; - -global table_of_vector: table[count] of vector_yield = { - [13] = lone_vector_ctor, - [5] = vector(3, 4), -}; - -global table_of_table: table[count] of table_yield = { - [13] = lone_table_ctor, - [5] = table(["bah", 3] = 3, ["baz", 4] = 4), -}; - -global table_of_record: table[count] of record_yield = { - [13] = lone_record_ctor, - [5] = record($a=2, $b="bar"), -}; - -# Just copying the inline ctors used in the table initializer lists here -# for later comparisons. 
-global inline_set_ctor: set_yield = set(["bah", 3], ["baz", 4]); -global inline_vector_ctor: vector_yield = vector(3, 4); -global inline_table_ctor: table_yield = table(["bah", 3] = 3, ["baz", 4] = 4); -global inline_record_ctor: record_yield = record($a=2, $b="bar"); - -function compare_set_yield(a: set_yield, b: set_yield) - { - local s: string; - local c: count; - for ( [s, c] in a ) - print [s, c] in b; - } - -function compare_vector_yield(a: vector_yield, b: vector_yield) - { - local c: count; - for ( c in a ) - print a[c] == b[c]; - } - -function compare_table_yield(a: table_yield, b: table_yield) - { - local s: string; - local c: count; - for ( [s, c] in a ) - print [s, c] in b && a[s, c] == b[s, c]; - } - -function compare_record_yield(a: record_yield, b: record_yield) - { - print a$a == b$a && a$b == b$b; - } - -print "table of set"; -print table_of_set; -print ""; -print "table of vector"; -print table_of_vector; -print ""; -print "table of table"; -print table_of_table; -print ""; -print "table of record"; -print table_of_record; -print ""; - -compare_set_yield(table_of_set[13], lone_set_ctor); -compare_set_yield(table_of_set[5], inline_set_ctor); -compare_vector_yield(table_of_vector[13], lone_vector_ctor); -compare_vector_yield(table_of_vector[5], inline_vector_ctor); -compare_table_yield(table_of_table[13], lone_table_ctor); -compare_table_yield(table_of_table[5], inline_table_ctor); -compare_record_yield(table_of_record[13], lone_record_ctor); -compare_record_yield(table_of_record[5], inline_record_ctor); diff --git a/testing/btest/language/table-init-container-ctors.zeek b/testing/btest/language/table-init-container-ctors.zeek new file mode 100644 index 0000000000..6302ca83e1 --- /dev/null +++ b/testing/btest/language/table-init-container-ctors.zeek @@ -0,0 +1,95 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +# The various container constructor expressions should work in table +# initialization lists (as yields). + +type set_yield: set[string, count]; +type vector_yield: vector of count; +type table_yield: table[string, count] of count; +type record_yield: record { + a: count; + b: string; +}; + +global lone_set_ctor: set_yield = set(["foo", 1], ["bar", 2]); +global lone_vector_ctor: vector_yield = vector(1, 2); +global lone_table_ctor: table_yield = table(["foo", 1] = 1, ["bar", 2] = 2); +global lone_record_ctor: record_yield = record($a=1, $b="foo"); + +global table_of_set: table[count] of set_yield = { + [13] = lone_set_ctor, + [5] = set(["bah", 3], ["baz", 4]), +}; + +global table_of_vector: table[count] of vector_yield = { + [13] = lone_vector_ctor, + [5] = vector(3, 4), +}; + +global table_of_table: table[count] of table_yield = { + [13] = lone_table_ctor, + [5] = table(["bah", 3] = 3, ["baz", 4] = 4), +}; + +global table_of_record: table[count] of record_yield = { + [13] = lone_record_ctor, + [5] = record($a=2, $b="bar"), +}; + +# Just copying the inline ctors used in the table initializer lists here +# for later comparisons. 
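#
# (Hedged aside, not taken from the test file: the ctor-as-yield pattern in its
#  smallest form, using a made-up name:
#
#     global tv: table[count] of vector of count = { [1] = vector(1, 2, 3) };
#     # tv[1][2] == 3
#
#  which is what the table_of_* initializers above exercise at scale.)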
+global inline_set_ctor: set_yield = set(["bah", 3], ["baz", 4]); +global inline_vector_ctor: vector_yield = vector(3, 4); +global inline_table_ctor: table_yield = table(["bah", 3] = 3, ["baz", 4] = 4); +global inline_record_ctor: record_yield = record($a=2, $b="bar"); + +function compare_set_yield(a: set_yield, b: set_yield) + { + local s: string; + local c: count; + for ( [s, c] in a ) + print [s, c] in b; + } + +function compare_vector_yield(a: vector_yield, b: vector_yield) + { + local c: count; + for ( c in a ) + print a[c] == b[c]; + } + +function compare_table_yield(a: table_yield, b: table_yield) + { + local s: string; + local c: count; + for ( [s, c] in a ) + print [s, c] in b && a[s, c] == b[s, c]; + } + +function compare_record_yield(a: record_yield, b: record_yield) + { + print a$a == b$a && a$b == b$b; + } + +print "table of set"; +print table_of_set; +print ""; +print "table of vector"; +print table_of_vector; +print ""; +print "table of table"; +print table_of_table; +print ""; +print "table of record"; +print table_of_record; +print ""; + +compare_set_yield(table_of_set[13], lone_set_ctor); +compare_set_yield(table_of_set[5], inline_set_ctor); +compare_vector_yield(table_of_vector[13], lone_vector_ctor); +compare_vector_yield(table_of_vector[5], inline_vector_ctor); +compare_table_yield(table_of_table[13], lone_table_ctor); +compare_table_yield(table_of_table[5], inline_table_ctor); +compare_record_yield(table_of_record[13], lone_record_ctor); +compare_record_yield(table_of_record[5], inline_record_ctor); diff --git a/testing/btest/language/table-init-record-idx.bro b/testing/btest/language/table-init-record-idx.bro deleted file mode 100644 index db9716dc42..0000000000 --- a/testing/btest/language/table-init-record-idx.bro +++ /dev/null @@ -1,216 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -# Record constructors should work in table initializers - -type r: record { - a: string; - b: count; -}; - -global a: r = [$a="foo", $b=1]; -global b: r = [$a="foo", $b=2]; -global c: r = [$a="bar", $b=3]; -global d: r = [$a="bar", $b=4]; -global e: r = [$a="baz", $b=5]; -global f: r = [$a="baz", $b=6]; - -global foo: table[r] of count = { - [a] = 1, - [record($a="foo", $b=2)] = 2, - [[$a="bar", $b=3]] = 3, -}; - -foo[d] = 4; -foo[[$a="baz", $b=5]] = 5; -foo[record($a="baz", $b=6)] = 6; - -print "following should all be true..."; - -print a in foo; -print b in foo; -print c in foo; -print d in foo; -print e in foo; -print f in foo; - -print [$a="foo", $b=1] in foo; -print record($a="foo", $b=1) in foo; - -print foo[a]; -print foo[[$a="foo", $b=1]]; -print foo[record($a="foo", $b=1)]; - -print "following should all be false..."; - -local bah: r = [$a="bah", $b=0]; - -print bah in foo; -print [$a="bah", $b=0] in foo; -print record($a="bah", $b=0) in foo; - -print "now here's the foo table..."; - -print foo; - -# @TEST-START-NEXT - -# They can be part of a compound index type, too... 
- -type r: record { - a: string; - b: count; -}; - -global a: r = [$a="foo", $b=1]; -global b: r = [$a="foo", $b=2]; -global c: r = [$a="bar", $b=3]; -global d: r = [$a="bar", $b=4]; -global e: r = [$a="baz", $b=5]; -global f: r = [$a="baz", $b=6]; - -global foo: table[r, count] of count = { - [a, 1] = 1, - [record($a="foo", $b=2), 2] = 2, - [[$a="bar", $b=3], 3] = 3, -}; - -foo[d, 4] = 4; -foo[[$a="baz", $b=5], 5] = 5; -foo[record($a="baz", $b=6), 6] = 6; - -print "following should all be true..."; - -print [a, 1] in foo; -print [b, 2] in foo; -print [c, 3] in foo; -print [d, 4] in foo; -print [e, 5] in foo; -print [f, 6] in foo; - -print [[$a="foo", $b=1], 1] in foo; -print [record($a="foo", $b=1), 1] in foo; - -print foo[a, 1]; -print foo[[$a="foo", $b=1], 1]; -print foo[record($a="foo", $b=1), 1]; - -print "following should all be false..."; - -local bah: r = [$a="bah", $b=0]; - -print [bah, 0] in foo; -print [[$a="bah", $b=0], 0] in foo; -print [record($a="bah", $b=0), 0] in foo; - -print "now here's the foo table..."; - -print foo; - -# @TEST-START-NEXT - -# Now checking table() ctor versus { } initializer - -type r: record { - a: string; - b: count; -}; - -global a: r = [$a="foo", $b=1]; -global b: r = [$a="foo", $b=2]; -global c: r = [$a="bar", $b=3]; -global d: r = [$a="bar", $b=4]; -global e: r = [$a="baz", $b=5]; -global f: r = [$a="baz", $b=6]; - -global foo: table[r] of count = table( - [a] = 1, - [record($a="foo", $b=2)] = 2, - [[$a="bar", $b=3]] = 3 -); - -foo[d] = 4; -foo[[$a="baz", $b=5]] = 5; -foo[record($a="baz", $b=6)] = 6; - -print "following should all be true..."; - -print a in foo; -print b in foo; -print c in foo; -print d in foo; -print e in foo; -print f in foo; - -print [$a="foo", $b=1] in foo; -print record($a="foo", $b=1) in foo; - -print foo[a]; -print foo[[$a="foo", $b=1]]; -print foo[record($a="foo", $b=1)]; - -print "following should all be false..."; - -local bah: r = [$a="bah", $b=0]; - -print bah in foo; -print [$a="bah", $b=0] in foo; -print record($a="bah", $b=0) in foo; - -print "now here's the foo table..."; - -print foo; - -# @TEST-START-NEXT - -# Now checking table() ctor versus { } initializer for compound index - -type r: record { - a: string; - b: count; -}; - -global a: r = [$a="foo", $b=1]; -global b: r = [$a="foo", $b=2]; -global c: r = [$a="bar", $b=3]; -global d: r = [$a="bar", $b=4]; -global e: r = [$a="baz", $b=5]; -global f: r = [$a="baz", $b=6]; - -global foo: table[r, count] of count = table( - [a, 1] = 1, - [record($a="foo", $b=2), 2] = 2, - [[$a="bar", $b=3], 3] = 3 -); - -foo[d, 4] = 4; -foo[[$a="baz", $b=5], 5] = 5; -foo[record($a="baz", $b=6), 6] = 6; - -print "following should all be true..."; - -print [a, 1] in foo; -print [b, 2] in foo; -print [c, 3] in foo; -print [d, 4] in foo; -print [e, 5] in foo; -print [f, 6] in foo; - -print [[$a="foo", $b=1], 1] in foo; -print [record($a="foo", $b=1), 1] in foo; - -print foo[a, 1]; -print foo[[$a="foo", $b=1], 1]; -print foo[record($a="foo", $b=1), 1]; - -print "following should all be false..."; - -local bah: r = [$a="bah", $b=0]; - -print [bah, 0] in foo; -print [[$a="bah", $b=0], 0] in foo; -print [record($a="bah", $b=0), 0] in foo; - -print "now here's the foo table..."; - -print foo; diff --git a/testing/btest/language/table-init-record-idx.zeek b/testing/btest/language/table-init-record-idx.zeek new file mode 100644 index 0000000000..e3c1c4823c --- /dev/null +++ b/testing/btest/language/table-init-record-idx.zeek @@ -0,0 +1,216 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# 
@TEST-EXEC: btest-diff output + +# Record constructors should work in table initializers + +type r: record { + a: string; + b: count; +}; + +global a: r = [$a="foo", $b=1]; +global b: r = [$a="foo", $b=2]; +global c: r = [$a="bar", $b=3]; +global d: r = [$a="bar", $b=4]; +global e: r = [$a="baz", $b=5]; +global f: r = [$a="baz", $b=6]; + +global foo: table[r] of count = { + [a] = 1, + [record($a="foo", $b=2)] = 2, + [[$a="bar", $b=3]] = 3, +}; + +foo[d] = 4; +foo[[$a="baz", $b=5]] = 5; +foo[record($a="baz", $b=6)] = 6; + +print "following should all be true..."; + +print a in foo; +print b in foo; +print c in foo; +print d in foo; +print e in foo; +print f in foo; + +print [$a="foo", $b=1] in foo; +print record($a="foo", $b=1) in foo; + +print foo[a]; +print foo[[$a="foo", $b=1]]; +print foo[record($a="foo", $b=1)]; + +print "following should all be false..."; + +local bah: r = [$a="bah", $b=0]; + +print bah in foo; +print [$a="bah", $b=0] in foo; +print record($a="bah", $b=0) in foo; + +print "now here's the foo table..."; + +print foo; + +# @TEST-START-NEXT + +# They can be part of a compound index type, too... + +type r: record { + a: string; + b: count; +}; + +global a: r = [$a="foo", $b=1]; +global b: r = [$a="foo", $b=2]; +global c: r = [$a="bar", $b=3]; +global d: r = [$a="bar", $b=4]; +global e: r = [$a="baz", $b=5]; +global f: r = [$a="baz", $b=6]; + +global foo: table[r, count] of count = { + [a, 1] = 1, + [record($a="foo", $b=2), 2] = 2, + [[$a="bar", $b=3], 3] = 3, +}; + +foo[d, 4] = 4; +foo[[$a="baz", $b=5], 5] = 5; +foo[record($a="baz", $b=6), 6] = 6; + +print "following should all be true..."; + +print [a, 1] in foo; +print [b, 2] in foo; +print [c, 3] in foo; +print [d, 4] in foo; +print [e, 5] in foo; +print [f, 6] in foo; + +print [[$a="foo", $b=1], 1] in foo; +print [record($a="foo", $b=1), 1] in foo; + +print foo[a, 1]; +print foo[[$a="foo", $b=1], 1]; +print foo[record($a="foo", $b=1), 1]; + +print "following should all be false..."; + +local bah: r = [$a="bah", $b=0]; + +print [bah, 0] in foo; +print [[$a="bah", $b=0], 0] in foo; +print [record($a="bah", $b=0), 0] in foo; + +print "now here's the foo table..."; + +print foo; + +# @TEST-START-NEXT + +# Now checking table() ctor versus { } initializer + +type r: record { + a: string; + b: count; +}; + +global a: r = [$a="foo", $b=1]; +global b: r = [$a="foo", $b=2]; +global c: r = [$a="bar", $b=3]; +global d: r = [$a="bar", $b=4]; +global e: r = [$a="baz", $b=5]; +global f: r = [$a="baz", $b=6]; + +global foo: table[r] of count = table( + [a] = 1, + [record($a="foo", $b=2)] = 2, + [[$a="bar", $b=3]] = 3 +); + +foo[d] = 4; +foo[[$a="baz", $b=5]] = 5; +foo[record($a="baz", $b=6)] = 6; + +print "following should all be true..."; + +print a in foo; +print b in foo; +print c in foo; +print d in foo; +print e in foo; +print f in foo; + +print [$a="foo", $b=1] in foo; +print record($a="foo", $b=1) in foo; + +print foo[a]; +print foo[[$a="foo", $b=1]]; +print foo[record($a="foo", $b=1)]; + +print "following should all be false..."; + +local bah: r = [$a="bah", $b=0]; + +print bah in foo; +print [$a="bah", $b=0] in foo; +print record($a="bah", $b=0) in foo; + +print "now here's the foo table..."; + +print foo; + +# @TEST-START-NEXT + +# Now checking table() ctor versus { } initializer for compound index + +type r: record { + a: string; + b: count; +}; + +global a: r = [$a="foo", $b=1]; +global b: r = [$a="foo", $b=2]; +global c: r = [$a="bar", $b=3]; +global d: r = [$a="bar", $b=4]; +global e: r = [$a="baz", $b=5]; +global f: r 
= [$a="baz", $b=6]; + +global foo: table[r, count] of count = table( + [a, 1] = 1, + [record($a="foo", $b=2), 2] = 2, + [[$a="bar", $b=3], 3] = 3 +); + +foo[d, 4] = 4; +foo[[$a="baz", $b=5], 5] = 5; +foo[record($a="baz", $b=6), 6] = 6; + +print "following should all be true..."; + +print [a, 1] in foo; +print [b, 2] in foo; +print [c, 3] in foo; +print [d, 4] in foo; +print [e, 5] in foo; +print [f, 6] in foo; + +print [[$a="foo", $b=1], 1] in foo; +print [record($a="foo", $b=1), 1] in foo; + +print foo[a, 1]; +print foo[[$a="foo", $b=1], 1]; +print foo[record($a="foo", $b=1), 1]; + +print "following should all be false..."; + +local bah: r = [$a="bah", $b=0]; + +print [bah, 0] in foo; +print [[$a="bah", $b=0], 0] in foo; +print [record($a="bah", $b=0), 0] in foo; + +print "now here's the foo table..."; + +print foo; diff --git a/testing/btest/language/table-init.bro b/testing/btest/language/table-init.bro deleted file mode 100644 index 7419a50879..0000000000 --- a/testing/btest/language/table-init.bro +++ /dev/null @@ -1,20 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -global global_table: table[count] of string = { - [1] = "one", - [2] = "two" -} &default = "global table default"; - -event bro_init() - { - local local_table: table[count] of string = { - [3] = "three", - [4] = "four" - } &default = "local table default"; - - print global_table; - print global_table[0]; - print local_table; - print local_table[0]; - } diff --git a/testing/btest/language/table-init.zeek b/testing/btest/language/table-init.zeek new file mode 100644 index 0000000000..0a2514e0b9 --- /dev/null +++ b/testing/btest/language/table-init.zeek @@ -0,0 +1,20 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +global global_table: table[count] of string = { + [1] = "one", + [2] = "two" +} &default = "global table default"; + +event zeek_init() + { + local local_table: table[count] of string = { + [3] = "three", + [4] = "four" + } &default = "local table default"; + + print global_table; + print global_table[0]; + print local_table; + print local_table[0]; + } diff --git a/testing/btest/language/table-list-assign-type-check.zeek b/testing/btest/language/table-list-assign-type-check.zeek new file mode 100644 index 0000000000..74541aba87 --- /dev/null +++ b/testing/btest/language/table-list-assign-type-check.zeek @@ -0,0 +1,32 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output + +event zeek_init() + { + # This assignment should pass type-checking. + local service_table_good: table[string, count] of string = { + ["www", 80] = "Internal Web Server", + ["dns1", 53] = "Internal DNS 1", + ["dns2", 53] = "Internal DNS 2", + ["dhcp-for-wifi", 443] = "DHCP Management interface for WiFi" + }; + + # This assignment should fail type-checking due to yield mismatch. + local service_table_bad_yield: table[string, count] of count = { + ["www", 80] = "Internal Web Server", + ["dns1", 53] = "Internal DNS 1", + ["dns2", 53] = "Internal DNS 2", + ["dhcp-for-wifi", 443] = "DHCP Management interface for WiFi" + }; + + # This assignment should fail type-checking due to index mismatch. 
+ local service_table_bad_index: table[string, count] of string = { + ["www", "80"] = "Internal Web Server", + ["dns1", "53"] = "Internal DNS 1", + ["dns2", "53"] = "Internal DNS 2", + ["dhcp-for-wifi", "443"] = "DHCP Management interface for WiFi" + }; + + local test_set_good: set[string] = {"1", "2", "3"}; + local test_set_bad: set[string] = {1, 2, 3}; + } diff --git a/testing/btest/language/table-redef.bro b/testing/btest/language/table-redef.bro deleted file mode 100644 index 290610499f..0000000000 --- a/testing/btest/language/table-redef.bro +++ /dev/null @@ -1,26 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT > out -# @TEST-EXEC: btest-diff out - -const foo: table[string] of double &redef; - -# full (re)initialization -redef foo = { ["nope"] = 37.0 }; - -# full (re)initialization, discards "nope" index -redef foo = { ["abc"] = 42.0 }; - -# add elements -redef foo += { ["def"] = -42.0, ["ghi"] = 7.0 }; - -# remove elements from LHS based on indices shared with RHS -redef foo -= { ["ghi"] = 0.0 }; - -# RHS can be a table value -redef foo += table(["cool"] = 5.0, ["neat"] = 1.0); - -# Redef at a single index is allowed, same as += when RHS has overlapping index -redef foo["cool"] = 28.0; -redef foo["abc"] = 8.0; -redef foo += { ["def"] = 99.0 }; - -print foo; diff --git a/testing/btest/language/table-redef.zeek b/testing/btest/language/table-redef.zeek new file mode 100644 index 0000000000..51c4360044 --- /dev/null +++ b/testing/btest/language/table-redef.zeek @@ -0,0 +1,26 @@ +# @TEST-EXEC: zeek -b %INPUT > out +# @TEST-EXEC: btest-diff out + +const foo: table[string] of double &redef; + +# full (re)initialization +redef foo = { ["nope"] = 37.0 }; + +# full (re)initialization, discards "nope" index +redef foo = { ["abc"] = 42.0 }; + +# add elements +redef foo += { ["def"] = -42.0, ["ghi"] = 7.0 }; + +# remove elements from LHS based on indices shared with RHS +redef foo -= { ["ghi"] = 0.0 }; + +# RHS can be a table value +redef foo += table(["cool"] = 5.0, ["neat"] = 1.0); + +# Redef at a single index is allowed, same as += when RHS has overlapping index +redef foo["cool"] = 28.0; +redef foo["abc"] = 8.0; +redef foo += { ["def"] = 99.0 }; + +print foo; diff --git a/testing/btest/language/table-type-checking.bro b/testing/btest/language/table-type-checking.bro deleted file mode 100644 index f579a83d37..0000000000 --- a/testing/btest/language/table-type-checking.bro +++ /dev/null @@ -1,46 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -type MyTable: table[port] of count; - -# global, type deduction, named ctor -global gdn = MyTable(["zero"] = 0); # type clash in init - -# global, type explicit, named ctor -global gen: MyTable = MyTable(["one"] = 1); # type clash in init - -# global, type deduction, anon ctor -global gda = table(["two"] = 2); # fine -global gda2 = MyTable([2/tcp] = 2); # fine -event bro_init() - { - gda = gda2; # type clash - } - -# global, type explicit, anon ctor -global gea: MyTable = table(["three"] = 3); # type clash - -# local, type deduction, named ctor -event bro_init() - { - local ldn = MyTable(["thousand"] = 1000); # type clash - } - -# local, type explicit, named ctor -event bro_init() - { - local len: MyTable = MyTable(["thousand-one"] = 1001); # type clash - } - -# local, type deduction, anon ctor -event bro_init() - { - local lda = table(["thousand-two"] = 1002); # fine - lda = MyTable(["thousand-two"] = 1002); # type clash - } - -# local, type explicit, anon ctor -event bro_init() 
- { - local lea: MyTable = table(["thousand-three"] = 1003); # type clash - } diff --git a/testing/btest/language/table-type-checking.zeek b/testing/btest/language/table-type-checking.zeek new file mode 100644 index 0000000000..faefaf3a60 --- /dev/null +++ b/testing/btest/language/table-type-checking.zeek @@ -0,0 +1,46 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +type MyTable: table[port] of count; + +# global, type deduction, named ctor +global gdn = MyTable(["zero"] = 0); # type clash in init + +# global, type explicit, named ctor +global gen: MyTable = MyTable(["one"] = 1); # type clash in init + +# global, type deduction, anon ctor +global gda = table(["two"] = 2); # fine +global gda2 = MyTable([2/tcp] = 2); # fine +event zeek_init() + { + gda = gda2; # type clash + } + +# global, type explicit, anon ctor +global gea: MyTable = table(["three"] = 3); # type clash + +# local, type deduction, named ctor +event zeek_init() + { + local ldn = MyTable(["thousand"] = 1000); # type clash + } + +# local, type explicit, named ctor +event zeek_init() + { + local len: MyTable = MyTable(["thousand-one"] = 1001); # type clash + } + +# local, type deduction, anon ctor +event zeek_init() + { + local lda = table(["thousand-two"] = 1002); # fine + lda = MyTable(["thousand-two"] = 1002); # type clash + } + +# local, type explicit, anon ctor +event zeek_init() + { + local lea: MyTable = table(["thousand-three"] = 1003); # type clash + } diff --git a/testing/btest/language/table.bro b/testing/btest/language/table.bro deleted file mode 100644 index 3c8e8db280..0000000000 --- a/testing/btest/language/table.bro +++ /dev/null @@ -1,163 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); - } - -# Note: only global tables can be initialized with curly braces when the table -# type is not explicitly specified -global tg1 = { [1] = "type", [2] = "inference", [3] = "test" }; - -event bro_init() -{ - local t1: table[count] of string = table( [5] = "test", [0] = "example" ); - local t2: table[count] of string = table(); - local t3: table[count] of string; - local t4 = table( [1] = "type inference" ); - local t5: table[count] of string = { [1] = "curly", [3] = "braces" }; - local t6: table[port, string, bool] of string = table( - [1/tcp, "test", T] = "test1", - [2/tcp, "example", F] = "test2" ); - local t7: table[port, string, bool] of string = table(); - local t8: table[port, string, bool] of string; - local t9 = table( [8/tcp, "type inference", T] = "this" ); - local t10: table[port, string, bool] of string = { - [10/udp, "curly", F] = "first", - [11/udp, "braces", T] = "second" }; - local t11: table[conn_id, bool] of count = { - [ [$orig_h=1.1.1.1, $orig_p=1234/tcp, - $resp_h=2.2.2.2, $resp_p=4321/tcp], T ] = 42 }; - - # Type inference tests - - test_case( "type inference", type_name(t4) == "table[count] of string" ); - test_case( "type inference", type_name(t9) == "table[port,string,bool] of string" ); - test_case( "type inference", type_name(tg1) == "table[count] of string" ); - - # Test the size of each table - - test_case( "cardinality", |t1| == 2 ); - test_case( "cardinality", |t2| == 0 ); - test_case( "cardinality", |t3| == 0 ); - test_case( "cardinality", |t4| == 1 ); - test_case( "cardinality", |t5| == 2 ); - test_case( "cardinality", |t6| == 2 ); - test_case( "cardinality", |t7| == 0 ); - test_case( "cardinality", |t8| == 0 ); - test_case( "cardinality", |t9| == 1 ); - test_case( "cardinality", |t10| == 2 ); - test_case( "cardinality", |t11| == 1 ); - test_case( "cardinality", |tg1| == 3 ); - - # Test iterating over each table - - local ct: count; - ct = 0; - for ( c in t1 ) - { - if ( type_name(c) != "count" ) - print "Error: wrong index type"; - if ( type_name(t1[c]) != "string" ) - print "Error: wrong table type"; - ++ct; - } - test_case( "iterate over table", ct == 2 ); - - ct = 0; - for ( c in t2 ) - { - ++ct; - } - test_case( "iterate over table", ct == 0 ); - - ct = 0; - for ( c in t3 ) - { - ++ct; - } - test_case( "iterate over table", ct == 0 ); - - ct = 0; - for ( [c1, c2, c3] in t6 ) - { - ++ct; - } - test_case( "iterate over table", ct == 2 ); - - ct = 0; - for ( [c1, c2, c3] in t7 ) - { - ++ct; - } - test_case( "iterate over table", ct == 0 ); - - # Test overwriting elements in each table (Note: cannot overwrite - # elements in tables of multiple types) - - t1[5] = "overwrite"; - test_case( "overwrite element", |t1| == 2 && t1[5] == "overwrite" ); - - # Test adding elements to each table (Note: cannot add elements to - # tables of multiple types) - - t1[1] = "added"; - test_case( "add element", |t1| == 3 ); - test_case( "in operator", 1 in t1 ); - - t2[11] = "another"; - test_case( "add element", |t2| == 1 ); - t2[0] = "test"; - test_case( "add element", |t2| == 2 ); - test_case( "in operator", 11 in t2 ); - test_case( "in operator", 0 in t2 ); - - t3[3] = "foo"; - test_case( "add element", |t3| == 1 ); - test_case( "in operator", 3 in t3 ); - - t4[4] = "local"; - test_case( "add element", |t4| == 2 ); - test_case( "in operator", 4 in t4 ); - - t5[10] = "local2"; - test_case( "add element", |t5| == 3 ); - test_case( "in operator", 10 in t5 ); - - local cid = [$orig_h=1.1.1.1, $orig_p=1234/tcp, - $resp_h=2.2.2.2, $resp_p=4321/tcp]; - 
t11[[$orig_h=[::1], $orig_p=3/tcp, $resp_h=[::2], $resp_p=3/tcp], F] = 3; - test_case( "composite index add element", |t11| == 2 ); - test_case( "composite index in operator", [cid, T] in t11 ); - test_case( "composite index in operator", [[$orig_h=[::1], $orig_p=3/tcp, $resp_h=[::2], $resp_p=3/tcp], F] in t11 ); - - # Test removing elements from each table (Note: cannot remove elements - # from tables of multiple types) - - delete t1[0]; - delete t1[17]; # element does not exist (nothing happens) - test_case( "remove element", |t1| == 2 ); - test_case( "!in operator", 0 !in t1 ); - - delete t2[0]; - test_case( "remove element", |t2| == 1 ); - test_case( "!in operator", 0 !in t2 ); - - delete t3[3]; - test_case( "remove element", |t3| == 0 ); - test_case( "!in operator", 3 !in t3 ); - - delete t4[1]; - test_case( "remove element", |t4| == 1 ); - test_case( "!in operator", 1 !in t4 ); - - delete t5[1]; - test_case( "remove element", |t5| == 2 ); - test_case( "!in operator", 1 !in t5 ); - - delete t11[cid, T]; - test_case( "remove element", |t11| == 1 ); - test_case( "!in operator", [cid, T] !in t11 ); -} - diff --git a/testing/btest/language/table.zeek b/testing/btest/language/table.zeek new file mode 100644 index 0000000000..cb26b5c17b --- /dev/null +++ b/testing/btest/language/table.zeek @@ -0,0 +1,163 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); + } + +# Note: only global tables can be initialized with curly braces when the table +# type is not explicitly specified +global tg1 = { [1] = "type", [2] = "inference", [3] = "test" }; + +event zeek_init() +{ + local t1: table[count] of string = table( [5] = "test", [0] = "example" ); + local t2: table[count] of string = table(); + local t3: table[count] of string; + local t4 = table( [1] = "type inference" ); + local t5: table[count] of string = { [1] = "curly", [3] = "braces" }; + local t6: table[port, string, bool] of string = table( + [1/tcp, "test", T] = "test1", + [2/tcp, "example", F] = "test2" ); + local t7: table[port, string, bool] of string = table(); + local t8: table[port, string, bool] of string; + local t9 = table( [8/tcp, "type inference", T] = "this" ); + local t10: table[port, string, bool] of string = { + [10/udp, "curly", F] = "first", + [11/udp, "braces", T] = "second" }; + local t11: table[conn_id, bool] of count = { + [ [$orig_h=1.1.1.1, $orig_p=1234/tcp, + $resp_h=2.2.2.2, $resp_p=4321/tcp], T ] = 42 }; + + # Type inference tests + + test_case( "type inference", type_name(t4) == "table[count] of string" ); + test_case( "type inference", type_name(t9) == "table[port,string,bool] of string" ); + test_case( "type inference", type_name(tg1) == "table[count] of string" ); + + # Test the size of each table + + test_case( "cardinality", |t1| == 2 ); + test_case( "cardinality", |t2| == 0 ); + test_case( "cardinality", |t3| == 0 ); + test_case( "cardinality", |t4| == 1 ); + test_case( "cardinality", |t5| == 2 ); + test_case( "cardinality", |t6| == 2 ); + test_case( "cardinality", |t7| == 0 ); + test_case( "cardinality", |t8| == 0 ); + test_case( "cardinality", |t9| == 1 ); + test_case( "cardinality", |t10| == 2 ); + test_case( "cardinality", |t11| == 1 ); + test_case( "cardinality", |tg1| == 3 ); + + # Test iterating over each table + + local ct: count; + ct = 0; + for ( c in t1 ) + { + if ( type_name(c) != "count" ) + print "Error: wrong index type"; + if ( type_name(t1[c]) != "string" ) + print 
"Error: wrong table type"; + ++ct; + } + test_case( "iterate over table", ct == 2 ); + + ct = 0; + for ( c in t2 ) + { + ++ct; + } + test_case( "iterate over table", ct == 0 ); + + ct = 0; + for ( c in t3 ) + { + ++ct; + } + test_case( "iterate over table", ct == 0 ); + + ct = 0; + for ( [c1, c2, c3] in t6 ) + { + ++ct; + } + test_case( "iterate over table", ct == 2 ); + + ct = 0; + for ( [c1, c2, c3] in t7 ) + { + ++ct; + } + test_case( "iterate over table", ct == 0 ); + + # Test overwriting elements in each table (Note: cannot overwrite + # elements in tables of multiple types) + + t1[5] = "overwrite"; + test_case( "overwrite element", |t1| == 2 && t1[5] == "overwrite" ); + + # Test adding elements to each table (Note: cannot add elements to + # tables of multiple types) + + t1[1] = "added"; + test_case( "add element", |t1| == 3 ); + test_case( "in operator", 1 in t1 ); + + t2[11] = "another"; + test_case( "add element", |t2| == 1 ); + t2[0] = "test"; + test_case( "add element", |t2| == 2 ); + test_case( "in operator", 11 in t2 ); + test_case( "in operator", 0 in t2 ); + + t3[3] = "foo"; + test_case( "add element", |t3| == 1 ); + test_case( "in operator", 3 in t3 ); + + t4[4] = "local"; + test_case( "add element", |t4| == 2 ); + test_case( "in operator", 4 in t4 ); + + t5[10] = "local2"; + test_case( "add element", |t5| == 3 ); + test_case( "in operator", 10 in t5 ); + + local cid = [$orig_h=1.1.1.1, $orig_p=1234/tcp, + $resp_h=2.2.2.2, $resp_p=4321/tcp]; + t11[[$orig_h=[::1], $orig_p=3/tcp, $resp_h=[::2], $resp_p=3/tcp], F] = 3; + test_case( "composite index add element", |t11| == 2 ); + test_case( "composite index in operator", [cid, T] in t11 ); + test_case( "composite index in operator", [[$orig_h=[::1], $orig_p=3/tcp, $resp_h=[::2], $resp_p=3/tcp], F] in t11 ); + + # Test removing elements from each table (Note: cannot remove elements + # from tables of multiple types) + + delete t1[0]; + delete t1[17]; # element does not exist (nothing happens) + test_case( "remove element", |t1| == 2 ); + test_case( "!in operator", 0 !in t1 ); + + delete t2[0]; + test_case( "remove element", |t2| == 1 ); + test_case( "!in operator", 0 !in t2 ); + + delete t3[3]; + test_case( "remove element", |t3| == 0 ); + test_case( "!in operator", 3 !in t3 ); + + delete t4[1]; + test_case( "remove element", |t4| == 1 ); + test_case( "!in operator", 1 !in t4 ); + + delete t5[1]; + test_case( "remove element", |t5| == 2 ); + test_case( "!in operator", 1 !in t5 ); + + delete t11[cid, T]; + test_case( "remove element", |t11| == 1 ); + test_case( "!in operator", [cid, T] !in t11 ); +} + diff --git a/testing/btest/language/ternary-record-mismatch.bro b/testing/btest/language/ternary-record-mismatch.bro deleted file mode 100644 index 068952a69f..0000000000 --- a/testing/btest/language/ternary-record-mismatch.bro +++ /dev/null @@ -1,16 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath" btest-diff out - -type MyRecord: record { - a: string; - b: count; - c: bool &default = T; -}; - -event bro_init() - { - local rec: MyRecord = record($a = "a string", $b = 6); - local rec2: MyRecord = (F) ? 
MyRecord($a = "a string", $b = 6) : - record($a = "a different string", $b = 7); - rec2$c = F; - } diff --git a/testing/btest/language/ternary-record-mismatch.zeek b/testing/btest/language/ternary-record-mismatch.zeek new file mode 100644 index 0000000000..1b9796a799 --- /dev/null +++ b/testing/btest/language/ternary-record-mismatch.zeek @@ -0,0 +1,16 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath" btest-diff out + +type MyRecord: record { + a: string; + b: count; + c: bool &default = T; +}; + +event zeek_init() + { + local rec: MyRecord = record($a = "a string", $b = 6); + local rec2: MyRecord = (F) ? MyRecord($a = "a string", $b = 6) : + record($a = "a different string", $b = 7); + rec2$c = F; + } diff --git a/testing/btest/language/time.bro b/testing/btest/language/time.bro deleted file mode 100644 index dd4b6336fe..0000000000 --- a/testing/btest/language/time.bro +++ /dev/null @@ -1,33 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - - -event bro_init() -{ - local t1: time = current_time(); - local t2: time = t1 + 3 sec; - local t3: time = t2 - 10 sec; - local t4: time = t1; - local t5: time = double_to_time(1234567890); - local t6 = current_time(); - - # Type inference test - - test_case( "type inference", type_name(t6) == "time" ); - - # Operator tests - - test_case( "add interval", t1 < t2 ); - test_case( "subtract interval", t1 > t3 ); - test_case( "inequality", t1 != t3 ); - test_case( "equality", t1 == t4 ); - test_case( "subtract time", t2 - t1 == 3sec); - test_case( "size operator", |t5| == 1234567890.0 ); - -} - diff --git a/testing/btest/language/time.zeek b/testing/btest/language/time.zeek new file mode 100644 index 0000000000..685b011217 --- /dev/null +++ b/testing/btest/language/time.zeek @@ -0,0 +1,33 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + +event zeek_init() +{ + local t1: time = current_time(); + local t2: time = t1 + 3 sec; + local t3: time = t2 - 10 sec; + local t4: time = t1; + local t5: time = double_to_time(1234567890); + local t6 = current_time(); + + # Type inference test + + test_case( "type inference", type_name(t6) == "time" ); + + # Operator tests + + test_case( "add interval", t1 < t2 ); + test_case( "subtract interval", t1 > t3 ); + test_case( "inequality", t1 != t3 ); + test_case( "equality", t1 == t4 ); + test_case( "subtract time", t2 - t1 == 3sec); + test_case( "size operator", |t5| == 1234567890.0 ); + +} + diff --git a/testing/btest/language/timeout.bro b/testing/btest/language/timeout.bro deleted file mode 100644 index 632ab18b5f..0000000000 --- a/testing/btest/language/timeout.bro +++ /dev/null @@ -1,19 +0,0 @@ -# @TEST-EXEC: unset BRO_DNS_FAKE && bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - - -event bro_init() -{ - local h1: addr = 1.2.3.4; - - when ( local h1name = lookup_addr(h1) ) - { - print "lookup successful"; - } - timeout 3 secs - { - print "timeout"; - } - -} - diff --git a/testing/btest/language/timeout.zeek b/testing/btest/language/timeout.zeek new file mode 100644 index 0000000000..b23839cd53 --- /dev/null +++ b/testing/btest/language/timeout.zeek @@ -0,0 +1,19 @@ +# @TEST-EXEC: unset ZEEK_DNS_FAKE && unset BRO_DNS_FAKE && zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + + +event zeek_init() +{ + local h1: addr = 1.2.3.4; + + when ( local h1name = lookup_addr(h1) ) + { + print "lookup successful"; + } + timeout 3 secs + { + print "timeout"; + } + +} + diff --git a/testing/btest/language/type-cast-any.bro b/testing/btest/language/type-cast-any.bro deleted file mode 100644 index ddd4ea2dbe..0000000000 --- a/testing/btest/language/type-cast-any.bro +++ /dev/null @@ -1,45 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output - -type X: record { - a: addr; - b: port; -}; - -function cast_to_string(a: any, b: string) - { - local P = (a as string); - local Cmp = (P == b); - print a, P, P is string, fmt("%s==%s => %s", b, P, Cmp); - } - -function cast_to_count(a: any, b: count) - { - local P = (a as count); - local Cmp = (P == b); - print a, P, P is count, fmt("%s==%s => %s", b, P, Cmp); - } - -function cast_to_X(a: any, b: X) - { - local P = (a as X); - local Cmp = (P$a == b$a && P$b == b$b); - print a, P, P is X, fmt("%s==%s => %s", b, P, Cmp); - } - -event bro_init() - { - local x: X; - x = [$a = 1.2.3.4, $b=1947/tcp]; - - cast_to_string("Foo", "Foo"); - cast_to_string("Foo", "Bar"); - - cast_to_count(42, 42); - cast_to_count(42, 21); - - cast_to_X(x, [$a=1.2.3.4, $b=1947/tcp]); - cast_to_X(x, [$a=2.3.4.5, $b=1947/tcp]); - } - - diff --git a/testing/btest/language/type-cast-any.zeek b/testing/btest/language/type-cast-any.zeek new file mode 100644 index 0000000000..f79e8abcce --- /dev/null +++ b/testing/btest/language/type-cast-any.zeek @@ -0,0 +1,45 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output + +type X: record { + a: addr; + b: port; +}; + +function cast_to_string(a: any, b: string) + { + local P = (a as string); + local Cmp = (P == b); + print a, P, P is string, fmt("%s==%s => %s", b, P, Cmp); + } + +function cast_to_count(a: any, b: count) + { + local P = (a as count); + local Cmp = (P == b); + print a, P, P is count, fmt("%s==%s => %s", b, P, Cmp); + } + +function cast_to_X(a: any, b: X) + { + 
local P = (a as X); + local Cmp = (P$a == b$a && P$b == b$b); + print a, P, P is X, fmt("%s==%s => %s", b, P, Cmp); + } + +event zeek_init() + { + local x: X; + x = [$a = 1.2.3.4, $b=1947/tcp]; + + cast_to_string("Foo", "Foo"); + cast_to_string("Foo", "Bar"); + + cast_to_count(42, 42); + cast_to_count(42, 21); + + cast_to_X(x, [$a=1.2.3.4, $b=1947/tcp]); + cast_to_X(x, [$a=2.3.4.5, $b=1947/tcp]); + } + + diff --git a/testing/btest/language/type-cast-error-dynamic.bro b/testing/btest/language/type-cast-error-dynamic.bro deleted file mode 100644 index c18548b0c4..0000000000 --- a/testing/btest/language/type-cast-error-dynamic.bro +++ /dev/null @@ -1,32 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output - -type X: record { - a: addr; - b: port; -}; - -function cast_to_string(a: any) - { - print a as string; - } - -event bro_init() - { - cast_to_string(42); - } - -event bro_init() - { - local x: X; - x = [$a = 1.2.3.4, $b=1947/tcp]; - cast_to_string(x); - } - -event bro_init() - { - print "data is string", Broker::Data() is string; - cast_to_string(Broker::Data()); - } - - diff --git a/testing/btest/language/type-cast-error-dynamic.zeek b/testing/btest/language/type-cast-error-dynamic.zeek new file mode 100644 index 0000000000..1edf9e3d2a --- /dev/null +++ b/testing/btest/language/type-cast-error-dynamic.zeek @@ -0,0 +1,32 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output + +type X: record { + a: addr; + b: port; +}; + +function cast_to_string(a: any) + { + print a as string; + } + +event zeek_init() + { + cast_to_string(42); + } + +event zeek_init() + { + local x: X; + x = [$a = 1.2.3.4, $b=1947/tcp]; + cast_to_string(x); + } + +event zeek_init() + { + print "data is string", Broker::Data() is string; + cast_to_string(Broker::Data()); + } + + diff --git a/testing/btest/language/type-cast-error-static.bro b/testing/btest/language/type-cast-error-static.bro deleted file mode 100644 index 3533fef3cb..0000000000 --- a/testing/btest/language/type-cast-error-static.bro +++ /dev/null @@ -1,18 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output - -type X: record { - a: addr; - b: port; -}; - -event bro_init() - { - local x: X; - x = [$a = 1.2.3.4, $b=1947/tcp]; - - print "string" as count; - print "string" as X; - } - - diff --git a/testing/btest/language/type-cast-error-static.zeek b/testing/btest/language/type-cast-error-static.zeek new file mode 100644 index 0000000000..05ab92e09e --- /dev/null +++ b/testing/btest/language/type-cast-error-static.zeek @@ -0,0 +1,18 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output + +type X: record { + a: addr; + b: port; +}; + +event zeek_init() + { + local x: X; + x = [$a = 1.2.3.4, $b=1947/tcp]; + + print "string" as count; + print "string" as X; + } + + diff --git a/testing/btest/language/type-cast-same.bro b/testing/btest/language/type-cast-same.bro deleted file mode 100644 index 93c3b633fa..0000000000 --- a/testing/btest/language/type-cast-same.bro +++ /dev/null @@ -1,21 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output - -type X: record { - a: addr; - b: port; -}; - -event bro_init() - { - local x: X; - x = [$a = 1.2.3.4, $b=1947/tcp]; - - local s = 
"sTriNg" as string; - local y = x as X; - - print s, s is string; - print y, y is X; - } - - diff --git a/testing/btest/language/type-cast-same.zeek b/testing/btest/language/type-cast-same.zeek new file mode 100644 index 0000000000..226eb05b17 --- /dev/null +++ b/testing/btest/language/type-cast-same.zeek @@ -0,0 +1,21 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output + +type X: record { + a: addr; + b: port; +}; + +event zeek_init() + { + local x: X; + x = [$a = 1.2.3.4, $b=1947/tcp]; + + local s = "sTriNg" as string; + local y = x as X; + + print s, s is string; + print y, y is X; + } + + diff --git a/testing/btest/language/type-check-any.bro b/testing/btest/language/type-check-any.bro deleted file mode 100644 index 5d882c8997..0000000000 --- a/testing/btest/language/type-check-any.bro +++ /dev/null @@ -1,24 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output - -type X: record { - a: addr; - b: port; -}; - -function check(a: any) - { - print a, a is string, a is count, a is X; - } - -event bro_init() - { - local x: X; - x = [$a = 1.2.3.4, $b=1947/tcp]; - - check("Foo"); - check(1); - check(x); - } - - diff --git a/testing/btest/language/type-check-any.zeek b/testing/btest/language/type-check-any.zeek new file mode 100644 index 0000000000..95047c8de1 --- /dev/null +++ b/testing/btest/language/type-check-any.zeek @@ -0,0 +1,24 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output + +type X: record { + a: addr; + b: port; +}; + +function check(a: any) + { + print a, a is string, a is count, a is X; + } + +event zeek_init() + { + local x: X; + x = [$a = 1.2.3.4, $b=1947/tcp]; + + check("Foo"); + check(1); + check(x); + } + + diff --git a/testing/btest/language/type-check-vector.bro b/testing/btest/language/type-check-vector.bro deleted file mode 100644 index 461fb312fb..0000000000 --- a/testing/btest/language/type-check-vector.bro +++ /dev/null @@ -1,18 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output - -type myvec: vector of any; - -function check(a: any) - { - print a is myvec; - print a as myvec; - } - -event bro_init() - { - local v = myvec("one", "two", 3); - check(v); - local sv = string_vec("one", "two", "three"); - check(sv); - } diff --git a/testing/btest/language/type-check-vector.zeek b/testing/btest/language/type-check-vector.zeek new file mode 100644 index 0000000000..b7ea42241e --- /dev/null +++ b/testing/btest/language/type-check-vector.zeek @@ -0,0 +1,18 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output + +type myvec: vector of any; + +function check(a: any) + { + print a is myvec; + print a as myvec; + } + +event zeek_init() + { + local v = myvec("one", "two", 3); + check(v); + local sv = string_vec("one", "two", "three"); + check(sv); + } diff --git a/testing/btest/language/type-coerce-numerics.zeek b/testing/btest/language/type-coerce-numerics.zeek new file mode 100644 index 0000000000..996326361b --- /dev/null +++ b/testing/btest/language/type-coerce-numerics.zeek @@ -0,0 +1,132 @@ +# @TEST-EXEC: zeek -b first_set.zeek >first_set.out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff first_set.out + +# @TEST-EXEC-FAIL: zeek -b 
double_convert_failure1.zeek >double_convert_failure1.out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff double_convert_failure1.out + +# @TEST-EXEC-FAIL: zeek -b double_convert_failure2.zeek >double_convert_failure2.out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff double_convert_failure2.out + +# @TEST-EXEC-FAIL: zeek -b int_convert_failure.zeek >int_convert_failure.out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff int_convert_failure.out + +# @TEST-EXEC: zeek -b vectors.zeek >vectors.out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff vectors.out + +@TEST-START-FILE first_set.zeek +type myrecord : record { + ii: int &optional; + cc: count &optional; + dd: double &optional; +}; + +# Allow coercion from count values to int +global globalint: myrecord &redef; +redef globalint = [$ii = 2]; + +# All of these cases should succeed +event zeek_init() + { + # Allow coercion from count values to int + local intconvert1 = myrecord($ii = 3); + print(intconvert1$ii); + print(type_name(intconvert1$ii)); + + local intconvert2: myrecord = record($ii = 4); + print(intconvert2$ii); + print(type_name(intconvert2$ii)); + + local intconvert3: myrecord = [$ii = 5]; + print(intconvert3$ii); + print(type_name(intconvert3$ii)); + + local intconvert4: myrecord; + intconvert4$ii = 6; + print(intconvert4$ii); + print(type_name(intconvert4$ii)); + + # Convert from count/integer values into doubles + local doubleconvert1 = myrecord($dd = 7); + print(doubleconvert1$dd); + print(type_name(doubleconvert1$dd)); + + local doubleconvert2 = myrecord($dd = -5); + print(doubleconvert2$dd); + print(type_name(doubleconvert2$dd)); + } + +event zeek_init() + { + # This value is INT64_MAX+1, which overflows a signed integer and + # throws an error + local overflow = myrecord($ii = 9223372036854775808); + } +@TEST-END-FILE + +@TEST-START-FILE double_convert_failure1.zeek +type myrecord : record { + cc: count &optional; +}; + +event zeek_init() + { + local convert = myrecord($cc = 5.0); + } +@TEST-END-FILE + +@TEST-START-FILE double_convert_failure2.zeek +type myrecord : record { + cc: count &optional; +}; + +event zeek_init() + { + local convert = myrecord($cc = -5.0); + } +@TEST-END-FILE + +@TEST-START-FILE int_convert_failure.zeek +type myrecord : record { + cc: count &optional; +}; + +event zeek_init() + { + local convert = myrecord($cc = -5); + } +@TEST-END-FILE + +@TEST-START-FILE vectors.zeek +event zeek_init() + { + local c1 : vector of count = { 1 , 2 }; + local c2 : vector of count = { 3 , 4 }; + local c3 = c1 + c2; + print type_name(c1); + print type_name(c2); + print type_name(c3); + print c1; + print c2; + print c3; + + local i1 : vector of int = { 1, 2 }; + local i2 : vector of int = { 3, 4 }; + local i3 = i1 + i2; + print type_name(i1); + print type_name(i2); + print type_name(i3); + print i1; + print i2; + print i3; + + local d1 : vector of double = { 1, 2 }; + local d2 : vector of double = { 3, 4 }; + local d3 = d1 + d2; + print type_name(d1); + print type_name(d2); + print type_name(d3); + print d1; + print d2; + print d3; + } +@TEST-END-FILE diff --git a/testing/btest/language/type-type-error.bro b/testing/btest/language/type-type-error.bro deleted file mode 100644 index 047e4b34ef..0000000000 --- a/testing/btest/language/type-type-error.bro +++ /dev/null @@ -1,14 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath
btest-diff .stderr - -type r: record { - a: string; -}; - -event bro_init() - { - # This should generate a parse error indicating that the type identifier - # is incorrectly used in an expression expecting a real value and not - # a value of type TypeType. - print r$a; - } diff --git a/testing/btest/language/type-type-error.zeek b/testing/btest/language/type-type-error.zeek new file mode 100644 index 0000000000..586b181ec5 --- /dev/null +++ b/testing/btest/language/type-type-error.zeek @@ -0,0 +1,14 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr + +type r: record { + a: string; +}; + +event zeek_init() + { + # This should generate a parse error indicating that the type identifier + # is incorrectly used in an expression expecting a real value and not + # a value of type TypeType. + print r$a; + } diff --git a/testing/btest/language/undefined-delete-field.bro b/testing/btest/language/undefined-delete-field.bro deleted file mode 100644 index 8271f016fe..0000000000 --- a/testing/btest/language/undefined-delete-field.bro +++ /dev/null @@ -1,15 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 || echo $? >>output -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output - -type MyRecordType: record - { - a: count; - b: count; - }; - -event bro_init() - { - local x = MyRecordType($a=1, $b=2); - - delete x$c; - } diff --git a/testing/btest/language/undefined-delete-field.zeek b/testing/btest/language/undefined-delete-field.zeek new file mode 100644 index 0000000000..f4ecfdb106 --- /dev/null +++ b/testing/btest/language/undefined-delete-field.zeek @@ -0,0 +1,15 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 || echo $? >>output +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output + +type MyRecordType: record + { + a: count; + b: count; + }; + +event zeek_init() + { + local x = MyRecordType($a=1, $b=2); + + delete x$c; + } diff --git a/testing/btest/language/uninitialized-local.bro b/testing/btest/language/uninitialized-local.bro deleted file mode 100644 index ae486ebf1f..0000000000 --- a/testing/btest/language/uninitialized-local.bro +++ /dev/null @@ -1,22 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -event testit() &priority=10 - { - local my_count: count = 10; - } - -event testit() - { - # my_string's value occupies same Frame offset as my_count's from above - # handler, but execution of this handler body should still "initialize" - # it to a null value instead of referring to a left-over value of my_count. - local my_string: string; - local my_vector: vector of string; - my_vector[0] = my_string; - } - -event bro_init() - { - event testit(); - } diff --git a/testing/btest/language/uninitialized-local.zeek b/testing/btest/language/uninitialized-local.zeek new file mode 100644 index 0000000000..6d8e26be72 --- /dev/null +++ b/testing/btest/language/uninitialized-local.zeek @@ -0,0 +1,22 @@ +# @TEST-EXEC: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +event testit() &priority=10 + { + local my_count: count = 10; + } + +event testit() + { + # my_string's value occupies same Frame offset as my_count's from above + # handler, but execution of this handler body should still "initialize" + # it to a null value instead of referring to a left-over value of my_count. 
+ local my_string: string; + local my_vector: vector of string; + my_vector[0] = my_string; + } + +event zeek_init() + { + event testit(); + } diff --git a/testing/btest/language/uninitialized-local2.bro b/testing/btest/language/uninitialized-local2.bro deleted file mode 100644 index f11a5fda10..0000000000 --- a/testing/btest/language/uninitialized-local2.bro +++ /dev/null @@ -1,25 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -event test() - { - local var_a: string = "foo"; - } - -event test() - { - if ( F ) - { - local var_b: string = "bar"; - } - - local var_a: string = "baz"; - - print "var_a is", var_a; - print "var_b is", var_b; - } - -event bro_init() - { - event test(); - } diff --git a/testing/btest/language/uninitialized-local2.zeek b/testing/btest/language/uninitialized-local2.zeek new file mode 100644 index 0000000000..4b8f0c8275 --- /dev/null +++ b/testing/btest/language/uninitialized-local2.zeek @@ -0,0 +1,25 @@ +# @TEST-EXEC: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +event test() + { + local var_a: string = "foo"; + } + +event test() + { + if ( F ) + { + local var_b: string = "bar"; + } + + local var_a: string = "baz"; + + print "var_a is", var_a; + print "var_b is", var_b; + } + +event zeek_init() + { + event test(); + } diff --git a/testing/btest/language/vector-any-append.bro b/testing/btest/language/vector-any-append.bro deleted file mode 100644 index 816627fbf1..0000000000 --- a/testing/btest/language/vector-any-append.bro +++ /dev/null @@ -1,22 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function assign(v: vector of any) - { - v[|v|] = |v|; - } - -function append(v: vector of any) - { - v += |v|; - } - -event bro_init() - { - local v: vector of count; - assign(v); - assign(v); - append(v); - append(v); - print v; - } diff --git a/testing/btest/language/vector-any-append.zeek b/testing/btest/language/vector-any-append.zeek new file mode 100644 index 0000000000..599859b1d8 --- /dev/null +++ b/testing/btest/language/vector-any-append.zeek @@ -0,0 +1,22 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function assign(v: vector of any) + { + v[|v|] = |v|; + } + +function append(v: vector of any) + { + v += |v|; + } + +event zeek_init() + { + local v: vector of count; + assign(v); + assign(v); + append(v); + append(v); + print v; + } diff --git a/testing/btest/language/vector-coerce-expr.bro b/testing/btest/language/vector-coerce-expr.bro deleted file mode 100644 index 97f9617665..0000000000 --- a/testing/btest/language/vector-coerce-expr.bro +++ /dev/null @@ -1,20 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: btest-diff output - -type X: record { - a: vector of bool &default=vector(T, F, T); - b: vector of bool &default=vector(); -}; - -global x: X; - -global a: vector of count; - -a = vector(); -print a; - -a = vector(1,2,3); -print a; - -print x$a; -print x$b; diff --git a/testing/btest/language/vector-coerce-expr.zeek b/testing/btest/language/vector-coerce-expr.zeek new file mode 100644 index 0000000000..7fa4affe9c --- /dev/null +++ b/testing/btest/language/vector-coerce-expr.zeek @@ -0,0 +1,20 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: btest-diff output + +type X: record { + a: vector of bool &default=vector(T, F, T); + b: vector of bool &default=vector(); +}; + +global x: X; + +global a: vector of count; + +a = vector(); +print a; + 
+a = vector(1,2,3); +print a; + +print x$a; +print x$b; diff --git a/testing/btest/language/vector-in-operator.bro b/testing/btest/language/vector-in-operator.bro deleted file mode 100644 index 5936145363..0000000000 --- a/testing/btest/language/vector-in-operator.bro +++ /dev/null @@ -1,17 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -local ten = "0123456789"; -local vec: vector of string = { "zero", "one" }; -local n = 0; -vec[5] = "five"; -vec[7] = "seven"; -print vec; -vec = vec + ".exe"; - -for ( c in ten ) - { - local is_set: bool = (n in vec); - print fmt("vec[%s] = %s", n, is_set ? vec[n] : ""); - ++n; - } diff --git a/testing/btest/language/vector-in-operator.zeek b/testing/btest/language/vector-in-operator.zeek new file mode 100644 index 0000000000..ceea232f0e --- /dev/null +++ b/testing/btest/language/vector-in-operator.zeek @@ -0,0 +1,17 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +local ten = "0123456789"; +local vec: vector of string = { "zero", "one" }; +local n = 0; +vec[5] = "five"; +vec[7] = "seven"; +print vec; +vec = vec + ".exe"; + +for ( c in ten ) + { + local is_set: bool = (n in vec); + print fmt("vec[%s] = %s", n, is_set ? vec[n] : ""); + ++n; + } diff --git a/testing/btest/language/vector-list-init-records.bro b/testing/btest/language/vector-list-init-records.bro deleted file mode 100644 index b1eee0ac92..0000000000 --- a/testing/btest/language/vector-list-init-records.bro +++ /dev/null @@ -1,20 +0,0 @@ -# Initializing a vector with a list of records should promote elements as -# necessary to match the vector's yield type. - -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -type Foo: record { - s: string; - o: string &optional; -}; - -const v: vector of Foo = { - [$s="bar", $o="check"], - [$s="baz"] -}; - -for ( i in v ) - print fmt("element %d = %s", i, v[i]); - -print v; diff --git a/testing/btest/language/vector-list-init-records.zeek b/testing/btest/language/vector-list-init-records.zeek new file mode 100644 index 0000000000..d7aad468a2 --- /dev/null +++ b/testing/btest/language/vector-list-init-records.zeek @@ -0,0 +1,20 @@ +# Initializing a vector with a list of records should promote elements as +# necessary to match the vector's yield type. 
+ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +type Foo: record { + s: string; + o: string &optional; +}; + +const v: vector of Foo = { + [$s="bar", $o="check"], + [$s="baz"] +}; + +for ( i in v ) + print fmt("element %d = %s", i, v[i]); + +print v; diff --git a/testing/btest/language/vector-type-checking.bro b/testing/btest/language/vector-type-checking.bro deleted file mode 100644 index b4c75118d1..0000000000 --- a/testing/btest/language/vector-type-checking.bro +++ /dev/null @@ -1,46 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -type MyVec: vector of count; - -# global, type deduction, named ctor -global gdn = MyVec("zero"); # type clash in init - -# global, type explicit, named ctor -global gen: MyVec = MyVec("one"); # type clash in init - -# global, type deduction, anon ctor -global gda = vector("two"); # fine -global gda2 = MyVec(2); # fine -event bro_init() - { - gda = gda2; # type clash - } - -# global, type explicit, anon ctor -global gea: MyVec = vector("three"); # type clash - -# local, type deduction, named ctor -event bro_init() - { - local ldn = MyVec("thousand"); # type clash - } - -# local, type explicit, named ctor -event bro_init() - { - local len: MyVec = MyVec("thousand-one"); # type clash - } - -# local, type deduction, anon ctor -event bro_init() - { - local lda = vector("thousand-two"); # fine - lda = MyVec("thousand-two"); # type clash - } - -# local, type explicit, anon ctor -event bro_init() - { - local lea: MyVec = vector("thousand-three"); # type clash - } diff --git a/testing/btest/language/vector-type-checking.zeek b/testing/btest/language/vector-type-checking.zeek new file mode 100644 index 0000000000..bdea76c4cd --- /dev/null +++ b/testing/btest/language/vector-type-checking.zeek @@ -0,0 +1,46 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +type MyVec: vector of count; + +# global, type deduction, named ctor +global gdn = MyVec("zero"); # type clash in init + +# global, type explicit, named ctor +global gen: MyVec = MyVec("one"); # type clash in init + +# global, type deduction, anon ctor +global gda = vector("two"); # fine +global gda2 = MyVec(2); # fine +event zeek_init() + { + gda = gda2; # type clash + } + +# global, type explicit, anon ctor +global gea: MyVec = vector("three"); # type clash + +# local, type deduction, named ctor +event zeek_init() + { + local ldn = MyVec("thousand"); # type clash + } + +# local, type explicit, named ctor +event zeek_init() + { + local len: MyVec = MyVec("thousand-one"); # type clash + } + +# local, type deduction, anon ctor +event zeek_init() + { + local lda = vector("thousand-two"); # fine + lda = MyVec("thousand-two"); # type clash + } + +# local, type explicit, anon ctor +event zeek_init() + { + local lea: MyVec = vector("thousand-three"); # type clash + } diff --git a/testing/btest/language/vector-unspecified.bro b/testing/btest/language/vector-unspecified.bro deleted file mode 100644 index b91f910504..0000000000 --- a/testing/btest/language/vector-unspecified.bro +++ /dev/null @@ -1,11 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: btest-diff output - -# Test assignment behavior of unspecified vectors -local a = vector(); - -a[0] = 5; -a[1] = "Hi"; -a[2] = 127.0.0.1; - -print a; diff --git a/testing/btest/language/vector-unspecified.zeek b/testing/btest/language/vector-unspecified.zeek new file mode 
100644 index 0000000000..d0898b5d42 --- /dev/null +++ b/testing/btest/language/vector-unspecified.zeek @@ -0,0 +1,11 @@ +# @TEST-EXEC: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: btest-diff output + +# Test assignment behavior of unspecified vectors +local a = vector(); + +a[0] = 5; +a[1] = "Hi"; +a[2] = 127.0.0.1; + +print a; diff --git a/testing/btest/language/vector.bro b/testing/btest/language/vector.bro deleted file mode 100644 index 0eafd6c60c..0000000000 --- a/testing/btest/language/vector.bro +++ /dev/null @@ -1,172 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_case(msg: string, expect: bool) - { - print fmt("%s (%s)", msg, expect ? "PASS" : "FAIL"); - } - - -# Note: only global vectors can be initialized with curly braces -global vg1: vector of string = { "curly", "braces" }; - -event bro_init() -{ - local v1: vector of string = vector( "test", "example" ); - local v2: vector of string = vector(); - local v3: vector of string; - local v4 = vector( "type inference" ); - local v5 = vector( 1, 2, 3 ); - local v6 = vector( 10, 20, 30 ); - local v7 = v5 + v6; - local v8 = v6 - v5; - local v9 = v5 * v6; - local v10 = v6 / v5; - local v11 = v6 % v5; - local v12 = vector( T, F, T ); - local v13 = vector( F, F, T ); - local v14 = v12 && v13; - local v15 = v12 || v13; - - # Type inference tests - - test_case( "type inference", type_name(v4) == "vector of string" ); - test_case( "type inference", type_name(v5) == "vector of count" ); - test_case( "type inference", type_name(v12) == "vector of bool" ); - - # Test the size of each vector - - test_case( "cardinality", |v1| == 2 ); - test_case( "cardinality", |v2| == 0 ); - test_case( "cardinality", |v3| == 0 ); - test_case( "cardinality", |v4| == 1 ); - test_case( "cardinality", |v5| == 3 ); - test_case( "cardinality", |v6| == 3 ); - test_case( "cardinality", |v7| == 3 ); - test_case( "cardinality", |v8| == 3 ); - test_case( "cardinality", |v9| == 3 ); - test_case( "cardinality", |v10| == 3 ); - test_case( "cardinality", |v11| == 3 ); - test_case( "cardinality", |v12| == 3 ); - test_case( "cardinality", |v13| == 3 ); - test_case( "cardinality", |v14| == 3 ); - test_case( "cardinality", |v15| == 3 ); - test_case( "cardinality", |vg1| == 2 ); - - # Test that vectors use zero-based indexing - - test_case( "zero-based indexing", v1[0] == "test" && v5[0] == 1 ); - - # Test iterating over each vector - - local ct: count; - ct = 0; - for ( c in v1 ) - { - if ( type_name(c) != "count" ) - print "Error: wrong index type"; - if ( type_name(v1[c]) != "string" ) - print "Error: wrong vector type"; - ++ct; - } - test_case( "iterate over vector", ct == 2 ); - - ct = 0; - for ( c in v2 ) - { - ++ct; - } - test_case( "iterate over vector", ct == 0 ); - - ct = 0; - for ( c in vg1 ) - { - ++ct; - } - test_case( "iterate over vector", ct == 2 ); - - # Test adding elements to each vector - - v1[2] = "added"; - test_case( "add element", |v1| == 3 ); - test_case( "access element", v1[2] == "added" ); - - v2[0] = "another"; - test_case( "add element", |v2| == 1 ); - v2[1] = "test"; - test_case( "add element", |v2| == 2 ); - test_case( "access element", v2[0] == "another" ); - test_case( "access element", v2[1] == "test" ); - - v3[0] = "foo"; - test_case( "add element", |v3| == 1 ); - test_case( "access element", v3[0] == "foo" ); - - v4[1] = "local"; - test_case( "add element", |v4| == 2 ); - test_case( "access element", v4[1] == "local" ); - - v5[3] = 77; - test_case( "add element", |v5| == 4 ); - test_case( "access 
element", v5[3] == 77 ); - - vg1[2] = "global"; - test_case( "add element", |vg1| == 3 ); - test_case( "access element", vg1[2] == "global" ); - - # Test overwriting elements of each vector - - v1[0] = "new1"; - test_case( "overwrite element", |v1| == 3 ); - test_case( "access element", v1[0] == "new1" ); - - v2[1] = "new2"; - test_case( "overwrite element", |v2| == 2 ); - test_case( "access element", v2[0] == "another" ); - test_case( "access element", v2[1] == "new2" ); - - v3[0] = "new3"; - test_case( "overwrite element", |v3| == 1 ); - test_case( "access element", v3[0] == "new3" ); - - v4[0] = "new4"; - test_case( "overwrite element", |v4| == 2 ); - test_case( "access element", v4[0] == "new4" ); - - v5[0] = 0; - test_case( "overwrite element", |v5| == 4 ); - test_case( "access element", v5[0] == 0 ); - - vg1[1] = "new5"; - test_case( "overwrite element", |vg1| == 3 ); - test_case( "access element", vg1[1] == "new5" ); - - # Test increment/decrement operators - - ++v5; - test_case( "++ operator", |v5| == 4 && v5[0] == 1 && v5[1] == 3 - && v5[2] == 4 && v5[3] == 78 ); - --v5; - test_case( "-- operator", |v5| == 4 && v5[0] == 0 && v5[1] == 2 - && v5[2] == 3 && v5[3] == 77 ); - - # Test +,-,*,/,% of two vectors - - test_case( "+ operator", v7[0] == 11 && v7[1] == 22 && v7[2] == 33 ); - test_case( "- operator", v8[0] == 9 && v8[1] == 18 && v8[2] == 27 ); - test_case( "* operator", v9[0] == 10 && v9[1] == 40 && v9[2] == 90 ); - test_case( "/ operator", v10[0] == 10 && v10[1] == 10 && v10[2] == 10 ); - test_case( "% operator", v11[0] == 0 && v11[1] == 0 && v11[2] == 0 ); - - # Test &&,|| of two vectors - - test_case( "&& operator", v14[0] == F && v14[1] == F && v14[2] == T ); - test_case( "|| operator", v15[0] == T && v15[1] == F && v15[2] == T ); - - # Test += operator. - local v16 = v6; - v16 += 40; - test_case( "+= operator", all_set(v16 == vector( 10, 20, 30, 40 )) ); - -} - diff --git a/testing/btest/language/vector.zeek b/testing/btest/language/vector.zeek new file mode 100644 index 0000000000..ea330f3842 --- /dev/null +++ b/testing/btest/language/vector.zeek @@ -0,0 +1,186 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_case(msg: string, expect: bool) + { + print fmt("%s (%s)", msg, expect ? 
"PASS" : "FAIL"); + } + + +# Note: only global vectors can be initialized with curly braces +global vg1: vector of string = { "curly", "braces" }; + +event zeek_init() +{ + local v1: vector of string = vector( "test", "example" ); + local v2: vector of string = vector(); + local v3: vector of string; + local v4 = vector( "type inference" ); + local v5 = vector( 1, 2, 3 ); + local v6 = vector( 10, 20, 30 ); + local v7 = v5 + v6; + local v8 = v6 - v5; + local v9 = v5 * v6; + local v10 = v6 / v5; + local v11 = v6 % v5; + local v12 = vector( T, F, T ); + local v13 = vector( F, F, T ); + local v14 = v12 && v13; + local v15 = v12 || v13; + + # Type inference tests + + test_case( "type inference", type_name(v4) == "vector of string" ); + test_case( "type inference", type_name(v5) == "vector of count" ); + test_case( "type inference", type_name(v12) == "vector of bool" ); + + # Test the size of each vector + + test_case( "cardinality", |v1| == 2 ); + test_case( "cardinality", |v2| == 0 ); + test_case( "cardinality", |v3| == 0 ); + test_case( "cardinality", |v4| == 1 ); + test_case( "cardinality", |v5| == 3 ); + test_case( "cardinality", |v6| == 3 ); + test_case( "cardinality", |v7| == 3 ); + test_case( "cardinality", |v8| == 3 ); + test_case( "cardinality", |v9| == 3 ); + test_case( "cardinality", |v10| == 3 ); + test_case( "cardinality", |v11| == 3 ); + test_case( "cardinality", |v12| == 3 ); + test_case( "cardinality", |v13| == 3 ); + test_case( "cardinality", |v14| == 3 ); + test_case( "cardinality", |v15| == 3 ); + test_case( "cardinality", |vg1| == 2 ); + + # Test that vectors use zero-based indexing + + test_case( "zero-based indexing", v1[0] == "test" && v5[0] == 1 ); + + # Test iterating over each vector + + local ct: count; + ct = 0; + for ( c in v1 ) + { + if ( type_name(c) != "count" ) + print "Error: wrong index type"; + if ( type_name(v1[c]) != "string" ) + print "Error: wrong vector type"; + ++ct; + } + test_case( "iterate over vector", ct == 2 ); + + ct = 0; + for ( c in v2 ) + { + ++ct; + } + test_case( "iterate over vector", ct == 0 ); + + ct = 0; + for ( c in vg1 ) + { + ++ct; + } + test_case( "iterate over vector", ct == 2 ); + + # Test adding elements to each vector + + v1[2] = "added"; + test_case( "add element", |v1| == 3 ); + test_case( "access element", v1[2] == "added" ); + + v2[0] = "another"; + test_case( "add element", |v2| == 1 ); + v2[1] = "test"; + test_case( "add element", |v2| == 2 ); + test_case( "access element", v2[0] == "another" ); + test_case( "access element", v2[1] == "test" ); + + v3[0] = "foo"; + test_case( "add element", |v3| == 1 ); + test_case( "access element", v3[0] == "foo" ); + + v4[1] = "local"; + test_case( "add element", |v4| == 2 ); + test_case( "access element", v4[1] == "local" ); + + v5[3] = 77; + test_case( "add element", |v5| == 4 ); + test_case( "access element", v5[3] == 77 ); + + vg1[2] = "global"; + test_case( "add element", |vg1| == 3 ); + test_case( "access element", vg1[2] == "global" ); + + # Test overwriting elements of each vector + + v1[0] = "new1"; + test_case( "overwrite element", |v1| == 3 ); + test_case( "access element", v1[0] == "new1" ); + + v2[1] = "new2"; + test_case( "overwrite element", |v2| == 2 ); + test_case( "access element", v2[0] == "another" ); + test_case( "access element", v2[1] == "new2" ); + + v3[0] = "new3"; + test_case( "overwrite element", |v3| == 1 ); + test_case( "access element", v3[0] == "new3" ); + + v4[0] = "new4"; + test_case( "overwrite element", |v4| == 2 ); + test_case( "access element", v4[0] 
== "new4" ); + + v5[0] = 0; + test_case( "overwrite element", |v5| == 4 ); + test_case( "access element", v5[0] == 0 ); + + vg1[1] = "new5"; + test_case( "overwrite element", |vg1| == 3 ); + test_case( "access element", vg1[1] == "new5" ); + + # Test increment/decrement operators + + ++v5; + test_case( "++ operator", |v5| == 4 && v5[0] == 1 && v5[1] == 3 + && v5[2] == 4 && v5[3] == 78 ); + --v5; + test_case( "-- operator", |v5| == 4 && v5[0] == 0 && v5[1] == 2 + && v5[2] == 3 && v5[3] == 77 ); + + # Test +,-,*,/,% of two vectors + + test_case( "+ operator", v7[0] == 11 && v7[1] == 22 && v7[2] == 33 ); + test_case( "- operator", v8[0] == 9 && v8[1] == 18 && v8[2] == 27 ); + test_case( "* operator", v9[0] == 10 && v9[1] == 40 && v9[2] == 90 ); + test_case( "/ operator", v10[0] == 10 && v10[1] == 10 && v10[2] == 10 ); + test_case( "% operator", v11[0] == 0 && v11[1] == 0 && v11[2] == 0 ); + + # Test &&,|| of two vectors + + test_case( "&& operator", v14[0] == F && v14[1] == F && v14[2] == T ); + test_case( "|| operator", v15[0] == T && v15[1] == F && v15[2] == T ); + + # Test += operator. + local v16 = v6; + v16 += 40; + test_case( "+= operator", all_set(v16 == vector( 10, 20, 30, 40 )) ); + + # Slicing tests. + local v17 = vector( 1, 2, 3, 4, 5 ); + test_case( "slicing", all_set(v17[0:2] == vector( 1, 2 )) ); + test_case( "slicing", all_set(v17[-3:-1] == vector( 3, 4 )) ); + test_case( "slicing", all_set(v17[:2] == vector( 1, 2 )) ); + test_case( "slicing", all_set(v17[2:] == vector( 3, 4, 5 )) ); + test_case( "slicing", all_set(v17[:] == v17) ); + v17[0:1] = vector(6); + test_case( "slicing assignment", all_set(v17 == vector(6, 2, 3, 4, 5)) ); + v17[2:4] = vector(7, 8); + test_case( "slicing assignment", all_set(v17 == vector(6, 2, 7, 8, 5)) ); + v17[2:4] = vector(9, 10, 11); + test_case( "slicing assignment grow", all_set(v17 == vector(6, 2, 9, 10, 11, 5)) ); + v17[2:5] = vector(9); + test_case( "slicing assignment shrink", all_set(v17 == vector(6, 2, 9, 5)) ); +} diff --git a/testing/btest/language/when-on-globals.zeek b/testing/btest/language/when-on-globals.zeek new file mode 100644 index 0000000000..087a88b4db --- /dev/null +++ b/testing/btest/language/when-on-globals.zeek @@ -0,0 +1,71 @@ +# @TEST-EXEC: zeek -b -r $TRACES/wikipedia.trace %INPUT | sort >out +# @TEST-EXEC: btest-diff out + +redef exit_only_after_terminate = T; + +type X: record { + s: string; + x: set[string] &optional; +}; + +global x1 = 42; +global x2: table[count] of X; +global x3: table[count] of X; + +event quit() +{ + terminate(); +} + +event zeek_init() + { + x2[10] = [$s="foo"]; + x3[20] = [$s="bar", $x=set("i")]; + + when ( x1 != 42 ) + { + print "x1 != 42", x1 != 42; + } + timeout 1sec + { + print "unexpected timeout (1)"; + } + + when ( 15 in x2 ) + { + print "15 in x2", 10 in x2; + } + timeout 1sec + { + print "unexpected timeout (2)"; + } + + when ( x2[10]$s == "bar" ) + { + print "x2[10]", x2[10]$s == "bar"; + } + timeout 1sec + { + print "unexpected timeout (3)"; + } + + when ( "j" in x3[20]$x ) + { + print "unexpected trigger"; + } + timeout 1sec + { + print "\"j\" in x3[20]$x, expected timeout"; + } + + x1 = 100; + x2[15] = [$s="xyz"]; + x2[10]$s = "bar"; + + # This will *NOT* trigger then when-condition because we're modifying + # an inner value that's not directly tracked. 
+ add x3[20]$x["j"]; + + schedule 2secs { quit() }; +} + diff --git a/testing/btest/language/when-unitialized-rhs.bro b/testing/btest/language/when-unitialized-rhs.bro deleted file mode 100644 index 21b94c6e02..0000000000 --- a/testing/btest/language/when-unitialized-rhs.bro +++ /dev/null @@ -1,32 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/wikipedia.trace %INPUT >out 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out - -global crashMe: function(): string; -global x: int; - -event bro_init() - { - when( local result = crashMe() ) - { - print "1st when stmt executing", result; - } - - when( local other_result = x ) - { - print "2nd when stmt executing", other_result; - } - } - -global conn_count = 0; - -event new_connection(c: connection) - { - ++conn_count; - print conn_count; - - if ( conn_count == 10 ) - { - x = 999; - crashMe = function(): string { return "not anymore you don't"; }; - } - } diff --git a/testing/btest/language/when-unitialized-rhs.zeek b/testing/btest/language/when-unitialized-rhs.zeek new file mode 100644 index 0000000000..62464004f2 --- /dev/null +++ b/testing/btest/language/when-unitialized-rhs.zeek @@ -0,0 +1,32 @@ +# @TEST-EXEC: zeek -b -r $TRACES/wikipedia.trace %INPUT >out 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out + +global crashMe: function(): string; +global x: int; + +event zeek_init() + { + when( local result = crashMe() ) + { + print "1st when stmt executing", result; + } + + when( local other_result = x ) + { + print "2nd when stmt executing", other_result; + } + } + +global conn_count = 0; + +event new_connection(c: connection) + { + ++conn_count; + print conn_count; + + if ( conn_count == 10 ) + { + x = 999; + crashMe = function(): string { return "not anymore you don't"; }; + } + } diff --git a/testing/btest/language/when.bro b/testing/btest/language/when.bro deleted file mode 100644 index a2bad6a620..0000000000 --- a/testing/btest/language/when.bro +++ /dev/null @@ -1,31 +0,0 @@ -# @TEST-EXEC: btest-bg-run test1 bro %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: mv test1/.stdout out -# @TEST-EXEC: btest-diff out - -redef exit_only_after_terminate = T; - -event bro_init() -{ - local h: addr = 127.0.0.1; - - when ( local hname = lookup_addr(h) ) - { - print "lookup successful"; - terminate(); - } - timeout 10sec - { - print "timeout (1)"; - } - - local to = 5sec; - # Just checking that timeouts can use arbitrary expressions... - when ( local hname2 = lookup_addr(h) ) {} - timeout to {} - when ( local hname3 = lookup_addr(h) ) {} - timeout to + 2sec {} - - print "done"; -} - diff --git a/testing/btest/language/when.zeek b/testing/btest/language/when.zeek new file mode 100644 index 0000000000..de710aa736 --- /dev/null +++ b/testing/btest/language/when.zeek @@ -0,0 +1,31 @@ +# @TEST-EXEC: btest-bg-run test1 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: mv test1/.stdout out +# @TEST-EXEC: btest-diff out + +redef exit_only_after_terminate = T; + +event zeek_init() +{ + local h: addr = 127.0.0.1; + + when ( local hname = lookup_addr(h) ) + { + print "lookup successful"; + terminate(); + } + timeout 10sec + { + print "timeout (1)"; + } + + local to = 5sec; + # Just checking that timeouts can use arbitrary expressions... 
+ when ( local hname2 = lookup_addr(h) ) {} + timeout to {} + when ( local hname3 = lookup_addr(h) ) {} + timeout to + 2sec {} + + print "done"; +} + diff --git a/testing/btest/language/while.bro b/testing/btest/language/while.bro deleted file mode 100644 index 6828b00b41..0000000000 --- a/testing/btest/language/while.bro +++ /dev/null @@ -1,77 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >out -# @TEST-EXEC: btest-diff out - -function test_noop() - { - while ( F ) - print "noooooooooo"; - } - -function test_it() - { - local i = 0; - - while ( i < 10 ) - ++i; - - print i; - } - -function test_break() - { - local s = ""; - - while ( T ) - { - s += "s"; - print s; - - if ( s == "sss" ) - break; - } - } - -function test_next() - { - local s: set[count]; - local i = 0; - - while ( 9 !in s ) - { - ++i; - - if ( i % 2 == 0 ) - next; - - add s[i]; - } - - print s; - } - -function test_return(): vector of string - { - local i = 0; - local rval: vector of string; - - while ( T ) - { - rval[i] = fmt("number %d", i); - ++i; - - if ( i == 13 ) - return rval; - } - - rval[0] = "noooo"; - return rval; - } - -event bro_init() - { - test_noop(); - test_it(); - test_break(); - test_next(); - print test_return(); - } diff --git a/testing/btest/language/while.zeek b/testing/btest/language/while.zeek new file mode 100644 index 0000000000..3e12c81514 --- /dev/null +++ b/testing/btest/language/while.zeek @@ -0,0 +1,77 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +function test_noop() + { + while ( F ) + print "noooooooooo"; + } + +function test_it() + { + local i = 0; + + while ( i < 10 ) + ++i; + + print i; + } + +function test_break() + { + local s = ""; + + while ( T ) + { + s += "s"; + print s; + + if ( s == "sss" ) + break; + } + } + +function test_next() + { + local s: set[count]; + local i = 0; + + while ( 9 !in s ) + { + ++i; + + if ( i % 2 == 0 ) + next; + + add s[i]; + } + + print s; + } + +function test_return(): vector of string + { + local i = 0; + local rval: vector of string; + + while ( T ) + { + rval[i] = fmt("number %d", i); + ++i; + + if ( i == 13 ) + return rval; + } + + rval[0] = "noooo"; + return rval; + } + +event zeek_init() + { + test_noop(); + test_it(); + test_break(); + test_next(); + print test_return(); + } diff --git a/testing/btest/language/wrong-delete-field.bro b/testing/btest/language/wrong-delete-field.bro deleted file mode 100644 index 63573faf8a..0000000000 --- a/testing/btest/language/wrong-delete-field.bro +++ /dev/null @@ -1,10 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >output 2>&1 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output - -type X: record { - a: count; -}; - -global x: X = [$a=20]; - -delete x$a; diff --git a/testing/btest/language/wrong-delete-field.zeek b/testing/btest/language/wrong-delete-field.zeek new file mode 100644 index 0000000000..c393f66c16 --- /dev/null +++ b/testing/btest/language/wrong-delete-field.zeek @@ -0,0 +1,10 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >output 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output + +type X: record { + a: count; +}; + +global x: X = [$a=20]; + +delete x$a; diff --git a/testing/btest/language/wrong-record-extension.bro b/testing/btest/language/wrong-record-extension.bro deleted file mode 100644 index a8ef6a64e9..0000000000 --- a/testing/btest/language/wrong-record-extension.bro +++ /dev/null @@ -1,14 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b %INPUT >output.tmp 2>&1 -# @TEST-EXEC: sed 's#^.*:##g' output -# @TEST-EXEC: btest-diff output - 
-type Foo: record { - a: count; - b: count &optional; -}; - -redef record Foo += { - c: count; - d: string &optional; -}; - diff --git a/testing/btest/language/wrong-record-extension.zeek b/testing/btest/language/wrong-record-extension.zeek new file mode 100644 index 0000000000..72b66c4ee3 --- /dev/null +++ b/testing/btest/language/wrong-record-extension.zeek @@ -0,0 +1,14 @@ +# @TEST-EXEC-FAIL: zeek -b %INPUT >output.tmp 2>&1 +# @TEST-EXEC: sed 's#^.*:##g' output +# @TEST-EXEC: btest-diff output + +type Foo: record { + a: count; + b: count &optional; +}; + +redef record Foo += { + c: count; + d: string &optional; +}; + diff --git a/testing/btest/language/zeek_init.zeek b/testing/btest/language/zeek_init.zeek new file mode 100644 index 0000000000..c1ca3ba65c --- /dev/null +++ b/testing/btest/language/zeek_init.zeek @@ -0,0 +1,44 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + + +event zeek_init() &priority=10 + { + print "zeek_init at priority 10!"; + } + +event bro_init() &priority=5 + { + print "bro_init at priority 5!"; + } + +event zeek_init() &priority=0 + { + print "zeek_init at priority 0!"; + } + +event bro_init() &priority=-10 + { + print "bro_init at priority -10!"; + } + + +event zeek_done() &priority=10 + { + print "zeek_done at priority 10!"; + } + +event bro_done() &priority=5 + { + print "bro_done at priority 5!"; + } + +event zeek_done() &priority=0 + { + print "zeek_done at priority 0!"; + } + +event bro_done() &priority=-10 + { + print "bro_done at priority -10!"; + } diff --git a/testing/btest/language/zeek_script_loaded.zeek b/testing/btest/language/zeek_script_loaded.zeek new file mode 100644 index 0000000000..9011790e93 --- /dev/null +++ b/testing/btest/language/zeek_script_loaded.zeek @@ -0,0 +1,26 @@ +# @TEST-EXEC: zeek -b %INPUT >out +# @TEST-EXEC: btest-diff out + +event zeek_script_loaded(path: string, level: count) &priority=10 + { + if ( /zeek_script_loaded.zeek/ in path ) + print "zeek_script_loaded priority 10"; + } + +event bro_script_loaded(path: string, level: count) &priority=5 + { + if ( /zeek_script_loaded.zeek/ in path ) + print "bro_script_loaded priority 5"; + } + +event zeek_script_loaded(path: string, level: count) &priority=0 + { + if ( /zeek_script_loaded.zeek/ in path ) + print "zeek_script_loaded priority 0"; + } + +event bro_script_loaded(path: string, level: count) &priority=-10 + { + if ( /zeek_script_loaded.zeek/ in path ) + print "bro_script_loaded priority -10"; + } diff --git a/testing/btest/plugins/bifs-and-scripts-install.sh b/testing/btest/plugins/bifs-and-scripts-install.sh index 60c754f8ff..0fbad20b36 100644 --- a/testing/btest/plugins/bifs-and-scripts-install.sh +++ b/testing/btest/plugins/bifs-and-scripts-install.sh @@ -1,32 +1,32 @@ -# @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin -u . Demo Foo +# @TEST-EXEC: ${DIST}/aux/zeek-aux/plugin-support/init-plugin -u . 
Demo Foo # @TEST-EXEC: bash %INPUT -# @TEST-EXEC: ./configure --bro-dist=${DIST} --install-root=`pwd`/test-install +# @TEST-EXEC: ./configure --zeek-dist=${DIST} --install-root=`pwd`/test-install # @TEST-EXEC: make # @TEST-EXEC: make install -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd`/test-install bro -NN Demo::Foo >>output -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd`/test-install bro demo/foo -r $TRACES/empty.trace >>output +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd`/test-install zeek -NN Demo::Foo >>output +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd`/test-install zeek Demo/Foo -r $TRACES/empty.trace >>output # @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff output -mkdir -p scripts/demo/foo/base/ +mkdir -p scripts/Demo/Foo/base/ -cat >scripts/__load__.bro <scripts/__load__.zeek <scripts/demo/foo/__load__.bro <scripts/Demo/Foo/__load__.zeek <scripts/demo/foo/manually.bro <scripts/Demo/Foo/manually.zeek <scripts/demo/foo/base/at-startup.bro <scripts/Demo/Foo/base/at-startup.zeek <activate.bro <activate.zeek <>output +# @TEST-EXEC: ./configure --zeek-dist=${DIST} && make +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek -NN Demo::Foo >>output # @TEST-EXEC: echo === >>output -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -r $TRACES/empty.trace >>output +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek -r $TRACES/empty.trace >>output # @TEST-EXEC: echo === >>output -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro demo/foo -r $TRACES/empty.trace >>output +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek Demo/Foo -r $TRACES/empty.trace >>output # @TEST-EXEC: echo =-= >>output -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -b -r $TRACES/empty.trace >>output +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek -b -r $TRACES/empty.trace >>output # @TEST-EXEC: echo =-= >>output -# @TEST-EXEC-FAIL: BRO_PLUGIN_PATH=`pwd` bro -b demo/foo -r $TRACES/empty.trace >>output +# @TEST-EXEC-FAIL: ZEEK_PLUGIN_PATH=`pwd` zeek -b Demo/Foo -r $TRACES/empty.trace >>output # @TEST-EXEC: echo === >>output -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -b ./activate.bro -r $TRACES/empty.trace >>output +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek -b ./activate.zeek -r $TRACES/empty.trace >>output # @TEST-EXEC: echo === >>output -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -b ./activate.bro demo/foo -r $TRACES/empty.trace >>output +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek -b ./activate.zeek Demo/Foo -r $TRACES/empty.trace >>output # @TEST-EXEC: echo === >>output -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -b Demo::Foo demo/foo -r $TRACES/empty.trace >>output +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek -b Demo::Foo Demo/Foo -r $TRACES/empty.trace >>output # @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff output -mkdir -p scripts/demo/foo/base/ +mkdir -p scripts/Demo/Foo/base/ -cat >scripts/__load__.bro <scripts/__load__.zeek <scripts/demo/foo/__load__.bro <scripts/Demo/Foo/__load__.zeek <scripts/demo/foo/manually.bro <scripts/Demo/Foo/manually.zeek <scripts/demo/foo/base/at-startup.bro <scripts/Demo/Foo/base/at-startup.zeek <activate.bro <activate.zeek <>output -# @TEST-EXEC: echo === >>output -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -r $TRACES/ftp/retr.trace %INPUT >>output -# @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff output - -event file_new(f: fa_file) - { - Files::add_analyzer(f, Files::ANALYZER_FOO); - } - -event foo_piece(f: fa_file, data: string) - { - print "foo_piece", f$id, sub_bytes(data, 0, 20); - } - diff --git a/testing/btest/plugins/file.zeek b/testing/btest/plugins/file.zeek new file mode 100644 index 0000000000..5a59af6ad7 --- /dev/null +++ b/testing/btest/plugins/file.zeek @@ -0,0 +1,18 @@ +# 
@TEST-EXEC: ${DIST}/aux/zeek-aux/plugin-support/init-plugin -u . Demo Foo +# @TEST-EXEC: cp -r %DIR/file-plugin/* . +# @TEST-EXEC: ./configure --zeek-dist=${DIST} && make +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek -NN Demo::Foo >>output +# @TEST-EXEC: echo === >>output +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek -r $TRACES/ftp/retr.trace %INPUT >>output +# @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff output + +event file_new(f: fa_file) + { + Files::add_analyzer(f, Files::ANALYZER_FOO); + } + +event foo_piece(f: fa_file, data: string) + { + print "foo_piece", f$id, sub_bytes(data, 0, 20); + } + diff --git a/testing/btest/plugins/hooks.bro b/testing/btest/plugins/hooks.bro deleted file mode 100644 index a6778475cf..0000000000 --- a/testing/btest/plugins/hooks.bro +++ /dev/null @@ -1,8 +0,0 @@ -# @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin -u . Demo Hooks -# @TEST-EXEC: cp -r %DIR/hooks-plugin/* . -# @TEST-EXEC: ./configure --bro-dist=${DIST} && make -# @TEST-EXEC: BRO_PLUGIN_ACTIVATE="Demo::Hooks" BRO_PLUGIN_PATH=`pwd` bro -b -r $TRACES/http/get.trace %INPUT 2>&1 | $SCRIPTS/diff-remove-abspath | sort | uniq >output -# @TEST-EXEC: btest-diff output - -@unload base/misc/version -@load base/init-default.bro diff --git a/testing/btest/plugins/hooks.zeek b/testing/btest/plugins/hooks.zeek new file mode 100644 index 0000000000..79f750bac5 --- /dev/null +++ b/testing/btest/plugins/hooks.zeek @@ -0,0 +1,8 @@ +# @TEST-EXEC: ${DIST}/aux/zeek-aux/plugin-support/init-plugin -u . Demo Hooks +# @TEST-EXEC: cp -r %DIR/hooks-plugin/* . +# @TEST-EXEC: ./configure --zeek-dist=${DIST} && make +# @TEST-EXEC: ZEEK_PLUGIN_ACTIVATE="Demo::Hooks" ZEEK_PLUGIN_PATH=`pwd` zeek -b -r $TRACES/http/get.trace %INPUT 2>&1 | $SCRIPTS/diff-remove-abspath | sort | uniq >output +# @TEST-EXEC: btest-diff output + +@unload base/misc/version +@load base/init-default diff --git a/testing/btest/plugins/init-plugin.bro b/testing/btest/plugins/init-plugin.bro deleted file mode 100644 index a4ebf7b00c..0000000000 --- a/testing/btest/plugins/init-plugin.bro +++ /dev/null @@ -1,6 +0,0 @@ -# @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin -u . Demo Foo -# @TEST-EXEC: ./configure --bro-dist=${DIST} && make -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -NN Demo::Foo >>output -# @TEST-EXEC: echo === >>output -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -r $TRACES/port4242.trace >>output -# @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff output diff --git a/testing/btest/plugins/init-plugin.zeek b/testing/btest/plugins/init-plugin.zeek new file mode 100644 index 0000000000..afd167f449 --- /dev/null +++ b/testing/btest/plugins/init-plugin.zeek @@ -0,0 +1,6 @@ +# @TEST-EXEC: ${DIST}/aux/zeek-aux/plugin-support/init-plugin -u . 
Demo Foo +# @TEST-EXEC: ./configure --zeek-dist=${DIST} && make +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek -NN Demo::Foo >>output +# @TEST-EXEC: echo === >>output +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek -r $TRACES/port4242.trace >>output +# @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff output diff --git a/testing/btest/Baseline/doc.manual.framework_notice_hook_01/.stdout b/testing/btest/plugins/legacy-plugin/.btest-ignore similarity index 100% rename from testing/btest/Baseline/doc.manual.framework_notice_hook_01/.stdout rename to testing/btest/plugins/legacy-plugin/.btest-ignore diff --git a/testing/btest/plugins/legacy-plugin/CMakeLists.txt b/testing/btest/plugins/legacy-plugin/CMakeLists.txt new file mode 100644 index 0000000000..92e1a90e9d --- /dev/null +++ b/testing/btest/plugins/legacy-plugin/CMakeLists.txt @@ -0,0 +1,19 @@ + +project(Zeek-Plugin-Demo-Foo) + +cmake_minimum_required(VERSION 2.6.3) + +if ( NOT BRO_DIST ) + message(FATAL_ERROR "BRO_DIST not set") +endif () + +set(CMAKE_MODULE_PATH ${BRO_DIST}/cmake) + +include(BroPlugin) + +bro_plugin_begin(Demo Foo) +bro_plugin_cc(src/Plugin.cc) +bro_plugin_cc(src/Foo.cc) +bro_plugin_bif(src/events.bif) +bro_plugin_pac(src/foo.pac src/foo-protocol.pac src/foo-analyzer.pac) +bro_plugin_end() diff --git a/testing/btest/plugins/legacy-plugin/scripts/Demo/Foo/base/main.zeek b/testing/btest/plugins/legacy-plugin/scripts/Demo/Foo/base/main.zeek new file mode 100644 index 0000000000..76c63723b7 --- /dev/null +++ b/testing/btest/plugins/legacy-plugin/scripts/Demo/Foo/base/main.zeek @@ -0,0 +1,7 @@ + +const ports = { 4242/tcp }; + +event zeek_init() &priority=5 + { + Analyzer::register_for_ports(Analyzer::ANALYZER_FOO, ports); + } diff --git a/testing/btest/plugins/protocol-plugin/scripts/__load__.bro b/testing/btest/plugins/legacy-plugin/scripts/__load__.zeek similarity index 100% rename from testing/btest/plugins/protocol-plugin/scripts/__load__.bro rename to testing/btest/plugins/legacy-plugin/scripts/__load__.zeek diff --git a/testing/btest/plugins/legacy-plugin/src/Foo.cc b/testing/btest/plugins/legacy-plugin/src/Foo.cc new file mode 100644 index 0000000000..be3c52a98b --- /dev/null +++ b/testing/btest/plugins/legacy-plugin/src/Foo.cc @@ -0,0 +1,59 @@ + +#include "Foo.h" +#include "foo_pac.h" +#include "events.bif.h" + +#include + +using namespace plugin::Demo_Foo; + +Foo::Foo(Connection* conn) + : analyzer::tcp::TCP_ApplicationAnalyzer("Foo", conn) + { + interp = new binpac::Foo::Foo_Conn(this); + } + +Foo::~Foo() + { + delete interp; + } + +void Foo::Done() + { + analyzer::tcp::TCP_ApplicationAnalyzer::Done(); + + interp->FlowEOF(true); + interp->FlowEOF(false); + } + +void Foo::EndpointEOF(bool is_orig) + { + analyzer::tcp::TCP_ApplicationAnalyzer::EndpointEOF(is_orig); + interp->FlowEOF(is_orig); + } + +void Foo::DeliverStream(int len, const u_char* data, bool orig) + { + analyzer::tcp::TCP_ApplicationAnalyzer::DeliverStream(len, data, orig); + + assert(TCP()); + + if ( TCP()->IsPartial() ) + // punt on partial. 
+ return; + + try + { + interp->NewData(orig, data, data + len); + } + catch ( const binpac::Exception& e ) + { + ProtocolViolation(fmt("Binpac exception: %s", e.c_msg())); + } + } + +void Foo::Undelivered(uint64 seq, int len, bool orig) + { + analyzer::tcp::TCP_ApplicationAnalyzer::Undelivered(seq, len, orig); + interp->NewGap(orig, len); + } diff --git a/testing/btest/plugins/legacy-plugin/src/Foo.h b/testing/btest/plugins/legacy-plugin/src/Foo.h new file mode 100644 index 0000000000..e12fed889d --- /dev/null +++ b/testing/btest/plugins/legacy-plugin/src/Foo.h @@ -0,0 +1,32 @@ + +#ifndef BRO_PLUGIN_DEMO_FOO_H +#define BRO_PLUGIN_DEMO_FOO_H + +#include "analyzer/protocol/tcp/TCP.h" +#include "analyzer/protocol/pia/PIA.h" + +namespace binpac { namespace Foo { class Foo_Conn; } } + +namespace plugin { +namespace Demo_Foo { + +class Foo : public analyzer::tcp::TCP_ApplicationAnalyzer { +public: + Foo(Connection* conn); + ~Foo(); + + virtual void Done(); + virtual void DeliverStream(int len, const u_char* data, bool orig); + virtual void Undelivered(uint64 seq, int len, bool orig); + virtual void EndpointEOF(bool is_orig); + + static analyzer::Analyzer* Instantiate(Connection* conn) + { return new Foo(conn); } + +protected: + binpac::Foo::Foo_Conn* interp; +}; + +} } + +#endif diff --git a/testing/btest/plugins/legacy-plugin/src/Plugin.cc b/testing/btest/plugins/legacy-plugin/src/Plugin.cc new file mode 100644 index 0000000000..bd2662d67c --- /dev/null +++ b/testing/btest/plugins/legacy-plugin/src/Plugin.cc @@ -0,0 +1,21 @@ + +#include "Plugin.h" + +#include "Foo.h" + +namespace plugin { namespace Demo_Foo { Plugin plugin; } } + +using namespace plugin::Demo_Foo; + +plugin::Configuration Plugin::Configure() + { + AddComponent(new ::analyzer::Component("Foo", plugin::Demo_Foo::Foo::Instantiate)); + + plugin::Configuration config; + config.name = "Demo::Foo"; + config.description = "A Foo test analyzer"; + config.version.major = 1; + config.version.minor = 0; + config.version.patch = 0; + return config; + } diff --git a/testing/btest/plugins/legacy-plugin/src/events.bif b/testing/btest/plugins/legacy-plugin/src/events.bif new file mode 100644 index 0000000000..4603fe4cf6 --- /dev/null +++ b/testing/btest/plugins/legacy-plugin/src/events.bif @@ -0,0 +1,2 @@ + +event foo_message%(c: connection, data: string%); diff --git a/testing/btest/plugins/legacy-plugin/src/foo-analyzer.pac b/testing/btest/plugins/legacy-plugin/src/foo-analyzer.pac new file mode 100644 index 0000000000..a210a8430c --- /dev/null +++ b/testing/btest/plugins/legacy-plugin/src/foo-analyzer.pac @@ -0,0 +1,15 @@ + +refine connection Foo_Conn += { + + function Foo_data(msg: Foo_Message): bool + %{ + StringVal* data = new StringVal(${msg.data}.length(), (const char*) ${msg.data}.data()); + BifEvent::generate_foo_message(bro_analyzer(), bro_analyzer()->Conn(), data); + return true; + %} + +}; + +refine typeattr Foo_Message += &let { + proc: bool = $context.connection.Foo_data(this); +}; diff --git a/testing/btest/plugins/legacy-plugin/src/foo-protocol.pac b/testing/btest/plugins/legacy-plugin/src/foo-protocol.pac new file mode 100644 index 0000000000..892513c4f0 --- /dev/null +++ b/testing/btest/plugins/legacy-plugin/src/foo-protocol.pac @@ -0,0 +1,4 @@ + +type Foo_Message(is_orig: bool) = record { + data: bytestring &restofdata; +}; diff --git a/testing/btest/plugins/legacy-plugin/src/foo.pac b/testing/btest/plugins/legacy-plugin/src/foo.pac new file mode 100644 index 0000000000..826bcc624e --- /dev/null +++ 
b/testing/btest/plugins/legacy-plugin/src/foo.pac @@ -0,0 +1,26 @@ +%include binpac.pac +%include bro.pac + +%extern{ +#include "Foo.h" + +#include "events.bif.h" +%} + +analyzer Foo withcontext { + connection: Foo_Conn; + flow: Foo_Flow; +}; + +connection Foo_Conn(bro_analyzer: BroAnalyzer) { + upflow = Foo_Flow(true); + downflow = Foo_Flow(false); +}; + +%include foo-protocol.pac + +flow Foo_Flow(is_orig: bool) { + datagram = Foo_Message(is_orig) withcontext(connection, this); +}; + +%include foo-analyzer.pac diff --git a/testing/btest/plugins/legacy.zeek b/testing/btest/plugins/legacy.zeek new file mode 100644 index 0000000000..bb663d744b --- /dev/null +++ b/testing/btest/plugins/legacy.zeek @@ -0,0 +1,14 @@ +# Test that legacy Bro plugins still work. +# @TEST-EXEC: ${DIST}/aux/zeek-aux/plugin-support/init-plugin -u . Demo Foo +# @TEST-EXEC: cp -r %DIR/legacy-plugin/* . +# @TEST-EXEC: ./configure --bro-dist=${DIST} && make +# @TEST-EXEC: unset ZEEK_PLUGIN_PATH; BRO_PLUGIN_PATH=`pwd` zeek -NN Demo::Foo >>output +# @TEST-EXEC: echo === >>output +# @TEST-EXEC: unset ZEEK_PLUGIN_PATH; BRO_PLUGIN_PATH=`pwd` zeek -r $TRACES/port4242.trace %INPUT >>output +# @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff output + +event foo_message(c: connection, data: string) + { + print "foo_message", c$id, data; + } + diff --git a/testing/btest/plugins/logging-hooks.bro b/testing/btest/plugins/logging-hooks.bro deleted file mode 100644 index f2ca926c06..0000000000 --- a/testing/btest/plugins/logging-hooks.bro +++ /dev/null @@ -1,72 +0,0 @@ -# @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin -u . Log Hooks -# @TEST-EXEC: cp -r %DIR/logging-hooks-plugin/* . -# @TEST-EXEC: ./configure --bro-dist=${DIST} && make -# @TEST-EXEC: BRO_PLUGIN_ACTIVATE="Log::Hooks" BRO_PLUGIN_PATH=`pwd` bro -b %INPUT 2>&1 | $SCRIPTS/diff-remove-abspath | sort | uniq >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: btest-diff ssh.log - -redef LogAscii::empty_field = "EMPTY"; - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - b: bool; - i: int &optional; - e: Log::ID; - c: count; - p: port; - sn: subnet; - a: addr; - d: double; - t: time; - iv: interval; - s: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of count; - ve: vector of string; - f: function(i: count) : string; - } &log; -} - -function foo(i : count) : string - { - if ( i > 0 ) - return "Foo"; - else - return "Bar"; - } - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - - local empty_set: set[string]; - local empty_vector: vector of string; - - local i = 0; - while ( ++i < 4 ) - Log::write(SSH::LOG, [ - $b=T, - $i=-i, - $e=SSH::LOG, - $c=21, - $p=123/tcp, - $sn=10.0.0.1/24, - $a=1.2.3.4, - $d=3.14, - $t=network_time(), - $iv=100secs, - $s="hurz", - $sc=set(1,2,3,4), - $ss=set("AA", "BB", "CC"), - $se=empty_set, - $vc=vector(10, 20, 30), - $ve=empty_vector, - $f=foo - ]); -} diff --git a/testing/btest/plugins/logging-hooks.zeek b/testing/btest/plugins/logging-hooks.zeek new file mode 100644 index 0000000000..b11e3a89f3 --- /dev/null +++ b/testing/btest/plugins/logging-hooks.zeek @@ -0,0 +1,72 @@ +# @TEST-EXEC: ${DIST}/aux/zeek-aux/plugin-support/init-plugin -u . Log Hooks +# @TEST-EXEC: cp -r %DIR/logging-hooks-plugin/* . 
+# @TEST-EXEC: ./configure --zeek-dist=${DIST} && make +# @TEST-EXEC: ZEEK_PLUGIN_ACTIVATE="Log::Hooks" ZEEK_PLUGIN_PATH=`pwd` zeek -b %INPUT 2>&1 | $SCRIPTS/diff-remove-abspath | sort | uniq >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: btest-diff ssh.log + +redef LogAscii::empty_field = "EMPTY"; + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + b: bool; + i: int &optional; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of count; + ve: vector of string; + f: function(i: count) : string; + } &log; +} + +function foo(i : count) : string + { + if ( i > 0 ) + return "Foo"; + else + return "Bar"; + } + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + + local empty_set: set[string]; + local empty_vector: vector of string; + + local i = 0; + while ( ++i < 4 ) + Log::write(SSH::LOG, [ + $b=T, + $i=-i, + $e=SSH::LOG, + $c=21, + $p=123/tcp, + $sn=10.0.0.1/24, + $a=1.2.3.4, + $d=3.14, + $t=network_time(), + $iv=100secs, + $s="hurz", + $sc=set(1,2,3,4), + $ss=set("AA", "BB", "CC"), + $se=empty_set, + $vc=vector(10, 20, 30), + $ve=empty_vector, + $f=foo + ]); +} diff --git a/testing/btest/plugins/pktdumper-plugin/CMakeLists.txt b/testing/btest/plugins/pktdumper-plugin/CMakeLists.txt index 2234907ad2..0b92f3b0ca 100644 --- a/testing/btest/plugins/pktdumper-plugin/CMakeLists.txt +++ b/testing/btest/plugins/pktdumper-plugin/CMakeLists.txt @@ -1,17 +1,17 @@ -project(Bro-Plugin-Demo-Foo) +project(Zeek-Plugin-Demo-Foo) cmake_minimum_required(VERSION 2.6.3) -if ( NOT BRO_DIST ) - message(FATAL_ERROR "BRO_DIST not set") +if ( NOT ZEEK_DIST ) + message(FATAL_ERROR "ZEEK_DIST not set") endif () -set(CMAKE_MODULE_PATH ${BRO_DIST}/cmake) +set(CMAKE_MODULE_PATH ${ZEEK_DIST}/cmake) -include(BroPlugin) +include(ZeekPlugin) -bro_plugin_begin(Demo Foo) -bro_plugin_cc(src/Plugin.cc) -bro_plugin_cc(src/Foo.cc) -bro_plugin_end() +zeek_plugin_begin(Demo Foo) +zeek_plugin_cc(src/Plugin.cc) +zeek_plugin_cc(src/Foo.cc) +zeek_plugin_end() diff --git a/testing/btest/plugins/pktdumper.bro b/testing/btest/plugins/pktdumper.bro deleted file mode 100644 index d9bd91a5a6..0000000000 --- a/testing/btest/plugins/pktdumper.bro +++ /dev/null @@ -1,8 +0,0 @@ -# @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin -u . Demo Foo -# @TEST-EXEC: cp -r %DIR/pktdumper-plugin/* . -# @TEST-EXEC: ./configure --bro-dist=${DIST} && make -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -NN Demo::Foo >>output -# @TEST-EXEC: echo === >>output -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -r $TRACES/port4242.trace -w foo::XXX %INPUT FilteredTraceDetection::enable=F >>output -# @TEST-EXEC: btest-diff output - diff --git a/testing/btest/plugins/pktdumper.zeek b/testing/btest/plugins/pktdumper.zeek new file mode 100644 index 0000000000..ff78dad502 --- /dev/null +++ b/testing/btest/plugins/pktdumper.zeek @@ -0,0 +1,8 @@ +# @TEST-EXEC: ${DIST}/aux/zeek-aux/plugin-support/init-plugin -u . Demo Foo +# @TEST-EXEC: cp -r %DIR/pktdumper-plugin/* . 
+# @TEST-EXEC: ./configure --zeek-dist=${DIST} && make +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek -NN Demo::Foo >>output +# @TEST-EXEC: echo === >>output +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek -r $TRACES/port4242.trace -w foo::XXX %INPUT FilteredTraceDetection::enable=F >>output +# @TEST-EXEC: btest-diff output + diff --git a/testing/btest/plugins/pktsrc-plugin/CMakeLists.txt b/testing/btest/plugins/pktsrc-plugin/CMakeLists.txt index 2234907ad2..0b92f3b0ca 100644 --- a/testing/btest/plugins/pktsrc-plugin/CMakeLists.txt +++ b/testing/btest/plugins/pktsrc-plugin/CMakeLists.txt @@ -1,17 +1,17 @@ -project(Bro-Plugin-Demo-Foo) +project(Zeek-Plugin-Demo-Foo) cmake_minimum_required(VERSION 2.6.3) -if ( NOT BRO_DIST ) - message(FATAL_ERROR "BRO_DIST not set") +if ( NOT ZEEK_DIST ) + message(FATAL_ERROR "ZEEK_DIST not set") endif () -set(CMAKE_MODULE_PATH ${BRO_DIST}/cmake) +set(CMAKE_MODULE_PATH ${ZEEK_DIST}/cmake) -include(BroPlugin) +include(ZeekPlugin) -bro_plugin_begin(Demo Foo) -bro_plugin_cc(src/Plugin.cc) -bro_plugin_cc(src/Foo.cc) -bro_plugin_end() +zeek_plugin_begin(Demo Foo) +zeek_plugin_cc(src/Plugin.cc) +zeek_plugin_cc(src/Foo.cc) +zeek_plugin_end() diff --git a/testing/btest/plugins/pktsrc.bro b/testing/btest/plugins/pktsrc.bro deleted file mode 100644 index a13596e245..0000000000 --- a/testing/btest/plugins/pktsrc.bro +++ /dev/null @@ -1,8 +0,0 @@ -# @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin -u . Demo Foo -# @TEST-EXEC: cp -r %DIR/pktsrc-plugin/* . -# @TEST-EXEC: ./configure --bro-dist=${DIST} && make -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -NN Demo::Foo >>output -# @TEST-EXEC: echo === >>output -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -r foo::XXX %INPUT FilteredTraceDetection::enable=F >>output -# @TEST-EXEC: btest-diff conn.log - diff --git a/testing/btest/plugins/pktsrc.zeek b/testing/btest/plugins/pktsrc.zeek new file mode 100644 index 0000000000..59a2ea2148 --- /dev/null +++ b/testing/btest/plugins/pktsrc.zeek @@ -0,0 +1,8 @@ +# @TEST-EXEC: ${DIST}/aux/zeek-aux/plugin-support/init-plugin -u . Demo Foo +# @TEST-EXEC: cp -r %DIR/pktsrc-plugin/* . +# @TEST-EXEC: ./configure --zeek-dist=${DIST} && make +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek -NN Demo::Foo >>output +# @TEST-EXEC: echo === >>output +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek -r foo::XXX %INPUT FilteredTraceDetection::enable=F >>output +# @TEST-EXEC: btest-diff conn.log + diff --git a/testing/btest/plugins/plugin-nopatchversion.bro b/testing/btest/plugins/plugin-nopatchversion.bro deleted file mode 100644 index 2279efde6a..0000000000 --- a/testing/btest/plugins/plugin-nopatchversion.bro +++ /dev/null @@ -1,5 +0,0 @@ -# @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin -u . Testing NoPatchVersion -# @TEST-EXEC: cp -r %DIR/plugin-nopatchversion-plugin/* . -# @TEST-EXEC: ./configure --bro-dist=${DIST} && make -# @TEST-EXEC: BRO_PLUGIN_PATH=$(pwd) bro -N Testing::NoPatchVersion >> output -# @TEST-EXEC: btest-diff output diff --git a/testing/btest/plugins/plugin-nopatchversion.zeek b/testing/btest/plugins/plugin-nopatchversion.zeek new file mode 100644 index 0000000000..d5f5693bc7 --- /dev/null +++ b/testing/btest/plugins/plugin-nopatchversion.zeek @@ -0,0 +1,5 @@ +# @TEST-EXEC: ${DIST}/aux/zeek-aux/plugin-support/init-plugin -u . Testing NoPatchVersion +# @TEST-EXEC: cp -r %DIR/plugin-nopatchversion-plugin/* . 
+# @TEST-EXEC: ./configure --zeek-dist=${DIST} && make +# @TEST-EXEC: ZEEK_PLUGIN_PATH=$(pwd) zeek -N Testing::NoPatchVersion >> output +# @TEST-EXEC: btest-diff output diff --git a/testing/btest/plugins/plugin-withpatchversion.bro b/testing/btest/plugins/plugin-withpatchversion.bro deleted file mode 100644 index 4d86f09719..0000000000 --- a/testing/btest/plugins/plugin-withpatchversion.bro +++ /dev/null @@ -1,5 +0,0 @@ -# @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin -u . Testing WithPatchVersion -# @TEST-EXEC: cp -r %DIR/plugin-withpatchversion-plugin/* . -# @TEST-EXEC: ./configure --bro-dist=${DIST} && make -# @TEST-EXEC: BRO_PLUGIN_PATH=$(pwd) bro -N Testing::WithPatchVersion >> output -# @TEST-EXEC: btest-diff output diff --git a/testing/btest/plugins/plugin-withpatchversion.zeek b/testing/btest/plugins/plugin-withpatchversion.zeek new file mode 100644 index 0000000000..cc484ce44d --- /dev/null +++ b/testing/btest/plugins/plugin-withpatchversion.zeek @@ -0,0 +1,5 @@ +# @TEST-EXEC: ${DIST}/aux/zeek-aux/plugin-support/init-plugin -u . Testing WithPatchVersion +# @TEST-EXEC: cp -r %DIR/plugin-withpatchversion-plugin/* . +# @TEST-EXEC: ./configure --zeek-dist=${DIST} && make +# @TEST-EXEC: ZEEK_PLUGIN_PATH=$(pwd) zeek -N Testing::WithPatchVersion >> output +# @TEST-EXEC: btest-diff output diff --git a/testing/btest/plugins/protocol-plugin/CMakeLists.txt b/testing/btest/plugins/protocol-plugin/CMakeLists.txt index 4bc8460c06..b8faa26ebd 100644 --- a/testing/btest/plugins/protocol-plugin/CMakeLists.txt +++ b/testing/btest/plugins/protocol-plugin/CMakeLists.txt @@ -1,19 +1,19 @@ -project(Bro-Plugin-Demo-Foo) +project(Zeek-Plugin-Demo-Foo) cmake_minimum_required(VERSION 2.6.3) -if ( NOT BRO_DIST ) - message(FATAL_ERROR "BRO_DIST not set") +if ( NOT ZEEK_DIST ) + message(FATAL_ERROR "ZEEK_DIST not set") endif () -set(CMAKE_MODULE_PATH ${BRO_DIST}/cmake) +set(CMAKE_MODULE_PATH ${ZEEK_DIST}/cmake) -include(BroPlugin) +include(ZeekPlugin) -bro_plugin_begin(Demo Foo) -bro_plugin_cc(src/Plugin.cc) -bro_plugin_cc(src/Foo.cc) -bro_plugin_bif(src/events.bif) -bro_plugin_pac(src/foo.pac src/foo-protocol.pac src/foo-analyzer.pac) -bro_plugin_end() +zeek_plugin_begin(Demo Foo) +zeek_plugin_cc(src/Plugin.cc) +zeek_plugin_cc(src/Foo.cc) +zeek_plugin_bif(src/events.bif) +zeek_plugin_pac(src/foo.pac src/foo-protocol.pac src/foo-analyzer.pac) +zeek_plugin_end() diff --git a/testing/btest/plugins/protocol-plugin/scripts/Demo/Foo/base/main.bro b/testing/btest/plugins/protocol-plugin/scripts/Demo/Foo/base/main.bro deleted file mode 100644 index 2e2d174b47..0000000000 --- a/testing/btest/plugins/protocol-plugin/scripts/Demo/Foo/base/main.bro +++ /dev/null @@ -1,7 +0,0 @@ - -const ports = { 4242/tcp }; - -event bro_init() &priority=5 - { - Analyzer::register_for_ports(Analyzer::ANALYZER_FOO, ports); - } diff --git a/testing/btest/plugins/protocol-plugin/scripts/Demo/Foo/base/main.zeek b/testing/btest/plugins/protocol-plugin/scripts/Demo/Foo/base/main.zeek new file mode 100644 index 0000000000..76c63723b7 --- /dev/null +++ b/testing/btest/plugins/protocol-plugin/scripts/Demo/Foo/base/main.zeek @@ -0,0 +1,7 @@ + +const ports = { 4242/tcp }; + +event zeek_init() &priority=5 + { + Analyzer::register_for_ports(Analyzer::ANALYZER_FOO, ports); + } diff --git a/testing/btest/plugins/protocol-plugin/scripts/__load__.zeek b/testing/btest/plugins/protocol-plugin/scripts/__load__.zeek new file mode 100644 index 0000000000..330718c604 --- /dev/null +++ b/testing/btest/plugins/protocol-plugin/scripts/__load__.zeek 
@@ -0,0 +1 @@ +@load Demo/Foo/base/main diff --git a/testing/btest/plugins/protocol.bro b/testing/btest/plugins/protocol.bro deleted file mode 100644 index 8a6c2a6399..0000000000 --- a/testing/btest/plugins/protocol.bro +++ /dev/null @@ -1,13 +0,0 @@ -# @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin -u . Demo Foo -# @TEST-EXEC: cp -r %DIR/protocol-plugin/* . -# @TEST-EXEC: ./configure --bro-dist=${DIST} && make -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -NN Demo::Foo >>output -# @TEST-EXEC: echo === >>output -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -r $TRACES/port4242.trace %INPUT >>output -# @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff output - -event foo_message(c: connection, data: string) - { - print "foo_message", c$id, data; - } - diff --git a/testing/btest/plugins/protocol.zeek b/testing/btest/plugins/protocol.zeek new file mode 100644 index 0000000000..295d8dbd2d --- /dev/null +++ b/testing/btest/plugins/protocol.zeek @@ -0,0 +1,13 @@ +# @TEST-EXEC: ${DIST}/aux/zeek-aux/plugin-support/init-plugin -u . Demo Foo +# @TEST-EXEC: cp -r %DIR/protocol-plugin/* . +# @TEST-EXEC: ./configure --zeek-dist=${DIST} && make +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek -NN Demo::Foo >>output +# @TEST-EXEC: echo === >>output +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek -r $TRACES/port4242.trace %INPUT >>output +# @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff output + +event foo_message(c: connection, data: string) + { + print "foo_message", c$id, data; + } + diff --git a/testing/btest/plugins/reader-plugin/CMakeLists.txt b/testing/btest/plugins/reader-plugin/CMakeLists.txt index 2234907ad2..0b92f3b0ca 100644 --- a/testing/btest/plugins/reader-plugin/CMakeLists.txt +++ b/testing/btest/plugins/reader-plugin/CMakeLists.txt @@ -1,17 +1,17 @@ -project(Bro-Plugin-Demo-Foo) +project(Zeek-Plugin-Demo-Foo) cmake_minimum_required(VERSION 2.6.3) -if ( NOT BRO_DIST ) - message(FATAL_ERROR "BRO_DIST not set") +if ( NOT ZEEK_DIST ) + message(FATAL_ERROR "ZEEK_DIST not set") endif () -set(CMAKE_MODULE_PATH ${BRO_DIST}/cmake) +set(CMAKE_MODULE_PATH ${ZEEK_DIST}/cmake) -include(BroPlugin) +include(ZeekPlugin) -bro_plugin_begin(Demo Foo) -bro_plugin_cc(src/Plugin.cc) -bro_plugin_cc(src/Foo.cc) -bro_plugin_end() +zeek_plugin_begin(Demo Foo) +zeek_plugin_cc(src/Plugin.cc) +zeek_plugin_cc(src/Foo.cc) +zeek_plugin_end() diff --git a/testing/btest/plugins/reader.bro b/testing/btest/plugins/reader.bro deleted file mode 100644 index ec9b6cf046..0000000000 --- a/testing/btest/plugins/reader.bro +++ /dev/null @@ -1,40 +0,0 @@ -# @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin -u . Demo Foo -# @TEST-EXEC: cp -r %DIR/reader-plugin/* . 
-# @TEST-EXEC: ./configure --bro-dist=${DIST} && make -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -NN Demo::Foo >>output -# @TEST-EXEC: echo === >>output -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` btest-bg-run bro bro %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff output -# @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff out - -redef exit_only_after_terminate = T; - -global outfile: file; -global try: count; - -module A; - -type Val: record { - s: string; -}; - -event line(description: Input::EventDescription, tpe: Input::Event, s: string) - { - print outfile, tpe; - print outfile, s; - try = try + 1; - if ( try == 5 ) - { - Input::remove("input"); - close(outfile); - terminate(); - } - } - -event bro_init() - { - try = 0; - outfile = open("../out"); - Input::add_event([$source="../input.log", $reader=Input::READER_FOO, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $want_record=F]); - } diff --git a/testing/btest/plugins/reader.zeek b/testing/btest/plugins/reader.zeek new file mode 100644 index 0000000000..40cb97765d --- /dev/null +++ b/testing/btest/plugins/reader.zeek @@ -0,0 +1,40 @@ +# @TEST-EXEC: ${DIST}/aux/zeek-aux/plugin-support/init-plugin -u . Demo Foo +# @TEST-EXEC: cp -r %DIR/reader-plugin/* . +# @TEST-EXEC: ./configure --zeek-dist=${DIST} && make +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek -NN Demo::Foo >>output +# @TEST-EXEC: echo === >>output +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` btest-bg-run zeek zeek %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff output +# @TEST-EXEC: TEST_DIFF_CANONIFIER= btest-diff out + +redef exit_only_after_terminate = T; + +global outfile: file; +global try: count; + +module A; + +type Val: record { + s: string; +}; + +event line(description: Input::EventDescription, tpe: Input::Event, s: string) + { + print outfile, tpe; + print outfile, s; + try = try + 1; + if ( try == 5 ) + { + Input::remove("input"); + close(outfile); + terminate(); + } + } + +event zeek_init() + { + try = 0; + outfile = open("../out"); + Input::add_event([$source="../input.log", $reader=Input::READER_FOO, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $want_record=F]); + } diff --git a/testing/btest/plugins/reporter-hook.bro b/testing/btest/plugins/reporter-hook.bro deleted file mode 100644 index 13e98fc76e..0000000000 --- a/testing/btest/plugins/reporter-hook.bro +++ /dev/null @@ -1,24 +0,0 @@ -# @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin -u . Reporter Hook -# @TEST-EXEC: cp -r %DIR/reporter-hook-plugin/* . -# @TEST-EXEC: ./configure --bro-dist=${DIST} && make -# @TEST-EXEC: BRO_PLUGIN_ACTIVATE="Reporter::Hook" BRO_PLUGIN_PATH=`pwd` bro -b %INPUT 2>&1 | $SCRIPTS/diff-remove-abspath | sort | uniq >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath | $SCRIPTS/diff-remove-timestamps" btest-diff reporter.log - -@load base/frameworks/reporter - -type TestType: record { - a: bool &optional; -}; - -event bro_init() - { - Reporter::info("Some Info"); - Reporter::warning("A warning"); - Reporter::error("An Error"); - Reporter::error("An Error that does not show up in the log"); - - # And just trigger a runtime problem. 
- local b = TestType(); - print b$a; - } diff --git a/testing/btest/plugins/reporter-hook.zeek b/testing/btest/plugins/reporter-hook.zeek new file mode 100644 index 0000000000..01229b3d49 --- /dev/null +++ b/testing/btest/plugins/reporter-hook.zeek @@ -0,0 +1,24 @@ +# @TEST-EXEC: ${DIST}/aux/zeek-aux/plugin-support/init-plugin -u . Reporter Hook +# @TEST-EXEC: cp -r %DIR/reporter-hook-plugin/* . +# @TEST-EXEC: ./configure --zeek-dist=${DIST} && make +# @TEST-EXEC: ZEEK_PLUGIN_ACTIVATE="Reporter::Hook" ZEEK_PLUGIN_PATH=`pwd` zeek -b %INPUT 2>&1 | $SCRIPTS/diff-remove-abspath | sort | uniq >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath | $SCRIPTS/diff-remove-timestamps" btest-diff reporter.log + +@load base/frameworks/reporter + +type TestType: record { + a: bool &optional; +}; + +event zeek_init() + { + Reporter::info("Some Info"); + Reporter::warning("A warning"); + Reporter::error("An Error"); + Reporter::error("An Error that does not show up in the log"); + + # And just trigger a runtime problem. + local b = TestType(); + print b$a; + } diff --git a/testing/btest/plugins/writer-plugin/CMakeLists.txt b/testing/btest/plugins/writer-plugin/CMakeLists.txt index 2234907ad2..0b92f3b0ca 100644 --- a/testing/btest/plugins/writer-plugin/CMakeLists.txt +++ b/testing/btest/plugins/writer-plugin/CMakeLists.txt @@ -1,17 +1,17 @@ -project(Bro-Plugin-Demo-Foo) +project(Zeek-Plugin-Demo-Foo) cmake_minimum_required(VERSION 2.6.3) -if ( NOT BRO_DIST ) - message(FATAL_ERROR "BRO_DIST not set") +if ( NOT ZEEK_DIST ) + message(FATAL_ERROR "ZEEK_DIST not set") endif () -set(CMAKE_MODULE_PATH ${BRO_DIST}/cmake) +set(CMAKE_MODULE_PATH ${ZEEK_DIST}/cmake) -include(BroPlugin) +include(ZeekPlugin) -bro_plugin_begin(Demo Foo) -bro_plugin_cc(src/Plugin.cc) -bro_plugin_cc(src/Foo.cc) -bro_plugin_end() +zeek_plugin_begin(Demo Foo) +zeek_plugin_cc(src/Plugin.cc) +zeek_plugin_cc(src/Foo.cc) +zeek_plugin_end() diff --git a/testing/btest/plugins/writer.bro b/testing/btest/plugins/writer.bro deleted file mode 100644 index 732d726fd7..0000000000 --- a/testing/btest/plugins/writer.bro +++ /dev/null @@ -1,8 +0,0 @@ -# @TEST-EXEC: ${DIST}/aux/bro-aux/plugin-support/init-plugin -u . Demo Foo -# @TEST-EXEC: cp -r %DIR/writer-plugin/* . -# @TEST-EXEC: ./configure --bro-dist=${DIST} && make -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -NN Demo::Foo >>output -# @TEST-EXEC: echo === >>output -# @TEST-EXEC: BRO_PLUGIN_PATH=`pwd` bro -r $TRACES/socks.trace Log::default_writer=Log::WRITER_FOO %INPUT | sort >>output -# @TEST-EXEC: btest-diff output - diff --git a/testing/btest/plugins/writer.zeek b/testing/btest/plugins/writer.zeek new file mode 100644 index 0000000000..21426dfdae --- /dev/null +++ b/testing/btest/plugins/writer.zeek @@ -0,0 +1,8 @@ +# @TEST-EXEC: ${DIST}/aux/zeek-aux/plugin-support/init-plugin -u . Demo Foo +# @TEST-EXEC: cp -r %DIR/writer-plugin/* . 
+# @TEST-EXEC: ./configure --zeek-dist=${DIST} && make +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek -NN Demo::Foo >>output +# @TEST-EXEC: echo === >>output +# @TEST-EXEC: ZEEK_PLUGIN_PATH=`pwd` zeek -r $TRACES/socks.trace Log::default_writer=Log::WRITER_FOO %INPUT | sort >>output +# @TEST-EXEC: btest-diff output + diff --git a/testing/btest/scripts/base/files/data_event/basic.bro b/testing/btest/scripts/base/files/data_event/basic.bro deleted file mode 100644 index 2877155ebb..0000000000 --- a/testing/btest/scripts/base/files/data_event/basic.bro +++ /dev/null @@ -1,20 +0,0 @@ -# Just a very basic test to check if ANALYZER_DATA_EVENT works. -# Also check if "in" works with binary data. -# @TEST-EXEC: bro -r $TRACES/pe/pe.trace %INPUT -# @TEST-EXEC: btest-diff .stdout -# @TEST-EXEC: btest-diff .stderr - -event stream_data(f: fa_file, data: string) - { - if ( "Windows" in data ) - { - print "Found"; - } - } - -event file_new (f: fa_file) - { - Files::add_analyzer(f, Files::ANALYZER_DATA_EVENT, - [$stream_event=stream_data]); - } - diff --git a/testing/btest/scripts/base/files/data_event/basic.zeek b/testing/btest/scripts/base/files/data_event/basic.zeek new file mode 100644 index 0000000000..a5026c287c --- /dev/null +++ b/testing/btest/scripts/base/files/data_event/basic.zeek @@ -0,0 +1,20 @@ +# Just a very basic test to check if ANALYZER_DATA_EVENT works. +# Also check if "in" works with binary data. +# @TEST-EXEC: zeek -r $TRACES/pe/pe.trace %INPUT +# @TEST-EXEC: btest-diff .stdout +# @TEST-EXEC: btest-diff .stderr + +event stream_data(f: fa_file, data: string) + { + if ( "Windows" in data ) + { + print "Found"; + } + } + +event file_new (f: fa_file) + { + Files::add_analyzer(f, Files::ANALYZER_DATA_EVENT, + [$stream_event=stream_data]); + } + diff --git a/testing/btest/scripts/base/files/entropy/basic.test b/testing/btest/scripts/base/files/entropy/basic.test index 2b867eb8cb..fda15d9724 100644 --- a/testing/btest/scripts/base/files/entropy/basic.test +++ b/testing/btest/scripts/base/files/entropy/basic.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/http/get.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/http/get.trace %INPUT # @TEST-EXEC: btest-diff .stdout diff --git a/testing/btest/scripts/base/files/extract/limit.bro b/testing/btest/scripts/base/files/extract/limit.bro deleted file mode 100644 index 4deecd292d..0000000000 --- a/testing/btest/scripts/base/files/extract/limit.bro +++ /dev/null @@ -1,45 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/ftp/retr.trace %INPUT max_extract=3000 efname=1 -# @TEST-EXEC: btest-diff extract_files/1 -# @TEST-EXEC: btest-diff 1.out -# @TEST-EXEC: bro -b -r $TRACES/ftp/retr.trace %INPUT max_extract=3000 efname=2 double_it=T -# @TEST-EXEC: btest-diff extract_files/2 -# @TEST-EXEC: btest-diff 2.out -# @TEST-EXEC: btest-diff files.log -# @TEST-EXEC: bro -b -r $TRACES/ftp/retr.trace %INPUT max_extract=7000 efname=3 unlimit_it=T -# @TEST-EXEC: btest-diff extract_files/3 -# @TEST-EXEC: btest-diff 3.out - -@load base/files/extract -@load base/protocols/ftp - -global outfile: file; -const max_extract: count = 0 &redef; -const double_it: bool = F &redef; -const unlimit_it: bool = F &redef; -const efname: string = "0" &redef; -global doubled: bool = F; - -event file_new(f: fa_file) - { - Files::add_analyzer(f, Files::ANALYZER_EXTRACT, - [$extract_filename=efname, $extract_limit=max_extract]); - } - -event file_extraction_limit(f: fa_file, args: any, limit: count, len: count) - { - print outfile, "file_extraction_limit", limit, len; - - if ( double_it && ! 
doubled ) - { - doubled = T; - print outfile, FileExtract::set_limit(f, args, max_extract*2); - } - - if ( unlimit_it ) - print outfile, FileExtract::set_limit(f, args, 0); - } - -event bro_init() - { - outfile = open(fmt("%s.out", efname)); - } diff --git a/testing/btest/scripts/base/files/extract/limit.zeek b/testing/btest/scripts/base/files/extract/limit.zeek new file mode 100644 index 0000000000..e676d0ebe0 --- /dev/null +++ b/testing/btest/scripts/base/files/extract/limit.zeek @@ -0,0 +1,45 @@ +# @TEST-EXEC: zeek -b -r $TRACES/ftp/retr.trace %INPUT max_extract=3000 efname=1 +# @TEST-EXEC: btest-diff extract_files/1 +# @TEST-EXEC: btest-diff 1.out +# @TEST-EXEC: zeek -b -r $TRACES/ftp/retr.trace %INPUT max_extract=3000 efname=2 double_it=T +# @TEST-EXEC: btest-diff extract_files/2 +# @TEST-EXEC: btest-diff 2.out +# @TEST-EXEC: btest-diff files.log +# @TEST-EXEC: zeek -b -r $TRACES/ftp/retr.trace %INPUT max_extract=7000 efname=3 unlimit_it=T +# @TEST-EXEC: btest-diff extract_files/3 +# @TEST-EXEC: btest-diff 3.out + +@load base/files/extract +@load base/protocols/ftp + +global outfile: file; +const max_extract: count = 0 &redef; +const double_it: bool = F &redef; +const unlimit_it: bool = F &redef; +const efname: string = "0" &redef; +global doubled: bool = F; + +event file_new(f: fa_file) + { + Files::add_analyzer(f, Files::ANALYZER_EXTRACT, + [$extract_filename=efname, $extract_limit=max_extract]); + } + +event file_extraction_limit(f: fa_file, args: any, limit: count, len: count) + { + print outfile, "file_extraction_limit", limit, len; + + if ( double_it && ! doubled ) + { + doubled = T; + print outfile, FileExtract::set_limit(f, args, max_extract*2); + } + + if ( unlimit_it ) + print outfile, FileExtract::set_limit(f, args, 0); + } + +event zeek_init() + { + outfile = open(fmt("%s.out", efname)); + } diff --git a/testing/btest/scripts/base/files/pe/basic.test b/testing/btest/scripts/base/files/pe/basic.test index 4ca9ceecef..99778b7943 100644 --- a/testing/btest/scripts/base/files/pe/basic.test +++ b/testing/btest/scripts/base/files/pe/basic.test @@ -1,5 +1,5 @@ # This tests the PE analyzer against a PCAP of 4 PE files being downloaded via FTP. # The files are a mix of DLL/EXEs, signed/unsigned, and 32/64-bit files. 
-# @TEST-EXEC: bro -r $TRACES/pe/pe.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/pe/pe.trace %INPUT # @TEST-EXEC: btest-diff pe.log diff --git a/testing/btest/scripts/base/files/unified2/alert.bro b/testing/btest/scripts/base/files/unified2/alert.bro deleted file mode 100644 index eca1ca036c..0000000000 --- a/testing/btest/scripts/base/files/unified2/alert.bro +++ /dev/null @@ -1,76 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT Unified2::watch_file=$FILES/unified2.u2 -# @TEST-EXEC: btest-diff unified2.log - -@TEST-START-FILE sid_msg.map -2003058 || ET MALWARE 180solutions (Zango) Spyware Installer Download || url,doc.emergingthreats.net/bin/view/Main/2003058 || url,securityresponse.symantec.com/avcenter/venc/data/pf/adware.180search.html -2012647 || ET POLICY Dropbox.com Offsite File Backup in Use || url,dereknewton.com/2011/04/dropbox-authentication-static-host-ids/ || url,www.dropbox.com -@TEST-END-FILE - -@TEST-START-FILE gen_msg.map -1 || 1 || snort general alert -2 || 1 || tag: Tagged Packet -3 || 1 || snort dynamic alert -100 || 1 || spp_portscan: Portscan Detected -100 || 2 || spp_portscan: Portscan Status -100 || 3 || spp_portscan: Portscan Ended -101 || 1 || spp_minfrag: minfrag alert -@TEST-END-FILE - -@TEST-START-FILE classification.config -# -# config classification:shortname,short description,priority -# - -#Traditional classifications. These will be replaced soon - -config classification: not-suspicious,Not Suspicious Traffic,3 -config classification: unknown,Unknown Traffic,3 -config classification: bad-unknown,Potentially Bad Traffic, 2 -config classification: attempted-recon,Attempted Information Leak,2 -config classification: successful-recon-limited,Information Leak,2 -config classification: successful-recon-largescale,Large Scale Information Leak,2 -config classification: attempted-dos,Attempted Denial of Service,2 -config classification: successful-dos,Denial of Service,2 -config classification: attempted-user,Attempted User Privilege Gain,1 -config classification: unsuccessful-user,Unsuccessful User Privilege Gain,1 -config classification: successful-user,Successful User Privilege Gain,1 -config classification: attempted-admin,Attempted Administrator Privilege Gain,1 -config classification: successful-admin,Successful Administrator Privilege Gain,1 -config classification: rpc-portmap-decode,Decode of an RPC Query,2 -config classification: shellcode-detect,Executable Code was Detected,1 -config classification: string-detect,A Suspicious String was Detected,3 -config classification: suspicious-filename-detect,A Suspicious Filename was Detected,2 -config classification: suspicious-login,An Attempted Login Using a Suspicious Username was Detected,2 -config classification: system-call-detect,A System Call was Detected,2 -config classification: tcp-connection,A TCP Connection was Detected,4 -config classification: trojan-activity,A Network Trojan was Detected, 1 -config classification: unusual-client-port-connection,A Client was Using an Unusual Port,2 -config classification: network-scan,Detection of a Network Scan,3 -config classification: denial-of-service,Detection of a Denial of Service Attack,2 -config classification: non-standard-protocol,Detection of a Non-Standard Protocol or Event,2 -config classification: protocol-command-decode,Generic Protocol Command Decode,3 -config classification: web-application-activity,Access to a Potentially Vulnerable Web Application,2 -config classification: web-application-attack,Web Application Attack,1 -config classification: 
misc-activity,Misc activity,3 -config classification: misc-attack,Misc Attack,2 -config classification: icmp-event,Generic ICMP event,3 -config classification: inappropriate-content,Inappropriate Content was Detected,1 -config classification: policy-violation,Potential Corporate Privacy Violation,1 -config classification: default-login-attempt,Attempt to Login By a Default Username and Password,2 -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -@load base/files/unified2 - -redef Unified2::sid_msg = @DIR+"/sid_msg.map"; -redef Unified2::gen_msg = @DIR+"/gen_msg.map"; -redef Unified2::classification_config = @DIR+"/classification.config"; -global i = 0; - -event Unified2::alert(f: fa_file, ev: Unified2::IDSEvent, pkt: Unified2::Packet) - { - ++i; - if ( i == 2 ) - terminate(); - } \ No newline at end of file diff --git a/testing/btest/scripts/base/files/unified2/alert.zeek b/testing/btest/scripts/base/files/unified2/alert.zeek new file mode 100644 index 0000000000..3a5a12a9c8 --- /dev/null +++ b/testing/btest/scripts/base/files/unified2/alert.zeek @@ -0,0 +1,76 @@ +# @TEST-EXEC: zeek -b %INPUT Unified2::watch_file=$FILES/unified2.u2 +# @TEST-EXEC: btest-diff unified2.log + +@TEST-START-FILE sid_msg.map +2003058 || ET MALWARE 180solutions (Zango) Spyware Installer Download || url,doc.emergingthreats.net/bin/view/Main/2003058 || url,securityresponse.symantec.com/avcenter/venc/data/pf/adware.180search.html +2012647 || ET POLICY Dropbox.com Offsite File Backup in Use || url,dereknewton.com/2011/04/dropbox-authentication-static-host-ids/ || url,www.dropbox.com +@TEST-END-FILE + +@TEST-START-FILE gen_msg.map +1 || 1 || snort general alert +2 || 1 || tag: Tagged Packet +3 || 1 || snort dynamic alert +100 || 1 || spp_portscan: Portscan Detected +100 || 2 || spp_portscan: Portscan Status +100 || 3 || spp_portscan: Portscan Ended +101 || 1 || spp_minfrag: minfrag alert +@TEST-END-FILE + +@TEST-START-FILE classification.config +# +# config classification:shortname,short description,priority +# + +#Traditional classifications. 
These will be replaced soon + +config classification: not-suspicious,Not Suspicious Traffic,3 +config classification: unknown,Unknown Traffic,3 +config classification: bad-unknown,Potentially Bad Traffic, 2 +config classification: attempted-recon,Attempted Information Leak,2 +config classification: successful-recon-limited,Information Leak,2 +config classification: successful-recon-largescale,Large Scale Information Leak,2 +config classification: attempted-dos,Attempted Denial of Service,2 +config classification: successful-dos,Denial of Service,2 +config classification: attempted-user,Attempted User Privilege Gain,1 +config classification: unsuccessful-user,Unsuccessful User Privilege Gain,1 +config classification: successful-user,Successful User Privilege Gain,1 +config classification: attempted-admin,Attempted Administrator Privilege Gain,1 +config classification: successful-admin,Successful Administrator Privilege Gain,1 +config classification: rpc-portmap-decode,Decode of an RPC Query,2 +config classification: shellcode-detect,Executable Code was Detected,1 +config classification: string-detect,A Suspicious String was Detected,3 +config classification: suspicious-filename-detect,A Suspicious Filename was Detected,2 +config classification: suspicious-login,An Attempted Login Using a Suspicious Username was Detected,2 +config classification: system-call-detect,A System Call was Detected,2 +config classification: tcp-connection,A TCP Connection was Detected,4 +config classification: trojan-activity,A Network Trojan was Detected, 1 +config classification: unusual-client-port-connection,A Client was Using an Unusual Port,2 +config classification: network-scan,Detection of a Network Scan,3 +config classification: denial-of-service,Detection of a Denial of Service Attack,2 +config classification: non-standard-protocol,Detection of a Non-Standard Protocol or Event,2 +config classification: protocol-command-decode,Generic Protocol Command Decode,3 +config classification: web-application-activity,Access to a Potentially Vulnerable Web Application,2 +config classification: web-application-attack,Web Application Attack,1 +config classification: misc-activity,Misc activity,3 +config classification: misc-attack,Misc Attack,2 +config classification: icmp-event,Generic ICMP event,3 +config classification: inappropriate-content,Inappropriate Content was Detected,1 +config classification: policy-violation,Potential Corporate Privacy Violation,1 +config classification: default-login-attempt,Attempt to Login By a Default Username and Password,2 +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +@load policy/files/unified2 + +redef Unified2::sid_msg = @DIR+"/sid_msg.map"; +redef Unified2::gen_msg = @DIR+"/gen_msg.map"; +redef Unified2::classification_config = @DIR+"/classification.config"; +global i = 0; + +event Unified2::alert(f: fa_file, ev: Unified2::IDSEvent, pkt: Unified2::Packet) + { + ++i; + if ( i == 2 ) + terminate(); + } diff --git a/testing/btest/scripts/base/files/x509/1999.test b/testing/btest/scripts/base/files/x509/1999.test index 7c1ab7971f..10c041db4f 100644 --- a/testing/btest/scripts/base/files/x509/1999.test +++ b/testing/btest/scripts/base/files/x509/1999.test @@ -1,5 +1,5 @@ # Test that the timestamp of a pre-y-2000 certificate is correctly parsed -# @TEST-EXEC: bro -r $TRACES/tls/telesec.pcap +# @TEST-EXEC: zeek -r $TRACES/tls/telesec.pcap # @TEST-EXEC: btest-diff x509.log diff --git a/testing/btest/scripts/base/files/x509/signed_certificate_timestamp.test 
b/testing/btest/scripts/base/files/x509/signed_certificate_timestamp.test index 7ca60faf96..b50d9e2697 100644 --- a/testing/btest/scripts/base/files/x509/signed_certificate_timestamp.test +++ b/testing/btest/scripts/base/files/x509/signed_certificate_timestamp.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tls/certificate-with-sct.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/certificate-with-sct.pcap %INPUT # @TEST-EXEC: btest-diff .stdout @load protocols/ssl/validate-certs diff --git a/testing/btest/scripts/base/files/x509/signed_certificate_timestamp_ocsp.test b/testing/btest/scripts/base/files/x509/signed_certificate_timestamp_ocsp.test index 01ed128541..9755f4f2f0 100644 --- a/testing/btest/scripts/base/files/x509/signed_certificate_timestamp_ocsp.test +++ b/testing/btest/scripts/base/files/x509/signed_certificate_timestamp_ocsp.test @@ -1,7 +1,7 @@ -# @TEST-EXEC: bro -r $TRACES/tls/signed_certificate_timestamp.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/signed_certificate_timestamp.pcap %INPUT # @TEST-EXEC: btest-diff .stdout -event bro_init() +event zeek_init() { Files::register_for_mime_type(Files::ANALYZER_OCSP_REPLY, "application/ocsp-response"); } diff --git a/testing/btest/scripts/base/frameworks/analyzer/disable-analyzer.bro b/testing/btest/scripts/base/frameworks/analyzer/disable-analyzer.bro deleted file mode 100644 index 749236f6c6..0000000000 --- a/testing/btest/scripts/base/frameworks/analyzer/disable-analyzer.bro +++ /dev/null @@ -1,14 +0,0 @@ -# -# @TEST-EXEC: bro -r ${TRACES}/var-services-std-ports.trace %INPUT -# @TEST-EXEC: cat conn.log | bro-cut service | grep -vq dns -# @TEST-EXEC: cat conn.log | bro-cut service | grep -vq ssh -# - -redef Analyzer::disabled_analyzers += { Analyzer::ANALYZER_SSH }; - -event bro_init() - { - Analyzer::disable_analyzer(Analyzer::ANALYZER_DNS); - } - - diff --git a/testing/btest/scripts/base/frameworks/analyzer/disable-analyzer.zeek b/testing/btest/scripts/base/frameworks/analyzer/disable-analyzer.zeek new file mode 100644 index 0000000000..5b98ea0f6d --- /dev/null +++ b/testing/btest/scripts/base/frameworks/analyzer/disable-analyzer.zeek @@ -0,0 +1,14 @@ +# +# @TEST-EXEC: zeek -r ${TRACES}/var-services-std-ports.trace %INPUT +# @TEST-EXEC: cat conn.log | zeek-cut service | grep -vq dns +# @TEST-EXEC: cat conn.log | zeek-cut service | grep -vq ssh +# + +redef Analyzer::disabled_analyzers += { Analyzer::ANALYZER_SSH }; + +event zeek_init() + { + Analyzer::disable_analyzer(Analyzer::ANALYZER_DNS); + } + + diff --git a/testing/btest/scripts/base/frameworks/analyzer/enable-analyzer.bro b/testing/btest/scripts/base/frameworks/analyzer/enable-analyzer.bro deleted file mode 100644 index bcee794768..0000000000 --- a/testing/btest/scripts/base/frameworks/analyzer/enable-analyzer.bro +++ /dev/null @@ -1,13 +0,0 @@ -# -# @TEST-EXEC: bro -r ${TRACES}/var-services-std-ports.trace %INPUT -# @TEST-EXEC: cat conn.log | bro-cut service | grep -q dns -# - -redef Analyzer::disable_all = T; - -event bro_init() - { - Analyzer::enable_analyzer(Analyzer::ANALYZER_DNS); - } - - diff --git a/testing/btest/scripts/base/frameworks/analyzer/enable-analyzer.zeek b/testing/btest/scripts/base/frameworks/analyzer/enable-analyzer.zeek new file mode 100644 index 0000000000..edd2a77361 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/analyzer/enable-analyzer.zeek @@ -0,0 +1,13 @@ +# +# @TEST-EXEC: zeek -r ${TRACES}/var-services-std-ports.trace %INPUT +# @TEST-EXEC: cat conn.log | zeek-cut service | grep -q dns +# + +redef Analyzer::disable_all = T; + 
+event zeek_init() + { + Analyzer::enable_analyzer(Analyzer::ANALYZER_DNS); + } + + diff --git a/testing/btest/scripts/base/frameworks/analyzer/register-for-port.bro b/testing/btest/scripts/base/frameworks/analyzer/register-for-port.bro deleted file mode 100644 index a764cc79c3..0000000000 --- a/testing/btest/scripts/base/frameworks/analyzer/register-for-port.bro +++ /dev/null @@ -1,13 +0,0 @@ -# -# @TEST-EXEC: bro -r ${TRACES}/ssh/ssh-on-port-80.trace %INPUT dpd_buffer_size=0; -# @TEST-EXEC: cat conn.log | bro-cut service | grep -q ssh -# -# @TEST-EXEC: bro -r ${TRACES}/ssh/ssh-on-port-80.trace dpd_buffer_size=0; -# @TEST-EXEC: cat conn.log | bro-cut service | grep -vq ssh - -event bro_init() - { - Analyzer::register_for_port(Analyzer::ANALYZER_SSH, 80/tcp); - } - - diff --git a/testing/btest/scripts/base/frameworks/analyzer/register-for-port.zeek b/testing/btest/scripts/base/frameworks/analyzer/register-for-port.zeek new file mode 100644 index 0000000000..8d3f92534b --- /dev/null +++ b/testing/btest/scripts/base/frameworks/analyzer/register-for-port.zeek @@ -0,0 +1,13 @@ +# +# @TEST-EXEC: zeek -r ${TRACES}/ssh/ssh-on-port-80.trace %INPUT dpd_buffer_size=0; +# @TEST-EXEC: cat conn.log | zeek-cut service | grep -q ssh +# +# @TEST-EXEC: zeek -r ${TRACES}/ssh/ssh-on-port-80.trace dpd_buffer_size=0; +# @TEST-EXEC: cat conn.log | zeek-cut service | grep -vq ssh + +event zeek_init() + { + Analyzer::register_for_port(Analyzer::ANALYZER_SSH, 80/tcp); + } + + diff --git a/testing/btest/scripts/base/frameworks/analyzer/schedule-analyzer.bro b/testing/btest/scripts/base/frameworks/analyzer/schedule-analyzer.bro deleted file mode 100644 index 114ea73673..0000000000 --- a/testing/btest/scripts/base/frameworks/analyzer/schedule-analyzer.bro +++ /dev/null @@ -1,36 +0,0 @@ -# -# @TEST-EXEC: bro -b -r ${TRACES}/rotation.trace %INPUT | sort >output -# @TEST-EXEC: btest-diff output - -global x = 0; - -event new_connection(c: connection) - { - # Make sure expiration executes. - Analyzer::schedule_analyzer(1.2.3.4, 1.2.3.4, 8/tcp, Analyzer::ANALYZER_MODBUS, 100hrs); - - if ( x > 0 ) - return; - - x = 1; - - Analyzer::schedule_analyzer(10.0.0.2, 10.0.0.3, 6/tcp, Analyzer::ANALYZER_SSH, 100hrs); - Analyzer::schedule_analyzer(10.0.0.2, 10.0.0.3, 6/tcp, Analyzer::ANALYZER_HTTP, 100hrs); - Analyzer::schedule_analyzer(10.0.0.2, 10.0.0.3, 6/tcp, Analyzer::ANALYZER_DNS, 100hrs); - Analyzer::schedule_analyzer(0.0.0.0, 10.0.0.3, 6/tcp, Analyzer::ANALYZER_FTP, 100hrs); - - Analyzer::schedule_analyzer(10.0.0.2, 10.0.0.3, 7/tcp, Analyzer::ANALYZER_SSH, 1sec); - Analyzer::schedule_analyzer(10.0.0.2, 10.0.0.3, 8/tcp, Analyzer::ANALYZER_HTTP, 1sec); - Analyzer::schedule_analyzer(10.0.0.2, 10.0.0.3, 8/tcp, Analyzer::ANALYZER_DNS, 100hrs); - Analyzer::schedule_analyzer(10.0.0.2, 10.0.0.3, 9/tcp, Analyzer::ANALYZER_FTP, 1sec); - } - -event scheduled_analyzer_applied(c: connection, a: Analyzer::Tag) - { - print "APPLIED:", network_time(), c$id, a; - } - - - - - diff --git a/testing/btest/scripts/base/frameworks/analyzer/schedule-analyzer.zeek b/testing/btest/scripts/base/frameworks/analyzer/schedule-analyzer.zeek new file mode 100644 index 0000000000..07a84629fc --- /dev/null +++ b/testing/btest/scripts/base/frameworks/analyzer/schedule-analyzer.zeek @@ -0,0 +1,36 @@ +# +# @TEST-EXEC: zeek -b -r ${TRACES}/rotation.trace %INPUT | sort >output +# @TEST-EXEC: btest-diff output + +global x = 0; + +event new_connection(c: connection) + { + # Make sure expiration executes. 
+ Analyzer::schedule_analyzer(1.2.3.4, 1.2.3.4, 8/tcp, Analyzer::ANALYZER_MODBUS, 100hrs); + + if ( x > 0 ) + return; + + x = 1; + + Analyzer::schedule_analyzer(10.0.0.2, 10.0.0.3, 6/tcp, Analyzer::ANALYZER_SSH, 100hrs); + Analyzer::schedule_analyzer(10.0.0.2, 10.0.0.3, 6/tcp, Analyzer::ANALYZER_HTTP, 100hrs); + Analyzer::schedule_analyzer(10.0.0.2, 10.0.0.3, 6/tcp, Analyzer::ANALYZER_DNS, 100hrs); + Analyzer::schedule_analyzer(0.0.0.0, 10.0.0.3, 6/tcp, Analyzer::ANALYZER_FTP, 100hrs); + + Analyzer::schedule_analyzer(10.0.0.2, 10.0.0.3, 7/tcp, Analyzer::ANALYZER_SSH, 1sec); + Analyzer::schedule_analyzer(10.0.0.2, 10.0.0.3, 8/tcp, Analyzer::ANALYZER_HTTP, 1sec); + Analyzer::schedule_analyzer(10.0.0.2, 10.0.0.3, 8/tcp, Analyzer::ANALYZER_DNS, 100hrs); + Analyzer::schedule_analyzer(10.0.0.2, 10.0.0.3, 9/tcp, Analyzer::ANALYZER_FTP, 1sec); + } + +event scheduled_analyzer_applied(c: connection, a: Analyzer::Tag) + { + print "APPLIED:", network_time(), c$id, a; + } + + + + + diff --git a/testing/btest/scripts/base/frameworks/cluster/custom_pool_exclusivity.bro b/testing/btest/scripts/base/frameworks/cluster/custom_pool_exclusivity.bro deleted file mode 100644 index dc2558f2a4..0000000000 --- a/testing/btest/scripts/base/frameworks/cluster/custom_pool_exclusivity.bro +++ /dev/null @@ -1,118 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# @TEST-PORT: BROKER_PORT4 -# @TEST-PORT: BROKER_PORT5 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT -# @TEST-EXEC: btest-bg-run proxy-2 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait 30 -# @TEST-EXEC: btest-diff manager-1/.stdout - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], - ["proxy-2"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager-1", $interface="eth0"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT5")), $manager="manager-1", $interface="eth1"], -}; -@TEST-END-FILE - -global my_pool_spec: Cluster::PoolSpec = - Cluster::PoolSpec( - $topic = "bro/cluster/pool/my_pool", - $node_type = Cluster::PROXY - ); - -global my_pool: Cluster::Pool; - -redef Cluster::proxy_pool_spec = - Cluster::PoolSpec( - $topic = "bro/cluster/pool/proxy", - $node_type = Cluster::PROXY, - $exclusive = T, - $max_nodes = 1 - ); - -event bro_init() - { - my_pool = Cluster::register_pool(my_pool_spec); - } - -global proxy_count = 0; - -event go_away() - { - terminate(); - } - -function print_stuff(heading: string) - { - print heading; - - local v: vector of count = vector(0, 1, 2, 3, 13, 37, 42, 101); - - for ( i in v ) - { - print "hrw", v[i], Cluster::hrw_topic(Cluster::proxy_pool, v[i]); - print "hrw (custom pool)", v[i], Cluster::hrw_topic(my_pool, v[i]); - } - - local rr_key = "test"; - - for ( i in v ) - { - print "rr", Cluster::rr_topic(Cluster::proxy_pool, rr_key); - print "rr (custom pool)", Cluster::rr_topic(my_pool, rr_key); - } - - # Just checking the same keys still map to same topic ... 
- for ( i in v ) - { - print "hrw", v[i], Cluster::hrw_topic(Cluster::proxy_pool, v[i]); - print "hrw (custom pool)", v[i], Cluster::hrw_topic(my_pool, v[i]); - } - } - -event Cluster::node_up(name: string, id: string) - { - if ( Cluster::node != "manager-1" ) - return; - - if ( name == "proxy-1" || name == "proxy-2" ) - ++proxy_count; - - if ( proxy_count == 2 ) - { - print_stuff("1st stuff"); - local e = Broker::make_event(go_away); - Broker::publish(Cluster::node_topic("proxy-1"), e); - } - } - -event Cluster::node_down(name: string, id: string) - { - if ( Cluster::node != "manager-1" ) - return; - - if ( name == "proxy-1" ) - { - print_stuff("2nd stuff"); - local e = Broker::make_event(go_away); - Broker::publish(Cluster::node_topic("proxy-2"), e); - } - - if ( name == "proxy-2" ) - { - print_stuff("no stuff"); - terminate(); - } - } - -event Cluster::node_down(name: string, id: string) - { - if ( name == "manager-1" ) - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/cluster/custom_pool_exclusivity.zeek b/testing/btest/scripts/base/frameworks/cluster/custom_pool_exclusivity.zeek new file mode 100644 index 0000000000..b3f1d36219 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/cluster/custom_pool_exclusivity.zeek @@ -0,0 +1,118 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# @TEST-PORT: BROKER_PORT4 +# @TEST-PORT: BROKER_PORT5 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run proxy-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=proxy-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run proxy-2 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=proxy-2 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 30 +# @TEST-EXEC: btest-diff manager-1/.stdout + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], + ["proxy-2"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT5")), $manager="manager-1", $interface="eth1"], +}; +@TEST-END-FILE + +global my_pool_spec: Cluster::PoolSpec = + Cluster::PoolSpec( + $topic = "zeek/cluster/pool/my_pool", + $node_type = Cluster::PROXY + ); + +global my_pool: Cluster::Pool; + +redef Cluster::proxy_pool_spec = + Cluster::PoolSpec( + $topic = "zeek/cluster/pool/proxy", + $node_type = Cluster::PROXY, + $exclusive = T, + $max_nodes = 1 + ); + +event zeek_init() + { + my_pool = Cluster::register_pool(my_pool_spec); + } + +global proxy_count = 0; + +event go_away() + { + terminate(); + } + +function print_stuff(heading: string) + { + print heading; + + local v: vector of count = vector(0, 1, 2, 3, 13, 37, 42, 101); + + for ( i in v ) + { + print "hrw", v[i], Cluster::hrw_topic(Cluster::proxy_pool, v[i]); + print "hrw (custom pool)", v[i], Cluster::hrw_topic(my_pool, v[i]); + } + + local rr_key = "test"; + + for ( i in v ) + { + print "rr", Cluster::rr_topic(Cluster::proxy_pool, rr_key); + print "rr (custom pool)", Cluster::rr_topic(my_pool, rr_key); + } + + # Just checking the same keys still map to same topic ... 
+ for ( i in v ) + { + print "hrw", v[i], Cluster::hrw_topic(Cluster::proxy_pool, v[i]); + print "hrw (custom pool)", v[i], Cluster::hrw_topic(my_pool, v[i]); + } + } + +event Cluster::node_up(name: string, id: string) + { + if ( Cluster::node != "manager-1" ) + return; + + if ( name == "proxy-1" || name == "proxy-2" ) + ++proxy_count; + + if ( proxy_count == 2 ) + { + print_stuff("1st stuff"); + local e = Broker::make_event(go_away); + Broker::publish(Cluster::node_topic("proxy-1"), e); + } + } + +event Cluster::node_down(name: string, id: string) + { + if ( Cluster::node != "manager-1" ) + return; + + if ( name == "proxy-1" ) + { + print_stuff("2nd stuff"); + local e = Broker::make_event(go_away); + Broker::publish(Cluster::node_topic("proxy-2"), e); + } + + if ( name == "proxy-2" ) + { + print_stuff("no stuff"); + terminate(); + } + } + +event Cluster::node_down(name: string, id: string) + { + if ( name == "manager-1" ) + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/cluster/custom_pool_limits.bro b/testing/btest/scripts/base/frameworks/cluster/custom_pool_limits.bro deleted file mode 100644 index 08202bd727..0000000000 --- a/testing/btest/scripts/base/frameworks/cluster/custom_pool_limits.bro +++ /dev/null @@ -1,118 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# @TEST-PORT: BROKER_PORT4 -# @TEST-PORT: BROKER_PORT5 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT -# @TEST-EXEC: btest-bg-run proxy-2 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait 30 -# @TEST-EXEC: btest-diff manager-1/.stdout - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], - ["proxy-2"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager-1", $interface="eth0"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT5")), $manager="manager-1", $interface="eth1"], -}; -@TEST-END-FILE - -global my_pool_spec: Cluster::PoolSpec = - Cluster::PoolSpec( - $topic = "bro/cluster/pool/my_pool", - $node_type = Cluster::PROXY - ); - -global my_pool: Cluster::Pool; - -redef Cluster::proxy_pool_spec = - Cluster::PoolSpec( - $topic = "bro/cluster/pool/proxy", - $node_type = Cluster::PROXY, - $exclusive = F, - $max_nodes = 1 - ); - -event bro_init() - { - my_pool = Cluster::register_pool(my_pool_spec); - } - -global proxy_count = 0; - -event go_away() - { - terminate(); - } - -function print_stuff(heading: string) - { - print heading; - - local v: vector of count = vector(0, 1, 2, 3, 13, 37, 42, 101); - - for ( i in v ) - { - print "hrw", v[i], Cluster::hrw_topic(Cluster::proxy_pool, v[i]); - print "hrw (custom pool)", v[i], Cluster::hrw_topic(my_pool, v[i]); - } - - local rr_key = "test"; - - for ( i in v ) - { - print "rr", Cluster::rr_topic(Cluster::proxy_pool, rr_key); - print "rr (custom pool)", Cluster::rr_topic(my_pool, rr_key); - } - - # Just checking the same keys still map to same topic ... 
- for ( i in v ) - { - print "hrw", v[i], Cluster::hrw_topic(Cluster::proxy_pool, v[i]); - print "hrw (custom pool)", v[i], Cluster::hrw_topic(my_pool, v[i]); - } - } - -event Cluster::node_up(name: string, id: string) - { - if ( Cluster::node != "manager-1" ) - return; - - if ( name == "proxy-1" || name == "proxy-2" ) - ++proxy_count; - - if ( proxy_count == 2 ) - { - print_stuff("1st stuff"); - local e = Broker::make_event(go_away); - Broker::publish(Cluster::node_topic("proxy-1"), e); - } - } - -event Cluster::node_down(name: string, id: string) - { - if ( Cluster::node != "manager-1" ) - return; - - if ( name == "proxy-1" ) - { - print_stuff("2nd stuff"); - local e = Broker::make_event(go_away); - Broker::publish(Cluster::node_topic("proxy-2"), e); - } - - if ( name == "proxy-2" ) - { - print_stuff("no stuff"); - terminate(); - } - } - -event Cluster::node_down(name: string, id: string) - { - if ( name == "manager-1" ) - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/cluster/custom_pool_limits.zeek b/testing/btest/scripts/base/frameworks/cluster/custom_pool_limits.zeek new file mode 100644 index 0000000000..23b56c8147 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/cluster/custom_pool_limits.zeek @@ -0,0 +1,118 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# @TEST-PORT: BROKER_PORT4 +# @TEST-PORT: BROKER_PORT5 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run proxy-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=proxy-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run proxy-2 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=proxy-2 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 30 +# @TEST-EXEC: btest-diff manager-1/.stdout + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], + ["proxy-2"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT5")), $manager="manager-1", $interface="eth1"], +}; +@TEST-END-FILE + +global my_pool_spec: Cluster::PoolSpec = + Cluster::PoolSpec( + $topic = "zeek/cluster/pool/my_pool", + $node_type = Cluster::PROXY + ); + +global my_pool: Cluster::Pool; + +redef Cluster::proxy_pool_spec = + Cluster::PoolSpec( + $topic = "zeek/cluster/pool/proxy", + $node_type = Cluster::PROXY, + $exclusive = F, + $max_nodes = 1 + ); + +event zeek_init() + { + my_pool = Cluster::register_pool(my_pool_spec); + } + +global proxy_count = 0; + +event go_away() + { + terminate(); + } + +function print_stuff(heading: string) + { + print heading; + + local v: vector of count = vector(0, 1, 2, 3, 13, 37, 42, 101); + + for ( i in v ) + { + print "hrw", v[i], Cluster::hrw_topic(Cluster::proxy_pool, v[i]); + print "hrw (custom pool)", v[i], Cluster::hrw_topic(my_pool, v[i]); + } + + local rr_key = "test"; + + for ( i in v ) + { + print "rr", Cluster::rr_topic(Cluster::proxy_pool, rr_key); + print "rr (custom pool)", Cluster::rr_topic(my_pool, rr_key); + } + + # Just checking the same keys still map to same topic ... 
+ for ( i in v ) + { + print "hrw", v[i], Cluster::hrw_topic(Cluster::proxy_pool, v[i]); + print "hrw (custom pool)", v[i], Cluster::hrw_topic(my_pool, v[i]); + } + } + +event Cluster::node_up(name: string, id: string) + { + if ( Cluster::node != "manager-1" ) + return; + + if ( name == "proxy-1" || name == "proxy-2" ) + ++proxy_count; + + if ( proxy_count == 2 ) + { + print_stuff("1st stuff"); + local e = Broker::make_event(go_away); + Broker::publish(Cluster::node_topic("proxy-1"), e); + } + } + +event Cluster::node_down(name: string, id: string) + { + if ( Cluster::node != "manager-1" ) + return; + + if ( name == "proxy-1" ) + { + print_stuff("2nd stuff"); + local e = Broker::make_event(go_away); + Broker::publish(Cluster::node_topic("proxy-2"), e); + } + + if ( name == "proxy-2" ) + { + print_stuff("no stuff"); + terminate(); + } + } + +event Cluster::node_down(name: string, id: string) + { + if ( name == "manager-1" ) + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/cluster/forwarding.bro b/testing/btest/scripts/base/frameworks/cluster/forwarding.bro deleted file mode 100644 index e62a2ced66..0000000000 --- a/testing/btest/scripts/base/frameworks/cluster/forwarding.bro +++ /dev/null @@ -1,111 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# @TEST-PORT: BROKER_PORT4 -# @TEST-PORT: BROKER_PORT5 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT -# @TEST-EXEC: btest-bg-run proxy-2 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-2 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait 30 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff manager-1/.stdout -# @TEST-EXEC: btest-diff proxy-1/.stdout -# @TEST-EXEC: btest-diff proxy-2/.stdout -# @TEST-EXEC: btest-diff worker-1/.stdout -# @TEST-EXEC: btest-diff worker-2/.stdout - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], - ["proxy-2"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager-1", $interface="eth0"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT5")), $manager="manager-1", $interface="eth1"], -}; -@TEST-END-FILE - -global fully_connected: event(); - -global peer_count = 0; -global peers_lost = 0; -global fully_connected_nodes = 0; - -redef Broker::forward_messages = T; - -event forwarded_event() - { - print "got forwarded event"; - - if ( Cluster::node == "manager-1" ) - print "manager should NOT have raised the forwarded event"; - - terminate(); - } - -event ready() - { - # note that the publishing node, worker-1, will not receive the forwarded - # event as Broker's forwarding prevents the message going back to the - # immediate sender. - Broker::publish("test_topic", forwarded_event); - } - -event fully_connected() - { - if ( ! 
is_remote_event() ) - return; - - print "Got fully_connected event"; - fully_connected_nodes = fully_connected_nodes + 1; - - if ( Cluster::node == "manager-1" ) - { - if ( peer_count == 4 && fully_connected_nodes == 4 ) - Broker::publish(Cluster::node_topic("worker-1"), ready); - } - } - -event bro_init() - { - Broker::auto_publish(Cluster::manager_topic, fully_connected); - - if ( Cluster::node == "manager-1" ) - Broker::forward("test_topic"); - if ( Cluster::node == "worker-1" ) - Broker::subscribe("test_topic"); - if ( Cluster::node == "worker-2" ) - Broker::subscribe("test_topic"); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print "Connected to a peer"; - peer_count = peer_count + 1; - - if ( Cluster::node == "manager-1" ) - { - if ( peer_count == 4 && fully_connected_nodes == 4 ) - Broker::publish(Cluster::node_topic("worker-1"), ready); - } - else - { - if ( peer_count == 3 ) - event fully_connected(); - } - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - ++peers_lost; - - if ( Cluster::node == "manager-1" ) - { - if ( peers_lost == 2 ) - # Both workers terminated - terminate(); - } - else - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/cluster/forwarding.zeek b/testing/btest/scripts/base/frameworks/cluster/forwarding.zeek new file mode 100644 index 0000000000..2b450948c6 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/cluster/forwarding.zeek @@ -0,0 +1,111 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# @TEST-PORT: BROKER_PORT4 +# @TEST-PORT: BROKER_PORT5 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run proxy-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=proxy-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run proxy-2 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=proxy-2 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-2 ZEEKPATH=$ZEEKPATH:.. 
CLUSTER_NODE=worker-2 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 30 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff manager-1/.stdout +# @TEST-EXEC: btest-diff proxy-1/.stdout +# @TEST-EXEC: btest-diff proxy-2/.stdout +# @TEST-EXEC: btest-diff worker-1/.stdout +# @TEST-EXEC: btest-diff worker-2/.stdout + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], + ["proxy-2"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT5")), $manager="manager-1", $interface="eth1"], +}; +@TEST-END-FILE + +global fully_connected: event(); + +global peer_count = 0; +global peers_lost = 0; +global fully_connected_nodes = 0; + +redef Broker::forward_messages = T; + +event forwarded_event() + { + print "got forwarded event"; + + if ( Cluster::node == "manager-1" ) + print "manager should NOT have raised the forwarded event"; + + terminate(); + } + +event ready() + { + # note that the publishing node, worker-1, will not receive the forwarded + # event as Broker's forwarding prevents the message going back to the + # immediate sender. + Broker::publish("test_topic", forwarded_event); + } + +event fully_connected() + { + if ( ! is_remote_event() ) + return; + + print "Got fully_connected event"; + fully_connected_nodes = fully_connected_nodes + 1; + + if ( Cluster::node == "manager-1" ) + { + if ( peer_count == 4 && fully_connected_nodes == 4 ) + Broker::publish(Cluster::node_topic("worker-1"), ready); + } + } + +event zeek_init() + { + Broker::auto_publish(Cluster::manager_topic, fully_connected); + + if ( Cluster::node == "manager-1" ) + Broker::forward("test_topic"); + if ( Cluster::node == "worker-1" ) + Broker::subscribe("test_topic"); + if ( Cluster::node == "worker-2" ) + Broker::subscribe("test_topic"); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print "Connected to a peer"; + peer_count = peer_count + 1; + + if ( Cluster::node == "manager-1" ) + { + if ( peer_count == 4 && fully_connected_nodes == 4 ) + Broker::publish(Cluster::node_topic("worker-1"), ready); + } + else + { + if ( peer_count == 3 ) + event fully_connected(); + } + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + ++peers_lost; + + if ( Cluster::node == "manager-1" ) + { + if ( peers_lost == 2 ) + # Both workers terminated + terminate(); + } + else + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/cluster/log_distribution.bro b/testing/btest/scripts/base/frameworks/cluster/log_distribution.bro deleted file mode 100644 index 199e265674..0000000000 --- a/testing/btest/scripts/base/frameworks/cluster/log_distribution.bro +++ /dev/null @@ -1,81 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# @TEST-PORT: BROKER_PORT4 -# -# @TEST-EXEC: btest-bg-run logger-1 BROPATH=$BROPATH:.. CLUSTER_NODE=logger-1 bro %INPUT -# @TEST-EXEC: btest-bg-run logger-2 BROPATH=$BROPATH:.. CLUSTER_NODE=logger-2 bro %INPUT -# @TEST-EXEC: btest-bg-run manager BROPATH=$BROPATH:.. 
CLUSTER_NODE=manager bro %INPUT -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-wait 30 -# @TEST-EXEC: btest-diff logger-1/test.log -# @TEST-EXEC: btest-diff logger-2/test.log - -@TEST-START-FILE cluster-layout.bro -redef Cluster::manager_is_logger = F; - -redef Cluster::nodes = { - ["manager"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager", $interface="eth0"], - ["logger-1"] = [$node_type=Cluster::LOGGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager"], - ["logger-2"] = [$node_type=Cluster::LOGGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager"] -}; - -@TEST-END-FILE - -redef Log::default_rotation_interval = 0sec; - -module Test; -redef enum Log::ID += { LOG }; - -type Info: record { - num: count &log; -}; - -event bro_init() &priority=5 - { - Log::create_stream(Test::LOG, [$columns=Info, $path="test"]); - } - -global peer_count = 0; -global c = 0; - -event go_away() - { - terminate(); - } - -event do_count() - { - Log::write(Test::LOG, [$num = ++c]); - - if ( c == 100 ) - { - Broker::flush_logs(); - schedule 2sec { go_away() }; - } - else - schedule 0.01sec { do_count() }; - } - -event Cluster::node_up(name: string, id: string) - { - print "node_up", name; - ++peer_count; - - if ( Cluster::node == "worker-1" && peer_count == 3 ) - { - Cluster::logger_pool$rr_key_seq["Cluster::rr_log_topic"] = 0; - schedule 0.25sec { do_count() }; - } - } - -event Cluster::node_down(name: string, id: string) - { - print "node_down", name; - --peer_count; - - if ( name == "worker-1" ) - schedule 2sec { go_away() }; - } - diff --git a/testing/btest/scripts/base/frameworks/cluster/log_distribution.zeek b/testing/btest/scripts/base/frameworks/cluster/log_distribution.zeek new file mode 100644 index 0000000000..da32c25dd0 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/cluster/log_distribution.zeek @@ -0,0 +1,81 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# @TEST-PORT: BROKER_PORT4 +# +# @TEST-EXEC: btest-bg-run logger-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=logger-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run logger-2 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=logger-2 zeek %INPUT +# @TEST-EXEC: btest-bg-run manager ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-1 ZEEKPATH=$ZEEKPATH:.. 
CLUSTER_NODE=worker-1 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 30 +# @TEST-EXEC: btest-diff logger-1/test.log +# @TEST-EXEC: btest-diff logger-2/test.log + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::manager_is_logger = F; + +redef Cluster::nodes = { + ["manager"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager", $interface="eth0"], + ["logger-1"] = [$node_type=Cluster::LOGGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager"], + ["logger-2"] = [$node_type=Cluster::LOGGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager"] +}; + +@TEST-END-FILE + +redef Log::default_rotation_interval = 0sec; + +module Test; +redef enum Log::ID += { LOG }; + +type Info: record { + num: count &log; +}; + +event zeek_init() &priority=5 + { + Log::create_stream(Test::LOG, [$columns=Info, $path="test"]); + } + +global peer_count = 0; +global c = 0; + +event go_away() + { + terminate(); + } + +event do_count() + { + Log::write(Test::LOG, [$num = ++c]); + + if ( c == 100 ) + { + Broker::flush_logs(); + schedule 2sec { go_away() }; + } + else + schedule 0.01sec { do_count() }; + } + +event Cluster::node_up(name: string, id: string) + { + print "node_up", name; + ++peer_count; + + if ( Cluster::node == "worker-1" && peer_count == 3 ) + { + Cluster::logger_pool$rr_key_seq["Cluster::rr_log_topic"] = 0; + schedule 0.25sec { do_count() }; + } + } + +event Cluster::node_down(name: string, id: string) + { + print "node_down", name; + --peer_count; + + if ( name == "worker-1" ) + schedule 2sec { go_away() }; + } + diff --git a/testing/btest/scripts/base/frameworks/cluster/start-it-up-logger.bro b/testing/btest/scripts/base/frameworks/cluster/start-it-up-logger.bro deleted file mode 100644 index d94875e858..0000000000 --- a/testing/btest/scripts/base/frameworks/cluster/start-it-up-logger.bro +++ /dev/null @@ -1,91 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# @TEST-PORT: BROKER_PORT4 -# @TEST-PORT: BROKER_PORT5 -# @TEST-PORT: BROKER_PORT6 -# -# @TEST-EXEC: btest-bg-run logger-1 CLUSTER_NODE=logger-1 BROPATH=$BROPATH:.. bro %INPUT -# @TEST-EXEC: btest-bg-run manager-1 CLUSTER_NODE=manager-1 BROPATH=$BROPATH:.. bro %INPUT -# @TEST-EXEC: btest-bg-run proxy-1 CLUSTER_NODE=proxy-1 BROPATH=$BROPATH:.. bro %INPUT -# @TEST-EXEC: btest-bg-run proxy-2 CLUSTER_NODE=proxy-2 BROPATH=$BROPATH:.. bro %INPUT -# @TEST-EXEC: btest-bg-run worker-1 CLUSTER_NODE=worker-1 BROPATH=$BROPATH:.. bro %INPUT -# @TEST-EXEC: btest-bg-run worker-2 CLUSTER_NODE=worker-2 BROPATH=$BROPATH:.. 
bro %INPUT -# @TEST-EXEC: btest-bg-wait 30 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff logger-1/.stdout -# @TEST-EXEC: btest-diff manager-1/.stdout -# @TEST-EXEC: btest-diff proxy-1/.stdout -# @TEST-EXEC: btest-diff proxy-2/.stdout -# @TEST-EXEC: btest-diff worker-1/.stdout -# @TEST-EXEC: btest-diff worker-2/.stdout - -@TEST-START-FILE cluster-layout.bro -redef Cluster::manager_is_logger = F; -redef Cluster::nodes = { - ["logger-1"] = [$node_type=Cluster::LOGGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2"))], - ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], - ["proxy-2"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager-1"], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT5")), $manager="manager-1", $interface="eth0"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT6")), $manager="manager-1", $interface="eth1"], -}; -@TEST-END-FILE - -global peer_count = 0; - -global fully_connected_nodes = 0; - -event fully_connected(n: string) - { - ++fully_connected_nodes; - - if ( Cluster::node == "logger-1" ) - { - print "got fully_connected event from", n; - - if ( peer_count == 5 && fully_connected_nodes == 5 ) - { - print "termination condition met: shutting down"; - terminate(); - } - } - else - { - print "sent fully_connected event"; - } - } - -event bro_init() - { - Broker::auto_publish(Cluster::logger_topic, fully_connected); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print "Connected to a peer"; - ++peer_count; - - if ( Cluster::node == "logger-1" ) - { - if ( peer_count == 5 && fully_connected_nodes == 5 ) - { - print "termination condition met: shutting down"; - terminate(); - } - } - else if ( Cluster::node == "manager-1" ) - { - if ( peer_count == 5 ) - event fully_connected(Cluster::node); - } - else - { - if ( peer_count == 4 ) - event fully_connected(Cluster::node); - } - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/cluster/start-it-up-logger.zeek b/testing/btest/scripts/base/frameworks/cluster/start-it-up-logger.zeek new file mode 100644 index 0000000000..a97cbf06b3 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/cluster/start-it-up-logger.zeek @@ -0,0 +1,91 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# @TEST-PORT: BROKER_PORT4 +# @TEST-PORT: BROKER_PORT5 +# @TEST-PORT: BROKER_PORT6 +# +# @TEST-EXEC: btest-bg-run logger-1 CLUSTER_NODE=logger-1 ZEEKPATH=$ZEEKPATH:.. zeek %INPUT +# @TEST-EXEC: btest-bg-run manager-1 CLUSTER_NODE=manager-1 ZEEKPATH=$ZEEKPATH:.. zeek %INPUT +# @TEST-EXEC: btest-bg-run proxy-1 CLUSTER_NODE=proxy-1 ZEEKPATH=$ZEEKPATH:.. zeek %INPUT +# @TEST-EXEC: btest-bg-run proxy-2 CLUSTER_NODE=proxy-2 ZEEKPATH=$ZEEKPATH:.. zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-1 CLUSTER_NODE=worker-1 ZEEKPATH=$ZEEKPATH:.. zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-2 CLUSTER_NODE=worker-2 ZEEKPATH=$ZEEKPATH:.. 
zeek %INPUT +# @TEST-EXEC: btest-bg-wait 30 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff logger-1/.stdout +# @TEST-EXEC: btest-diff manager-1/.stdout +# @TEST-EXEC: btest-diff proxy-1/.stdout +# @TEST-EXEC: btest-diff proxy-2/.stdout +# @TEST-EXEC: btest-diff worker-1/.stdout +# @TEST-EXEC: btest-diff worker-2/.stdout + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::manager_is_logger = F; +redef Cluster::nodes = { + ["logger-1"] = [$node_type=Cluster::LOGGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2"))], + ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], + ["proxy-2"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager-1"], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT5")), $manager="manager-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT6")), $manager="manager-1", $interface="eth1"], +}; +@TEST-END-FILE + +global peer_count = 0; + +global fully_connected_nodes = 0; + +event fully_connected(n: string) + { + ++fully_connected_nodes; + + if ( Cluster::node == "logger-1" ) + { + print "got fully_connected event from", n; + + if ( peer_count == 5 && fully_connected_nodes == 5 ) + { + print "termination condition met: shutting down"; + terminate(); + } + } + else + { + print "sent fully_connected event"; + } + } + +event zeek_init() + { + Broker::auto_publish(Cluster::logger_topic, fully_connected); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print "Connected to a peer"; + ++peer_count; + + if ( Cluster::node == "logger-1" ) + { + if ( peer_count == 5 && fully_connected_nodes == 5 ) + { + print "termination condition met: shutting down"; + terminate(); + } + } + else if ( Cluster::node == "manager-1" ) + { + if ( peer_count == 5 ) + event fully_connected(Cluster::node); + } + else + { + if ( peer_count == 4 ) + event fully_connected(Cluster::node); + } + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/cluster/start-it-up.bro b/testing/btest/scripts/base/frameworks/cluster/start-it-up.bro deleted file mode 100644 index eee6c29215..0000000000 --- a/testing/btest/scripts/base/frameworks/cluster/start-it-up.bro +++ /dev/null @@ -1,75 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# @TEST-PORT: BROKER_PORT4 -# @TEST-PORT: BROKER_PORT5 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT -# @TEST-EXEC: btest-bg-run proxy-2 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-2 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. 
CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait 30 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff manager-1/.stdout -# @TEST-EXEC: btest-diff proxy-1/.stdout -# @TEST-EXEC: btest-diff proxy-2/.stdout -# @TEST-EXEC: btest-diff worker-1/.stdout -# @TEST-EXEC: btest-diff worker-2/.stdout - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], - ["proxy-2"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager-1", $interface="eth0"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT5")), $manager="manager-1", $interface="eth1"], -}; -@TEST-END-FILE - -global fully_connected: event(); - -global peer_count = 0; - -global fully_connected_nodes = 0; - -event fully_connected() - { - if ( ! is_remote_event() ) - return; - - print "Got fully_connected event"; - fully_connected_nodes = fully_connected_nodes + 1; - - if ( Cluster::node == "manager-1" ) - { - if ( peer_count == 4 && fully_connected_nodes == 4 ) - terminate(); - } - } - -event bro_init() - { - Broker::auto_publish(Cluster::manager_topic, fully_connected); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print "Connected to a peer"; - peer_count = peer_count + 1; - - if ( Cluster::node == "manager-1" ) - { - if ( peer_count == 4 && fully_connected_nodes == 4 ) - terminate(); - } - else - { - if ( peer_count == 3 ) - event fully_connected(); - } - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/cluster/start-it-up.zeek b/testing/btest/scripts/base/frameworks/cluster/start-it-up.zeek new file mode 100644 index 0000000000..6f3c7d7651 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/cluster/start-it-up.zeek @@ -0,0 +1,75 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# @TEST-PORT: BROKER_PORT4 +# @TEST-PORT: BROKER_PORT5 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run proxy-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=proxy-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run proxy-2 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=proxy-2 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-2 ZEEKPATH=$ZEEKPATH:.. 
CLUSTER_NODE=worker-2 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 30 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff manager-1/.stdout +# @TEST-EXEC: btest-diff proxy-1/.stdout +# @TEST-EXEC: btest-diff proxy-2/.stdout +# @TEST-EXEC: btest-diff worker-1/.stdout +# @TEST-EXEC: btest-diff worker-2/.stdout + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], + ["proxy-2"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT5")), $manager="manager-1", $interface="eth1"], +}; +@TEST-END-FILE + +global fully_connected: event(); + +global peer_count = 0; + +global fully_connected_nodes = 0; + +event fully_connected() + { + if ( ! is_remote_event() ) + return; + + print "Got fully_connected event"; + fully_connected_nodes = fully_connected_nodes + 1; + + if ( Cluster::node == "manager-1" ) + { + if ( peer_count == 4 && fully_connected_nodes == 4 ) + terminate(); + } + } + +event zeek_init() + { + Broker::auto_publish(Cluster::manager_topic, fully_connected); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print "Connected to a peer"; + peer_count = peer_count + 1; + + if ( Cluster::node == "manager-1" ) + { + if ( peer_count == 4 && fully_connected_nodes == 4 ) + terminate(); + } + else + { + if ( peer_count == 3 ) + event fully_connected(); + } + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/cluster/topic_distribution.bro b/testing/btest/scripts/base/frameworks/cluster/topic_distribution.bro deleted file mode 100644 index 317a38fbaa..0000000000 --- a/testing/btest/scripts/base/frameworks/cluster/topic_distribution.bro +++ /dev/null @@ -1,88 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# @TEST-PORT: BROKER_PORT4 -# @TEST-PORT: BROKER_PORT5 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT -# @TEST-EXEC: btest-bg-run proxy-2 BROPATH=$BROPATH:.. 
CLUSTER_NODE=proxy-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait 30 -# @TEST-EXEC: btest-diff manager-1/.stdout - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], - ["proxy-2"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager-1", $interface="eth0"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT5")), $manager="manager-1", $interface="eth1"], -}; -@TEST-END-FILE - -global proxy_count = 0; - -event go_away() - { - terminate(); - } - -function print_stuff(heading: string) - { - print heading; - - local v: vector of count = vector(0, 1, 2, 3, 13, 37, 42, 101); - - for ( i in v ) - print "hrw", v[i], Cluster::hrw_topic(Cluster::proxy_pool, v[i]); - - local rr_key = "test"; - - for ( i in v ) - print "rr", Cluster::rr_topic(Cluster::proxy_pool, rr_key); - - # Just checking the same keys still map to same topic ... - for ( i in v ) - print "hrw", v[i], Cluster::hrw_topic(Cluster::proxy_pool, v[i]); - } - -event Cluster::node_up(name: string, id: string) - { - if ( Cluster::node != "manager-1" ) - return; - - if ( name == "proxy-1" || name == "proxy-2" ) - ++proxy_count; - - if ( proxy_count == 2 ) - { - print_stuff("1st stuff"); - local e = Broker::make_event(go_away); - Broker::publish(Cluster::node_topic("proxy-1"), e); - } - } - -event Cluster::node_down(name: string, id: string) - { - if ( Cluster::node != "manager-1" ) - return; - - if ( name == "proxy-1" ) - { - print_stuff("2nd stuff"); - local e = Broker::make_event(go_away); - Broker::publish(Cluster::node_topic("proxy-2"), e); - } - - if ( name == "proxy-2" ) - { - print_stuff("no stuff"); - terminate(); - } - } - -event Cluster::node_down(name: string, id: string) - { - if ( name == "manager-1" ) - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/cluster/topic_distribution.zeek b/testing/btest/scripts/base/frameworks/cluster/topic_distribution.zeek new file mode 100644 index 0000000000..ff30aabea8 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/cluster/topic_distribution.zeek @@ -0,0 +1,88 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# @TEST-PORT: BROKER_PORT4 +# @TEST-PORT: BROKER_PORT5 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run proxy-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=proxy-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run proxy-2 ZEEKPATH=$ZEEKPATH:.. 
CLUSTER_NODE=proxy-2 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 30 +# @TEST-EXEC: btest-diff manager-1/.stdout + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], + ["proxy-2"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT5")), $manager="manager-1", $interface="eth1"], +}; +@TEST-END-FILE + +global proxy_count = 0; + +event go_away() + { + terminate(); + } + +function print_stuff(heading: string) + { + print heading; + + local v: vector of count = vector(0, 1, 2, 3, 13, 37, 42, 101); + + for ( i in v ) + print "hrw", v[i], Cluster::hrw_topic(Cluster::proxy_pool, v[i]); + + local rr_key = "test"; + + for ( i in v ) + print "rr", Cluster::rr_topic(Cluster::proxy_pool, rr_key); + + # Just checking the same keys still map to same topic ... + for ( i in v ) + print "hrw", v[i], Cluster::hrw_topic(Cluster::proxy_pool, v[i]); + } + +event Cluster::node_up(name: string, id: string) + { + if ( Cluster::node != "manager-1" ) + return; + + if ( name == "proxy-1" || name == "proxy-2" ) + ++proxy_count; + + if ( proxy_count == 2 ) + { + print_stuff("1st stuff"); + local e = Broker::make_event(go_away); + Broker::publish(Cluster::node_topic("proxy-1"), e); + } + } + +event Cluster::node_down(name: string, id: string) + { + if ( Cluster::node != "manager-1" ) + return; + + if ( name == "proxy-1" ) + { + print_stuff("2nd stuff"); + local e = Broker::make_event(go_away); + Broker::publish(Cluster::node_topic("proxy-2"), e); + } + + if ( name == "proxy-2" ) + { + print_stuff("no stuff"); + terminate(); + } + } + +event Cluster::node_down(name: string, id: string) + { + if ( name == "manager-1" ) + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/cluster/topic_distribution_bifs.bro b/testing/btest/scripts/base/frameworks/cluster/topic_distribution_bifs.bro deleted file mode 100644 index 35ed52f883..0000000000 --- a/testing/btest/scripts/base/frameworks/cluster/topic_distribution_bifs.bro +++ /dev/null @@ -1,99 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# @TEST-PORT: BROKER_PORT4 -# @TEST-PORT: BROKER_PORT5 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT -# @TEST-EXEC: btest-bg-run proxy-2 BROPATH=$BROPATH:.. 
CLUSTER_NODE=proxy-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait 30 -# @TEST-EXEC: btest-diff manager-1/.stdout -# @TEST-EXEC: btest-diff proxy-1/.stdout -# @TEST-EXEC: btest-diff proxy-2/.stdout - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], - ["proxy-2"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager-1", $interface="eth0"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT5")), $manager="manager-1", $interface="eth1"], -}; -@TEST-END-FILE - -global proxy_count = 0; -global q = 0; - -event go_away() - { - terminate(); - } - -event distributed_event_hrw(c: count) - { - print "got distributed event hrw", c; - } - -event distributed_event_rr(c: count) - { - print "got distributed event rr", c; - } - -function send_stuff(heading: string) - { - print heading; - - local v: vector of count = vector(0, 1, 2, 3, 13, 37, 42, 101); - - for ( i in v ) - print "hrw", v[i], Cluster::publish_hrw(Cluster::proxy_pool, v[i], - distributed_event_hrw, v[i]); - - local rr_key = "test"; - - for ( i in v ) - print "rr", Cluster::publish_rr(Cluster::proxy_pool, rr_key, - distributed_event_rr, v[i]); - } - -event Cluster::node_up(name: string, id: string) - { - if ( Cluster::node != "manager-1" ) - return; - - if ( name == "proxy-1" || name == "proxy-2" ) - ++proxy_count; - - if ( proxy_count == 2 ) - { - send_stuff("1st stuff"); - local e = Broker::make_event(go_away); - Broker::publish(Cluster::node_topic("proxy-1"), e); - } - } - -event Cluster::node_down(name: string, id: string) - { - if ( Cluster::node != "manager-1" ) - return; - - if ( name == "proxy-1" ) - { - send_stuff("2nd stuff"); - local e = Broker::make_event(go_away); - Broker::publish(Cluster::node_topic("proxy-2"), e); - } - - if ( name == "proxy-2" ) - { - send_stuff("no stuff"); - terminate(); - } - } - -event Cluster::node_down(name: string, id: string) - { - if ( name == "manager-1" ) - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/cluster/topic_distribution_bifs.zeek b/testing/btest/scripts/base/frameworks/cluster/topic_distribution_bifs.zeek new file mode 100644 index 0000000000..47bdaee125 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/cluster/topic_distribution_bifs.zeek @@ -0,0 +1,99 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# @TEST-PORT: BROKER_PORT4 +# @TEST-PORT: BROKER_PORT5 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run proxy-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=proxy-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run proxy-2 ZEEKPATH=$ZEEKPATH:.. 
CLUSTER_NODE=proxy-2 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 30 +# @TEST-EXEC: btest-diff manager-1/.stdout +# @TEST-EXEC: btest-diff proxy-1/.stdout +# @TEST-EXEC: btest-diff proxy-2/.stdout + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], + ["proxy-2"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT5")), $manager="manager-1", $interface="eth1"], +}; +@TEST-END-FILE + +global proxy_count = 0; +global q = 0; + +event go_away() + { + terminate(); + } + +event distributed_event_hrw(c: count) + { + print "got distributed event hrw", c; + } + +event distributed_event_rr(c: count) + { + print "got distributed event rr", c; + } + +function send_stuff(heading: string) + { + print heading; + + local v: vector of count = vector(0, 1, 2, 3, 13, 37, 42, 101); + + for ( i in v ) + print "hrw", v[i], Cluster::publish_hrw(Cluster::proxy_pool, v[i], + distributed_event_hrw, v[i]); + + local rr_key = "test"; + + for ( i in v ) + print "rr", Cluster::publish_rr(Cluster::proxy_pool, rr_key, + distributed_event_rr, v[i]); + } + +event Cluster::node_up(name: string, id: string) + { + if ( Cluster::node != "manager-1" ) + return; + + if ( name == "proxy-1" || name == "proxy-2" ) + ++proxy_count; + + if ( proxy_count == 2 ) + { + send_stuff("1st stuff"); + local e = Broker::make_event(go_away); + Broker::publish(Cluster::node_topic("proxy-1"), e); + } + } + +event Cluster::node_down(name: string, id: string) + { + if ( Cluster::node != "manager-1" ) + return; + + if ( name == "proxy-1" ) + { + send_stuff("2nd stuff"); + local e = Broker::make_event(go_away); + Broker::publish(Cluster::node_topic("proxy-2"), e); + } + + if ( name == "proxy-2" ) + { + send_stuff("no stuff"); + terminate(); + } + } + +event Cluster::node_down(name: string, id: string) + { + if ( name == "manager-1" ) + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/config/basic.bro b/testing/btest/scripts/base/frameworks/config/basic.bro deleted file mode 100644 index f5a02983fd..0000000000 --- a/testing/btest/scripts/base/frameworks/config/basic.bro +++ /dev/null @@ -1,57 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff bro/config.log -# @TEST-EXEC: btest-diff bro/.stderr - -@load base/frameworks/config -@load base/protocols/conn - -redef exit_only_after_terminate = T; -redef Config::config_files += {"../configfile"}; - -@TEST-START-FILE configfile -testbool F -testcount 1 -testcount 2 -testcount 2 -testint -1 -testenum Conn::LOG -testport 45 -testporttcp 42/tcp -testportudp 42/udp -testaddr 127.0.0.1 -testaddr 2607:f8b0:4005:801::200e -testinterval 60 -testtime 1507321987 -test_set a,b,c,d,erdbeerschnitzel -test_vector 1,2,3,4,5,6 -test_set -test_set - -@TEST-END-FILE - -@load base/protocols/ssh -@load base/protocols/conn - -export { - option testbool: bool = T; - option testcount: count = 0; - option testint: int = 0; - option testenum = SSH::LOG; - option testport = 42/tcp; - option testporttcp = 40/udp; - option testportudp = 40/tcp; - option 
testaddr = 127.0.0.1; - option testtime = network_time(); - option testinterval = 1sec; - option teststring = "a"; - option test_set: set[string] = {}; - option test_vector: vector of count = {}; -} - -event Input::end_of_data(name: string, source:string) - { - if ( sub_bytes(name, 1, 7) != "config-" ) - return; - - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/config/basic.zeek b/testing/btest/scripts/base/frameworks/config/basic.zeek new file mode 100644 index 0000000000..0195388792 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/config/basic.zeek @@ -0,0 +1,57 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff zeek/config.log +# @TEST-EXEC: btest-diff zeek/.stderr + +@load base/frameworks/config +@load base/protocols/conn + +redef exit_only_after_terminate = T; +redef Config::config_files += {"../configfile"}; + +@TEST-START-FILE configfile +testbool F +testcount 1 +testcount 2 +testcount 2 +testint -1 +testenum Conn::LOG +testport 45 +testporttcp 42/tcp +testportudp 42/udp +testaddr 127.0.0.1 +testaddr 2607:f8b0:4005:801::200e +testinterval 60 +testtime 1507321987 +test_set a,b,c,d,erdbeerschnitzel +test_vector 1,2,3,4,5,6 +test_set +test_set - +@TEST-END-FILE + +@load base/protocols/ssh +@load base/protocols/conn + +export { + option testbool: bool = T; + option testcount: count = 0; + option testint: int = 0; + option testenum = SSH::LOG; + option testport = 42/tcp; + option testporttcp = 40/udp; + option testportudp = 40/tcp; + option testaddr = 127.0.0.1; + option testtime = network_time(); + option testinterval = 1sec; + option teststring = "a"; + option test_set: set[string] = {}; + option test_vector: vector of count = {}; +} + +event Input::end_of_data(name: string, source:string) + { + if ( sub_bytes(name, 1, 7) != "config-" ) + return; + + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/config/basic_cluster.bro b/testing/btest/scripts/base/frameworks/config/basic_cluster.bro deleted file mode 100644 index 99f1de8aeb..0000000000 --- a/testing/btest/scripts/base/frameworks/config/basic_cluster.bro +++ /dev/null @@ -1,83 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: sleep 1 -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. 
CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait 15 -# @TEST-EXEC: btest-diff manager-1/.stdout -# @TEST-EXEC: btest-diff worker-1/.stdout -# @TEST-EXEC: btest-diff worker-2/.stdout -# @TEST-EXEC: btest-diff manager-1/config.log - -@load base/frameworks/config - - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], -}; -@TEST-END-FILE - -redef Log::default_rotation_interval = 0secs; - -export { - option testport = 42/tcp; - option teststring = "a"; -} - -global n = 0; - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -global ready_for_data: event(); - -event bro_init() - { - Broker::auto_publish(Cluster::worker_topic, ready_for_data); - } - -@if ( Cluster::node == "worker-1" ) -event ready_for_data() - { - Config::set_value("testport", 44/tcp); - Config::set_value("teststring", "b", "comment"); - } -@endif - -event die() - { - terminate(); - } - -function option_changed(ID: string, new_value: any, location: string): any - { - print "option changed", ID, new_value, location; - schedule 5sec { die() }; - return new_value; - } - -event bro_init() &priority=5 - { - Option::set_change_handler("testport", option_changed, -100); - Option::set_change_handler("teststring", option_changed, -100); - } - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) - -global peer_count = 0; -event Cluster::node_up(name: string, id: string) - { - ++peer_count; - if ( peer_count == 2 ) - event ready_for_data(); - } - -@endif diff --git a/testing/btest/scripts/base/frameworks/config/basic_cluster.zeek b/testing/btest/scripts/base/frameworks/config/basic_cluster.zeek new file mode 100644 index 0000000000..5ddebf149b --- /dev/null +++ b/testing/btest/scripts/base/frameworks/config/basic_cluster.zeek @@ -0,0 +1,83 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: sleep 1 +# @TEST-EXEC: btest-bg-run worker-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-2 ZEEKPATH=$ZEEKPATH:.. 
CLUSTER_NODE=worker-2 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 15 +# @TEST-EXEC: btest-diff manager-1/.stdout +# @TEST-EXEC: btest-diff worker-1/.stdout +# @TEST-EXEC: btest-diff worker-2/.stdout +# @TEST-EXEC: btest-diff manager-1/config.log + +@load base/frameworks/config + + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], +}; +@TEST-END-FILE + +redef Log::default_rotation_interval = 0secs; + +export { + option testport = 42/tcp; + option teststring = "a"; +} + +global n = 0; + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +global ready_for_data: event(); + +event zeek_init() + { + Broker::auto_publish(Cluster::worker_topic, ready_for_data); + } + +@if ( Cluster::node == "worker-1" ) +event ready_for_data() + { + Config::set_value("testport", 44/tcp); + Config::set_value("teststring", "b", "comment"); + } +@endif + +event die() + { + terminate(); + } + +function option_changed(ID: string, new_value: any, location: string): any + { + print "option changed", ID, new_value, location; + schedule 5sec { die() }; + return new_value; + } + +event zeek_init() &priority=5 + { + Option::set_change_handler("testport", option_changed, -100); + Option::set_change_handler("teststring", option_changed, -100); + } + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + +global peer_count = 0; +event Cluster::node_up(name: string, id: string) + { + ++peer_count; + if ( peer_count == 2 ) + event ready_for_data(); + } + +@endif diff --git a/testing/btest/scripts/base/frameworks/config/cluster_resend.bro b/testing/btest/scripts/base/frameworks/config/cluster_resend.bro deleted file mode 100644 index c66d5b2ba2..0000000000 --- a/testing/btest/scripts/base/frameworks/config/cluster_resend.bro +++ /dev/null @@ -1,109 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: sleep 1 -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: sleep 15 -# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait 15 -# @TEST-EXEC: btest-diff manager-1/.stdout -# @TEST-EXEC: btest-diff worker-1/.stdout -# @TEST-EXEC: btest-diff worker-2/.stdout -# @TEST-EXEC: btest-diff manager-1/config.log - -# In this test we check if values get updated on a worker, even if they were set before the -# worker is present. 
- -@load base/frameworks/config - - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], -}; -@TEST-END-FILE - -redef Log::default_rotation_interval = 0secs; - -export { - option testport = 42/tcp; - option teststring = "a"; - option testcount: count = 0; -} - -global n = 0; - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -global ready_for_data: event(); - -event bro_init() - { - Broker::auto_publish(Cluster::worker_topic, ready_for_data); - } - -@if ( Cluster::node == "worker-1" ) -event ready_for_data() - { - Config::set_value("testport", 44/tcp); - Config::set_value("teststring", "b", "comment"); - } -@endif - -@if ( Cluster::node == "manager-1" ) -event ready_for_data() - { - Config::set_value("testcount", 1); - } -@endif - -event die() - { - terminate(); - } - -@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) -event Cluster::node_up(name: string, id: string) - { - print "Node up", name; - if ( name == "worker-2" ) - schedule 5sec { die() }; - } -@endif - -function option_changed(ID: string, new_value: any, location: string): any - { - print "option changed", ID, new_value, location; - return new_value; - } - -event bro_init() &priority=5 - { - Option::set_change_handler("testport", option_changed, -100); - Option::set_change_handler("teststring", option_changed, -100); - Option::set_change_handler("testcount", option_changed, -100); - } - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) - -global peer_count = 0; -event Cluster::node_up(name: string, id: string) &priority=-5 - { - ++peer_count; - if ( peer_count == 1 ) - event ready_for_data(); - } - -@endif - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/config/cluster_resend.zeek b/testing/btest/scripts/base/frameworks/config/cluster_resend.zeek new file mode 100644 index 0000000000..dda06e987a --- /dev/null +++ b/testing/btest/scripts/base/frameworks/config/cluster_resend.zeek @@ -0,0 +1,109 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: sleep 1 +# @TEST-EXEC: btest-bg-run worker-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-1 zeek %INPUT +# @TEST-EXEC: sleep 15 +# @TEST-EXEC: btest-bg-run worker-2 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-2 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 15 +# @TEST-EXEC: btest-diff manager-1/.stdout +# @TEST-EXEC: btest-diff worker-1/.stdout +# @TEST-EXEC: btest-diff worker-2/.stdout +# @TEST-EXEC: btest-diff manager-1/config.log + +# In this test we check if values get updated on a worker, even if they were set before the +# worker is present. 
+ +@load base/frameworks/config + + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], +}; +@TEST-END-FILE + +redef Log::default_rotation_interval = 0secs; + +export { + option testport = 42/tcp; + option teststring = "a"; + option testcount: count = 0; +} + +global n = 0; + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +global ready_for_data: event(); + +event zeek_init() + { + Broker::auto_publish(Cluster::worker_topic, ready_for_data); + } + +@if ( Cluster::node == "worker-1" ) +event ready_for_data() + { + Config::set_value("testport", 44/tcp); + Config::set_value("teststring", "b", "comment"); + } +@endif + +@if ( Cluster::node == "manager-1" ) +event ready_for_data() + { + Config::set_value("testcount", 1); + } +@endif + +event die() + { + terminate(); + } + +@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) +event Cluster::node_up(name: string, id: string) + { + print "Node up", name; + if ( name == "worker-2" ) + schedule 5sec { die() }; + } +@endif + +function option_changed(ID: string, new_value: any, location: string): any + { + print "option changed", ID, new_value, location; + return new_value; + } + +event zeek_init() &priority=5 + { + Option::set_change_handler("testport", option_changed, -100); + Option::set_change_handler("teststring", option_changed, -100); + Option::set_change_handler("testcount", option_changed, -100); + } + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + +global peer_count = 0; +event Cluster::node_up(name: string, id: string) &priority=-5 + { + ++peer_count; + if ( peer_count == 1 ) + event ready_for_data(); + } + +@endif + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/config/read_config.bro b/testing/btest/scripts/base/frameworks/config/read_config.bro deleted file mode 100644 index 753186beab..0000000000 --- a/testing/btest/scripts/base/frameworks/config/read_config.bro +++ /dev/null @@ -1,57 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff bro/config.log - -@load base/frameworks/config -@load base/protocols/conn - -redef exit_only_after_terminate = T; -redef InputConfig::empty_field = "(empty)"; - -@TEST-START-FILE configfile -testbool F -testcount 1 -testcount 2 -testcount 2 -testint -1 -testenum Conn::LOG -testport 45 -testaddr 127.0.0.1 -testaddr 2607:f8b0:4005:801::200e -testinterval 60 -testtime 1507321987 -test_set a,b,c,d,erdbeerschnitzel -test_vector 1,2,3,4,5,6 -test_set (empty) -test_set - -@TEST-END-FILE - -@load base/protocols/ssh -@load base/protocols/conn - -export { - option testbool: bool = T; - option testcount: count = 0; - option testint: int = 0; - option testenum = SSH::LOG; - option testport = 42/tcp; - option testaddr = 127.0.0.1; - option testtime = network_time(); - option testinterval = 1sec; - option teststring = "a"; - option test_set: set[string] = {}; - option test_vector: vector of count = {}; -} - -event Input::end_of_data(name: string, source:string) - { - if ( sub_bytes(name, 1, 7) != 
"config-" ) - return; - - terminate(); - } - -event bro_init() - { - Config::read_config("../configfile"); - } diff --git a/testing/btest/scripts/base/frameworks/config/read_config.zeek b/testing/btest/scripts/base/frameworks/config/read_config.zeek new file mode 100644 index 0000000000..8ea2e4690e --- /dev/null +++ b/testing/btest/scripts/base/frameworks/config/read_config.zeek @@ -0,0 +1,57 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff zeek/config.log + +@load base/frameworks/config +@load base/protocols/conn + +redef exit_only_after_terminate = T; +redef InputConfig::empty_field = "(empty)"; + +@TEST-START-FILE configfile +testbool F +testcount 1 +testcount 2 +testcount 2 +testint -1 +testenum Conn::LOG +testport 45 +testaddr 127.0.0.1 +testaddr 2607:f8b0:4005:801::200e +testinterval 60 +testtime 1507321987 +test_set a,b,c,d,erdbeerschnitzel +test_vector 1,2,3,4,5,6 +test_set (empty) +test_set - +@TEST-END-FILE + +@load base/protocols/ssh +@load base/protocols/conn + +export { + option testbool: bool = T; + option testcount: count = 0; + option testint: int = 0; + option testenum = SSH::LOG; + option testport = 42/tcp; + option testaddr = 127.0.0.1; + option testtime = network_time(); + option testinterval = 1sec; + option teststring = "a"; + option test_set: set[string] = {}; + option test_vector: vector of count = {}; +} + +event Input::end_of_data(name: string, source:string) + { + if ( sub_bytes(name, 1, 7) != "config-" ) + return; + + terminate(); + } + +event zeek_init() + { + Config::read_config("../configfile"); + } diff --git a/testing/btest/scripts/base/frameworks/config/read_config_cluster.bro b/testing/btest/scripts/base/frameworks/config/read_config_cluster.bro deleted file mode 100644 index 3f77a0fdc3..0000000000 --- a/testing/btest/scripts/base/frameworks/config/read_config_cluster.bro +++ /dev/null @@ -1,106 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: sleep 1 -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. 
CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait 15 -# @TEST-EXEC: btest-diff manager-1/.stdout -# @TEST-EXEC: btest-diff worker-1/.stdout -# @TEST-EXEC: btest-diff worker-2/.stdout -# @TEST-EXEC: btest-diff manager-1/config.log - -@load base/frameworks/config - - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], -}; -@TEST-END-FILE - -@TEST-START-FILE configfile -testbool F -testcount 1 -testcount 2 -testcount 2 -testint -1 -testenum Conn::LOG -testport 45 -testaddr 127.0.0.1 -testaddr 2607:f8b0:4005:801::200e -testinterval 60 -testtime 1507321987 -test_set a,b,c,d,erdbeerschnitzel -test_vector 1,2,3,4,5,6 -test_set (empty) -test_set - -test_set_full 1,3,4,5,6,7 -@TEST-END-FILE - -redef Log::default_rotation_interval = 0secs; - -export { - option testbool: bool = T; - option testcount: count = 0; - option testint: int = 0; - option testenum = SSH::LOG; - option testport = 42/tcp; - option testaddr = 127.0.0.1; - option testtime = network_time(); - option testinterval = 1sec; - option teststring = "a"; - option test_set: set[string] = {}; - option test_set_full: set[count] = {1, 2, 3, 7, 10, 15}; - option test_vector: vector of count = {}; -} - -event bro_init() - { - Config::read_config("../configfile"); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -function option_changed(ID: string, new_value: any, location: string): any - { - print "option changed", ID, new_value, location; - return new_value; - } - -event bro_init() &priority=5 - { - Option::set_change_handler("testport", option_changed, -100); - Option::set_change_handler("teststring", option_changed, -100); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) -event die() - { - terminate(); - } - -event Cluster::node_up(name: string, id: string) - { - schedule 10sec { die() }; - } -@endif - -module Config; - -event Config::cluster_set_option(ID: string, val: any, location: string) &priority=-10 - { - print "cluster_set_option", ID, val, location; - } diff --git a/testing/btest/scripts/base/frameworks/config/read_config_cluster.zeek b/testing/btest/scripts/base/frameworks/config/read_config_cluster.zeek new file mode 100644 index 0000000000..02bfc99d27 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/config/read_config_cluster.zeek @@ -0,0 +1,106 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: sleep 1 +# @TEST-EXEC: btest-bg-run worker-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-2 ZEEKPATH=$ZEEKPATH:.. 
CLUSTER_NODE=worker-2 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 15 +# @TEST-EXEC: btest-diff manager-1/.stdout +# @TEST-EXEC: btest-diff worker-1/.stdout +# @TEST-EXEC: btest-diff worker-2/.stdout +# @TEST-EXEC: btest-diff manager-1/config.log + +@load base/frameworks/config + + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], +}; +@TEST-END-FILE + +@TEST-START-FILE configfile +testbool F +testcount 1 +testcount 2 +testcount 2 +testint -1 +testenum Conn::LOG +testport 45 +testaddr 127.0.0.1 +testaddr 2607:f8b0:4005:801::200e +testinterval 60 +testtime 1507321987 +test_set a,b,c,d,erdbeerschnitzel +test_vector 1,2,3,4,5,6 +test_set (empty) +test_set - +test_set_full 1,3,4,5,6,7 +@TEST-END-FILE + +redef Log::default_rotation_interval = 0secs; + +export { + option testbool: bool = T; + option testcount: count = 0; + option testint: int = 0; + option testenum = SSH::LOG; + option testport = 42/tcp; + option testaddr = 127.0.0.1; + option testtime = network_time(); + option testinterval = 1sec; + option teststring = "a"; + option test_set: set[string] = {}; + option test_set_full: set[count] = {1, 2, 3, 7, 10, 15}; + option test_vector: vector of count = {}; +} + +event zeek_init() + { + Config::read_config("../configfile"); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +function option_changed(ID: string, new_value: any, location: string): any + { + print "option changed", ID, new_value, location; + return new_value; + } + +event zeek_init() &priority=5 + { + Option::set_change_handler("testport", option_changed, -100); + Option::set_change_handler("teststring", option_changed, -100); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) +event die() + { + terminate(); + } + +event Cluster::node_up(name: string, id: string) + { + schedule 10sec { die() }; + } +@endif + +module Config; + +event Config::cluster_set_option(ID: string, val: any, location: string) &priority=-10 + { + print "cluster_set_option", ID, val, location; + } diff --git a/testing/btest/scripts/base/frameworks/config/several-files.bro b/testing/btest/scripts/base/frameworks/config/several-files.bro deleted file mode 100644 index c5ad563b4e..0000000000 --- a/testing/btest/scripts/base/frameworks/config/several-files.bro +++ /dev/null @@ -1,57 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-canonifier | grep -v ^# | $SCRIPTS/diff-sort" btest-diff bro/config.log - -@load base/frameworks/config -@load base/protocols/conn - -redef exit_only_after_terminate = T; -redef Config::config_files += {"../configfile1", "../configfile2"}; - -@TEST-START-FILE configfile1 -testbool F -testcount 2 -testint -1 -testenum Conn::LOG -test_set a,b,c,d,erdbeerschnitzel -test_vector 1,2,3,4,5,6 -@TEST-END-FILE - -@TEST-START-FILE configfile2 -testport 45 -testaddr 127.0.0.1 -testinterval 60 -testtime 1507321987 -@TEST-END-FILE - -@load base/protocols/ssh -@load base/protocols/conn - -export { - 
option testbool: bool = T; - option testcount: count = 0; - option testint: int = 0; - option testenum = SSH::LOG; - option testport = 42/tcp; - option testaddr = 127.0.0.1; - option testtime = network_time(); - option testinterval = 1sec; - option teststring = "a"; - option test_set: set[string] = {}; - option test_vector: vector of count = {}; -} - -global ct = 0; - -event Input::end_of_data(name: string, source: string) - { - if ( sub_bytes(name, 1, 7) != "config-" ) - return; - - ++ct; - - # Exit after this event has been raised for each config file. - if ( ct == 2 ) - terminate(); - - } diff --git a/testing/btest/scripts/base/frameworks/config/several-files.zeek b/testing/btest/scripts/base/frameworks/config/several-files.zeek new file mode 100644 index 0000000000..cc6d8ce8aa --- /dev/null +++ b/testing/btest/scripts/base/frameworks/config/several-files.zeek @@ -0,0 +1,57 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-canonifier | grep -v ^# | $SCRIPTS/diff-sort" btest-diff zeek/config.log + +@load base/frameworks/config +@load base/protocols/conn + +redef exit_only_after_terminate = T; +redef Config::config_files += {"../configfile1", "../configfile2"}; + +@TEST-START-FILE configfile1 +testbool F +testcount 2 +testint -1 +testenum Conn::LOG +test_set a,b,c,d,erdbeerschnitzel +test_vector 1,2,3,4,5,6 +@TEST-END-FILE + +@TEST-START-FILE configfile2 +testport 45 +testaddr 127.0.0.1 +testinterval 60 +testtime 1507321987 +@TEST-END-FILE + +@load base/protocols/ssh +@load base/protocols/conn + +export { + option testbool: bool = T; + option testcount: count = 0; + option testint: int = 0; + option testenum = SSH::LOG; + option testport = 42/tcp; + option testaddr = 127.0.0.1; + option testtime = network_time(); + option testinterval = 1sec; + option teststring = "a"; + option test_set: set[string] = {}; + option test_vector: vector of count = {}; +} + +global ct = 0; + +event Input::end_of_data(name: string, source: string) + { + if ( sub_bytes(name, 1, 7) != "config-" ) + return; + + ++ct; + + # Exit after this event has been raised for each config file. 
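# [Editorial aside -- not part of the surrounding diff. The several-files test in
# this hunk leans on a small pattern: register extra config files via redef, then
# count one Input::end_of_data event per file before terminating. A minimal sketch
# of that pattern follows; the option name and file paths are hypothetical, and the
# calls used are the ones the test itself uses.]
@load base/frameworks/config

redef exit_only_after_terminate = T;
redef Config::config_files += { "/tmp/a.cfg", "/tmp/b.cfg" };

export {
	option example_threshold: count = 0;
}

global files_done = 0;

event Input::end_of_data(name: string, source: string)
	{
	# The config framework names its streams "config-<file>", which is what
	# the tests' sub_bytes() check keys on.
	if ( sub_bytes(name, 1, 7) != "config-" )
		return;

	++files_done;

	if ( files_done == 2 )
		terminate();
	}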
+ if ( ct == 2 ) + terminate(); + + } diff --git a/testing/btest/scripts/base/frameworks/config/updates.bro b/testing/btest/scripts/base/frameworks/config/updates.bro deleted file mode 100644 index 5a2e051817..0000000000 --- a/testing/btest/scripts/base/frameworks/config/updates.bro +++ /dev/null @@ -1,111 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got1 10 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: mv configfile2 configfile -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got2 10 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: mv configfile3 configfile -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got3 10 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: mv configfile4 configfile -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff bro/config.log - -@load base/frameworks/config -@load base/protocols/conn - -redef exit_only_after_terminate = T; -redef Config::config_files += {"../configfile"}; - -@TEST-START-FILE configfile -testbool F -testcount 1 -testcount 2 -testcount 2 -testint -1 -testenum Conn::LOG -testport 45 -testaddr 127.0.0.1 -testaddr 2607:f8b0:4005:801::200e -testinterval 60 -testtime 1507321987 -test_set a,b,c,d,erdbeerschnitzel -test_vector 1,2,3,4,5,6 -@TEST-END-FILE - -@TEST-START-FILE configfile2 -testbool F -testcount 1 -testcount 2 -testcount 2 -testint -1 -testenum Conn::LOG -testport 45 -testaddr 127.0.0.1 -testaddr 2607:f8b0:4005:801::200e -testinterval 60 -testtime 1507321987 -test_set a,b,c,d,erdbeerschnitzel -test_vector 1,2,3,4,5,9 -@TEST-END-FILE - -@TEST-START-FILE configfile3 -testbool F -testcount 2 -testcount 2 -testcount 2 -testint -1 -testenum Conn::LOG -testport 45 -testinterval 60 -testtime 1507321987 -test_set a,b,c,d,erdbeerschnitzel -@TEST-END-FILE - -@TEST-START-FILE configfile4 -testbool F -testcount 2 -testcount 2 -testcount 2 -testint -1 -testenum Conn::LOG -testport 45 -testinterval 60 -testtime 1507321987 -test_set a,b,c,d,erdbeerschnitzel -test_vector 1,2,3,4,5,9 -@TEST-END-FILE - -@load base/protocols/ssh -@load base/protocols/conn - -export { - option testbool: bool = T; - option testcount: count = 0; - option testint: int = 0; - option testenum = SSH::LOG; - option testport = 42/tcp; - option testaddr = 127.0.0.1; - option testtime = network_time(); - option testinterval = 1sec; - option teststring = "a"; - option test_set: set[string] = {}; - option test_vector: vector of count = {}; -} - -global eolcount = 0; - -event Input::end_of_data(name: string, source:string) - { - print "eod"; - if ( sub_bytes(name, 1, 7) != "config-" ) - return; - - eolcount += 1; - - if ( eolcount == 1 ) - system("touch got1"); - else if ( eolcount == 2 ) - system("touch got2"); - else if ( eolcount == 3 ) - system("touch got3"); - else if ( eolcount == 4 ) - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/config/updates.zeek b/testing/btest/scripts/base/frameworks/config/updates.zeek new file mode 100644 index 0000000000..09bcc9d198 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/config/updates.zeek @@ -0,0 +1,111 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got1 10 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: mv configfile2 configfile +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got2 10 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: mv configfile3 configfile +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got3 10 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: mv configfile4 configfile +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: 
btest-diff zeek/config.log + +@load base/frameworks/config +@load base/protocols/conn + +redef exit_only_after_terminate = T; +redef Config::config_files += {"../configfile"}; + +@TEST-START-FILE configfile +testbool F +testcount 1 +testcount 2 +testcount 2 +testint -1 +testenum Conn::LOG +testport 45 +testaddr 127.0.0.1 +testaddr 2607:f8b0:4005:801::200e +testinterval 60 +testtime 1507321987 +test_set a,b,c,d,erdbeerschnitzel +test_vector 1,2,3,4,5,6 +@TEST-END-FILE + +@TEST-START-FILE configfile2 +testbool F +testcount 1 +testcount 2 +testcount 2 +testint -1 +testenum Conn::LOG +testport 45 +testaddr 127.0.0.1 +testaddr 2607:f8b0:4005:801::200e +testinterval 60 +testtime 1507321987 +test_set a,b,c,d,erdbeerschnitzel +test_vector 1,2,3,4,5,9 +@TEST-END-FILE + +@TEST-START-FILE configfile3 +testbool F +testcount 2 +testcount 2 +testcount 2 +testint -1 +testenum Conn::LOG +testport 45 +testinterval 60 +testtime 1507321987 +test_set a,b,c,d,erdbeerschnitzel +@TEST-END-FILE + +@TEST-START-FILE configfile4 +testbool F +testcount 2 +testcount 2 +testcount 2 +testint -1 +testenum Conn::LOG +testport 45 +testinterval 60 +testtime 1507321987 +test_set a,b,c,d,erdbeerschnitzel +test_vector 1,2,3,4,5,9 +@TEST-END-FILE + +@load base/protocols/ssh +@load base/protocols/conn + +export { + option testbool: bool = T; + option testcount: count = 0; + option testint: int = 0; + option testenum = SSH::LOG; + option testport = 42/tcp; + option testaddr = 127.0.0.1; + option testtime = network_time(); + option testinterval = 1sec; + option teststring = "a"; + option test_set: set[string] = {}; + option test_vector: vector of count = {}; +} + +global eolcount = 0; + +event Input::end_of_data(name: string, source:string) + { + print "eod"; + if ( sub_bytes(name, 1, 7) != "config-" ) + return; + + eolcount += 1; + + if ( eolcount == 1 ) + system("touch got1"); + else if ( eolcount == 2 ) + system("touch got2"); + else if ( eolcount == 3 ) + system("touch got3"); + else if ( eolcount == 4 ) + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/config/weird.bro b/testing/btest/scripts/base/frameworks/config/weird.bro deleted file mode 100644 index ae3e0f2153..0000000000 --- a/testing/btest/scripts/base/frameworks/config/weird.bro +++ /dev/null @@ -1,66 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/bro.org.pcap %INPUT >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: btest-diff config.log - -event bro_init() - { - Config::set_value("Weird::sampling_duration", 5sec); - Config::set_value("Weird::sampling_threshold", 10); - Config::set_value("Weird::sampling_rate", 10); - Config::set_value("Weird::sampling_whitelist", set("whitelisted_net_weird", "whitelisted_flow_weird", "whitelisted_conn_weird")); - print "Config values set"; - } - -event bro_init() &priority = -10 - { - print Reporter::get_weird_sampling_whitelist(); - print Reporter::get_weird_sampling_rate(); - print Reporter::get_weird_sampling_threshold(); - print Reporter::get_weird_sampling_duration(); - } - -event conn_weird(name: string, c: connection, addl: string) - { - print "conn_weird", name; - } - -event flow_weird(name: string, src: addr, dst: addr) - { - print "flow_weird", name; - } - -event net_weird(name: string) - { - print "net_weird", name; - } - -event gen_weirds(c: connection) - { - local num = 30; - - while ( num != 0 ) - { - Reporter::net_weird("my_net_weird"); - Reporter::flow_weird("my_flow_weird", c$id$orig_h, c$id$resp_h); - Reporter::conn_weird("my_conn_weird", c); - - Reporter::net_weird("whitelisted_net_weird"); - 
Reporter::flow_weird("whitelisted_flow_weird", c$id$orig_h, c$id$resp_h); - Reporter::conn_weird("whitelisted_conn_weird", c); - --num; - } - } - -global did_one_connection = F; - -event new_connection(c: connection) - { - if ( did_one_connection ) - return; - - did_one_connection = T; - event gen_weirds(c); # should permit 10 + 2 of each "my" weird - schedule 2sec { gen_weirds(c) }; # should permit 3 of each "my" weird - schedule 7sec { gen_weirds(c) }; # should permit 10 + 2 of each "my" weird - # Total of 27 "my" weirds of each type and 90 of each "whitelisted" type - } diff --git a/testing/btest/scripts/base/frameworks/config/weird.zeek b/testing/btest/scripts/base/frameworks/config/weird.zeek new file mode 100644 index 0000000000..300bb97101 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/config/weird.zeek @@ -0,0 +1,66 @@ +# @TEST-EXEC: zeek -r $TRACES/http/bro.org.pcap %INPUT >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: btest-diff config.log + +event zeek_init() + { + Config::set_value("Weird::sampling_duration", 5sec); + Config::set_value("Weird::sampling_threshold", 10); + Config::set_value("Weird::sampling_rate", 10); + Config::set_value("Weird::sampling_whitelist", set("whitelisted_net_weird", "whitelisted_flow_weird", "whitelisted_conn_weird")); + print "Config values set"; + } + +event zeek_init() &priority = -10 + { + print Reporter::get_weird_sampling_whitelist(); + print Reporter::get_weird_sampling_rate(); + print Reporter::get_weird_sampling_threshold(); + print Reporter::get_weird_sampling_duration(); + } + +event conn_weird(name: string, c: connection, addl: string) + { + print "conn_weird", name; + } + +event flow_weird(name: string, src: addr, dst: addr) + { + print "flow_weird", name; + } + +event net_weird(name: string) + { + print "net_weird", name; + } + +event gen_weirds(c: connection) + { + local num = 30; + + while ( num != 0 ) + { + Reporter::net_weird("my_net_weird"); + Reporter::flow_weird("my_flow_weird", c$id$orig_h, c$id$resp_h); + Reporter::conn_weird("my_conn_weird", c); + + Reporter::net_weird("whitelisted_net_weird"); + Reporter::flow_weird("whitelisted_flow_weird", c$id$orig_h, c$id$resp_h); + Reporter::conn_weird("whitelisted_conn_weird", c); + --num; + } + } + +global did_one_connection = F; + +event new_connection(c: connection) + { + if ( did_one_connection ) + return; + + did_one_connection = T; + event gen_weirds(c); # should permit 10 + 2 of each "my" weird + schedule 2sec { gen_weirds(c) }; # should permit 3 of each "my" weird + schedule 7sec { gen_weirds(c) }; # should permit 10 + 2 of each "my" weird + # Total of 27 "my" weirds of each type and 90 of each "whitelisted" type + } diff --git a/testing/btest/scripts/base/frameworks/control/configuration_update.bro b/testing/btest/scripts/base/frameworks/control/configuration_update.bro deleted file mode 100644 index e90151bcbb..0000000000 --- a/testing/btest/scripts/base/frameworks/control/configuration_update.bro +++ /dev/null @@ -1,39 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# -# @TEST-EXEC: btest-bg-run controllee BROPATH=$BROPATH:.. bro -Bbroker %INPUT frameworks/control/controllee Broker::default_port=$BROKER_PORT -# @TEST-EXEC: btest-bg-run controller BROPATH=$BROPATH:.. 
bro -Bbroker %INPUT test-redef frameworks/control/controller Control::host=127.0.0.1 Control::host_port=$BROKER_PORT Control::cmd=configuration_update -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff controllee/.stdout - -const test_var = "ORIGINAL VALUE (this should be printed out first)" &redef; - -@TEST-START-FILE test-redef.bro -redef test_var = "NEW VALUE (this should be printed out second)"; -@TEST-END-FILE - -event bro_init() - { - print test_var; - Reporter::info("handle bro_init"); - } - -event bro_done() - { - print test_var; - Reporter::info("handle bro_done"); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -event Control::configuration_update_request() - { - Reporter::info("handle Control::configuration_update_request"); - } - -event Control::configuration_update_response() - { - Reporter::info("handle Control::configuration_update_response"); - } diff --git a/testing/btest/scripts/base/frameworks/control/configuration_update.zeek b/testing/btest/scripts/base/frameworks/control/configuration_update.zeek new file mode 100644 index 0000000000..728c026d73 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/control/configuration_update.zeek @@ -0,0 +1,39 @@ +# @TEST-PORT: BROKER_PORT +# +# @TEST-EXEC: btest-bg-run controllee ZEEKPATH=$ZEEKPATH:.. zeek -Bbroker %INPUT frameworks/control/controllee Broker::default_port=$BROKER_PORT +# @TEST-EXEC: btest-bg-run controller ZEEKPATH=$ZEEKPATH:.. zeek -Bbroker %INPUT test-redef frameworks/control/controller Control::host=127.0.0.1 Control::host_port=$BROKER_PORT Control::cmd=configuration_update +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff controllee/.stdout + +const test_var = "ORIGINAL VALUE (this should be printed out first)" &redef; + +@TEST-START-FILE test-redef.zeek +redef test_var = "NEW VALUE (this should be printed out second)"; +@TEST-END-FILE + +event zeek_init() + { + print test_var; + Reporter::info("handle zeek_init"); + } + +event zeek_done() + { + print test_var; + Reporter::info("handle zeek_done"); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +event Control::configuration_update_request() + { + Reporter::info("handle Control::configuration_update_request"); + } + +event Control::configuration_update_response() + { + Reporter::info("handle Control::configuration_update_response"); + } diff --git a/testing/btest/scripts/base/frameworks/control/id_value.bro b/testing/btest/scripts/base/frameworks/control/id_value.bro deleted file mode 100644 index 2528b28c25..0000000000 --- a/testing/btest/scripts/base/frameworks/control/id_value.bro +++ /dev/null @@ -1,26 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# -# @TEST-EXEC: btest-bg-run controllee BROPATH=$BROPATH:.. bro %INPUT only-for-controllee frameworks/control/controllee Broker::default_port=$BROKER_PORT -# @TEST-EXEC: btest-bg-run controller BROPATH=$BROPATH:.. bro %INPUT frameworks/control/controller Control::host=127.0.0.1 Control::host_port=$BROKER_PORT Control::cmd=id_value Control::arg=test_var -# @TEST-EXEC: btest-bg-wait -k 10 -# @TEST-EXEC: btest-diff controller/.stdout - -# This value shouldn't ever be printed to the controllers stdout. -const test_var = "Original value" &redef; - -@TEST-START-FILE only-for-controllee.bro -# This is only loaded on the controllee, but it's sent to the controller -# and should be printed there. 
-redef test_var = "This is the value from the controllee"; -@TEST-END-FILE - -event die() - { - terminate(); - } - -event Control::id_value_response(id: string, val: string) - { - print fmt("Got an id_value_response(%s, %s) event", id, val); - schedule 2sec { die() }; - } diff --git a/testing/btest/scripts/base/frameworks/control/id_value.zeek b/testing/btest/scripts/base/frameworks/control/id_value.zeek new file mode 100644 index 0000000000..2e60957f31 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/control/id_value.zeek @@ -0,0 +1,26 @@ +# @TEST-PORT: BROKER_PORT +# +# @TEST-EXEC: btest-bg-run controllee ZEEKPATH=$ZEEKPATH:.. zeek %INPUT only-for-controllee frameworks/control/controllee Broker::default_port=$BROKER_PORT +# @TEST-EXEC: btest-bg-run controller ZEEKPATH=$ZEEKPATH:.. zeek %INPUT frameworks/control/controller Control::host=127.0.0.1 Control::host_port=$BROKER_PORT Control::cmd=id_value Control::arg=test_var +# @TEST-EXEC: btest-bg-wait -k 10 +# @TEST-EXEC: btest-diff controller/.stdout + +# This value shouldn't ever be printed to the controllers stdout. +const test_var = "Original value" &redef; + +@TEST-START-FILE only-for-controllee.zeek +# This is only loaded on the controllee, but it's sent to the controller +# and should be printed there. +redef test_var = "This is the value from the controllee"; +@TEST-END-FILE + +event die() + { + terminate(); + } + +event Control::id_value_response(id: string, val: string) + { + print fmt("Got an id_value_response(%s, %s) event", id, val); + schedule 2sec { die() }; + } diff --git a/testing/btest/scripts/base/frameworks/control/shutdown.bro b/testing/btest/scripts/base/frameworks/control/shutdown.bro deleted file mode 100644 index a8089bf08a..0000000000 --- a/testing/btest/scripts/base/frameworks/control/shutdown.bro +++ /dev/null @@ -1,6 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# -# @TEST-EXEC: btest-bg-run controllee BROPATH=$BROPATH:.. bro %INPUT frameworks/control/controllee Broker::default_port=$BROKER_PORT -# @TEST-EXEC: btest-bg-run controller BROPATH=$BROPATH:.. bro %INPUT frameworks/control/controller Control::host=127.0.0.1 Control::host_port=$BROKER_PORT Control::cmd=shutdown -# @TEST-EXEC: btest-bg-wait 10 - diff --git a/testing/btest/scripts/base/frameworks/control/shutdown.zeek b/testing/btest/scripts/base/frameworks/control/shutdown.zeek new file mode 100644 index 0000000000..3fd58ef033 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/control/shutdown.zeek @@ -0,0 +1,6 @@ +# @TEST-PORT: BROKER_PORT +# +# @TEST-EXEC: btest-bg-run controllee ZEEKPATH=$ZEEKPATH:.. zeek %INPUT frameworks/control/controllee Broker::default_port=$BROKER_PORT +# @TEST-EXEC: btest-bg-run controller ZEEKPATH=$ZEEKPATH:.. 
zeek %INPUT frameworks/control/controller Control::host=127.0.0.1 Control::host_port=$BROKER_PORT Control::cmd=shutdown +# @TEST-EXEC: btest-bg-wait 10 + diff --git a/testing/btest/scripts/base/frameworks/file-analysis/actions/data_event.bro b/testing/btest/scripts/base/frameworks/file-analysis/actions/data_event.bro deleted file mode 100644 index bcecbd8aa3..0000000000 --- a/testing/btest/scripts/base/frameworks/file-analysis/actions/data_event.bro +++ /dev/null @@ -1,4 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/get.trace $SCRIPTS/file-analysis-test.bro %INPUT >out -# @TEST-EXEC: btest-diff out - -redef test_print_file_data_events = T; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/actions/data_event.zeek b/testing/btest/scripts/base/frameworks/file-analysis/actions/data_event.zeek new file mode 100644 index 0000000000..d5ecb55445 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/file-analysis/actions/data_event.zeek @@ -0,0 +1,4 @@ +# @TEST-EXEC: zeek -r $TRACES/http/get.trace $SCRIPTS/file-analysis-test.zeek %INPUT >out +# @TEST-EXEC: btest-diff out + +redef test_print_file_data_events = T; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/bifs/file_exists_lookup_file.bro b/testing/btest/scripts/base/frameworks/file-analysis/bifs/file_exists_lookup_file.bro deleted file mode 100644 index cba82bbfab..0000000000 --- a/testing/btest/scripts/base/frameworks/file-analysis/bifs/file_exists_lookup_file.bro +++ /dev/null @@ -1,21 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/get.trace %INPUT 2>&1 -# @TEST-EXEC: btest-diff .stdout - -event bro_init() - { - print "This should fail but not crash"; - print Files::lookup_file("asdf"); - - print "This should return F"; - print Files::file_exists("asdf"); - } - -event file_sniff(f: fa_file, meta: fa_metadata) - { - print "lookup fid: " + f$id; - local looked_up_file = Files::lookup_file(f$id); - print "We should have found the file id: " + looked_up_file$id ; - - print "This should return T"; - print Files::file_exists(f$id); - } diff --git a/testing/btest/scripts/base/frameworks/file-analysis/bifs/file_exists_lookup_file.zeek b/testing/btest/scripts/base/frameworks/file-analysis/bifs/file_exists_lookup_file.zeek new file mode 100644 index 0000000000..c3a6fe208b --- /dev/null +++ b/testing/btest/scripts/base/frameworks/file-analysis/bifs/file_exists_lookup_file.zeek @@ -0,0 +1,21 @@ +# @TEST-EXEC: zeek -r $TRACES/http/get.trace %INPUT 2>&1 +# @TEST-EXEC: btest-diff .stdout + +event zeek_init() + { + print "This should fail but not crash"; + print Files::lookup_file("asdf"); + + print "This should return F"; + print Files::file_exists("asdf"); + } + +event file_sniff(f: fa_file, meta: fa_metadata) + { + print "lookup fid: " + f$id; + local looked_up_file = Files::lookup_file(f$id); + print "We should have found the file id: " + looked_up_file$id ; + + print "This should return T"; + print Files::file_exists(f$id); + } diff --git a/testing/btest/scripts/base/frameworks/file-analysis/bifs/register_mime_type.bro b/testing/btest/scripts/base/frameworks/file-analysis/bifs/register_mime_type.bro deleted file mode 100644 index 9b6d11ce0d..0000000000 --- a/testing/btest/scripts/base/frameworks/file-analysis/bifs/register_mime_type.bro +++ /dev/null @@ -1,9 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/get.trace %INPUT -# @TEST-EXEC: btest-diff files.log - -event bro_init() - { - Files::register_for_mime_type(Files::ANALYZER_MD5, "text/plain"); - }; - - diff --git 
a/testing/btest/scripts/base/frameworks/file-analysis/bifs/register_mime_type.zeek b/testing/btest/scripts/base/frameworks/file-analysis/bifs/register_mime_type.zeek new file mode 100644 index 0000000000..2392c8558d --- /dev/null +++ b/testing/btest/scripts/base/frameworks/file-analysis/bifs/register_mime_type.zeek @@ -0,0 +1,9 @@ +# @TEST-EXEC: zeek -r $TRACES/http/get.trace %INPUT +# @TEST-EXEC: btest-diff files.log + +event zeek_init() + { + Files::register_for_mime_type(Files::ANALYZER_MD5, "text/plain"); + }; + + diff --git a/testing/btest/scripts/base/frameworks/file-analysis/bifs/remove_action.bro b/testing/btest/scripts/base/frameworks/file-analysis/bifs/remove_action.bro deleted file mode 100644 index a3704618bd..0000000000 --- a/testing/btest/scripts/base/frameworks/file-analysis/bifs/remove_action.bro +++ /dev/null @@ -1,18 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/get.trace $SCRIPTS/file-analysis-test.bro %INPUT >get.out -# @TEST-EXEC: btest-diff get.out - -redef test_file_analysis_source = "HTTP"; - -redef test_get_file_name = function(f: fa_file): string - { - return fmt("%s-file", f$id); - }; - -event file_new(f: fa_file) &priority=-10 - { - for ( tag in test_file_analyzers ) - Files::remove_analyzer(f, tag); - local filename = test_get_file_name(f); - Files::remove_analyzer(f, Files::ANALYZER_EXTRACT, - [$extract_filename=filename]); - } diff --git a/testing/btest/scripts/base/frameworks/file-analysis/bifs/remove_action.zeek b/testing/btest/scripts/base/frameworks/file-analysis/bifs/remove_action.zeek new file mode 100644 index 0000000000..3d2d9b5949 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/file-analysis/bifs/remove_action.zeek @@ -0,0 +1,18 @@ +# @TEST-EXEC: zeek -r $TRACES/http/get.trace $SCRIPTS/file-analysis-test.zeek %INPUT >get.out +# @TEST-EXEC: btest-diff get.out + +redef test_file_analysis_source = "HTTP"; + +redef test_get_file_name = function(f: fa_file): string + { + return fmt("%s-file", f$id); + }; + +event file_new(f: fa_file) &priority=-10 + { + for ( tag in test_file_analyzers ) + Files::remove_analyzer(f, tag); + local filename = test_get_file_name(f); + Files::remove_analyzer(f, Files::ANALYZER_EXTRACT, + [$extract_filename=filename]); + } diff --git a/testing/btest/scripts/base/frameworks/file-analysis/bifs/set_timeout_interval.bro b/testing/btest/scripts/base/frameworks/file-analysis/bifs/set_timeout_interval.bro deleted file mode 100644 index c9eac4c31d..0000000000 --- a/testing/btest/scripts/base/frameworks/file-analysis/bifs/set_timeout_interval.bro +++ /dev/null @@ -1,27 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -r $TRACES/http/206_example_b.pcap $SCRIPTS/file-analysis-test.bro %INPUT -# @TEST-EXEC: btest-bg-wait 8 -# @TEST-EXEC: btest-diff bro/.stdout - -global cnt: count = 0; -global timeout_cnt: count = 0; - -redef test_file_analysis_source = "HTTP"; - -redef test_get_file_name = function(f: fa_file): string - { - local rval: string = fmt("%s-file%d", f$id, cnt); - ++cnt; - return rval; - }; - -redef exit_only_after_terminate = T; -redef default_file_timeout_interval = 2sec; - -event file_timeout(f: fa_file) - { - if ( timeout_cnt < 1 ) - Files::set_timeout_interval(f, f$timeout_interval); - else - terminate(); - ++timeout_cnt; - } diff --git a/testing/btest/scripts/base/frameworks/file-analysis/bifs/set_timeout_interval.zeek b/testing/btest/scripts/base/frameworks/file-analysis/bifs/set_timeout_interval.zeek new file mode 100644 index 0000000000..c78bb521a8 --- /dev/null +++ 
b/testing/btest/scripts/base/frameworks/file-analysis/bifs/set_timeout_interval.zeek @@ -0,0 +1,27 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -r $TRACES/http/206_example_b.pcap $SCRIPTS/file-analysis-test.zeek %INPUT +# @TEST-EXEC: btest-bg-wait 8 +# @TEST-EXEC: btest-diff zeek/.stdout + +global cnt: count = 0; +global timeout_cnt: count = 0; + +redef test_file_analysis_source = "HTTP"; + +redef test_get_file_name = function(f: fa_file): string + { + local rval: string = fmt("%s-file%d", f$id, cnt); + ++cnt; + return rval; + }; + +redef exit_only_after_terminate = T; +redef default_file_timeout_interval = 2sec; + +event file_timeout(f: fa_file) + { + if ( timeout_cnt < 1 ) + Files::set_timeout_interval(f, f$timeout_interval); + else + terminate(); + ++timeout_cnt; + } diff --git a/testing/btest/scripts/base/frameworks/file-analysis/bifs/stop.bro b/testing/btest/scripts/base/frameworks/file-analysis/bifs/stop.bro deleted file mode 100644 index dd40c69684..0000000000 --- a/testing/btest/scripts/base/frameworks/file-analysis/bifs/stop.bro +++ /dev/null @@ -1,8 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/get.trace $SCRIPTS/file-analysis-test.bro %INPUT >get.out -# @TEST-EXEC: btest-diff get.out -# @TEST-EXEC: test ! -s Cx92a0ym5R8-file - -event file_new(f: fa_file) - { - Files::stop(f); - } diff --git a/testing/btest/scripts/base/frameworks/file-analysis/bifs/stop.zeek b/testing/btest/scripts/base/frameworks/file-analysis/bifs/stop.zeek new file mode 100644 index 0000000000..e70ea5a553 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/file-analysis/bifs/stop.zeek @@ -0,0 +1,8 @@ +# @TEST-EXEC: zeek -r $TRACES/http/get.trace $SCRIPTS/file-analysis-test.zeek %INPUT >get.out +# @TEST-EXEC: btest-diff get.out +# @TEST-EXEC: test ! -s Cx92a0ym5R8-file + +event file_new(f: fa_file) + { + Files::stop(f); + } diff --git a/testing/btest/scripts/base/frameworks/file-analysis/big-bof-buffer.bro b/testing/btest/scripts/base/frameworks/file-analysis/big-bof-buffer.bro deleted file mode 100644 index 0f7e23ddcf..0000000000 --- a/testing/btest/scripts/base/frameworks/file-analysis/big-bof-buffer.bro +++ /dev/null @@ -1,6 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/get.trace %INPUT -# @TEST-EXEC: btest-diff files.log - -@load frameworks/files/hash-all-files - -redef default_file_bof_buffer_size=5000; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/big-bof-buffer.zeek b/testing/btest/scripts/base/frameworks/file-analysis/big-bof-buffer.zeek new file mode 100644 index 0000000000..fdf320cd43 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/file-analysis/big-bof-buffer.zeek @@ -0,0 +1,6 @@ +# @TEST-EXEC: zeek -r $TRACES/http/get.trace %INPUT +# @TEST-EXEC: btest-diff files.log + +@load frameworks/files/hash-all-files + +redef default_file_bof_buffer_size=5000; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/byteranges.bro b/testing/btest/scripts/base/frameworks/file-analysis/byteranges.bro deleted file mode 100644 index 7cf0ef239c..0000000000 --- a/testing/btest/scripts/base/frameworks/file-analysis/byteranges.bro +++ /dev/null @@ -1,6 +0,0 @@ -# This used to crash the file reassemly code. 
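# [Editorial aside -- not part of the surrounding diff. The stop and
# set_timeout_interval tests above each reduce to a single per-file hook. Both
# hooks are sketched side by side below purely for illustration (a real script
# would usually pick one); only calls that appear in those tests are used, and the
# 2sec timeout mirrors the test's redef rather than a recommended value.]
redef default_file_timeout_interval = 2sec;

event file_new(f: fa_file)
	{
	# Abandon analysis of files we are not interested in.
	Files::stop(f);
	}

event file_timeout(f: fa_file)
	{
	# Or grant a file one more timeout period before it is cleaned up.
	Files::set_timeout_interval(f, f$timeout_interval);
	}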
-# -# @TEST-EXEC: bro -r $TRACES/http/byteranges.trace frameworks/files/extract-all-files FileExtract::default_limit=4000 -# -# @TEST-EXEC: btest-diff files.log - diff --git a/testing/btest/scripts/base/frameworks/file-analysis/byteranges.zeek b/testing/btest/scripts/base/frameworks/file-analysis/byteranges.zeek new file mode 100644 index 0000000000..583a97481e --- /dev/null +++ b/testing/btest/scripts/base/frameworks/file-analysis/byteranges.zeek @@ -0,0 +1,6 @@ +# This used to crash the file reassemly code. +# +# @TEST-EXEC: zeek -r $TRACES/http/byteranges.trace frameworks/files/extract-all-files FileExtract::default_limit=4000 +# +# @TEST-EXEC: btest-diff files.log + diff --git a/testing/btest/scripts/base/frameworks/file-analysis/ftp.bro b/testing/btest/scripts/base/frameworks/file-analysis/ftp.bro deleted file mode 100644 index 2c2da188fe..0000000000 --- a/testing/btest/scripts/base/frameworks/file-analysis/ftp.bro +++ /dev/null @@ -1,10 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/ftp/retr.trace $SCRIPTS/file-analysis-test.bro %INPUT >out -# @TEST-EXEC: btest-diff out -# @TEST-EXEC: btest-diff thefile - -redef test_file_analysis_source = "FTP_DATA"; - -redef test_get_file_name = function(f: fa_file): string - { - return "thefile"; - }; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/ftp.zeek b/testing/btest/scripts/base/frameworks/file-analysis/ftp.zeek new file mode 100644 index 0000000000..43a6506f6c --- /dev/null +++ b/testing/btest/scripts/base/frameworks/file-analysis/ftp.zeek @@ -0,0 +1,10 @@ +# @TEST-EXEC: zeek -r $TRACES/ftp/retr.trace $SCRIPTS/file-analysis-test.zeek %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff thefile + +redef test_file_analysis_source = "FTP_DATA"; + +redef test_get_file_name = function(f: fa_file): string + { + return "thefile"; + }; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/http/get.bro b/testing/btest/scripts/base/frameworks/file-analysis/http/get.bro deleted file mode 100644 index f7f4a0395b..0000000000 --- a/testing/btest/scripts/base/frameworks/file-analysis/http/get.bro +++ /dev/null @@ -1,15 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/get.trace $SCRIPTS/file-analysis-test.bro %INPUT c=1 >get.out -# @TEST-EXEC: bro -r $TRACES/http/get-gzip.trace $SCRIPTS/file-analysis-test.bro %INPUT c=2 >get-gzip.out -# @TEST-EXEC: btest-diff get.out -# @TEST-EXEC: btest-diff get-gzip.out -# @TEST-EXEC: btest-diff 1-file -# @TEST-EXEC: btest-diff 2-file - -redef test_file_analysis_source = "HTTP"; - -global c = 0 &redef; - -redef test_get_file_name = function(f: fa_file): string - { - return fmt("%d-file", c); - }; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/http/get.zeek b/testing/btest/scripts/base/frameworks/file-analysis/http/get.zeek new file mode 100644 index 0000000000..e62a952410 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/file-analysis/http/get.zeek @@ -0,0 +1,15 @@ +# @TEST-EXEC: zeek -r $TRACES/http/get.trace $SCRIPTS/file-analysis-test.zeek %INPUT c=1 >get.out +# @TEST-EXEC: zeek -r $TRACES/http/get-gzip.trace $SCRIPTS/file-analysis-test.zeek %INPUT c=2 >get-gzip.out +# @TEST-EXEC: btest-diff get.out +# @TEST-EXEC: btest-diff get-gzip.out +# @TEST-EXEC: btest-diff 1-file +# @TEST-EXEC: btest-diff 2-file + +redef test_file_analysis_source = "HTTP"; + +global c = 0 &redef; + +redef test_get_file_name = function(f: fa_file): string + { + return fmt("%d-file", c); + }; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/http/multipart.bro 
b/testing/btest/scripts/base/frameworks/file-analysis/http/multipart.bro deleted file mode 100644 index 57fe2348c2..0000000000 --- a/testing/btest/scripts/base/frameworks/file-analysis/http/multipart.bro +++ /dev/null @@ -1,16 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/multipart.trace $SCRIPTS/file-analysis-test.bro %INPUT >out -# @TEST-EXEC: btest-diff out -# @TEST-EXEC: btest-diff 1-file -# @TEST-EXEC: btest-diff 2-file -# @TEST-EXEC: btest-diff 3-file -# @TEST-EXEC: btest-diff 4-file - -redef test_file_analysis_source = "HTTP"; - -global cnt: count = 0; - -redef test_get_file_name = function(f: fa_file): string - { - ++cnt; - return fmt("%d-file", cnt); - }; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/http/multipart.zeek b/testing/btest/scripts/base/frameworks/file-analysis/http/multipart.zeek new file mode 100644 index 0000000000..7cc1efda09 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/file-analysis/http/multipart.zeek @@ -0,0 +1,16 @@ +# @TEST-EXEC: zeek -r $TRACES/http/multipart.trace $SCRIPTS/file-analysis-test.zeek %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff 1-file +# @TEST-EXEC: btest-diff 2-file +# @TEST-EXEC: btest-diff 3-file +# @TEST-EXEC: btest-diff 4-file + +redef test_file_analysis_source = "HTTP"; + +global cnt: count = 0; + +redef test_get_file_name = function(f: fa_file): string + { + ++cnt; + return fmt("%d-file", cnt); + }; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/http/partial-content.bro b/testing/btest/scripts/base/frameworks/file-analysis/http/partial-content.bro deleted file mode 100644 index 93443f0ca8..0000000000 --- a/testing/btest/scripts/base/frameworks/file-analysis/http/partial-content.bro +++ /dev/null @@ -1,25 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/206_example_a.pcap $SCRIPTS/file-analysis-test.bro %INPUT >a.out -# @TEST-EXEC: btest-diff a.out -# @TEST-EXEC: wc -c file-0 | sed 's/^[ \t]* //g' >a.size -# @TEST-EXEC: btest-diff a.size - -# @TEST-EXEC: bro -r $TRACES/http/206_example_b.pcap $SCRIPTS/file-analysis-test.bro %INPUT >b.out -# @TEST-EXEC: btest-diff b.out -# @TEST-EXEC: wc -c file-0 | sed 's/^[ \t]* //g' >b.size -# @TEST-EXEC: btest-diff b.size - -# @TEST-EXEC: bro -r $TRACES/http/206_example_c.pcap $SCRIPTS/file-analysis-test.bro %INPUT >c.out -# @TEST-EXEC: btest-diff c.out -# @TEST-EXEC: wc -c file-0 | sed 's/^[ \t]* //g' >c.size -# @TEST-EXEC: btest-diff c.size - -global cnt: count = 0; - -redef test_file_analysis_source = "HTTP"; - -redef test_get_file_name = function(f: fa_file): string - { - local rval: string = fmt("file-%d", cnt); - ++cnt; - return rval; - }; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/http/partial-content.zeek b/testing/btest/scripts/base/frameworks/file-analysis/http/partial-content.zeek new file mode 100644 index 0000000000..c675adbb40 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/file-analysis/http/partial-content.zeek @@ -0,0 +1,25 @@ +# @TEST-EXEC: zeek -r $TRACES/http/206_example_a.pcap $SCRIPTS/file-analysis-test.zeek %INPUT >a.out +# @TEST-EXEC: btest-diff a.out +# @TEST-EXEC: wc -c file-0 | sed 's/^[ \t]* //g' >a.size +# @TEST-EXEC: btest-diff a.size + +# @TEST-EXEC: zeek -r $TRACES/http/206_example_b.pcap $SCRIPTS/file-analysis-test.zeek %INPUT >b.out +# @TEST-EXEC: btest-diff b.out +# @TEST-EXEC: wc -c file-0 | sed 's/^[ \t]* //g' >b.size +# @TEST-EXEC: btest-diff b.size + +# @TEST-EXEC: zeek -r $TRACES/http/206_example_c.pcap $SCRIPTS/file-analysis-test.zeek %INPUT >c.out +# @TEST-EXEC: 
btest-diff c.out +# @TEST-EXEC: wc -c file-0 | sed 's/^[ \t]* //g' >c.size +# @TEST-EXEC: btest-diff c.size + +global cnt: count = 0; + +redef test_file_analysis_source = "HTTP"; + +redef test_get_file_name = function(f: fa_file): string + { + local rval: string = fmt("file-%d", cnt); + ++cnt; + return rval; + }; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/http/pipeline.bro b/testing/btest/scripts/base/frameworks/file-analysis/http/pipeline.bro deleted file mode 100644 index 36743a8bad..0000000000 --- a/testing/btest/scripts/base/frameworks/file-analysis/http/pipeline.bro +++ /dev/null @@ -1,16 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/pipelined-requests.trace $SCRIPTS/file-analysis-test.bro %INPUT >out -# @TEST-EXEC: btest-diff out -# @TEST-EXEC: btest-diff 1-file -# @TEST-EXEC: btest-diff 2-file -# @TEST-EXEC: btest-diff 3-file -# @TEST-EXEC: btest-diff 4-file -# @TEST-EXEC: btest-diff 5-file - -redef test_file_analysis_source = "HTTP"; - -global c = 0; - -redef test_get_file_name = function(f: fa_file): string - { - return fmt("%d-file", ++c); - }; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/http/pipeline.zeek b/testing/btest/scripts/base/frameworks/file-analysis/http/pipeline.zeek new file mode 100644 index 0000000000..acc635ae29 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/file-analysis/http/pipeline.zeek @@ -0,0 +1,16 @@ +# @TEST-EXEC: zeek -r $TRACES/http/pipelined-requests.trace $SCRIPTS/file-analysis-test.zeek %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff 1-file +# @TEST-EXEC: btest-diff 2-file +# @TEST-EXEC: btest-diff 3-file +# @TEST-EXEC: btest-diff 4-file +# @TEST-EXEC: btest-diff 5-file + +redef test_file_analysis_source = "HTTP"; + +global c = 0; + +redef test_get_file_name = function(f: fa_file): string + { + return fmt("%d-file", ++c); + }; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/http/post.bro b/testing/btest/scripts/base/frameworks/file-analysis/http/post.bro deleted file mode 100644 index 79ac1cb5c1..0000000000 --- a/testing/btest/scripts/base/frameworks/file-analysis/http/post.bro +++ /dev/null @@ -1,13 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/post.trace $SCRIPTS/file-analysis-test.bro %INPUT >out -# @TEST-EXEC: btest-diff out -# @TEST-EXEC: btest-diff 1-file -# @TEST-EXEC: btest-diff 2-file - -redef test_file_analysis_source = "HTTP"; - -global c = 0; - -redef test_get_file_name = function(f: fa_file): string - { - return fmt("%d-file", ++c); - }; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/http/post.zeek b/testing/btest/scripts/base/frameworks/file-analysis/http/post.zeek new file mode 100644 index 0000000000..122c188b6c --- /dev/null +++ b/testing/btest/scripts/base/frameworks/file-analysis/http/post.zeek @@ -0,0 +1,13 @@ +# @TEST-EXEC: zeek -r $TRACES/http/post.trace $SCRIPTS/file-analysis-test.zeek %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff 1-file +# @TEST-EXEC: btest-diff 2-file + +redef test_file_analysis_source = "HTTP"; + +global c = 0; + +redef test_get_file_name = function(f: fa_file): string + { + return fmt("%d-file", ++c); + }; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/input/basic.bro b/testing/btest/scripts/base/frameworks/file-analysis/input/basic.bro deleted file mode 100644 index 053341c840..0000000000 --- a/testing/btest/scripts/base/frameworks/file-analysis/input/basic.bro +++ /dev/null @@ -1,32 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b $SCRIPTS/file-analysis-test.bro %INPUT 
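# [Editorial aside -- not part of the surrounding diff. The file-analysis/input
# tests in this hunk push a file from disk through the file analysis framework via
# the input framework. The minimal shape of that is sketched below; the source path
# is hypothetical, while the reader, mode and events are the ones the test uses.]
redef exit_only_after_terminate = T;

event zeek_init()
	{
	local source = "/tmp/some-file.bin";
	Input::add_analysis([$source=source, $reader=Input::READER_BINARY,
	                     $mode=Input::MANUAL, $name=source]);
	Input::remove(source);
	}

event file_state_remove(f: fa_file) &priority=-10
	{
	# All analyzers are done with the file; shut the test down.
	terminate();
	}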
-# @TEST-EXEC: btest-bg-wait 8 -# @TEST-EXEC: btest-diff bro/.stdout -# @TEST-EXEC: diff -q bro/FK8WqY1Q9U1rVxnDge-file input.log - -redef exit_only_after_terminate = T; - -redef test_get_file_name = function(f: fa_file): string - { - return fmt("%s-file", f$id); - }; - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc ss se vc ve ns -#types bool int enum count port subnet addr double time interval string table table table vector vector string -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY 4242 -@TEST-END-FILE - -event bro_init() - { - local source: string = "../input.log"; - Input::add_analysis([$source=source, $reader=Input::READER_BINARY, - $mode=Input::MANUAL, $name=source]); - Input::remove(source); - } - -event file_state_remove(f: fa_file) &priority=-10 - { - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/file-analysis/input/basic.zeek b/testing/btest/scripts/base/frameworks/file-analysis/input/basic.zeek new file mode 100644 index 0000000000..3051459945 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/file-analysis/input/basic.zeek @@ -0,0 +1,32 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b $SCRIPTS/file-analysis-test.zeek %INPUT +# @TEST-EXEC: btest-bg-wait 8 +# @TEST-EXEC: btest-diff zeek/.stdout +# @TEST-EXEC: diff -q zeek/FK8WqY1Q9U1rVxnDge-file input.log + +redef exit_only_after_terminate = T; + +redef test_get_file_name = function(f: fa_file): string + { + return fmt("%s-file", f$id); + }; + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve ns +#types bool int enum count port subnet addr double time interval string table table table vector vector string +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY 4242 +@TEST-END-FILE + +event zeek_init() + { + local source: string = "../input.log"; + Input::add_analysis([$source=source, $reader=Input::READER_BINARY, + $mode=Input::MANUAL, $name=source]); + Input::remove(source); + } + +event file_state_remove(f: fa_file) &priority=-10 + { + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/file-analysis/irc.bro b/testing/btest/scripts/base/frameworks/file-analysis/irc.bro deleted file mode 100644 index 9fd8e06613..0000000000 --- a/testing/btest/scripts/base/frameworks/file-analysis/irc.bro +++ /dev/null @@ -1,20 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/irc-dcc-send.trace $SCRIPTS/file-analysis-test.bro %INPUT >out -# @TEST-EXEC: btest-diff out -# @TEST-EXEC: btest-diff thefile - -redef test_file_analysis_source = "IRC_DATA"; - -global first: bool = T; - -function myfile(f: fa_file): string - { - if ( first ) - { - first = F; - return "thefile"; - } - else - return ""; - } - -redef test_get_file_name = myfile; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/irc.zeek b/testing/btest/scripts/base/frameworks/file-analysis/irc.zeek new file mode 100644 index 0000000000..4b3e641f34 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/file-analysis/irc.zeek @@ -0,0 +1,20 @@ +# @TEST-EXEC: zeek -r $TRACES/irc-dcc-send.trace $SCRIPTS/file-analysis-test.zeek %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff thefile + +redef test_file_analysis_source = "IRC_DATA"; + +global first: bool = T; + +function myfile(f: fa_file): string + { + if ( first ) + { + first = F; + return "thefile"; + } + else + return ""; + } + +redef 
test_get_file_name = myfile; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/logging.bro b/testing/btest/scripts/base/frameworks/file-analysis/logging.bro deleted file mode 100644 index 1d1f5fd721..0000000000 --- a/testing/btest/scripts/base/frameworks/file-analysis/logging.bro +++ /dev/null @@ -1,9 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/get.trace $SCRIPTS/file-analysis-test.bro %INPUT -# @TEST-EXEC: btest-diff files.log - -redef test_file_analysis_source = "HTTP"; - -redef test_get_file_name = function(f: fa_file): string - { - return fmt("%s-file", f$id); - }; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/logging.zeek b/testing/btest/scripts/base/frameworks/file-analysis/logging.zeek new file mode 100644 index 0000000000..96c302a31a --- /dev/null +++ b/testing/btest/scripts/base/frameworks/file-analysis/logging.zeek @@ -0,0 +1,9 @@ +# @TEST-EXEC: zeek -r $TRACES/http/get.trace $SCRIPTS/file-analysis-test.zeek %INPUT +# @TEST-EXEC: btest-diff files.log + +redef test_file_analysis_source = "HTTP"; + +redef test_get_file_name = function(f: fa_file): string + { + return fmt("%s-file", f$id); + }; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/smtp.bro b/testing/btest/scripts/base/frameworks/file-analysis/smtp.bro deleted file mode 100644 index 79b929c4cd..0000000000 --- a/testing/btest/scripts/base/frameworks/file-analysis/smtp.bro +++ /dev/null @@ -1,16 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/smtp.trace $SCRIPTS/file-analysis-test.bro %INPUT >out -# @TEST-EXEC: btest-diff out -# @TEST-EXEC: btest-diff thefile0 -# @TEST-EXEC: btest-diff thefile1 -# @TEST-EXEC: btest-diff thefile2 - -redef test_file_analysis_source = "SMTP"; - -global mycnt: count = 0; - -redef test_get_file_name = function(f: fa_file): string - { - local rval: string = fmt("thefile%d", mycnt); - ++mycnt; - return rval; - }; diff --git a/testing/btest/scripts/base/frameworks/file-analysis/smtp.zeek b/testing/btest/scripts/base/frameworks/file-analysis/smtp.zeek new file mode 100644 index 0000000000..0fddcc7f98 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/file-analysis/smtp.zeek @@ -0,0 +1,16 @@ +# @TEST-EXEC: zeek -r $TRACES/smtp.trace $SCRIPTS/file-analysis-test.zeek %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff thefile0 +# @TEST-EXEC: btest-diff thefile1 +# @TEST-EXEC: btest-diff thefile2 + +redef test_file_analysis_source = "SMTP"; + +global mycnt: count = 0; + +redef test_get_file_name = function(f: fa_file): string + { + local rval: string = fmt("thefile%d", mycnt); + ++mycnt; + return rval; + }; diff --git a/testing/btest/scripts/base/frameworks/input/bad_patterns.zeek b/testing/btest/scripts/base/frameworks/input/bad_patterns.zeek new file mode 100644 index 0000000000..23d25b516b --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/bad_patterns.zeek @@ -0,0 +1,38 @@ +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff .stderr + +@TEST-START-FILE input.log +#separator \x09 +#fields i p +#types count pattern +1 /d/og/ +2 /cat/sss +3 /foo|bar +4 this is not a pattern +5 /5 +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + p: pattern; +}; + +event kill_me() + { + terminate(); + } + +global pats: table[int] of Val = table(); + +event zeek_init() + { + Input::add_table([$source="input.log", $name="pats", $idx=Idx, $val=Val, $destination=pats]); + schedule 10msec { kill_me() }; + } diff --git 
a/testing/btest/scripts/base/frameworks/input/basic.bro b/testing/btest/scripts/base/frameworks/input/basic.bro deleted file mode 100644 index 356b87d70b..0000000000 --- a/testing/btest/scripts/base/frameworks/input/basic.bro +++ /dev/null @@ -1,64 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -redef exit_only_after_terminate = T; - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields b bt i e c p pp sn a d t iv s sc ss se vc ve ns -#types bool int enum count port port subnet addr double time interval string table table table vector vector string -T 1 -42 SSH::LOG 21 123 5/icmp 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY 4242 -@TEST-END-FILE - -@load base/protocols/ssh - -global outfile: file; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - b: bool; - bt: bool; - e: Log::ID; - c: count; - p: port; - pp: port; - sn: subnet; - a: addr; - d: double; - t: time; - iv: interval; - s: string; - ns: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of int; - ve: vector of int; -}; - -global servers: table[int] of Val = table(); - -event bro_init() - { - outfile = open("../out"); - # first read in the old stuff into the table... - Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); - } - -event Input::end_of_data(name: string, source:string) - { - print outfile, servers; - print outfile, to_count(servers[-42]$ns); # try to actually use a string. If null-termination is wrong this will fail. - Input::remove("ssh"); - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/basic.zeek b/testing/btest/scripts/base/frameworks/input/basic.zeek new file mode 100644 index 0000000000..e96784fc0d --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/basic.zeek @@ -0,0 +1,64 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +redef exit_only_after_terminate = T; + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields b bt i e c p pp sn a d t iv s sc ss se vc ve ns +#types bool int enum count port port subnet addr double time interval string table table table vector vector string +T 1 -42 SSH::LOG 21 123 5/icmp 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY 4242 +@TEST-END-FILE + +@load base/protocols/ssh + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; + bt: bool; + e: Log::ID; + c: count; + p: port; + pp: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + ns: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of int; + ve: vector of int; +}; + +global servers: table[int] of Val = table(); + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, servers; + print outfile, to_count(servers[-42]$ns); # try to actually use a string. If null-termination is wrong this will fail. 
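# [Editorial aside -- not part of the surrounding diff. The input/basic test around
# this point follows the usual table-read lifecycle: declare index and value record
# types, add a table stream, then tear it down once Input::end_of_data fires. A
# minimal sketch of that lifecycle; the stream name, source path and record fields
# are hypothetical.]
redef exit_only_after_terminate = T;

type Idx: record {
	i: int;
};

type Val: record {
	s: string;
};

global servers: table[int] of Val = table();

event zeek_init()
	{
	Input::add_table([$source="/tmp/servers.log", $name="servers",
	                  $idx=Idx, $val=Val, $destination=servers]);
	}

event Input::end_of_data(name: string, source: string)
	{
	print servers;
	Input::remove("servers");
	terminate();
	}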
+ Input::remove("ssh"); + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/bignumber.bro b/testing/btest/scripts/base/frameworks/input/bignumber.bro deleted file mode 100644 index 15d711b1c4..0000000000 --- a/testing/btest/scripts/base/frameworks/input/bignumber.bro +++ /dev/null @@ -1,42 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -redef exit_only_after_terminate = T; - -@TEST-START-FILE input.log -#separator \x09 -#fields i c -#types int count -9223372036854775800 18446744073709551612 --9223372036854775800 18446744073709551612 -@TEST-END-FILE - -global outfile: file; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - c: count; -}; - -global servers: table[int] of Val = table(); - -event bro_init() - { - outfile = open("../out"); - # first read in the old stuff into the table... - Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); - } - -event Input::end_of_data(name: string, source:string) - { - print outfile, servers; - Input::remove("ssh"); - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/bignumber.zeek b/testing/btest/scripts/base/frameworks/input/bignumber.zeek new file mode 100644 index 0000000000..dd3a483050 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/bignumber.zeek @@ -0,0 +1,42 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +redef exit_only_after_terminate = T; + +@TEST-START-FILE input.log +#separator \x09 +#fields i c +#types int count +9223372036854775800 18446744073709551612 +-9223372036854775800 18446744073709551612 +@TEST-END-FILE + +global outfile: file; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + c: count; +}; + +global servers: table[int] of Val = table(); + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... 
+ Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, servers; + Input::remove("ssh"); + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/binary.bro b/testing/btest/scripts/base/frameworks/input/binary.bro deleted file mode 100644 index 11701fbd8a..0000000000 --- a/testing/btest/scripts/base/frameworks/input/binary.bro +++ /dev/null @@ -1,53 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -redef exit_only_after_terminate = T; - -redef InputAscii::separator = "|"; -redef InputAscii::set_separator = ","; -redef InputAscii::empty_field = "(empty)"; -redef InputAscii::unset_field = "-"; - -@TEST-START-FILE input.log -#separator | -#set_separator|, -#empty_field|(empty) -#unset_field|- -#path|ssh -#open|2012-07-20-01-49-19 -#fields|data|data2 -#types|string|string -abc\x0a\xffdef|DATA2 -abc\x7c\xffdef|DATA2 -abc\xff\x7cdef|DATA2 -#end|2012-07-20-01-49-19 -@TEST-END-FILE - -global outfile: file; -global try: count; - -type Val: record { - data: string; - data2: string; -}; - -event line(description: Input::EventDescription, tpe: Input::Event, a: string, b: string) - { - print outfile, a; - print outfile, b; - try = try + 1; - if ( try == 3 ) - { - Input::remove("input"); - close(outfile); - terminate(); - } - } - -event bro_init() - { - try = 0; - outfile = open("../out"); - Input::add_event([$source="../input.log", $name="input", $fields=Val, $ev=line, $want_record=F]); - } diff --git a/testing/btest/scripts/base/frameworks/input/binary.zeek b/testing/btest/scripts/base/frameworks/input/binary.zeek new file mode 100644 index 0000000000..fa98625997 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/binary.zeek @@ -0,0 +1,53 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +redef exit_only_after_terminate = T; + +redef InputAscii::separator = "|"; +redef InputAscii::set_separator = ","; +redef InputAscii::empty_field = "(empty)"; +redef InputAscii::unset_field = "-"; + +@TEST-START-FILE input.log +#separator | +#set_separator|, +#empty_field|(empty) +#unset_field|- +#path|ssh +#open|2012-07-20-01-49-19 +#fields|data|data2 +#types|string|string +abc\x0a\xffdef|DATA2 +abc\x7c\xffdef|DATA2 +abc\xff\x7cdef|DATA2 +#end|2012-07-20-01-49-19 +@TEST-END-FILE + +global outfile: file; +global try: count; + +type Val: record { + data: string; + data2: string; +}; + +event line(description: Input::EventDescription, tpe: Input::Event, a: string, b: string) + { + print outfile, a; + print outfile, b; + try = try + 1; + if ( try == 3 ) + { + Input::remove("input"); + close(outfile); + terminate(); + } + } + +event zeek_init() + { + try = 0; + outfile = open("../out"); + Input::add_event([$source="../input.log", $name="input", $fields=Val, $ev=line, $want_record=F]); + } diff --git a/testing/btest/scripts/base/frameworks/input/config/basic.bro b/testing/btest/scripts/base/frameworks/input/config/basic.bro deleted file mode 100644 index c8d68fc822..0000000000 --- a/testing/btest/scripts/base/frameworks/input/config/basic.bro +++ /dev/null @@ -1,75 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -redef exit_only_after_terminate = T; -redef InputConfig::empty_field = "EMPTY"; -redef InputConfig::set_separator = "\t"; - 
-@TEST-START-FILE configfile -testbool F -testcount 1 -testcount 2 -testcount 2 -testint -1 -testenum Conn::LOG -testport 45 -testportandproto 45/udp -testaddr 127.0.0.1 -testaddr 2607:f8b0:4005:801::200e -testinterval 60 -testtime 1507321987 -test_set a b c d erdbeerschnitzel -test_vector 1 2 3 4 5 6 -test_set (empty) -test_set EMPTY -test_set - -@TEST-END-FILE - -@load base/protocols/ssh -@load base/protocols/conn - -global outfile: file; - -export { - option testbool: bool = T; - option testcount: count = 0; - option testint: int = 0; - option testenum = SSH::LOG; - option testport = 42/tcp; - option testportandproto = 42/tcp; - option testaddr = 127.0.0.1; - option testtime = network_time(); - option testinterval = 1sec; - option teststring = "a"; - option test_set: set[string] = {}; - option test_vector: vector of count = {}; -} - -type Idx: record { - option_name: string; -}; - -type Val: record { - option_val: string; -}; - -global currconfig: table[string] of string = table(); - -event InputConfig::new_value(name: string, source: string, id: string, value: any) - { - print outfile, id, value; - } - -event Input::end_of_data(name: string, source:string) - { - close(outfile); - terminate(); - } - -event bro_init() - { - outfile = open("../out"); - Input::add_table([$reader=Input::READER_CONFIG, $source="../configfile", $name="configuration", $idx=Idx, $val=Val, $destination=currconfig, $want_record=F]); - } - diff --git a/testing/btest/scripts/base/frameworks/input/config/basic.zeek b/testing/btest/scripts/base/frameworks/input/config/basic.zeek new file mode 100644 index 0000000000..b6f7c2a78a --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/config/basic.zeek @@ -0,0 +1,75 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +redef exit_only_after_terminate = T; +redef InputConfig::empty_field = "EMPTY"; +redef InputConfig::set_separator = "\t"; + +@TEST-START-FILE configfile +testbool F +testcount 1 +testcount 2 +testcount 2 +testint -1 +testenum Conn::LOG +testport 45 +testportandproto 45/udp +testaddr 127.0.0.1 +testaddr 2607:f8b0:4005:801::200e +testinterval 60 +testtime 1507321987 +test_set a b c d erdbeerschnitzel +test_vector 1 2 3 4 5 6 +test_set (empty) +test_set EMPTY +test_set - +@TEST-END-FILE + +@load base/protocols/ssh +@load base/protocols/conn + +global outfile: file; + +export { + option testbool: bool = T; + option testcount: count = 0; + option testint: int = 0; + option testenum = SSH::LOG; + option testport = 42/tcp; + option testportandproto = 42/tcp; + option testaddr = 127.0.0.1; + option testtime = network_time(); + option testinterval = 1sec; + option teststring = "a"; + option test_set: set[string] = {}; + option test_vector: vector of count = {}; +} + +type Idx: record { + option_name: string; +}; + +type Val: record { + option_val: string; +}; + +global currconfig: table[string] of string = table(); + +event InputConfig::new_value(name: string, source: string, id: string, value: any) + { + print outfile, id, value; + } + +event Input::end_of_data(name: string, source:string) + { + close(outfile); + terminate(); + } + +event zeek_init() + { + outfile = open("../out"); + Input::add_table([$reader=Input::READER_CONFIG, $source="../configfile", $name="configuration", $idx=Idx, $val=Val, $destination=currconfig, $want_record=F]); + } + diff --git a/testing/btest/scripts/base/frameworks/input/config/errors.bro b/testing/btest/scripts/base/frameworks/input/config/errors.bro deleted 
file mode 100644 index 4f398956dc..0000000000 --- a/testing/btest/scripts/base/frameworks/input/config/errors.bro +++ /dev/null @@ -1,66 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: tail -n +2 .stderr > errout -# @TEST-EXEC: btest-diff errout - -redef exit_only_after_terminate = T; - -@TEST-START-FILE configfile -testbool A -testtesttesttesttesttest -testbool A B -testcount A -testenum unknown -testbooool T -test_any F -test_table whatever -@TEST-END-FILE - -@load base/protocols/ssh -@load base/protocols/conn - -global outfile: file; - -export { - option testbool: bool = T; - option testcount: count = 0; - option testint: int = 0; - option testenum = SSH::LOG; - option testport = 42/tcp; - option testaddr = 127.0.0.1; - option testtime = network_time(); - option testinterval = 1sec; - option teststring = "a"; - option test_set: set[string] = {}; - option test_vector: vector of count = {}; - option test_any: any = 5; - option test_table: table[string] of string = {}; -} - -type Idx: record { - option_name: string; -}; - -type Val: record { - option_val: string; -}; - -global currconfig: table[string] of string = table(); - -event InputConfig::new_value(name: string, source: string, id: string, value: any) - { - print outfile, id, value; - } - -event Input::end_of_data(name: string, source:string) - { - close(outfile); - terminate(); - } - -event bro_init() - { - outfile = open("../out"); - Input::add_table([$reader=Input::READER_CONFIG, $source="../configfile", $name="configuration", $idx=Idx, $val=Val, $destination=currconfig, $want_record=F]); - } - diff --git a/testing/btest/scripts/base/frameworks/input/config/errors.zeek b/testing/btest/scripts/base/frameworks/input/config/errors.zeek new file mode 100644 index 0000000000..0271dbe711 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/config/errors.zeek @@ -0,0 +1,66 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: tail -n +2 .stderr > errout +# @TEST-EXEC: btest-diff errout + +redef exit_only_after_terminate = T; + +@TEST-START-FILE configfile +testbool A +testtesttesttesttesttest +testbool A B +testcount A +testenum unknown +testbooool T +test_any F +test_table whatever +@TEST-END-FILE + +@load base/protocols/ssh +@load base/protocols/conn + +global outfile: file; + +export { + option testbool: bool = T; + option testcount: count = 0; + option testint: int = 0; + option testenum = SSH::LOG; + option testport = 42/tcp; + option testaddr = 127.0.0.1; + option testtime = network_time(); + option testinterval = 1sec; + option teststring = "a"; + option test_set: set[string] = {}; + option test_vector: vector of count = {}; + option test_any: any = 5; + option test_table: table[string] of string = {}; +} + +type Idx: record { + option_name: string; +}; + +type Val: record { + option_val: string; +}; + +global currconfig: table[string] of string = table(); + +event InputConfig::new_value(name: string, source: string, id: string, value: any) + { + print outfile, id, value; + } + +event Input::end_of_data(name: string, source:string) + { + close(outfile); + terminate(); + } + +event zeek_init() + { + outfile = open("../out"); + Input::add_table([$reader=Input::READER_CONFIG, $source="../configfile", $name="configuration", $idx=Idx, $val=Val, $destination=currconfig, $want_record=F]); + } + diff --git a/testing/btest/scripts/base/frameworks/input/config/spaces.bro b/testing/btest/scripts/base/frameworks/input/config/spaces.bro 
deleted file mode 100644 index 90afa20b13..0000000000 --- a/testing/btest/scripts/base/frameworks/input/config/spaces.bro +++ /dev/null @@ -1,59 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -redef exit_only_after_terminate = T; -redef InputConfig::empty_field = "EMPTY"; - -@TEST-START-FILE configfile -testbool F -testcount 1 -testint -1 -testportandproto 45/udp -testaddr 127.0.0.3 -test_set 127.0.0.1, 127.0.0.2, 127.0.0.3 -test_vector 10.0.0.1/32, 10.0.0.1/16, 10.0.0.1/8 -@TEST-END-FILE - -@load base/protocols/ssh -@load base/protocols/conn - -global outfile: file; - -export { - option testbool: bool = T; - option testcount: count = 0; - option testint: int = 0; - option testportandproto = 42/tcp; - option testaddr = 127.0.0.1; - option test_set: set[addr] = {}; - option test_vector: vector of subnet = {}; -} - -type Idx: record { - option_name: string; -}; - -type Val: record { - option_val: string; -}; - -global currconfig: table[string] of string = table(); - -event InputConfig::new_value(name: string, source: string, id: string, value: any) - { - print outfile, id, value; - } - -event Input::end_of_data(name: string, source:string) - { - close(outfile); - terminate(); - } - -event bro_init() - { - outfile = open("../out"); - Input::add_table([$reader=Input::READER_CONFIG, $source="../configfile", $name="configuration", $idx=Idx, $val=Val, $destination=currconfig, $want_record=F]); - } - diff --git a/testing/btest/scripts/base/frameworks/input/config/spaces.zeek b/testing/btest/scripts/base/frameworks/input/config/spaces.zeek new file mode 100644 index 0000000000..321deb3fa4 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/config/spaces.zeek @@ -0,0 +1,59 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +redef exit_only_after_terminate = T; +redef InputConfig::empty_field = "EMPTY"; + +@TEST-START-FILE configfile +testbool F +testcount 1 +testint -1 +testportandproto 45/udp +testaddr 127.0.0.3 +test_set 127.0.0.1, 127.0.0.2, 127.0.0.3 +test_vector 10.0.0.1/32, 10.0.0.1/16, 10.0.0.1/8 +@TEST-END-FILE + +@load base/protocols/ssh +@load base/protocols/conn + +global outfile: file; + +export { + option testbool: bool = T; + option testcount: count = 0; + option testint: int = 0; + option testportandproto = 42/tcp; + option testaddr = 127.0.0.1; + option test_set: set[addr] = {}; + option test_vector: vector of subnet = {}; +} + +type Idx: record { + option_name: string; +}; + +type Val: record { + option_val: string; +}; + +global currconfig: table[string] of string = table(); + +event InputConfig::new_value(name: string, source: string, id: string, value: any) + { + print outfile, id, value; + } + +event Input::end_of_data(name: string, source:string) + { + close(outfile); + terminate(); + } + +event zeek_init() + { + outfile = open("../out"); + Input::add_table([$reader=Input::READER_CONFIG, $source="../configfile", $name="configuration", $idx=Idx, $val=Val, $destination=currconfig, $want_record=F]); + } + diff --git a/testing/btest/scripts/base/frameworks/input/default.bro b/testing/btest/scripts/base/frameworks/input/default.bro deleted file mode 100644 index c5b0e2f967..0000000000 --- a/testing/btest/scripts/base/frameworks/input/default.bro +++ /dev/null @@ -1,48 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input.log -#separator \x09 -#path ssh 
-#fields i b -#types int bool -1 T -2 T -3 F -4 F -5 F -6 F -7 T -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; - -module A; - -type Val: record { - i: int; - b: bool; - s: string &default="leer"; -}; - -event line(description: Input::EventDescription, tpe: Input::Event, val: Val) - { - print outfile, val; - } - -event bro_init() - { - outfile = open("../out"); - Input::add_event([$source="../input.log", $name="input", $fields=Val, $ev=line, $want_record=T]); - } - -event Input::end_of_data(name: string, source:string) - { - print outfile, "End-of-data"; - Input::remove("input"); - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/default.zeek b/testing/btest/scripts/base/frameworks/input/default.zeek new file mode 100644 index 0000000000..a3e65e74e0 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/default.zeek @@ -0,0 +1,48 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields i b +#types int bool +1 T +2 T +3 F +4 F +5 F +6 F +7 T +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; + +module A; + +type Val: record { + i: int; + b: bool; + s: string &default="leer"; +}; + +event line(description: Input::EventDescription, tpe: Input::Event, val: Val) + { + print outfile, val; + } + +event zeek_init() + { + outfile = open("../out"); + Input::add_event([$source="../input.log", $name="input", $fields=Val, $ev=line, $want_record=T]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, "End-of-data"; + Input::remove("input"); + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/empty-values-hashing.bro b/testing/btest/scripts/base/frameworks/input/empty-values-hashing.bro deleted file mode 100644 index b46c299c2c..0000000000 --- a/testing/btest/scripts/base/frameworks/input/empty-values-hashing.bro +++ /dev/null @@ -1,87 +0,0 @@ -# @TEST-EXEC: mv input1.log input.log -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got1 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: mv input2.log input.log -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input1.log -#separator \x09 -#fields i s ss -#types int sting string -1 - TEST -2 - - -@TEST-END-FILE -@TEST-START-FILE input2.log -#separator \x09 -#fields i s ss -#types int sting string -1 TEST - -2 TEST TEST -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - s: string; - ss: string; -}; - -global servers: table[int] of Val = table(); - -global outfile: file; - -global try: count; - -event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) - { - print outfile, "============EVENT============"; - print outfile, "Description"; - print outfile, description; - print outfile, "Type"; - print outfile, tpe; - print outfile, "Left"; - print outfile, left; - print outfile, "Right"; - print outfile, right; - } - -event bro_init() - { - outfile = open("../out"); - try = 0; - # first read in the old stuff into the table... 
- Input::add_table([$source="../input.log", $mode=Input::REREAD, $name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line, - $pred(typ: Input::Event, left: Idx, right: Val) = { - print outfile, "============PREDICATE============"; - print outfile, typ; - print outfile, left; - print outfile, right; - return T; - } - ]); - } - - -event Input::end_of_data(name: string, source: string) - { - print outfile, "==========SERVERS============"; - print outfile, servers; - - try = try + 1; - if ( try == 1 ) - system("touch got1"); - else if ( try == 2 ) - { - print outfile, "done"; - close(outfile); - Input::remove("input"); - terminate(); - } - } diff --git a/testing/btest/scripts/base/frameworks/input/empty-values-hashing.zeek b/testing/btest/scripts/base/frameworks/input/empty-values-hashing.zeek new file mode 100644 index 0000000000..810aa96c6a --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/empty-values-hashing.zeek @@ -0,0 +1,87 @@ +# @TEST-EXEC: mv input1.log input.log +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got1 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: mv input2.log input.log +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input1.log +#separator \x09 +#fields i s ss +#types int sting string +1 - TEST +2 - - +@TEST-END-FILE +@TEST-START-FILE input2.log +#separator \x09 +#fields i s ss +#types int sting string +1 TEST - +2 TEST TEST +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + s: string; + ss: string; +}; + +global servers: table[int] of Val = table(); + +global outfile: file; + +global try: count; + +event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) + { + print outfile, "============EVENT============"; + print outfile, "Description"; + print outfile, description; + print outfile, "Type"; + print outfile, tpe; + print outfile, "Left"; + print outfile, left; + print outfile, "Right"; + print outfile, right; + } + +event zeek_init() + { + outfile = open("../out"); + try = 0; + # first read in the old stuff into the table... 
+ Input::add_table([$source="../input.log", $mode=Input::REREAD, $name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line, + $pred(typ: Input::Event, left: Idx, right: Val) = { + print outfile, "============PREDICATE============"; + print outfile, typ; + print outfile, left; + print outfile, right; + return T; + } + ]); + } + + +event Input::end_of_data(name: string, source: string) + { + print outfile, "==========SERVERS============"; + print outfile, servers; + + try = try + 1; + if ( try == 1 ) + system("touch got1"); + else if ( try == 2 ) + { + print outfile, "done"; + close(outfile); + Input::remove("input"); + terminate(); + } + } diff --git a/testing/btest/scripts/base/frameworks/input/emptyvals.bro b/testing/btest/scripts/base/frameworks/input/emptyvals.bro deleted file mode 100644 index 57e79dd977..0000000000 --- a/testing/btest/scripts/base/frameworks/input/emptyvals.bro +++ /dev/null @@ -1,45 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields b i -##types bool int -T 1 -- 2 -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - b: bool; -}; - -global servers: table[int] of Val = table(); - -event bro_init() - { - outfile = open("../out"); - # first read in the old stuff into the table... - Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); - } - -event Input::end_of_data(name: string, source:string) - { - print outfile, servers; - Input::remove("ssh"); - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/emptyvals.zeek b/testing/btest/scripts/base/frameworks/input/emptyvals.zeek new file mode 100644 index 0000000000..b495832d6d --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/emptyvals.zeek @@ -0,0 +1,45 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields b i +##types bool int +T 1 +- 2 +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; +}; + +global servers: table[int] of Val = table(); + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... 
+ Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, servers; + Input::remove("ssh"); + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/errors.bro b/testing/btest/scripts/base/frameworks/input/errors.bro deleted file mode 100644 index 0d0376694a..0000000000 --- a/testing/btest/scripts/base/frameworks/input/errors.bro +++ /dev/null @@ -1,192 +0,0 @@ -# Test different kinds of errors of the input framework -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff .stderr -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc ss se vc ve ns -#types bool int enum count port subnet addr double time interval string table table table vector vector string -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY 4242 -@TEST-END-FILE - -redef Input::accept_unsupported_types = T; - -redef exit_only_after_terminate = T; - -module Test; - -global outfile: file; - -type Idx: record { - c: count; -}; - -type Idx2: record { - c: count; - i: int; -}; - -type FileVal: record { - i: int; - s: file; -}; - -type Val: record { - i: int; - s: string; - a: addr; -}; - -type OptionalRecordVal: record { - i: int; - r: FileVal &optional; -}; - -type OptionalFileVal: record { - i: int; - s: file &optional; -}; - -global file_table: table[count] of FileVal = table(); -global optional_file_table: table[count] of OptionalFileVal = table(); -global record_table: table[count] of OptionalRecordVal = table(); -global string_table: table[string] of OptionalRecordVal = table(); - -global val_table: table[count] of Val = table(); -global val_table2: table[count, int] of Val = table(); -global val_table3: table[count, int] of int = table(); -global val_table4: table[count] of int; - -event line_file(description: Input::EventDescription, tpe: Input::Event, r:FileVal) - { - print outfile, description$name; - print outfile, r; - } - -event optional_line_file(description: Input::EventDescription, tpe: Input::Event, r:OptionalFileVal) - { - print outfile, description$name; - print outfile, r; - } - -event line_record(description: Input::EventDescription, tpe: Input::Event, r: OptionalRecordVal) - { - print outfile, description$name; - print outfile, r; - } - -event event1(description: Input::EventDescription, tpe: Input::Event, r: OptionalRecordVal, r2: OptionalRecordVal) - { - } - -event event2(description: Input::TableDescription, tpe: string, r: OptionalRecordVal, r2: OptionalRecordVal) - { - } - -event event3(description: Input::TableDescription, tpe: Input::Event, r: OptionalRecordVal, r2: OptionalRecordVal) - { - } - -event event4(description: Input::TableDescription, tpe: Input::Event, r: Idx, r2: OptionalRecordVal) - { - } - -event event5(description: Input::EventDescription, tpe: string, r: OptionalRecordVal, r2: OptionalRecordVal) - { - } - -event event6(description: Input::EventDescription, tpe: Input::Event, r: OptionalRecordVal) - { - } - -event event7(description: Input::EventDescription, tpe: Input::Event, r: OptionalRecordVal, r2:OptionalRecordVal) - { - } - -event event8(description: Input::EventDescription, tpe: Input::Event, i: int, s:string, a:string) - { - } - -event event9(description: Input::EventDescription, tpe: Input::Event, i: int, s:string, a:addr, ii: int) - { - } - -event event10(description: 
Input::TableDescription, tpe: Input::Event, i: Idx, c: count) - { - } - -# these are legit to test the error events -event event11(description: Input::EventDescription, tpe: Input::Event, v: Val) - { - } - -event errorhandler1(desc: Input::TableDescription, msg: string, level: Reporter::Level) - { - } - -event errorhandler2(desc: Input::EventDescription, msg: string, level: Reporter::Level) - { - } - -event errorhandler3(desc: string, msg: string, level: Reporter::Level) - { - } - -event errorhandler4(desc: Input::EventDescription, msg: count, level: Reporter::Level) - { - } - -event errorhandler5(desc: Input::EventDescription, msg: string, level: count) - { - } - -event kill_me() - { - terminate(); - } - -event bro_init() - { - outfile = open("out"); - Input::add_event([$source="input.log", $name="file", $fields=FileVal, $ev=line_file, $want_record=T]); - Input::add_event([$source="input.log", $name="optionalrecord", $fields=OptionalRecordVal, $ev=line_record, $want_record=T]); - Input::add_event([$source="input.log", $name="optionalfile", $fields=OptionalFileVal, $ev=optional_line_file, $want_record=T]); - Input::add_table([$source="input.log", $name="filetable", $idx=Idx, $val=FileVal, $destination=file_table]); - Input::add_table([$source="input.log", $name="optionalrecordtable", $idx=Idx, $val=OptionalRecordVal, $destination=record_table]); - Input::add_table([$source="input.log", $name="optionalfiletable", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table]); - Input::add_table([$source="input.log", $name="optionalfiletable", $idx=Idx, $val=OptionalFileVal, $destination=record_table]); - Input::add_table([$source="input.log", $name="optionalfiletable2", $idx=Idx, $val=OptionalFileVal, $destination=string_table]); - Input::add_table([$source="input.log", $name="optionalfiletable3", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=terminate]); - Input::add_table([$source="input.log", $name="optionalfiletable3", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=kill_me]); - Input::add_table([$source="input.log", $name="optionalfiletable4", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event1]); - Input::add_table([$source="input.log", $name="optionalfiletable5", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event2]); - Input::add_table([$source="input.log", $name="optionalfiletable6", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event3]); - Input::add_table([$source="input.log", $name="optionalfiletable7", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event4]); - Input::add_table([$source="input.log", $name="optionalfiletable8", $idx=Idx, $val=Val, $destination=val_table4, $want_record=F]); - Input::add_table([$source="input.log", $name="optionalfiletable9", $idx=Idx2, $val=Val, $destination=val_table, $want_record=F]); - Input::add_table([$source="input.log", $name="optionalfiletable10", $idx=Idx, $val=Val, $destination=val_table2, $want_record=F]); - Input::add_table([$source="input.log", $name="optionalfiletable11", $idx=Idx2, $val=Idx, $destination=val_table3, $want_record=F]); - Input::add_table([$source="input.log", $name="optionalfiletable12", $idx=Idx2, $val=Idx, $destination=val_table2, $want_record=F]); - Input::add_table([$source="input.log", $name="optionalfiletable14", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event10, $want_record=F]); - Input::add_table([$source="input.log", 
$name="optionalfiletable15", $idx=Idx2, $val=Idx, $destination=val_table2, $want_record=T]); - Input::add_event([$source="input.log", $name="event1", $fields=OptionalFileVal, $ev=terminate, $want_record=T]); - Input::add_event([$source="input.log", $name="event2", $fields=OptionalFileVal, $ev=kill_me, $want_record=T]); - Input::add_event([$source="input.log", $name="event3", $fields=OptionalFileVal, $ev=event3, $want_record=T]); - Input::add_event([$source="input.log", $name="event4", $fields=OptionalFileVal, $ev=event5, $want_record=T]); - Input::add_event([$source="input.log", $name="event5", $fields=OptionalFileVal, $ev=event6, $want_record=T]); - Input::add_event([$source="input.log", $name="event6", $fields=OptionalFileVal, $ev=event7, $want_record=T]); - Input::add_event([$source="input.log", $name="event7", $fields=OptionalFileVal, $ev=event7, $want_record=F]); - Input::add_event([$source="input.log", $name="event8", $fields=Val, $ev=event8, $want_record=F]); - Input::add_event([$source="input.log", $name="event9", $fields=Val, $ev=event9, $want_record=F]); - - Input::add_event([$source="input.log", $name="error1", $fields=Val, $ev=event11, $want_record=T, $error_ev=errorhandler1]); - Input::add_table([$source="input.log", $name="error2", $idx=Idx, $val=Val, $destination=val_table, $error_ev=errorhandler2]); - Input::add_event([$source="input.log", $name="error3", $fields=Val, $ev=event11, $want_record=T, $error_ev=errorhandler3]); - Input::add_event([$source="input.log", $name="error4", $fields=Val, $ev=event11, $want_record=T, $error_ev=errorhandler4]); - Input::add_event([$source="input.log", $name="error5", $fields=Val, $ev=event11, $want_record=T, $error_ev=errorhandler5]); - - schedule 3secs { kill_me() }; - } diff --git a/testing/btest/scripts/base/frameworks/input/errors.zeek b/testing/btest/scripts/base/frameworks/input/errors.zeek new file mode 100644 index 0000000000..0bd80f70e3 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/errors.zeek @@ -0,0 +1,194 @@ +# Test different kinds of errors of the input framework +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff .stderr +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve ns +#types bool int enum count port subnet addr double time interval string table table table vector vector string +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY 4242 +@TEST-END-FILE + +redef Input::accept_unsupported_types = T; + +redef exit_only_after_terminate = T; + +module Test; + +global outfile: file; + +type Idx: record { + c: count; +}; + +type Idx2: record { + c: count; + i: int; +}; + +type FileVal: record { + i: int; + s: file; +}; + +type Val: record { + i: int; + s: string; + a: addr; +}; + +type OptionalRecordVal: record { + i: int; + r: FileVal &optional; +}; + +type OptionalFileVal: record { + i: int; + s: file &optional; +}; + +global file_table: table[count] of FileVal = table(); +global optional_file_table: table[count] of OptionalFileVal = table(); +global record_table: table[count] of OptionalRecordVal = table(); +global string_table: table[string] of OptionalRecordVal = table(); + +global val_table: table[count] of Val = table(); +global val_table2: table[count, int] of Val = table(); +global val_table3: table[count, int] of int = table(); +global val_table4: table[count] of int; + +event line_file(description: Input::EventDescription, tpe: Input::Event, 
r:FileVal) + { + print outfile, description$name; + print outfile, r; + } + +event optional_line_file(description: Input::EventDescription, tpe: Input::Event, r:OptionalFileVal) + { + print outfile, description$name; + print outfile, r; + } + +event line_record(description: Input::EventDescription, tpe: Input::Event, r: OptionalRecordVal) + { + print outfile, description$name; + print outfile, r; + } + +event event1(description: Input::EventDescription, tpe: Input::Event, r: OptionalRecordVal, r2: OptionalRecordVal) + { + } + +event event2(description: Input::TableDescription, tpe: string, r: OptionalRecordVal, r2: OptionalRecordVal) + { + } + +event event3(description: Input::TableDescription, tpe: Input::Event, r: OptionalRecordVal, r2: OptionalRecordVal) + { + } + +event event4(description: Input::TableDescription, tpe: Input::Event, r: Idx, r2: OptionalRecordVal) + { + } + +event event5(description: Input::EventDescription, tpe: string, r: OptionalRecordVal, r2: OptionalRecordVal) + { + } + +event event6(description: Input::EventDescription, tpe: Input::Event, r: OptionalRecordVal) + { + } + +event event7(description: Input::EventDescription, tpe: Input::Event, r: OptionalRecordVal, r2:OptionalRecordVal) + { + } + +event event8(description: Input::EventDescription, tpe: Input::Event, i: int, s:string, a:string) + { + } + +event event9(description: Input::EventDescription, tpe: Input::Event, i: int, s:string, a:addr, ii: int) + { + } + +event event10(description: Input::TableDescription, tpe: Input::Event, i: Idx, c: count) + { + } + +# these are legit to test the error events +event event11(description: Input::EventDescription, tpe: Input::Event, v: Val) + { + } + +event errorhandler1(desc: Input::TableDescription, msg: string, level: Reporter::Level) + { + } + +event errorhandler2(desc: Input::EventDescription, msg: string, level: Reporter::Level) + { + } + +event errorhandler3(desc: string, msg: string, level: Reporter::Level) + { + } + +event errorhandler4(desc: Input::EventDescription, msg: count, level: Reporter::Level) + { + } + +event errorhandler5(desc: Input::EventDescription, msg: string, level: count) + { + } + +event kill_me() + { + terminate(); + } + +event zeek_init() + { + outfile = open("out"); + Input::add_event([$source="input.log", $name="file", $fields=FileVal, $ev=line_file, $want_record=T]); + Input::add_event([$source="input.log", $name="optionalrecord", $fields=OptionalRecordVal, $ev=line_record, $want_record=T]); + Input::add_event([$source="input.log", $name="optionalfile", $fields=OptionalFileVal, $ev=optional_line_file, $want_record=T]); + Input::add_table([$source="input.log", $name="filetable", $idx=Idx, $val=FileVal, $destination=file_table]); + Input::add_table([$source="input.log", $name="optionalrecordtable", $idx=Idx, $val=OptionalRecordVal, $destination=record_table]); + Input::add_table([$source="input.log", $name="optionalfiletable", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table]); + Input::add_table([$source="input.log", $name="optionalfiletable", $idx=Idx, $val=OptionalFileVal, $destination=record_table]); + Input::add_table([$source="input.log", $name="optionalfiletable2", $idx=Idx, $val=OptionalFileVal, $destination=string_table]); + Input::add_table([$source="input.log", $name="optionalfiletable3", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=terminate]); + Input::add_table([$source="input.log", $name="optionalfiletable3", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, 
$ev=kill_me]); + Input::add_table([$source="input.log", $name="optionalfiletable4", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event1]); + Input::add_table([$source="input.log", $name="optionalfiletable5", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event2]); + Input::add_table([$source="input.log", $name="optionalfiletable6", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event3]); + Input::add_table([$source="input.log", $name="optionalfiletable7", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event4]); + Input::add_table([$source="input.log", $name="optionalfiletable8", $idx=Idx, $val=Val, $destination=val_table4, $want_record=F]); + Input::add_table([$source="input.log", $name="optionalfiletable9", $idx=Idx2, $val=Val, $destination=val_table, $want_record=F]); + Input::add_table([$source="input.log", $name="optionalfiletable10", $idx=Idx, $val=Val, $destination=val_table2, $want_record=F]); + Input::add_table([$source="input.log", $name="optionalfiletable11", $idx=Idx2, $val=Idx, $destination=val_table3, $want_record=F]); + Input::add_table([$source="input.log", $name="optionalfiletable12", $idx=Idx2, $val=Idx, $destination=val_table2, $want_record=F]); + Input::add_table([$source="input.log", $name="optionalfiletable14", $idx=Idx, $val=OptionalFileVal, $destination=optional_file_table, $ev=event10, $want_record=F]); + Input::add_table([$source="input.log", $name="optionalfiletable15", $idx=Idx2, $val=Idx, $destination=val_table2, $want_record=T]); + Input::add_event([$source="input.log", $name="event1", $fields=OptionalFileVal, $ev=terminate, $want_record=T]); + Input::add_event([$source="input.log", $name="event2", $fields=OptionalFileVal, $ev=kill_me, $want_record=T]); + Input::add_event([$source="input.log", $name="event3", $fields=OptionalFileVal, $ev=event3, $want_record=T]); + Input::add_event([$source="input.log", $name="event4", $fields=OptionalFileVal, $ev=event5, $want_record=T]); + Input::add_event([$source="input.log", $name="event5", $fields=OptionalFileVal, $ev=event6, $want_record=T]); + Input::add_event([$source="input.log", $name="event6", $fields=OptionalFileVal, $ev=event7, $want_record=T]); + Input::add_event([$source="input.log", $name="event7", $fields=OptionalFileVal, $ev=event7, $want_record=F]); + Input::add_event([$source="input.log", $name="event8", $fields=Val, $ev=event8, $want_record=F]); + Input::add_event([$source="input.log", $name="event9", $fields=Val, $ev=event9, $want_record=F]); + + Input::add_event([$source="input.log", $name="error1", $fields=Val, $ev=event11, $want_record=T, $error_ev=errorhandler1]); + Input::add_table([$source="input.log", $name="error2", $idx=Idx, $val=Val, $destination=val_table, $error_ev=errorhandler2]); + Input::add_event([$source="input.log", $name="error3", $fields=Val, $ev=event11, $want_record=T, $error_ev=errorhandler3]); + Input::add_event([$source="input.log", $name="error4", $fields=Val, $ev=event11, $want_record=T, $error_ev=errorhandler4]); + Input::add_event([$source="input.log", $name="error5", $fields=Val, $ev=event11, $want_record=T, $error_ev=errorhandler5]); + + Input::add_table([$source="input.log", $name="error6", $idx=Idx, $destination=val_table]); + + schedule 3secs { kill_me() }; + } diff --git a/testing/btest/scripts/base/frameworks/input/event.bro b/testing/btest/scripts/base/frameworks/input/event.bro deleted file mode 100644 index 6b6a391939..0000000000 --- 
a/testing/btest/scripts/base/frameworks/input/event.bro +++ /dev/null @@ -1,50 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields i b -#types int bool -1 T -2 T -3 F -4 F -5 F -6 F -7 T -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; - -module A; - -type Val: record { - i: int; - b: bool; -}; - -event line(description: Input::EventDescription, tpe: Input::Event, i: int, b: bool) - { - print outfile, description; - print outfile, tpe; - print outfile, i; - print outfile, b; - } - -event bro_init() - { - outfile = open("../out"); - Input::add_event([$source="../input.log", $name="input", $fields=Val, $ev=line, $want_record=F]); - } - -event Input::end_of_data(name: string, source:string) - { - print outfile, "End-of-data"; - Input::remove("input"); - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/event.zeek b/testing/btest/scripts/base/frameworks/input/event.zeek new file mode 100644 index 0000000000..f23d9cf52d --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/event.zeek @@ -0,0 +1,50 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields i b +#types int bool +1 T +2 T +3 F +4 F +5 F +6 F +7 T +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; + +module A; + +type Val: record { + i: int; + b: bool; +}; + +event line(description: Input::EventDescription, tpe: Input::Event, i: int, b: bool) + { + print outfile, description; + print outfile, tpe; + print outfile, i; + print outfile, b; + } + +event zeek_init() + { + outfile = open("../out"); + Input::add_event([$source="../input.log", $name="input", $fields=Val, $ev=line, $want_record=F]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, "End-of-data"; + Input::remove("input"); + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/invalid-lines.bro b/testing/btest/scripts/base/frameworks/input/invalid-lines.bro deleted file mode 100644 index 83be1efd09..0000000000 --- a/testing/btest/scripts/base/frameworks/input/invalid-lines.bro +++ /dev/null @@ -1,67 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -redef exit_only_after_terminate = T; -redef InputAscii::fail_on_invalid_lines = F; - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc ss se vc ve ns -#types bool int enum count port subnet addr double time interval string table table table vector vector string -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY 4242 -T -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY 4242 HOHOHO -T -41 -@TEST-END-FILE - -@load base/protocols/ssh - -global outfile: file; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - b: bool; - e: Log::ID; - c: count; - p: port; - sn: subnet; - a: addr; - d: double; - t: time; - iv: interval; - s: string; - ns: string; - sc: set[count]; - ss: set[string]; - se: 
set[string]; - vc: vector of int; - ve: vector of int; -}; - -global servers: table[int] of Val = table(); -global servers2: table[int] of Val = table(); - -event bro_init() - { - outfile = open("../out"); - # first read in the old stuff into the table... - Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); - Input::add_table([$source="../input.log", $name="ssh2", $idx=Idx, $val=Val, $destination=servers2, $config=table(["fail_on_invalid_lines"] = "T")]); - } - -event Input::end_of_data(name: string, source:string) - { - print outfile, servers; - Input::remove("ssh"); - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/invalid-lines.zeek b/testing/btest/scripts/base/frameworks/input/invalid-lines.zeek new file mode 100644 index 0000000000..86ace59204 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/invalid-lines.zeek @@ -0,0 +1,67 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +redef exit_only_after_terminate = T; +redef InputAscii::fail_on_invalid_lines = F; + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve ns +#types bool int enum count port subnet addr double time interval string table table table vector vector string +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY 4242 +T -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY 4242 HOHOHO +T -41 +@TEST-END-FILE + +@load base/protocols/ssh + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + ns: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of int; + ve: vector of int; +}; + +global servers: table[int] of Val = table(); +global servers2: table[int] of Val = table(); + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... 
+ Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + Input::add_table([$source="../input.log", $name="ssh2", $idx=Idx, $val=Val, $destination=servers2, $config=table(["fail_on_invalid_lines"] = "T")]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, servers; + Input::remove("ssh"); + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/invalidnumbers.bro b/testing/btest/scripts/base/frameworks/input/invalidnumbers.bro deleted file mode 100644 index f2fefaa5d0..0000000000 --- a/testing/btest/scripts/base/frameworks/input/invalidnumbers.bro +++ /dev/null @@ -1,45 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out -# @TEST-EXEC: sed 1d .stderr > .stderrwithoutfirstline -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderrwithoutfirstline - -@TEST-START-FILE input.log -#separator \x09 -#fields i c -#types int count -12129223372036854775800 121218446744073709551612 -9223372036854775801TEXTHERE 1Justtext -Justtext 1 -9223372036854775800 -18446744073709551612 -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - c: count; -}; - -global servers: table[int] of Val = table(); - -event bro_init() - { - outfile = open("../out"); - # first read in the old stuff into the table... - Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); - } - -event Input::end_of_data(name: string, source:string) - { - print outfile, servers; - Input::remove("ssh"); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/invalidnumbers.zeek b/testing/btest/scripts/base/frameworks/input/invalidnumbers.zeek new file mode 100644 index 0000000000..16a3cda1de --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/invalidnumbers.zeek @@ -0,0 +1,45 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: sed 1d .stderr > .stderrwithoutfirstline +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderrwithoutfirstline + +@TEST-START-FILE input.log +#separator \x09 +#fields i c +#types int count +12129223372036854775800 121218446744073709551612 +9223372036854775801TEXTHERE 1Justtext +Justtext 1 +9223372036854775800 -18446744073709551612 +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + c: count; +}; + +global servers: table[int] of Val = table(); + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... 
+ Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, servers; + Input::remove("ssh"); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/invalidset.bro b/testing/btest/scripts/base/frameworks/input/invalidset.bro deleted file mode 100644 index 932060424e..0000000000 --- a/testing/btest/scripts/base/frameworks/input/invalidset.bro +++ /dev/null @@ -1,65 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out -# @TEST-EXEC: sed 1d .stderr > .stderrwithoutfirstline -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderrwithoutfirstline - -@TEST-START-FILE input.log -#separator \x09 -#fields i s -name - -name 127.0.0.1 -@TEST-END-FILE - -redef exit_only_after_terminate = T; -redef InputAscii::fail_on_invalid_lines = T; - -global outfile: file; - -module A; - -type Idx: record { - i: string; -}; - -type Val: record { - s: set[subnet]; -}; - -global endcount: count = 0; - -global servers: table[string] of Val = table(); - -event handle_our_errors(desc: Input::TableDescription, msg: string, level: Reporter::Level) - { - print outfile, "TableErrorEvent", msg, level; - } - -event handle_our_errors_event(desc: Input::EventDescription, msg: string, level: Reporter::Level) - { - print outfile, "EventErrorEvent", msg, level; - } - -event line(description: Input::EventDescription, tpe: Input::Event, v: Val) - { - print outfile, "Event", v; - } - -event bro_init() - { - outfile = open("../out"); - # first read in the old stuff into the table... - Input::add_table([$source="../input.log", $name="ssh", $error_ev=handle_our_errors, $idx=Idx, $val=Val, $destination=servers]); - Input::add_event([$source="../input.log", $name="sshevent", $error_ev=handle_our_errors_event, $fields=Val, $want_record=T, $ev=line]); - } - -event Input::end_of_data(name: string, source:string) - { - ++endcount; - - if ( endcount == 2 ) - { - print outfile, servers; - terminate(); - } - } diff --git a/testing/btest/scripts/base/frameworks/input/invalidset.zeek b/testing/btest/scripts/base/frameworks/input/invalidset.zeek new file mode 100644 index 0000000000..67aff58254 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/invalidset.zeek @@ -0,0 +1,65 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out +# @TEST-EXEC: sed 1d .stderr > .stderrwithoutfirstline +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderrwithoutfirstline + +@TEST-START-FILE input.log +#separator \x09 +#fields i s +name - +name 127.0.0.1 +@TEST-END-FILE + +redef exit_only_after_terminate = T; +redef InputAscii::fail_on_invalid_lines = T; + +global outfile: file; + +module A; + +type Idx: record { + i: string; +}; + +type Val: record { + s: set[subnet]; +}; + +global endcount: count = 0; + +global servers: table[string] of Val = table(); + +event handle_our_errors(desc: Input::TableDescription, msg: string, level: Reporter::Level) + { + print outfile, "TableErrorEvent", msg, level; + } + +event handle_our_errors_event(desc: Input::EventDescription, msg: string, level: Reporter::Level) + { + print outfile, "EventErrorEvent", msg, level; + } + +event line(description: Input::EventDescription, tpe: Input::Event, v: Val) + { + print outfile, 
"Event", v; + } + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="ssh", $error_ev=handle_our_errors, $idx=Idx, $val=Val, $destination=servers]); + Input::add_event([$source="../input.log", $name="sshevent", $error_ev=handle_our_errors_event, $fields=Val, $want_record=T, $ev=line]); + } + +event Input::end_of_data(name: string, source:string) + { + ++endcount; + + if ( endcount == 2 ) + { + print outfile, servers; + terminate(); + } + } diff --git a/testing/btest/scripts/base/frameworks/input/invalidtext.bro b/testing/btest/scripts/base/frameworks/input/invalidtext.bro deleted file mode 100644 index 3f5b590dec..0000000000 --- a/testing/btest/scripts/base/frameworks/input/invalidtext.bro +++ /dev/null @@ -1,66 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out -# @TEST-EXEC: sed 1d .stderr > .stderrwithoutfirstline -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderrwithoutfirstline - -@TEST-START-FILE input.log -#separator \x09 -#fields i c -#types int count - l - 5 -@TEST-END-FILE - -redef exit_only_after_terminate = T; -redef InputAscii::fail_on_invalid_lines = T; - -global outfile: file; - -module A; - -type Idx: record { - i: string; -}; - -type Val: record { - c: count; -}; - -global endcount: count = 0; - -global servers: table[string] of Val = table(); - -event handle_our_errors(desc: Input::TableDescription, msg: string, level: Reporter::Level) - { - print outfile, "TableErrorEvent", msg, level; - } - -event handle_our_errors_event(desc: Input::EventDescription, msg: string, level: Reporter::Level) - { - print outfile, "EventErrorEvent", msg, level; - } - -event line(description: Input::EventDescription, tpe: Input::Event, v: Val) - { - print outfile, "Event", v; - } - -event bro_init() - { - outfile = open("../out"); - # first read in the old stuff into the table... 
- Input::add_table([$source="../input.log", $name="ssh", $error_ev=handle_our_errors, $idx=Idx, $val=Val, $destination=servers]); - Input::add_event([$source="../input.log", $name="sshevent", $error_ev=handle_our_errors_event, $fields=Val, $want_record=T, $ev=line]); - } - -event Input::end_of_data(name: string, source:string) - { - ++endcount; - - if ( endcount == 2 ) - { - print outfile, servers; - terminate(); - } - } diff --git a/testing/btest/scripts/base/frameworks/input/invalidtext.zeek b/testing/btest/scripts/base/frameworks/input/invalidtext.zeek new file mode 100644 index 0000000000..2c2809861a --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/invalidtext.zeek @@ -0,0 +1,66 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out +# @TEST-EXEC: sed 1d .stderr > .stderrwithoutfirstline +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderrwithoutfirstline + +@TEST-START-FILE input.log +#separator \x09 +#fields i c +#types int count + l + 5 +@TEST-END-FILE + +redef exit_only_after_terminate = T; +redef InputAscii::fail_on_invalid_lines = T; + +global outfile: file; + +module A; + +type Idx: record { + i: string; +}; + +type Val: record { + c: count; +}; + +global endcount: count = 0; + +global servers: table[string] of Val = table(); + +event handle_our_errors(desc: Input::TableDescription, msg: string, level: Reporter::Level) + { + print outfile, "TableErrorEvent", msg, level; + } + +event handle_our_errors_event(desc: Input::EventDescription, msg: string, level: Reporter::Level) + { + print outfile, "EventErrorEvent", msg, level; + } + +event line(description: Input::EventDescription, tpe: Input::Event, v: Val) + { + print outfile, "Event", v; + } + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="ssh", $error_ev=handle_our_errors, $idx=Idx, $val=Val, $destination=servers]); + Input::add_event([$source="../input.log", $name="sshevent", $error_ev=handle_our_errors_event, $fields=Val, $want_record=T, $ev=line]); + } + +event Input::end_of_data(name: string, source:string) + { + ++endcount; + + if ( endcount == 2 ) + { + print outfile, servers; + terminate(); + } + } diff --git a/testing/btest/scripts/base/frameworks/input/missing-enum.bro b/testing/btest/scripts/base/frameworks/input/missing-enum.bro deleted file mode 100644 index 0d37aae453..0000000000 --- a/testing/btest/scripts/base/frameworks/input/missing-enum.bro +++ /dev/null @@ -1,37 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff bro/.stderr -# @TEST-EXEC: btest-diff bro/.stdout - -@TEST-START-FILE input.log -#fields e i -IdoNot::Exist 1 -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - e: Log::ID; -}; - -global etable: table[int] of Log::ID = table(); - -event bro_init() - { - # first read in the old stuff into the table... 
- Input::add_table([$source="../input.log", $name="enum", $idx=Idx, $val=Val, $destination=etable, $want_record=F]); - } - -event Input::end_of_data(name: string, source:string) - { - print "Table:"; - print etable; - Input::remove("enum"); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/missing-enum.zeek b/testing/btest/scripts/base/frameworks/input/missing-enum.zeek new file mode 100644 index 0000000000..9c5850cfac --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/missing-enum.zeek @@ -0,0 +1,37 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff zeek/.stderr +# @TEST-EXEC: btest-diff zeek/.stdout + +@TEST-START-FILE input.log +#fields e i +IdoNot::Exist 1 +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + e: Log::ID; +}; + +global etable: table[int] of Log::ID = table(); + +event zeek_init() + { + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="enum", $idx=Idx, $val=Val, $destination=etable, $want_record=F]); + } + +event Input::end_of_data(name: string, source:string) + { + print "Table:"; + print etable; + Input::remove("enum"); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/missing-file-initially.bro b/testing/btest/scripts/base/frameworks/input/missing-file-initially.bro deleted file mode 100644 index 7c9f51994c..0000000000 --- a/testing/btest/scripts/base/frameworks/input/missing-file-initially.bro +++ /dev/null @@ -1,61 +0,0 @@ -# This tests files that don't exist initially and then do later during -# runtime to make sure the ascii reader is resilient to files missing. -# It does a second test at the same time which configures the old -# failing behavior. - -# @TEST-EXEC: btest-bg-run bro bro %INPUT -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/init 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: mv does-exist.dat does-not-exist.dat -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/next 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: mv does-not-exist.dat does-not-exist-again.dat -# @TEST-EXEC: echo "3 streaming still works" >> does-not-exist-again.dat -# @TEST-EXEC: btest-bg-wait 5 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff bro/.stdout -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff bro/.stderr - -@TEST-START-FILE does-exist.dat -#separator \x09 -#fields line -#types string -1 now it does -2 and more! 
-@TEST-END-FILE - -redef exit_only_after_terminate = T; - -@load base/frameworks/input - -module A; - -type Val: record { - line: string; -}; - -global line_count = 0; - -event line(description: Input::EventDescription, tpe: Input::Event, v: Val) - { - print fmt("%s: %s", description$name, v$line); - ++line_count; - - if ( line_count == 4 ) - system("touch next"); - if ( line_count == 5 ) - terminate(); - } - -event line2(description: Input::EventDescription, tpe: Input::Event, v: Val) - { - print "DONT PRINT THIS LINE"; - } - - -event bro_init() - { - Input::add_event([$source="../does-not-exist.dat", $name="input", $reader=Input::READER_ASCII, $mode=Input::REREAD, $fields=Val, $ev=line, $want_record=T]); - Input::add_event([$source="../does-not-exist.dat", $name="inputstream", $reader=Input::READER_ASCII, $mode=Input::STREAM, $fields=Val, $ev=line, $want_record=T]); - Input::add_event([$source="../does-not-exist.dat", $name="inputmanual", $reader=Input::READER_ASCII, $mode=Input::MANUAL, $fields=Val, $ev=line, $want_record=T]); - Input::add_event([$source="../does-not-exist.dat", $name="input2", $reader=Input::READER_ASCII, $mode=Input::REREAD, $fields=Val, $ev=line2, $want_record=T, - $config=table(["fail_on_file_problem"] = "T")]); - system("touch init"); - } diff --git a/testing/btest/scripts/base/frameworks/input/missing-file-initially.zeek b/testing/btest/scripts/base/frameworks/input/missing-file-initially.zeek new file mode 100644 index 0000000000..5d87c6d786 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/missing-file-initially.zeek @@ -0,0 +1,61 @@ +# This tests files that don't exist initially and then do later during +# runtime to make sure the ascii reader is resilient to files missing. +# It does a second test at the same time which configures the old +# failing behavior. + +# @TEST-EXEC: btest-bg-run zeek zeek %INPUT +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/init 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: mv does-exist.dat does-not-exist.dat +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/next 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: mv does-not-exist.dat does-not-exist-again.dat +# @TEST-EXEC: echo "3 streaming still works" >> does-not-exist-again.dat +# @TEST-EXEC: btest-bg-wait 5 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff zeek/.stdout +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff zeek/.stderr + +@TEST-START-FILE does-exist.dat +#separator \x09 +#fields line +#types string +1 now it does +2 and more! 
+@TEST-END-FILE + +redef exit_only_after_terminate = T; + +@load base/frameworks/input + +module A; + +type Val: record { + line: string; +}; + +global line_count = 0; + +event line(description: Input::EventDescription, tpe: Input::Event, v: Val) + { + print fmt("%s: %s", description$name, v$line); + ++line_count; + + if ( line_count == 4 ) + system("touch next"); + if ( line_count == 5 ) + terminate(); + } + +event line2(description: Input::EventDescription, tpe: Input::Event, v: Val) + { + print "DONT PRINT THIS LINE"; + } + + +event zeek_init() + { + Input::add_event([$source="../does-not-exist.dat", $name="input", $reader=Input::READER_ASCII, $mode=Input::REREAD, $fields=Val, $ev=line, $want_record=T]); + Input::add_event([$source="../does-not-exist.dat", $name="inputstream", $reader=Input::READER_ASCII, $mode=Input::STREAM, $fields=Val, $ev=line, $want_record=T]); + Input::add_event([$source="../does-not-exist.dat", $name="inputmanual", $reader=Input::READER_ASCII, $mode=Input::MANUAL, $fields=Val, $ev=line, $want_record=T]); + Input::add_event([$source="../does-not-exist.dat", $name="input2", $reader=Input::READER_ASCII, $mode=Input::REREAD, $fields=Val, $ev=line2, $want_record=T, + $config=table(["fail_on_file_problem"] = "T")]); + system("touch init"); + } diff --git a/testing/btest/scripts/base/frameworks/input/missing-file.bro b/testing/btest/scripts/base/frameworks/input/missing-file.bro deleted file mode 100644 index 2ec3bb937f..0000000000 --- a/testing/btest/scripts/base/frameworks/input/missing-file.bro +++ /dev/null @@ -1,27 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait -k 5 -# @TEST-EXEC: btest-diff bro/.stderr - -redef exit_only_after_terminate = T; -redef InputAscii::fail_on_file_problem = T; - -global outfile: file; -global try: count; - -module A; - -type Val: record { - i: int; - b: bool; -}; - -event line(description: Input::EventDescription, tpe: Input::Event, i: int, b: bool) - { - } - -event bro_init() - { - try = 0; - outfile = open("../out"); - Input::add_event([$source="does-not-exist.dat", $name="input", $fields=Val, $ev=line, $want_record=F]); - } diff --git a/testing/btest/scripts/base/frameworks/input/missing-file.zeek b/testing/btest/scripts/base/frameworks/input/missing-file.zeek new file mode 100644 index 0000000000..f1d4a203e2 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/missing-file.zeek @@ -0,0 +1,27 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: btest-diff zeek/.stderr + +redef exit_only_after_terminate = T; +redef InputAscii::fail_on_file_problem = T; + +global outfile: file; +global try: count; + +module A; + +type Val: record { + i: int; + b: bool; +}; + +event line(description: Input::EventDescription, tpe: Input::Event, i: int, b: bool) + { + } + +event zeek_init() + { + try = 0; + outfile = open("../out"); + Input::add_event([$source="does-not-exist.dat", $name="input", $fields=Val, $ev=line, $want_record=F]); + } diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro b/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro deleted file mode 100644 index c38c4efd85..0000000000 --- a/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.bro +++ /dev/null @@ -1,44 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields b i -#types bool int -T -42 -@TEST-END-FILE - -redef 
exit_only_after_terminate = T; - -global outfile: file; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - b: bool; -}; - -global servers: table[int] of bool = table(); - -event bro_init() - { - outfile = open("../out"); - Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F]); - } - -event Input::end_of_data(name: string, source: string) - { - print outfile, servers; - Input::remove("input"); - close(outfile); - terminate(); - } - diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.zeek b/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.zeek new file mode 100644 index 0000000000..925ec13f82 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/onecolumn-norecord.zeek @@ -0,0 +1,44 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields b i +#types bool int +T -42 +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; +}; + +global servers: table[int] of bool = table(); + +event zeek_init() + { + outfile = open("../out"); + Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F]); + } + +event Input::end_of_data(name: string, source: string) + { + print outfile, servers; + Input::remove("input"); + close(outfile); + terminate(); + } + diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro b/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro deleted file mode 100644 index 3ee82983ff..0000000000 --- a/testing/btest/scripts/base/frameworks/input/onecolumn-record.bro +++ /dev/null @@ -1,44 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields b i -#types bool int -T -42 -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - b: bool; -}; - -global servers: table[int] of Val = table(); - -event bro_init() - { - outfile = open("../out"); - Input::add_table([$name="input", $source="../input.log", $idx=Idx, $val=Val, $destination=servers]); - } - -event Input::end_of_data(name: string, source: string) - { - print outfile, servers; - Input::remove("input"); - close(outfile); - terminate(); - } - diff --git a/testing/btest/scripts/base/frameworks/input/onecolumn-record.zeek b/testing/btest/scripts/base/frameworks/input/onecolumn-record.zeek new file mode 100644 index 0000000000..a55ddd318a --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/onecolumn-record.zeek @@ -0,0 +1,44 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields b i +#types bool int +T -42 +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; +}; + +global servers: table[int] of Val = table(); + +event zeek_init() + { + outfile = open("../out"); 
+ Input::add_table([$name="input", $source="../input.log", $idx=Idx, $val=Val, $destination=servers]); + } + +event Input::end_of_data(name: string, source: string) + { + print outfile, servers; + Input::remove("input"); + close(outfile); + terminate(); + } + diff --git a/testing/btest/scripts/base/frameworks/input/optional.bro b/testing/btest/scripts/base/frameworks/input/optional.bro deleted file mode 100644 index 56c261999d..0000000000 --- a/testing/btest/scripts/base/frameworks/input/optional.bro +++ /dev/null @@ -1,53 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields i b -#types int bool -1 T -2 T -3 F -4 F -5 F -6 F -7 T -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - b: bool; - notb: bool &optional; -}; - -global servers: table[int] of Val = table(); - -event bro_init() - { - outfile = open("../out"); - # first read in the old stuff into the table... - Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, - $pred(typ: Input::Event, left: Idx, right: Val) = { right$notb = !right$b; return T; } - ]); - } - -event Input::end_of_data(name: string, source: string) - { - print outfile, servers; - Input::remove("input"); - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/optional.zeek b/testing/btest/scripts/base/frameworks/input/optional.zeek new file mode 100644 index 0000000000..acea18810e --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/optional.zeek @@ -0,0 +1,53 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields i b +#types int bool +1 T +2 T +3 F +4 F +5 F +6 F +7 T +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; + notb: bool &optional; +}; + +global servers: table[int] of Val = table(); + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, + $pred(typ: Input::Event, left: Idx, right: Val) = { right$notb = !right$b; return T; } + ]); + } + +event Input::end_of_data(name: string, source: string) + { + print outfile, servers; + Input::remove("input"); + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/path-prefix/absolute-prefix.bro b/testing/btest/scripts/base/frameworks/input/path-prefix/absolute-prefix.bro deleted file mode 100644 index df8a68613d..0000000000 --- a/testing/btest/scripts/base/frameworks/input/path-prefix/absolute-prefix.bro +++ /dev/null @@ -1,54 +0,0 @@ -# These tests set the InputAscii::path_prefix / InputBinary::path_prefix -# variables to verify that an absolute path prefix gets added correctly -# to relative/path-less input sources. -# -# @TEST-EXEC: cat %INPUT | sed "s|@path_prefix@|$PWD/subdir|" >input.bro -# @TEST-EXEC: mkdir -p subdir -# -# Note, in the following we'd ideally use %DIR to express the -# additional path, but there's currently a problem in btest with using -# %DIR after TEST-START-NEXT. 
-# -# @TEST-EXEC: BROPATH=$BROPATH:$TEST_BASE/scripts/base/frameworks/input/path-prefix bro -b input.bro >output -# @TEST-EXEC: btest-diff output - -@TEST-START-FILE subdir/input.data -#fields ip tag -127.0.3.1 just -127.0.3.2 some -127.0.3.3 value -@TEST-END-FILE - -@load path-prefix-common-table.bro -redef InputAscii::path_prefix = "@path_prefix@"; - -event bro_init() - { - Input::add_table([$source="input.data", $name="input", $idx=Idx, $val=Val, - $destination=destination, $want_record=F]); - } - -# @TEST-START-NEXT -# -# The same test, but using event streams for input. - -@load path-prefix-common-event.bro -redef InputAscii::path_prefix = "@path_prefix@"; - -event bro_init() - { - Input::add_event([$source="input.data", $name="input", - $fields=Val, $ev=inputev]); - } - -# @TEST-START-NEXT -# -# The same test again, but using file analysis w/ binary readers. - -@load path-prefix-common-analysis.bro -redef InputBinary::path_prefix = "@path_prefix@"; - -event bro_init() - { - Input::add_analysis([$source="input.data", $name="input"]); - } diff --git a/testing/btest/scripts/base/frameworks/input/path-prefix/absolute-prefix.zeek b/testing/btest/scripts/base/frameworks/input/path-prefix/absolute-prefix.zeek new file mode 100644 index 0000000000..b529760e40 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/path-prefix/absolute-prefix.zeek @@ -0,0 +1,54 @@ +# These tests set the InputAscii::path_prefix / InputBinary::path_prefix +# variables to verify that an absolute path prefix gets added correctly +# to relative/path-less input sources. +# +# @TEST-EXEC: cat %INPUT | sed "s|@path_prefix@|$PWD/subdir|" >input.zeek +# @TEST-EXEC: mkdir -p subdir +# +# Note, in the following we'd ideally use %DIR to express the +# additional path, but there's currently a problem in btest with using +# %DIR after TEST-START-NEXT. +# +# @TEST-EXEC: ZEEKPATH=$ZEEKPATH:$TEST_BASE/scripts/base/frameworks/input/path-prefix zeek -b input.zeek >output +# @TEST-EXEC: btest-diff output + +@TEST-START-FILE subdir/input.data +#fields ip tag +127.0.3.1 just +127.0.3.2 some +127.0.3.3 value +@TEST-END-FILE + +@load path-prefix-common-table.zeek +redef InputAscii::path_prefix = "@path_prefix@"; + +event zeek_init() + { + Input::add_table([$source="input.data", $name="input", $idx=Idx, $val=Val, + $destination=destination, $want_record=F]); + } + +# @TEST-START-NEXT +# +# The same test, but using event streams for input. + +@load path-prefix-common-event.zeek +redef InputAscii::path_prefix = "@path_prefix@"; + +event zeek_init() + { + Input::add_event([$source="input.data", $name="input", + $fields=Val, $ev=inputev]); + } + +# @TEST-START-NEXT +# +# The same test again, but using file analysis w/ binary readers. + +@load path-prefix-common-analysis.zeek +redef InputBinary::path_prefix = "@path_prefix@"; + +event zeek_init() + { + Input::add_analysis([$source="input.data", $name="input"]); + } diff --git a/testing/btest/scripts/base/frameworks/input/path-prefix/absolute-source.bro b/testing/btest/scripts/base/frameworks/input/path-prefix/absolute-source.bro deleted file mode 100644 index 06d711a5e8..0000000000 --- a/testing/btest/scripts/base/frameworks/input/path-prefix/absolute-source.bro +++ /dev/null @@ -1,48 +0,0 @@ -# These tests set the InputAscii::path_prefix / InputBinary::path_prefix -# variables to verify that setting these prefixes has no effect when -# an input file uses an absolute-path source. 
-# -# @TEST-EXEC: cat %INPUT | sed "s|@path_prefix@|$PWD|" >input.bro -# @TEST-EXEC: BROPATH=$BROPATH:$TEST_BASE/scripts/base/frameworks/input/path-prefix bro -b input.bro >output -# @TEST-EXEC: btest-diff output - -@TEST-START-FILE input.data -#fields ip tag -127.0.4.1 just -127.0.4.2 some -127.0.4.3 value -@TEST-END-FILE - -@load path-prefix-common-table.bro -redef InputAscii::path_prefix = "/this/does/not/exist"; - -event bro_init() - { - Input::add_table([$source="@path_prefix@/input.data", $name="input", $idx=Idx, $val=Val, - $destination=destination, $want_record=F]); - } - -# @TEST-START-NEXT -# -# The same test, but using event streams for input. - -@load path-prefix-common-event.bro -redef InputAscii::path_prefix = "/this/does/not/exist"; - -event bro_init() - { - Input::add_event([$source="@path_prefix@/input.data", $name="input", - $fields=Val, $ev=inputev]); - } - -# @TEST-START-NEXT -# -# The same test again, but using file analysis w/ binary readers. - -@load path-prefix-common-analysis.bro -redef InputBinary::path_prefix = "/this/does/not/exist"; - -event bro_init() - { - Input::add_analysis([$source="@path_prefix@/input.data", $name="input"]); - } diff --git a/testing/btest/scripts/base/frameworks/input/path-prefix/absolute-source.zeek b/testing/btest/scripts/base/frameworks/input/path-prefix/absolute-source.zeek new file mode 100644 index 0000000000..8e59555c11 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/path-prefix/absolute-source.zeek @@ -0,0 +1,48 @@ +# These tests set the InputAscii::path_prefix / InputBinary::path_prefix +# variables to verify that setting these prefixes has no effect when +# an input file uses an absolute-path source. +# +# @TEST-EXEC: cat %INPUT | sed "s|@path_prefix@|$PWD|" >input.zeek +# @TEST-EXEC: ZEEKPATH=$ZEEKPATH:$TEST_BASE/scripts/base/frameworks/input/path-prefix zeek -b input.zeek >output +# @TEST-EXEC: btest-diff output + +@TEST-START-FILE input.data +#fields ip tag +127.0.4.1 just +127.0.4.2 some +127.0.4.3 value +@TEST-END-FILE + +@load path-prefix-common-table.zeek +redef InputAscii::path_prefix = "/this/does/not/exist"; + +event zeek_init() + { + Input::add_table([$source="@path_prefix@/input.data", $name="input", $idx=Idx, $val=Val, + $destination=destination, $want_record=F]); + } + +# @TEST-START-NEXT +# +# The same test, but using event streams for input. + +@load path-prefix-common-event.zeek +redef InputAscii::path_prefix = "/this/does/not/exist"; + +event zeek_init() + { + Input::add_event([$source="@path_prefix@/input.data", $name="input", + $fields=Val, $ev=inputev]); + } + +# @TEST-START-NEXT +# +# The same test again, but using file analysis w/ binary readers. + +@load path-prefix-common-analysis.zeek +redef InputBinary::path_prefix = "/this/does/not/exist"; + +event zeek_init() + { + Input::add_analysis([$source="@path_prefix@/input.data", $name="input"]); + } diff --git a/testing/btest/scripts/base/frameworks/input/path-prefix/no-paths.bro b/testing/btest/scripts/base/frameworks/input/path-prefix/no-paths.bro deleted file mode 100644 index dd38fd7796..0000000000 --- a/testing/btest/scripts/base/frameworks/input/path-prefix/no-paths.bro +++ /dev/null @@ -1,43 +0,0 @@ -# These tests verify that when setting neither InputAscii::path_prefix -# nor InputBinary::path_prefix, Zeek correctly locates local input files. 
-# -# @TEST-EXEC: BROPATH=$BROPATH:$TEST_BASE/scripts/base/frameworks/input/path-prefix bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -@TEST-START-FILE input.data -#fields ip tag -127.0.0.1 just -127.0.0.2 some -127.0.0.3 value -@TEST-END-FILE - -@load path-prefix-common-table.bro - -event bro_init() - { - Input::add_table([$source="input.data", $name="input", $idx=Idx, $val=Val, - $destination=destination, $want_record=F]); - } - -# @TEST-START-NEXT -# -# The same test, but using event streams for input. - -@load path-prefix-common-event.bro - -event bro_init() - { - Input::add_event([$source="input.data", $name="input", - $fields=Val, $ev=inputev]); - } - -# @TEST-START-NEXT -# -# The same test again, but using file analysis w/ binary readers. - -@load path-prefix-common-analysis.bro - -event bro_init() - { - Input::add_analysis([$source="input.data", $name="input"]); - } diff --git a/testing/btest/scripts/base/frameworks/input/path-prefix/no-paths.zeek b/testing/btest/scripts/base/frameworks/input/path-prefix/no-paths.zeek new file mode 100644 index 0000000000..ae687bfe81 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/path-prefix/no-paths.zeek @@ -0,0 +1,43 @@ +# These tests verify that when setting neither InputAscii::path_prefix +# nor InputBinary::path_prefix, Zeek correctly locates local input files. +# +# @TEST-EXEC: ZEEKPATH=$ZEEKPATH:$TEST_BASE/scripts/base/frameworks/input/path-prefix zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +@TEST-START-FILE input.data +#fields ip tag +127.0.0.1 just +127.0.0.2 some +127.0.0.3 value +@TEST-END-FILE + +@load path-prefix-common-table.zeek + +event zeek_init() + { + Input::add_table([$source="input.data", $name="input", $idx=Idx, $val=Val, + $destination=destination, $want_record=F]); + } + +# @TEST-START-NEXT +# +# The same test, but using event streams for input. + +@load path-prefix-common-event.zeek + +event zeek_init() + { + Input::add_event([$source="input.data", $name="input", + $fields=Val, $ev=inputev]); + } + +# @TEST-START-NEXT +# +# The same test again, but using file analysis w/ binary readers. 
+ +@load path-prefix-common-analysis.zeek + +event zeek_init() + { + Input::add_analysis([$source="input.data", $name="input"]); + } diff --git a/testing/btest/scripts/base/frameworks/input/path-prefix/path-prefix-common-analysis.bro b/testing/btest/scripts/base/frameworks/input/path-prefix/path-prefix-common-analysis.zeek similarity index 100% rename from testing/btest/scripts/base/frameworks/input/path-prefix/path-prefix-common-analysis.bro rename to testing/btest/scripts/base/frameworks/input/path-prefix/path-prefix-common-analysis.zeek diff --git a/testing/btest/scripts/base/frameworks/input/path-prefix/path-prefix-common-event.bro b/testing/btest/scripts/base/frameworks/input/path-prefix/path-prefix-common-event.zeek similarity index 100% rename from testing/btest/scripts/base/frameworks/input/path-prefix/path-prefix-common-event.bro rename to testing/btest/scripts/base/frameworks/input/path-prefix/path-prefix-common-event.zeek diff --git a/testing/btest/scripts/base/frameworks/input/path-prefix/path-prefix-common-table.bro b/testing/btest/scripts/base/frameworks/input/path-prefix/path-prefix-common-table.zeek similarity index 100% rename from testing/btest/scripts/base/frameworks/input/path-prefix/path-prefix-common-table.bro rename to testing/btest/scripts/base/frameworks/input/path-prefix/path-prefix-common-table.zeek diff --git a/testing/btest/scripts/base/frameworks/input/path-prefix/relative-prefix.bro b/testing/btest/scripts/base/frameworks/input/path-prefix/relative-prefix.bro deleted file mode 100644 index 52ae233289..0000000000 --- a/testing/btest/scripts/base/frameworks/input/path-prefix/relative-prefix.bro +++ /dev/null @@ -1,48 +0,0 @@ -# This test sets the InputAscii::path_prefix / InputBinary::path_prefix -# variables to verify that a relative path prefix applies correctly -# from the current working directory. -# -# @TEST-EXEC: mkdir -p alternative -# @TEST-EXEC: BROPATH=$BROPATH:$TEST_BASE/scripts/base/frameworks/input/path-prefix bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -@TEST-START-FILE alternative/input.data -#fields ip tag -127.0.1.1 just -127.0.1.2 some -127.0.1.3 value -@TEST-END-FILE - -@load path-prefix-common-table.bro -redef InputAscii::path_prefix = "alternative"; - -event bro_init() - { - Input::add_table([$source="input.data", $name="input", $idx=Idx, $val=Val, - $destination=destination, $want_record=F]); - } - -# @TEST-START-NEXT -# -# The same test, but using event streams for input. - -@load path-prefix-common-event.bro -redef InputAscii::path_prefix = "alternative"; - -event bro_init() - { - Input::add_event([$source="input.data", $name="input", - $fields=Val, $ev=inputev]); - } - -# @TEST-START-NEXT -# -# The same test again, but using file analysis w/ binary readers. - -@load path-prefix-common-analysis.bro -redef InputBinary::path_prefix = "alternative"; - -event bro_init() - { - Input::add_analysis([$source="input.data", $name="input"]); - } diff --git a/testing/btest/scripts/base/frameworks/input/path-prefix/relative-prefix.zeek b/testing/btest/scripts/base/frameworks/input/path-prefix/relative-prefix.zeek new file mode 100644 index 0000000000..4a4d9208dc --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/path-prefix/relative-prefix.zeek @@ -0,0 +1,48 @@ +# This test sets the InputAscii::path_prefix / InputBinary::path_prefix +# variables to verify that a relative path prefix applies correctly +# from the current working directory. 
+# +# @TEST-EXEC: mkdir -p alternative +# @TEST-EXEC: ZEEKPATH=$ZEEKPATH:$TEST_BASE/scripts/base/frameworks/input/path-prefix zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +@TEST-START-FILE alternative/input.data +#fields ip tag +127.0.1.1 just +127.0.1.2 some +127.0.1.3 value +@TEST-END-FILE + +@load path-prefix-common-table.zeek +redef InputAscii::path_prefix = "alternative"; + +event zeek_init() + { + Input::add_table([$source="input.data", $name="input", $idx=Idx, $val=Val, + $destination=destination, $want_record=F]); + } + +# @TEST-START-NEXT +# +# The same test, but using event streams for input. + +@load path-prefix-common-event.zeek +redef InputAscii::path_prefix = "alternative"; + +event zeek_init() + { + Input::add_event([$source="input.data", $name="input", + $fields=Val, $ev=inputev]); + } + +# @TEST-START-NEXT +# +# The same test again, but using file analysis w/ binary readers. + +@load path-prefix-common-analysis.zeek +redef InputBinary::path_prefix = "alternative"; + +event zeek_init() + { + Input::add_analysis([$source="input.data", $name="input"]); + } diff --git a/testing/btest/scripts/base/frameworks/input/patterns.zeek b/testing/btest/scripts/base/frameworks/input/patterns.zeek new file mode 100644 index 0000000000..d8c714ac0b --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/patterns.zeek @@ -0,0 +1,47 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +redef exit_only_after_terminate = T; + +@TEST-START-FILE input.log +#separator \x09 +#fields i p +#types count pattern +1 /dog/ +2 /cat/ +3 /foo|bar/ +4 /^oob/ +@TEST-END-FILE + +global outfile: file; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + p: pattern; +}; + +global pats: table[int] of Val = table(); + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... 
+ Input::add_table([$source="../input.log", $name="pats", $idx=Idx, $val=Val, $destination=pats]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, (pats[3]$p in "foobar"); # T + print outfile, (pats[4]$p in "foobar"); # F + print outfile, (pats[3]$p == "foo"); # T + print outfile, pats; + Input::remove("pats"); + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/port-embedded.bro b/testing/btest/scripts/base/frameworks/input/port-embedded.bro deleted file mode 100644 index 8aab733069..0000000000 --- a/testing/btest/scripts/base/frameworks/input/port-embedded.bro +++ /dev/null @@ -1,44 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff bro/.stdout -# @TEST-EXEC: btest-diff bro/.stderr - -@TEST-START-FILE input.log -#fields i p -1.2.3.4 80/tcp -1.2.3.5 52/udp -1.2.3.6 30/unknown -1.2.3.7 50/trash -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: addr; -}; - -type Val: record { - p: port; -}; - -global servers: table[addr] of Val = table(); - -event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) - { - print left, right; - } - -event bro_init() - { - Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $ev=line, $destination=servers]); - } - -event Input::end_of_data(name: string, source: string) - { - Input::remove("input"); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/port-embedded.zeek b/testing/btest/scripts/base/frameworks/input/port-embedded.zeek new file mode 100644 index 0000000000..ef4b0a0651 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/port-embedded.zeek @@ -0,0 +1,44 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff zeek/.stdout +# @TEST-EXEC: btest-diff zeek/.stderr + +@TEST-START-FILE input.log +#fields i p +1.2.3.4 80/tcp +1.2.3.5 52/udp +1.2.3.6 30/unknown +1.2.3.7 50/trash +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: addr; +}; + +type Val: record { + p: port; +}; + +global servers: table[addr] of Val = table(); + +event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) + { + print left, right; + } + +event zeek_init() + { + Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $ev=line, $destination=servers]); + } + +event Input::end_of_data(name: string, source: string) + { + Input::remove("input"); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/port.bro b/testing/btest/scripts/base/frameworks/input/port.bro deleted file mode 100644 index 48571c5ecd..0000000000 --- a/testing/btest/scripts/base/frameworks/input/port.bro +++ /dev/null @@ -1,50 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input.log -#fields i p t -1.2.3.4 80 tcp -1.2.3.5 52 udp -1.2.3.6 30 unknown -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: addr; -}; - -type Val: record { - p: port &type_column="t"; -}; - -global servers: table[addr] of Val = table(); - -event bro_init() - { - outfile = open("../out"); - 
Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers]); - if ( 1.2.3.4 in servers ) - print outfile, servers[1.2.3.4]; - if ( 1.2.3.5 in servers ) - print outfile, servers[1.2.3.5]; - if ( 1.2.3.6 in servers ) - print outfile, servers[1.2.3.6]; - } - -event Input::end_of_data(name: string, source: string) - { - print outfile, servers[1.2.3.4]; - print outfile, servers[1.2.3.5]; - print outfile, servers[1.2.3.6]; - Input::remove("input"); - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/port.zeek b/testing/btest/scripts/base/frameworks/input/port.zeek new file mode 100644 index 0000000000..b7a4b78913 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/port.zeek @@ -0,0 +1,50 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#fields i p t +1.2.3.4 80 tcp +1.2.3.5 52 udp +1.2.3.6 30 unknown +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: addr; +}; + +type Val: record { + p: port &type_column="t"; +}; + +global servers: table[addr] of Val = table(); + +event zeek_init() + { + outfile = open("../out"); + Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers]); + if ( 1.2.3.4 in servers ) + print outfile, servers[1.2.3.4]; + if ( 1.2.3.5 in servers ) + print outfile, servers[1.2.3.5]; + if ( 1.2.3.6 in servers ) + print outfile, servers[1.2.3.6]; + } + +event Input::end_of_data(name: string, source: string) + { + print outfile, servers[1.2.3.4]; + print outfile, servers[1.2.3.5]; + print outfile, servers[1.2.3.6]; + Input::remove("input"); + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/predicate-stream.bro b/testing/btest/scripts/base/frameworks/input/predicate-stream.bro deleted file mode 100644 index aac44fb8ee..0000000000 --- a/testing/btest/scripts/base/frameworks/input/predicate-stream.bro +++ /dev/null @@ -1,76 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out -# -# only difference from predicate.bro is, that this one uses a stream source. -# the reason is, that the code-paths are quite different, because then the -# ascii reader uses the put and not the sendevent interface - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields i b -#types int bool -1 T -2 T -3 F -4 F -5 F -6 F -7 T -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - b: bool; -}; - -global servers: table[int] of bool = table(); -global ct: int; - -event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: bool) - { - ct = ct + 1; - if ( ct < 3 ) - return; - - if ( 1 in servers ) - print outfile, "VALID"; - if ( 2 in servers ) - print outfile, "VALID"; - if ( !(3 in servers) ) - print outfile, "VALID"; - if ( !(4 in servers) ) - print outfile, "VALID"; - if ( !(5 in servers) ) - print outfile, "VALID"; - if ( !(6 in servers) ) - print outfile, "VALID"; - if ( 7 in servers ) - print outfile, "VALID"; - Input::remove("input"); - close(outfile); - terminate(); - } - -event bro_init() - { - outfile = open("../out"); - ct = 0; - # first read in the old stuff into the table... 
- Input::add_table([$source="../input.log", $mode=Input::STREAM, $name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F, $ev=line, - $pred(typ: Input::Event, left: Idx, right: bool) = { return right; } - ]); - } - diff --git a/testing/btest/scripts/base/frameworks/input/predicate-stream.zeek b/testing/btest/scripts/base/frameworks/input/predicate-stream.zeek new file mode 100644 index 0000000000..25c818dae7 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/predicate-stream.zeek @@ -0,0 +1,76 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out +# +# only difference from predicate.zeek is, that this one uses a stream source. +# the reason is, that the code-paths are quite different, because then the +# ascii reader uses the put and not the sendevent interface + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields i b +#types int bool +1 T +2 T +3 F +4 F +5 F +6 F +7 T +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; +}; + +global servers: table[int] of bool = table(); +global ct: int; + +event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: bool) + { + ct = ct + 1; + if ( ct < 3 ) + return; + + if ( 1 in servers ) + print outfile, "VALID"; + if ( 2 in servers ) + print outfile, "VALID"; + if ( !(3 in servers) ) + print outfile, "VALID"; + if ( !(4 in servers) ) + print outfile, "VALID"; + if ( !(5 in servers) ) + print outfile, "VALID"; + if ( !(6 in servers) ) + print outfile, "VALID"; + if ( 7 in servers ) + print outfile, "VALID"; + Input::remove("input"); + close(outfile); + terminate(); + } + +event zeek_init() + { + outfile = open("../out"); + ct = 0; + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $mode=Input::STREAM, $name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F, $ev=line, + $pred(typ: Input::Event, left: Idx, right: bool) = { return right; } + ]); + } + diff --git a/testing/btest/scripts/base/frameworks/input/predicate.bro b/testing/btest/scripts/base/frameworks/input/predicate.bro deleted file mode 100644 index 9946e72211..0000000000 --- a/testing/btest/scripts/base/frameworks/input/predicate.bro +++ /dev/null @@ -1,65 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields i b -#types int bool -1 T -2 T -3 F -4 F -5 F -6 F -7 T -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - b: bool; -}; - -global servers: table[int] of bool = table(); - -event bro_init() - { - outfile = open("../out"); - # first read in the old stuff into the table... 
- Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F, - $pred(typ: Input::Event, left: Idx, right: bool) = { return right; } - ]); - } - -event Input::end_of_data(name: string, source: string) - { - if ( 1 in servers ) - print outfile, "VALID"; - if ( 2 in servers ) - print outfile, "VALID"; - if ( !(3 in servers) ) - print outfile, "VALID"; - if ( !(4 in servers) ) - print outfile, "VALID"; - if ( !(5 in servers) ) - print outfile, "VALID"; - if ( !(6 in servers) ) - print outfile, "VALID"; - if ( 7 in servers ) - print outfile, "VALID"; - Input::remove("input"); - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/predicate.zeek b/testing/btest/scripts/base/frameworks/input/predicate.zeek new file mode 100644 index 0000000000..61f1a5cf16 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/predicate.zeek @@ -0,0 +1,65 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields i b +#types int bool +1 T +2 T +3 F +4 F +5 F +6 F +7 T +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; +}; + +global servers: table[int] of bool = table(); + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, $want_record=F, + $pred(typ: Input::Event, left: Idx, right: bool) = { return right; } + ]); + } + +event Input::end_of_data(name: string, source: string) + { + if ( 1 in servers ) + print outfile, "VALID"; + if ( 2 in servers ) + print outfile, "VALID"; + if ( !(3 in servers) ) + print outfile, "VALID"; + if ( !(4 in servers) ) + print outfile, "VALID"; + if ( !(5 in servers) ) + print outfile, "VALID"; + if ( !(6 in servers) ) + print outfile, "VALID"; + if ( 7 in servers ) + print outfile, "VALID"; + Input::remove("input"); + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/predicatemodify.bro b/testing/btest/scripts/base/frameworks/input/predicatemodify.bro deleted file mode 100644 index 13ed38d6ba..0000000000 --- a/testing/btest/scripts/base/frameworks/input/predicatemodify.bro +++ /dev/null @@ -1,56 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields i b s ss -#types int bool string string -1 T test1 idx1 -2 T test2 idx2 -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: int; - ss: string; -}; - -type Val: record { - b: bool; - s: string; -}; - -global servers: table[int, string] of Val = table(); - -event bro_init() - { - outfile = open("../out"); - - # first read in the old stuff into the table... 
- Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, - $pred(typ: Input::Event, left: Idx, right: Val) = { - if ( left$i == 1 ) - right$s = "testmodified"; - if ( left$i == 2 ) - left$ss = "idxmodified"; - return T; - } - ]); - } - -event Input::end_of_data(name: string, source: string) - { - print outfile, servers; - Input::remove("input"); - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/predicatemodify.zeek b/testing/btest/scripts/base/frameworks/input/predicatemodify.zeek new file mode 100644 index 0000000000..5de9f7bcc8 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/predicatemodify.zeek @@ -0,0 +1,56 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields i b s ss +#types int bool string string +1 T test1 idx1 +2 T test2 idx2 +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; + ss: string; +}; + +type Val: record { + b: bool; + s: string; +}; + +global servers: table[int, string] of Val = table(); + +event zeek_init() + { + outfile = open("../out"); + + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, + $pred(typ: Input::Event, left: Idx, right: Val) = { + if ( left$i == 1 ) + right$s = "testmodified"; + if ( left$i == 2 ) + left$ss = "idxmodified"; + return T; + } + ]); + } + +event Input::end_of_data(name: string, source: string) + { + print outfile, servers; + Input::remove("input"); + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/predicatemodifyandreread.bro b/testing/btest/scripts/base/frameworks/input/predicatemodifyandreread.bro deleted file mode 100644 index 2c6b58ff2d..0000000000 --- a/testing/btest/scripts/base/frameworks/input/predicatemodifyandreread.bro +++ /dev/null @@ -1,114 +0,0 @@ -# @TEST-EXEC: mv input1.log input.log -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got1 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: mv input2.log input.log -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got2 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: mv input3.log input.log -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got3 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: mv input4.log input.log -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got4 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: mv input5.log input.log -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out -# - -@TEST-START-FILE input1.log -#separator \x09 -#path ssh -#fields i b s ss -#types int bool string string -1 T test1 idx1 -2 T test2 idx2 -@TEST-END-FILE - -@TEST-START-FILE input2.log -#separator \x09 -#path ssh -#fields i b s ss -#types int bool string string -1 F test1 idx1 -2 T test2 idx2 -@TEST-END-FILE - -@TEST-START-FILE input3.log -#separator \x09 -#path ssh -#fields i b s ss -#types int bool string string -1 F test1 idx1 -2 F test2 idx2 -@TEST-END-FILE - -@TEST-START-FILE input4.log -#separator \x09 -#path ssh -#fields i b s ss -#types int bool string string -2 F test2 idx2 -@TEST-END-FILE - -@TEST-START-FILE input5.log -#separator \x09 -#path ssh -#fields i b s ss -#types int bool string string -1 T test1 idx1 -@TEST-END-FILE - -redef exit_only_after_terminate = T; - 
-redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: int; - ss: string; -}; - -type Val: record { - b: bool; - s: string; -}; - -global servers: table[int, string] of Val = table(); -global outfile: file; -global try: count; - -event bro_init() - { - try = 0; - outfile = open("../out"); - # first read in the old stuff into the table... - Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, $mode=Input::REREAD, - $pred(typ: Input::Event, left: Idx, right: Val) = { - if ( left$i == 1 ) - right$s = "testmodified"; - if ( left$i == 2 ) - left$ss = "idxmodified"; - return T; - } - ]); - } - -event Input::end_of_data(name: string, source: string) - { - try = try + 1; - print outfile, fmt("Update_finished for %s, try %d", name, try); - print outfile, servers; - - if ( try == 1 ) - system("touch got1"); - else if ( try == 2 ) - system("touch got2"); - else if ( try == 3 ) - system("touch got3"); - else if ( try == 4 ) - system("touch got4"); - if ( try == 5 ) - { - close(outfile); - Input::remove("input"); - terminate(); - } - } diff --git a/testing/btest/scripts/base/frameworks/input/predicatemodifyandreread.zeek b/testing/btest/scripts/base/frameworks/input/predicatemodifyandreread.zeek new file mode 100644 index 0000000000..9f3d66df80 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/predicatemodifyandreread.zeek @@ -0,0 +1,114 @@ +# @TEST-EXEC: mv input1.log input.log +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got1 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: mv input2.log input.log +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got2 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: mv input3.log input.log +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got3 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: mv input4.log input.log +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got4 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: mv input5.log input.log +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out +# + +@TEST-START-FILE input1.log +#separator \x09 +#path ssh +#fields i b s ss +#types int bool string string +1 T test1 idx1 +2 T test2 idx2 +@TEST-END-FILE + +@TEST-START-FILE input2.log +#separator \x09 +#path ssh +#fields i b s ss +#types int bool string string +1 F test1 idx1 +2 T test2 idx2 +@TEST-END-FILE + +@TEST-START-FILE input3.log +#separator \x09 +#path ssh +#fields i b s ss +#types int bool string string +1 F test1 idx1 +2 F test2 idx2 +@TEST-END-FILE + +@TEST-START-FILE input4.log +#separator \x09 +#path ssh +#fields i b s ss +#types int bool string string +2 F test2 idx2 +@TEST-END-FILE + +@TEST-START-FILE input5.log +#separator \x09 +#path ssh +#fields i b s ss +#types int bool string string +1 T test1 idx1 +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; + ss: string; +}; + +type Val: record { + b: bool; + s: string; +}; + +global servers: table[int, string] of Val = table(); +global outfile: file; +global try: count; + +event zeek_init() + { + try = 0; + outfile = open("../out"); + # first read in the old stuff into the table... 
+ Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, $mode=Input::REREAD, + $pred(typ: Input::Event, left: Idx, right: Val) = { + if ( left$i == 1 ) + right$s = "testmodified"; + if ( left$i == 2 ) + left$ss = "idxmodified"; + return T; + } + ]); + } + +event Input::end_of_data(name: string, source: string) + { + try = try + 1; + print outfile, fmt("Update_finished for %s, try %d", name, try); + print outfile, servers; + + if ( try == 1 ) + system("touch got1"); + else if ( try == 2 ) + system("touch got2"); + else if ( try == 3 ) + system("touch got3"); + else if ( try == 4 ) + system("touch got4"); + if ( try == 5 ) + { + close(outfile); + Input::remove("input"); + terminate(); + } + } diff --git a/testing/btest/scripts/base/frameworks/input/predicaterefusesecondsamerecord.bro b/testing/btest/scripts/base/frameworks/input/predicaterefusesecondsamerecord.bro deleted file mode 100644 index ae756431cd..0000000000 --- a/testing/btest/scripts/base/frameworks/input/predicaterefusesecondsamerecord.bro +++ /dev/null @@ -1,53 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -# Ok, this one tests a fun case. -# Input file contains two lines mapping to the same index, but with different values, -# where the predicate accepts the first one and refuses the second one. -# Desired result -> first entry stays. - -@TEST-START-FILE input.log -#fields restriction guid severity confidence detecttime address protocol portlist asn prefix rir cc impact description alternativeid_restriction alternativeid -need-to-know 8c864306-d21a-37b1-8705-746a786719bf medium 65 1342656000 1.0.17.227 - - 2519 VECTANT VECTANT Ltd. 1.0.16.0/23 apnic JP spam infrastructure spamming public http://reputation.alienvault.com/reputation.generic -need-to-know 8c864306-d21a-37b1-8705-746a786719bf medium 95 1342569600 1.228.83.33 6 25 9318 HANARO-AS Hanaro Telecom Inc. 1.224.0.0/13 apnic KR spam infrastructure direct ube sources, spam operations & spam services public http://www.spamhaus.org/query/bl?ip=1.228.83.33 -need-to-know 8c864306-d21a-37b1-8705-746a786719bf medium 65 1342656000 1.228.83.33 - - 9318 HANARO-AS Hanaro Telecom Inc. 1.224.0.0/13 apnic KR spam infrastructure spamming;malware domain public http://reputation.alienvault.com/reputation.generic -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - address: addr; -}; - -type Val: record { - asn: string; - severity: string; - confidence: count; - detecttime: time; -}; - -global servers: table[addr] of Val = table(); - -event bro_init() - { - outfile = open("../out"); - # first read in the old stuff into the table... 
- Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, - $pred(typ: Input::Event, left: Idx, right: Val) = { if ( right$confidence > 90 ) { return T; } return F; } - ]); - } - -event Input::end_of_data(name: string, source: string) - { - print outfile, servers; - Input::remove("input"); - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/predicaterefusesecondsamerecord.zeek b/testing/btest/scripts/base/frameworks/input/predicaterefusesecondsamerecord.zeek new file mode 100644 index 0000000000..79d38fab0d --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/predicaterefusesecondsamerecord.zeek @@ -0,0 +1,53 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +# Ok, this one tests a fun case. +# Input file contains two lines mapping to the same index, but with different values, +# where the predicate accepts the first one and refuses the second one. +# Desired result -> first entry stays. + +@TEST-START-FILE input.log +#fields restriction guid severity confidence detecttime address protocol portlist asn prefix rir cc impact description alternativeid_restriction alternativeid +need-to-know 8c864306-d21a-37b1-8705-746a786719bf medium 65 1342656000 1.0.17.227 - - 2519 VECTANT VECTANT Ltd. 1.0.16.0/23 apnic JP spam infrastructure spamming public http://reputation.alienvault.com/reputation.generic +need-to-know 8c864306-d21a-37b1-8705-746a786719bf medium 95 1342569600 1.228.83.33 6 25 9318 HANARO-AS Hanaro Telecom Inc. 1.224.0.0/13 apnic KR spam infrastructure direct ube sources, spam operations & spam services public http://www.spamhaus.org/query/bl?ip=1.228.83.33 +need-to-know 8c864306-d21a-37b1-8705-746a786719bf medium 65 1342656000 1.228.83.33 - - 9318 HANARO-AS Hanaro Telecom Inc. 1.224.0.0/13 apnic KR spam infrastructure spamming;malware domain public http://reputation.alienvault.com/reputation.generic +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + address: addr; +}; + +type Val: record { + asn: string; + severity: string; + confidence: count; + detecttime: time; +}; + +global servers: table[addr] of Val = table(); + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=servers, + $pred(typ: Input::Event, left: Idx, right: Val) = { if ( right$confidence > 90 ) { return T; } return F; } + ]); + } + +event Input::end_of_data(name: string, source: string) + { + print outfile, servers; + Input::remove("input"); + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/raw/basic.bro b/testing/btest/scripts/base/frameworks/input/raw/basic.bro deleted file mode 100644 index 377e34aca7..0000000000 --- a/testing/btest/scripts/base/frameworks/input/raw/basic.bro +++ /dev/null @@ -1,46 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input.log -sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF -DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF -q3r3057fdf -sdfs\d - -dfsdf -sdf -3rw43wRRERLlL#RWERERERE. 
-@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; -global try: count; - -module A; - -type Val: record { - s: string; -}; - -event line(description: Input::EventDescription, tpe: Input::Event, s: string) - { - print outfile, description; - print outfile, tpe; - print outfile, s; - try = try + 1; - if ( try == 8 ) - { - Input::remove("input"); - close(outfile); - terminate(); - } - } - -event bro_init() - { - try = 0; - outfile = open("../out"); - Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $want_record=F]); - } diff --git a/testing/btest/scripts/base/frameworks/input/raw/basic.zeek b/testing/btest/scripts/base/frameworks/input/raw/basic.zeek new file mode 100644 index 0000000000..af246fdfcb --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/raw/basic.zeek @@ -0,0 +1,46 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF +DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF +q3r3057fdf +sdfs\d + +dfsdf +sdf +3rw43wRRERLlL#RWERERERE. +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; +global try: count; + +module A; + +type Val: record { + s: string; +}; + +event line(description: Input::EventDescription, tpe: Input::Event, s: string) + { + print outfile, description; + print outfile, tpe; + print outfile, s; + try = try + 1; + if ( try == 8 ) + { + Input::remove("input"); + close(outfile); + terminate(); + } + } + +event zeek_init() + { + try = 0; + outfile = open("../out"); + Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $want_record=F]); + } diff --git a/testing/btest/scripts/base/frameworks/input/raw/execute.bro b/testing/btest/scripts/base/frameworks/input/raw/execute.bro deleted file mode 100644 index 783b974c0f..0000000000 --- a/testing/btest/scripts/base/frameworks/input/raw/execute.bro +++ /dev/null @@ -1,39 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: cat out.tmp | sed 's/^ *//g' >out -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input.log -sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF -DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF -q3r3057fdf -sdfs\d - -dfsdf -sdf -3rw43wRRERLlL#RWERERERE. 
-@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; - -type Val: record { - s: string; -}; - -event line(description: Input::EventDescription, tpe: Input::Event, s: string) - { - print outfile, description; - print outfile, tpe; - print outfile, s; - Input::remove("input"); - close(outfile); - terminate(); - } - -event bro_init() - { - outfile = open("../out.tmp"); - Input::add_event([$source="wc -l ../input.log |", $reader=Input::READER_RAW, $name="input", $fields=Val, $ev=line, $want_record=F]); - } diff --git a/testing/btest/scripts/base/frameworks/input/raw/execute.zeek b/testing/btest/scripts/base/frameworks/input/raw/execute.zeek new file mode 100644 index 0000000000..672d8131d1 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/raw/execute.zeek @@ -0,0 +1,39 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: cat out.tmp | sed 's/^ *//g' >out +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF +DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF +q3r3057fdf +sdfs\d + +dfsdf +sdf +3rw43wRRERLlL#RWERERERE. +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; + +type Val: record { + s: string; +}; + +event line(description: Input::EventDescription, tpe: Input::Event, s: string) + { + print outfile, description; + print outfile, tpe; + print outfile, s; + Input::remove("input"); + close(outfile); + terminate(); + } + +event zeek_init() + { + outfile = open("../out.tmp"); + Input::add_event([$source="wc -l ../input.log |", $reader=Input::READER_RAW, $name="input", $fields=Val, $ev=line, $want_record=F]); + } diff --git a/testing/btest/scripts/base/frameworks/input/raw/executestdin.bro b/testing/btest/scripts/base/frameworks/input/raw/executestdin.bro deleted file mode 100644 index b78dd4e0e3..0000000000 --- a/testing/btest/scripts/base/frameworks/input/raw/executestdin.bro +++ /dev/null @@ -1,84 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 15 -# @TEST-EXEC: btest-diff test.txt -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out - -redef exit_only_after_terminate = T; - -global outfile: file; -global processes_finished: count = 0; -global lines_received: count = 0; -global n: count = 0; -global total_processes: count = 0; - -global config_strings: table[string] of string = { - ["stdin"] = "hello\nthere\1\2\3\4\5\1\2\3yay" -}; - -module A; - -type Val: record { - s: string; -}; - -global more_input: function(name_prefix: string); - -function check_terminate_condition() - { - if ( processes_finished != total_processes ) - return; - - if ( lines_received != (total_processes - 1) * 2 ) - return; - - terminate(); - } - -event line(description: Input::EventDescription, tpe: Input::Event, s: string) - { - ++lines_received; - print outfile, tpe, description$source, description$name, s; - } - -event InputRaw::process_finished(name: string, source:string, exit_code:count, signal_exit:bool) - { - print "process_finished", name, source; - Input::remove(name); - ++processes_finished; - if ( processes_finished == 1 ) - { - more_input("input"); - more_input("input"); - more_input("input"); - more_input("input"); - more_input("input"); - } - else if ( processes_finished == total_processes ) - { - close(outfile); - check_terminate_condition(); - } - } - -function more_input(name_prefix: string) - { - local name = fmt("%s%d", name_prefix, n); - config_strings["stdin"] += fmt("%d", n); - ++n; - 
++total_processes; - Input::add_event([$source="cat |", - $reader=Input::READER_RAW, $mode=Input::STREAM, - $name=name, $fields=Val, $ev=line, $want_record=F, - $config=config_strings]); - } - -event bro_init() - { - outfile = open("../out"); - ++total_processes; - - Input::add_event([$source="cat > ../test.txt |", - $reader=Input::READER_RAW, $mode=Input::STREAM, - $name="input", $fields=Val, $ev=line, $want_record=F, - $config=config_strings]); - } diff --git a/testing/btest/scripts/base/frameworks/input/raw/executestdin.zeek b/testing/btest/scripts/base/frameworks/input/raw/executestdin.zeek new file mode 100644 index 0000000000..0beb8bca20 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/raw/executestdin.zeek @@ -0,0 +1,84 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 15 +# @TEST-EXEC: btest-diff test.txt +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out + +redef exit_only_after_terminate = T; + +global outfile: file; +global processes_finished: count = 0; +global lines_received: count = 0; +global n: count = 0; +global total_processes: count = 0; + +global config_strings: table[string] of string = { + ["stdin"] = "hello\nthere\1\2\3\4\5\1\2\3yay" +}; + +module A; + +type Val: record { + s: string; +}; + +global more_input: function(name_prefix: string); + +function check_terminate_condition() + { + if ( processes_finished != total_processes ) + return; + + if ( lines_received != (total_processes - 1) * 2 ) + return; + + terminate(); + } + +event line(description: Input::EventDescription, tpe: Input::Event, s: string) + { + ++lines_received; + print outfile, tpe, description$source, description$name, s; + } + +event InputRaw::process_finished(name: string, source:string, exit_code:count, signal_exit:bool) + { + print "process_finished", name, source; + Input::remove(name); + ++processes_finished; + if ( processes_finished == 1 ) + { + more_input("input"); + more_input("input"); + more_input("input"); + more_input("input"); + more_input("input"); + } + else if ( processes_finished == total_processes ) + { + close(outfile); + check_terminate_condition(); + } + } + +function more_input(name_prefix: string) + { + local name = fmt("%s%d", name_prefix, n); + config_strings["stdin"] += fmt("%d", n); + ++n; + ++total_processes; + Input::add_event([$source="cat |", + $reader=Input::READER_RAW, $mode=Input::STREAM, + $name=name, $fields=Val, $ev=line, $want_record=F, + $config=config_strings]); + } + +event zeek_init() + { + outfile = open("../out"); + ++total_processes; + + Input::add_event([$source="cat > ../test.txt |", + $reader=Input::READER_RAW, $mode=Input::STREAM, + $name="input", $fields=Val, $ev=line, $want_record=F, + $config=config_strings]); + } diff --git a/testing/btest/scripts/base/frameworks/input/raw/executestream.bro b/testing/btest/scripts/base/frameworks/input/raw/executestream.bro deleted file mode 100644 index 240761ee03..0000000000 --- a/testing/btest/scripts/base/frameworks/input/raw/executestream.bro +++ /dev/null @@ -1,64 +0,0 @@ -# @TEST-EXEC: cp input1.log input.log -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got1 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: cat input2.log >> input.log -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got3 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: cat input3.log >> input.log -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -redef exit_only_after_terminate = T; - -@TEST-START-FILE input1.log 
-sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF -@TEST-END-FILE - -@TEST-START-FILE input2.log -DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF -q3r3057fdf -@TEST-END-FILE - -@TEST-START-FILE input3.log -sdfs\d - -dfsdf -sdf -3rw43wRRERLlL#RWERERERE. -@TEST-END-FILE - - -module A; - -type Val: record { - s: string; -}; - -global try: count; -global outfile: file; - -event line(description: Input::EventDescription, tpe: Input::Event, s: string) - { - print outfile, description$source, description$reader, description$mode, description$name; - print outfile, tpe; - print outfile, s; - - try = try + 1; - if ( try == 1 ) - system("touch got1"); - else if ( try == 3 ) - system("touch got3"); - else if ( try == 8 ) - { - print outfile, "done"; - close(outfile); - Input::remove("input"); - terminate(); - } - } - -event bro_init() - { - outfile = open("../out"); - try = 0; - Input::add_event([$source="tail -f ../input.log |", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $want_record=F]); - } diff --git a/testing/btest/scripts/base/frameworks/input/raw/executestream.zeek b/testing/btest/scripts/base/frameworks/input/raw/executestream.zeek new file mode 100644 index 0000000000..73aec5cab7 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/raw/executestream.zeek @@ -0,0 +1,64 @@ +# @TEST-EXEC: cp input1.log input.log +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got1 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: cat input2.log >> input.log +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got3 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: cat input3.log >> input.log +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +redef exit_only_after_terminate = T; + +@TEST-START-FILE input1.log +sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF +@TEST-END-FILE + +@TEST-START-FILE input2.log +DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF +q3r3057fdf +@TEST-END-FILE + +@TEST-START-FILE input3.log +sdfs\d + +dfsdf +sdf +3rw43wRRERLlL#RWERERERE. +@TEST-END-FILE + + +module A; + +type Val: record { + s: string; +}; + +global try: count; +global outfile: file; + +event line(description: Input::EventDescription, tpe: Input::Event, s: string) + { + print outfile, description$source, description$reader, description$mode, description$name; + print outfile, tpe; + print outfile, s; + + try = try + 1; + if ( try == 1 ) + system("touch got1"); + else if ( try == 3 ) + system("touch got3"); + else if ( try == 8 ) + { + print outfile, "done"; + close(outfile); + Input::remove("input"); + terminate(); + } + } + +event zeek_init() + { + outfile = open("../out"); + try = 0; + Input::add_event([$source="tail -f ../input.log |", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $want_record=F]); + } diff --git a/testing/btest/scripts/base/frameworks/input/raw/long.bro b/testing/btest/scripts/base/frameworks/input/raw/long.bro deleted file mode 100644 index 266021ae28..0000000000 --- a/testing/btest/scripts/base/frameworks/input/raw/long.bro +++ /dev/null @@ -1,37 +0,0 @@ -# @TEST-EXEC: dd if=/dev/zero of=input.log bs=8193 count=1 -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out -# -# this test should be longer than one block-size. to test behavior of input-reader if it has to re-allocate stuff. 
- -redef exit_only_after_terminate = T; - -global outfile: file; -global try: count; - -module A; - -type Val: record { - s: string; -}; - -event line(description: Input::EventDescription, tpe: Input::Event, s: string) - { - print outfile, tpe; - print outfile, |s|; - try = try + 1; - if ( try == 1 ) - { - Input::remove("input"); - close(outfile); - terminate(); - } - } - -event bro_init() - { - try = 0; - outfile = open("../out"); - Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $want_record=F]); - } diff --git a/testing/btest/scripts/base/frameworks/input/raw/long.zeek b/testing/btest/scripts/base/frameworks/input/raw/long.zeek new file mode 100644 index 0000000000..bab9e388e5 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/raw/long.zeek @@ -0,0 +1,37 @@ +# @TEST-EXEC: dd if=/dev/zero of=input.log bs=8193 count=1 +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out +# +# this test should be longer than one block-size. to test behavior of input-reader if it has to re-allocate stuff. + +redef exit_only_after_terminate = T; + +global outfile: file; +global try: count; + +module A; + +type Val: record { + s: string; +}; + +event line(description: Input::EventDescription, tpe: Input::Event, s: string) + { + print outfile, tpe; + print outfile, |s|; + try = try + 1; + if ( try == 1 ) + { + Input::remove("input"); + close(outfile); + terminate(); + } + } + +event zeek_init() + { + try = 0; + outfile = open("../out"); + Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $want_record=F]); + } diff --git a/testing/btest/scripts/base/frameworks/input/raw/offset.bro b/testing/btest/scripts/base/frameworks/input/raw/offset.bro deleted file mode 100644 index f37fb9c28a..0000000000 --- a/testing/btest/scripts/base/frameworks/input/raw/offset.bro +++ /dev/null @@ -1,53 +0,0 @@ -# @TEST-EXEC: cp input.log input2.log -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got2 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: echo "hi" >> input2.log -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out - -@TEST-START-FILE input.log -sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; -global try: count; - -module A; - -type Val: record { - s: string; -}; - -event line(description: Input::EventDescription, tpe: Input::Event, s: string) - { - print outfile, s; - try = try + 1; - if ( try == 2 ) - system("touch got2"); - else if ( try == 3 ) - { - close(outfile); - terminate(); - } - } - -event bro_init() - { - try = 0; - outfile = open("../out"); - local config_strings: table[string] of string = { - ["offset"] = "2", - }; - local config_strings_two: table[string] of string = { - ["offset"] = "-3", # 2 characters before end, last char is newline. 
- }; - local config_strings_three: table[string] of string = { - ["offset"] = "-1", # End of file - }; - - Input::add_event([$source="../input.log", $config=config_strings, $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $want_record=F]); - Input::add_event([$source="../input.log", $config=config_strings_two, $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input2", $fields=Val, $ev=line, $want_record=F]); - Input::add_event([$source="../input2.log", $config=config_strings_three, $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input3", $fields=Val, $ev=line, $want_record=F]); - } diff --git a/testing/btest/scripts/base/frameworks/input/raw/offset.zeek b/testing/btest/scripts/base/frameworks/input/raw/offset.zeek new file mode 100644 index 0000000000..87aa36fc8b --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/raw/offset.zeek @@ -0,0 +1,53 @@ +# @TEST-EXEC: cp input.log input2.log +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got2 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: echo "hi" >> input2.log +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out + +@TEST-START-FILE input.log +sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; +global try: count; + +module A; + +type Val: record { + s: string; +}; + +event line(description: Input::EventDescription, tpe: Input::Event, s: string) + { + print outfile, s; + try = try + 1; + if ( try == 2 ) + system("touch got2"); + else if ( try == 3 ) + { + close(outfile); + terminate(); + } + } + +event zeek_init() + { + try = 0; + outfile = open("../out"); + local config_strings: table[string] of string = { + ["offset"] = "2", + }; + local config_strings_two: table[string] of string = { + ["offset"] = "-3", # 2 characters before end, last char is newline. + }; + local config_strings_three: table[string] of string = { + ["offset"] = "-1", # End of file + }; + + Input::add_event([$source="../input.log", $config=config_strings, $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $want_record=F]); + Input::add_event([$source="../input.log", $config=config_strings_two, $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input2", $fields=Val, $ev=line, $want_record=F]); + Input::add_event([$source="../input2.log", $config=config_strings_three, $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input3", $fields=Val, $ev=line, $want_record=F]); + } diff --git a/testing/btest/scripts/base/frameworks/input/raw/rereadraw.bro b/testing/btest/scripts/base/frameworks/input/raw/rereadraw.bro deleted file mode 100644 index f3dfb11ea5..0000000000 --- a/testing/btest/scripts/base/frameworks/input/raw/rereadraw.bro +++ /dev/null @@ -1,47 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input.log -sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF -DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF -q3r3057fdf -sdfs\d - -dfsdf -sdf -3rw43wRRERLlL#RWERERERE. 
-@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; -global try: count; - -module A; - -type Val: record { - s: string; -}; - -event line(description: Input::EventDescription, tpe: Input::Event, s: string) - { - print outfile, description; - print outfile, tpe; - print outfile, s; - try = try + 1; - if ( try == 16 ) - { - Input::remove("input"); - close(outfile); - terminate(); - } - } - -event bro_init() - { - try = 0; - outfile = open("../out"); - Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::REREAD, $name="input", $fields=Val, $ev=line, $want_record=F]); - Input::force_update("input"); - } diff --git a/testing/btest/scripts/base/frameworks/input/raw/rereadraw.zeek b/testing/btest/scripts/base/frameworks/input/raw/rereadraw.zeek new file mode 100644 index 0000000000..f187187f68 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/raw/rereadraw.zeek @@ -0,0 +1,47 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF +DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF +q3r3057fdf +sdfs\d + +dfsdf +sdf +3rw43wRRERLlL#RWERERERE. +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; +global try: count; + +module A; + +type Val: record { + s: string; +}; + +event line(description: Input::EventDescription, tpe: Input::Event, s: string) + { + print outfile, description; + print outfile, tpe; + print outfile, s; + try = try + 1; + if ( try == 16 ) + { + Input::remove("input"); + close(outfile); + terminate(); + } + } + +event zeek_init() + { + try = 0; + outfile = open("../out"); + Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::REREAD, $name="input", $fields=Val, $ev=line, $want_record=F]); + Input::force_update("input"); + } diff --git a/testing/btest/scripts/base/frameworks/input/raw/stderr.bro b/testing/btest/scripts/base/frameworks/input/raw/stderr.bro deleted file mode 100644 index 8ff4cc7f1b..0000000000 --- a/testing/btest/scripts/base/frameworks/input/raw/stderr.bro +++ /dev/null @@ -1,68 +0,0 @@ -# @TEST-EXEC: mkdir mydir && touch mydir/a && touch mydir/b && touch mydir/c -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -redef exit_only_after_terminate = T; - -type Val: record { - s: string; - is_stderr: bool; -}; - -global try = 0; -global n = 0; -global outfile: file; - -event line(description: Input::EventDescription, tpe: Input::Event, s: string, is_stderr: bool) - { - local line_output = fmt("%s line output (stderr=%s): ", tpe, is_stderr); - - if ( is_stderr ) - { - # work around localized error messages. and if some localization does not include the filename... well... 
that would be bad :) - if ( strstr(s, "nonexistant") > 0 ) - line_output += ""; - else - line_output += ""; - } - else - line_output += s; - - print outfile, line_output; - ++try; - - if ( n == 2 && try == 7 ) - terminate(); - } - -event Input::end_of_data(name: string, source:string) - { - print outfile, "End of Data event", name; - ++n; - - if ( n == 2 && try == 7 ) - terminate(); - } - -event InputRaw::process_finished(name: string, source:string, exit_code:count, signal_exit:bool) - { - print outfile, "Process finished event", name, exit_code != 0; - ++n; - - if ( n == 2 && try == 7 ) - terminate(); - } - -event bro_init() - { - local config_strings: table[string] of string = { - ["read_stderr"] = "1" - }; - - outfile = open("../out"); - Input::add_event([$source="ls ../mydir ../nonexistant ../nonexistant2 ../nonexistant3 |", - $reader=Input::READER_RAW, $name="input", - $fields=Val, $ev=line, $want_record=F, - $config=config_strings, $mode=Input::STREAM]); - } diff --git a/testing/btest/scripts/base/frameworks/input/raw/stderr.zeek b/testing/btest/scripts/base/frameworks/input/raw/stderr.zeek new file mode 100644 index 0000000000..a108ddbc4a --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/raw/stderr.zeek @@ -0,0 +1,68 @@ +# @TEST-EXEC: mkdir mydir && touch mydir/a && touch mydir/b && touch mydir/c +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +redef exit_only_after_terminate = T; + +type Val: record { + s: string; + is_stderr: bool; +}; + +global try = 0; +global n = 0; +global outfile: file; + +event line(description: Input::EventDescription, tpe: Input::Event, s: string, is_stderr: bool) + { + local line_output = fmt("%s line output (stderr=%s): ", tpe, is_stderr); + + if ( is_stderr ) + { + # work around localized error messages. and if some localization does not include the filename... well... 
that would be bad :) + if ( strstr(s, "nonexistant") > 0 ) + line_output += ""; + else + line_output += ""; + } + else + line_output += s; + + print outfile, line_output; + ++try; + + if ( n == 2 && try == 7 ) + terminate(); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, "End of Data event", name; + ++n; + + if ( n == 2 && try == 7 ) + terminate(); + } + +event InputRaw::process_finished(name: string, source:string, exit_code:count, signal_exit:bool) + { + print outfile, "Process finished event", name, exit_code != 0; + ++n; + + if ( n == 2 && try == 7 ) + terminate(); + } + +event zeek_init() + { + local config_strings: table[string] of string = { + ["read_stderr"] = "1" + }; + + outfile = open("../out"); + Input::add_event([$source="ls ../mydir ../nonexistant ../nonexistant2 ../nonexistant3 |", + $reader=Input::READER_RAW, $name="input", + $fields=Val, $ev=line, $want_record=F, + $config=config_strings, $mode=Input::STREAM]); + } diff --git a/testing/btest/scripts/base/frameworks/input/raw/streamraw.bro b/testing/btest/scripts/base/frameworks/input/raw/streamraw.bro deleted file mode 100644 index 331db7eeb8..0000000000 --- a/testing/btest/scripts/base/frameworks/input/raw/streamraw.bro +++ /dev/null @@ -1,64 +0,0 @@ -# @TEST-EXEC: cp input1.log input.log -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got1 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: cat input2.log >> input.log -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got3 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: cat input3.log >> input.log -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -redef exit_only_after_terminate = T; - -@TEST-START-FILE input1.log -sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF -@TEST-END-FILE - -@TEST-START-FILE input2.log -DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF -q3r3057fdf -@TEST-END-FILE - -@TEST-START-FILE input3.log -sdfs\d - -dfsdf -sdf -3rw43wRRERLlL#RWERERERE. 
-@TEST-END-FILE - -module A; - -type Val: record { - s: string; -}; - -global try: count; -global outfile: file; - -event line(description: Input::EventDescription, tpe: Input::Event, s: string) - { - print outfile, description$source, description$reader, description$mode, description$name; - print outfile, tpe; - print outfile, s; - - try = try + 1; - - if ( try == 1 ) - system("touch got1"); - else if ( try == 3 ) - system("touch got3"); - else if ( try == 8 ) - { - print outfile, "done"; - close(outfile); - Input::remove("input"); - terminate(); - } - } - -event bro_init() - { - outfile = open("../out"); - try = 0; - Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $want_record=F]); - } diff --git a/testing/btest/scripts/base/frameworks/input/raw/streamraw.zeek b/testing/btest/scripts/base/frameworks/input/raw/streamraw.zeek new file mode 100644 index 0000000000..741b3f92d6 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/raw/streamraw.zeek @@ -0,0 +1,64 @@ +# @TEST-EXEC: cp input1.log input.log +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got1 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: cat input2.log >> input.log +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got3 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: cat input3.log >> input.log +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +redef exit_only_after_terminate = T; + +@TEST-START-FILE input1.log +sdfkh:KH;fdkncv;ISEUp34:Fkdj;YVpIODhfDF +@TEST-END-FILE + +@TEST-START-FILE input2.log +DSF"DFKJ"SDFKLh304yrsdkfj@#(*U$34jfDJup3UF +q3r3057fdf +@TEST-END-FILE + +@TEST-START-FILE input3.log +sdfs\d + +dfsdf +sdf +3rw43wRRERLlL#RWERERERE. 
+@TEST-END-FILE + +module A; + +type Val: record { + s: string; +}; + +global try: count; +global outfile: file; + +event line(description: Input::EventDescription, tpe: Input::Event, s: string) + { + print outfile, description$source, description$reader, description$mode, description$name; + print outfile, tpe; + print outfile, s; + + try = try + 1; + + if ( try == 1 ) + system("touch got1"); + else if ( try == 3 ) + system("touch got3"); + else if ( try == 8 ) + { + print outfile, "done"; + close(outfile); + Input::remove("input"); + terminate(); + } + } + +event zeek_init() + { + outfile = open("../out"); + try = 0; + Input::add_event([$source="../input.log", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $want_record=F]); + } diff --git a/testing/btest/scripts/base/frameworks/input/repeat.bro b/testing/btest/scripts/base/frameworks/input/repeat.bro deleted file mode 100644 index 5093e30351..0000000000 --- a/testing/btest/scripts/base/frameworks/input/repeat.bro +++ /dev/null @@ -1,54 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields i b -#types int bool -1 T -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; -global try: count; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - b: bool; -}; - -global destination: table[int] of bool = table(); - -const one_to_32: vector of count = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}; - -event bro_init() - { - try = 0; - outfile = open("../out"); - for ( i in one_to_32 ) - Input::add_table([$source="../input.log", $name=fmt("input%d", i), $idx=Idx, $val=Val, $destination=destination, $want_record=F]); - } - -event Input::end_of_data(name: string, source: string) - { - print outfile, name; - print outfile, source; - print outfile, destination; - Input::remove(name); - try = try + 1; - if ( try == 32 ) - { - close(outfile); - terminate(); - } - } diff --git a/testing/btest/scripts/base/frameworks/input/repeat.zeek b/testing/btest/scripts/base/frameworks/input/repeat.zeek new file mode 100644 index 0000000000..db9a6018d0 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/repeat.zeek @@ -0,0 +1,54 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields i b +#types int bool +1 T +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; +global try: count; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; +}; + +global destination: table[int] of bool = table(); + +const one_to_32: vector of count = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}; + +event zeek_init() + { + try = 0; + outfile = open("../out"); + for ( i in one_to_32 ) + Input::add_table([$source="../input.log", $name=fmt("input%d", i), $idx=Idx, $val=Val, $destination=destination, $want_record=F]); + } + +event Input::end_of_data(name: string, source: string) + { + print outfile, name; + print outfile, source; + print outfile, destination; + Input::remove(name); + try = try + 1; + if ( try == 32 ) + { + close(outfile); + 
terminate(); + } + } diff --git a/testing/btest/scripts/base/frameworks/input/reread.bro b/testing/btest/scripts/base/frameworks/input/reread.bro deleted file mode 100644 index 53cb2a91a8..0000000000 --- a/testing/btest/scripts/base/frameworks/input/reread.bro +++ /dev/null @@ -1,145 +0,0 @@ -# @TEST-EXEC: mv input1.log input.log -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got1 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: mv input2.log input.log -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got2 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: mv input3.log input.log -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got3 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: mv input4.log input.log -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got4 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: mv input5.log input.log -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input1.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc ss se vc ve f -#types bool int enum count port subnet addr double time interval string table table table vector vector func -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -@TEST-END-FILE -@TEST-START-FILE input2.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc ss se vc ve f -#types bool int enum count port subnet addr double time interval string table table table vector vector func -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -T -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -@TEST-END-FILE -@TEST-START-FILE input3.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc ss se vc ve f -#types bool int enum count port subnet addr double time interval string table table table vector vector func -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -F -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -@TEST-END-FILE -@TEST-START-FILE input4.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc ss se vc ve f -#types bool int enum count port subnet addr double time interval string table table table vector vector func -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -F -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -F -44 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} 
-F -45 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -0 -46 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -F -47 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -F -48 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -@TEST-END-FILE -@TEST-START-FILE input5.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc ss se vc ve f -#types bool int enum count port subnet addr double time interval string table table table vector vector func -F -48 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -@TEST-END-FILE - -@load base/protocols/ssh - -redef exit_only_after_terminate = T; -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - b: bool; - e: Log::ID; - c: count; - p: port; - sn: subnet; - a: addr; - d: double; - t: time; - iv: interval; - s: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of int; - ve: vector of int; -}; - -global servers: table[int] of Val = table(); - -global outfile: file; - -global try: count; - -event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) - { - print outfile, "============EVENT============"; - print outfile, "Description"; - print outfile, description; - print outfile, "Type"; - print outfile, tpe; - print outfile, "Left"; - print outfile, left; - print outfile, "Right"; - print outfile, right; - } - -event bro_init() - { - outfile = open("../out"); - try = 0; - # first read in the old stuff into the table... 
- Input::add_table([$source="../input.log", $mode=Input::REREAD, $name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line, - $pred(typ: Input::Event, left: Idx, right: Val) = { - print outfile, "============PREDICATE============"; - print outfile, typ; - print outfile, left; - print outfile, right; - return T; - } - ]); - } - - -event Input::end_of_data(name: string, source: string) - { - print outfile, "==========SERVERS============"; - print outfile, servers; - - try = try + 1; - - if ( try == 1 ) - system("touch got1"); - else if ( try == 2 ) - system("touch got2"); - else if ( try == 3 ) - system("touch got3"); - else if ( try == 4 ) - system("touch got4"); - else if ( try == 5 ) - { - print outfile, "done"; - close(outfile); - Input::remove("input"); - terminate(); - } - } diff --git a/testing/btest/scripts/base/frameworks/input/reread.zeek b/testing/btest/scripts/base/frameworks/input/reread.zeek new file mode 100644 index 0000000000..ca98c9f214 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/reread.zeek @@ -0,0 +1,145 @@ +# @TEST-EXEC: mv input1.log input.log +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got1 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: mv input2.log input.log +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got2 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: mv input3.log input.log +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got3 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: mv input4.log input.log +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got4 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: mv input5.log input.log +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input1.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE +@TEST-START-FILE input2.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +T -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE +@TEST-START-FILE input3.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +F -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE +@TEST-START-FILE input4.log +#separator \x09 +#path ssh +#fields b i e c p sn 
a d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +F -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +F -44 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +F -45 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +0 -46 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +F -47 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +F -48 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE +@TEST-START-FILE input5.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +F -48 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE + +@load base/protocols/ssh + +redef exit_only_after_terminate = T; +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of int; + ve: vector of int; +}; + +global servers: table[int] of Val = table(); + +global outfile: file; + +global try: count; + +event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) + { + print outfile, "============EVENT============"; + print outfile, "Description"; + print outfile, description; + print outfile, "Type"; + print outfile, tpe; + print outfile, "Left"; + print outfile, left; + print outfile, "Right"; + print outfile, right; + } + +event zeek_init() + { + outfile = open("../out"); + try = 0; + # first read in the old stuff into the table... 
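+	# REREAD mode re-reads the file whenever it changes on disk; the event and
+	# predicate below then fire once per new, changed, or removed entry, and a
+	# predicate returning F drops that entry from the destination table.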
+ Input::add_table([$source="../input.log", $mode=Input::REREAD, $name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line, + $pred(typ: Input::Event, left: Idx, right: Val) = { + print outfile, "============PREDICATE============"; + print outfile, typ; + print outfile, left; + print outfile, right; + return T; + } + ]); + } + + +event Input::end_of_data(name: string, source: string) + { + print outfile, "==========SERVERS============"; + print outfile, servers; + + try = try + 1; + + if ( try == 1 ) + system("touch got1"); + else if ( try == 2 ) + system("touch got2"); + else if ( try == 3 ) + system("touch got3"); + else if ( try == 4 ) + system("touch got4"); + else if ( try == 5 ) + { + print outfile, "done"; + close(outfile); + Input::remove("input"); + terminate(); + } + } diff --git a/testing/btest/scripts/base/frameworks/input/set.bro b/testing/btest/scripts/base/frameworks/input/set.bro deleted file mode 100644 index d79e9ae17a..0000000000 --- a/testing/btest/scripts/base/frameworks/input/set.bro +++ /dev/null @@ -1,43 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out - -@TEST-START-FILE input.log -#separator \x09 -#fields ip -#types addr -192.168.17.1 -192.168.17.2 -192.168.17.7 -192.168.17.14 -192.168.17.42 -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - ip: addr; -}; - -global servers: set[addr] = set(); - -event bro_init() - { - outfile = open("../out"); - # first read in the old stuff into the table... - Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $destination=servers]); - } - -event Input::end_of_data(name: string, source:string) - { - print outfile, servers; - Input::remove("ssh"); - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/set.zeek b/testing/btest/scripts/base/frameworks/input/set.zeek new file mode 100644 index 0000000000..0d1021adae --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/set.zeek @@ -0,0 +1,43 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#fields ip +#types addr +192.168.17.1 +192.168.17.2 +192.168.17.7 +192.168.17.14 +192.168.17.42 +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + ip: addr; +}; + +global servers: set[addr] = set(); + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... 
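+	# No $val is given here: only the Idx record is read per line, and the
+	# addresses become members of the destination set.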
+ Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $destination=servers]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, servers; + Input::remove("ssh"); + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/setseparator.bro b/testing/btest/scripts/base/frameworks/input/setseparator.bro deleted file mode 100644 index 39a785236a..0000000000 --- a/testing/btest/scripts/base/frameworks/input/setseparator.bro +++ /dev/null @@ -1,43 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out - -@TEST-START-FILE input.log -#separator \x09 -#fields i s ss -1 a|b|c|d|e|f 1|2|3|4|5|6 -@TEST-END-FILE - -redef InputAscii::set_separator = "|"; - -redef exit_only_after_terminate = T; - -global outfile: file; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - s: set[string]; - ss:vector of count; -}; - -global servers: table[int] of Val = table(); - -event bro_init() - { - outfile = open("../out"); - # first read in the old stuff into the table... - Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); - } - -event Input::end_of_data(name: string, source:string) - { - print outfile, servers; - Input::remove("ssh"); - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/setseparator.zeek b/testing/btest/scripts/base/frameworks/input/setseparator.zeek new file mode 100644 index 0000000000..fc876e8a6d --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/setseparator.zeek @@ -0,0 +1,43 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#fields i s ss +1 a|b|c|d|e|f 1|2|3|4|5|6 +@TEST-END-FILE + +redef InputAscii::set_separator = "|"; + +redef exit_only_after_terminate = T; + +global outfile: file; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + s: set[string]; + ss:vector of count; +}; + +global servers: table[int] of Val = table(); + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... 
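+	# The set_separator redef above makes the ASCII reader split set and
+	# vector columns on "|" instead of the default ",".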
+ Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, servers; + Input::remove("ssh"); + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/setspecialcases.bro b/testing/btest/scripts/base/frameworks/input/setspecialcases.bro deleted file mode 100644 index 40a708f772..0000000000 --- a/testing/btest/scripts/base/frameworks/input/setspecialcases.bro +++ /dev/null @@ -1,47 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out - -@TEST-START-FILE input.log -#separator \x09 -#fields i s ss -1 testing\x2ctesting\x2ctesting\x2c testing\x2ctesting\x2ctesting\x2c -2 testing,,testing testing,,testing -3 ,testing ,testing -4 testing, testing, -5 ,,, ,,, -6 -@TEST-END-FILE - - -redef exit_only_after_terminate = T; - -global outfile: file; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - s: set[string]; - s: vector of string; -}; - -global servers: table[int] of Val = table(); - -event bro_init() - { - outfile = open("../out"); - # first read in the old stuff into the table... - Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); - } - -event Input::end_of_data(name: string, source:string) - { - print outfile, servers; - Input::remove("ssh"); - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/setspecialcases.zeek b/testing/btest/scripts/base/frameworks/input/setspecialcases.zeek new file mode 100644 index 0000000000..b68e4b53d0 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/setspecialcases.zeek @@ -0,0 +1,47 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#fields i s ss +1 testing\x2ctesting\x2ctesting\x2c testing\x2ctesting\x2ctesting\x2c +2 testing,,testing testing,,testing +3 ,testing ,testing +4 testing, testing, +5 ,,, ,,, +6 +@TEST-END-FILE + + +redef exit_only_after_terminate = T; + +global outfile: file; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + s: set[string]; + s: vector of string; +}; + +global servers: table[int] of Val = table(); + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... 
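+	# Exercises parsing corner cases: escaped separators (\x2c), empty
+	# elements, trailing separators, and completely empty fields.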
+ Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, servers; + Input::remove("ssh"); + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/sqlite/basic.bro b/testing/btest/scripts/base/frameworks/input/sqlite/basic.bro deleted file mode 100644 index eb1411970b..0000000000 --- a/testing/btest/scripts/base/frameworks/input/sqlite/basic.bro +++ /dev/null @@ -1,104 +0,0 @@ -# -# @TEST-GROUP: sqlite -# -# @TEST-REQUIRES: which sqlite3 -# -# @TEST-EXEC: cat conn.sql | sqlite3 conn.sqlite -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE conn.sql -PRAGMA foreign_keys=OFF; -BEGIN TRANSACTION; -CREATE TABLE conn ( -'ts' double precision, -'uid' text, -'id.orig_h' text, -'id.orig_p' integer, -'id.resp_h' text, -'id.resp_p' integer, -'proto' text, -'service' text, -'duration' double precision, -'orig_bytes' integer, -'resp_bytes' integer, -'conn_state' text, -'local_orig' boolean, -'local_resp' boolean, -'missed_bytes' integer, -'history' text, -'orig_pkts' integer, -'orig_ip_bytes' integer, -'resp_pkts' integer, -'resp_ip_bytes' integer, -'tunnel_parents' text -); -INSERT INTO "conn" VALUES(1.30047516709653496744e+09,'dnGM1AdIVyh','141.142.220.202',5353,'224.0.0.251',5353,'udp','dns',NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,73,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516709701204296e+09,'fv9q7WjEgp1','fe80::217:f2ff:fed7:cf65',5353,'ff02::fb',5353,'udp',NULL,NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,199,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516709981608392e+09,'0Ox0H56yl88','141.142.220.50',5353,'224.0.0.251',5353,'udp',NULL,NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,179,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516885389900212e+09,'rvmSc7rDQub','141.142.220.118',43927,'141.142.2.2',53,'udp','dns',4.351139068603515625e-04,38,89,'SF',NULL,NULL,0,'Dd',1,66,1,117,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516885437798497e+09,'ogkztouSArh','141.142.220.118',37676,'141.142.2.2',53,'udp','dns',4.20093536376953125e-04,52,99,'SF',NULL,NULL,0,'Dd',1,80,1,127,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516885483694076e+09,'0UIDdXFt7Tb','141.142.220.118',40526,'141.142.2.2',53,'udp','dns',3.9196014404296875e-04,38,183,'SF',NULL,NULL,0,'Dd',1,66,1,211,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516885795593258e+09,'WqFYV51UIq7','141.142.220.118',32902,'141.142.2.2',53,'udp','dns',3.17096710205078125e-04,38,89,'SF',NULL,NULL,0,'Dd',1,66,1,117,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516885830593104e+09,'ylcqZpbz6K2','141.142.220.118',59816,'141.142.2.2',53,'udp','dns',3.430843353271484375e-04,52,99,'SF',NULL,NULL,0,'Dd',1,80,1,127,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516885871291159e+09,'blhldTzA7Y6','141.142.220.118',59714,'141.142.2.2',53,'udp','dns',3.750324249267578125e-04,38,183,'SF',NULL,NULL,0,'Dd',1,66,1,211,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516889164400098e+09,'Sc34cGJo3Kg','141.142.220.118',58206,'141.142.2.2',53,'udp','dns',3.39031219482421875e-04,38,89,'SF',NULL,NULL,0,'Dd',1,66,1,117,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516889203691487e+09,'RzvFrfXSRfk','141.142.220.118',38911,'141.142.2.2',53,'udp','dns',3.349781036376953125e-04,52,99,'SF',NULL,NULL,0,'Dd',1,80,1,127,'(empty)'); -INSERT INTO "conn" 
VALUES(1.30047516889241409298e+09,'GaaFI58mpbe','141.142.220.118',59746,'141.142.2.2',53,'udp','dns',4.208087921142578125e-04,38,183,'SF',NULL,NULL,0,'Dd',1,66,1,211,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516889398789407e+09,'tr7M6tvAIQa','141.142.220.118',45000,'141.142.2.2',53,'udp','dns',3.840923309326171875e-04,38,89,'SF',NULL,NULL,0,'Dd',1,66,1,117,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516889442205426e+09,'gV0TcSc2pb4','141.142.220.118',48479,'141.142.2.2',53,'udp','dns',3.168582916259765625e-04,52,99,'SF',NULL,NULL,0,'Dd',1,80,1,127,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516889478707315e+09,'MOG0z4PYOhk','141.142.220.118',48128,'141.142.2.2',53,'udp','dns',4.22954559326171875e-04,38,183,'SF',NULL,NULL,0,'Dd',1,66,1,211,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516890174889565e+09,'PlehgEduUyj','141.142.220.118',56056,'141.142.2.2',53,'udp','dns',4.022121429443359375e-04,36,131,'SF',NULL,NULL,0,'Dd',1,64,1,159,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516890219497676e+09,'4eZgk09f2Re','141.142.220.118',55092,'141.142.2.2',53,'udp','dns',3.740787506103515625e-04,36,198,'SF',NULL,NULL,0,'Dd',1,64,1,226,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516989943790432e+09,'3xwJPc7mQ9a','141.142.220.44',5353,'224.0.0.251',5353,'udp','dns',NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,85,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047517086238408089e+09,'yxTcvvTKWQ4','141.142.220.226',137,'141.142.220.255',137,'udp','dns',2.61301684379577636718e+00,350,0,'S0',NULL,NULL,0,'D',7,546,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047517167537188525e+09,'8bLW3XNfhCj','fe80::3074:17d5:2052:c324',65373,'ff02::1:3',5355,'udp','dns',1.00096225738525390625e-01,66,0,'S0',NULL,NULL,0,'D',2,162,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047517167708110807e+09,'rqjhiiRPjEe','141.142.220.226',55131,'224.0.0.252',5355,'udp','dns',1.00020885467529296875e-01,66,0,'S0',NULL,NULL,0,'D',2,122,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047517311674904827e+09,'hTPyfL3QSGa','fe80::3074:17d5:2052:c324',54213,'ff02::1:3',5355,'udp','dns',9.980106353759765625e-02,66,0,'S0',NULL,NULL,0,'D',2,162,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047517311736202235e+09,'EruUQ9AJRj4','141.142.220.226',55671,'224.0.0.252',5355,'udp','dns',9.98489856719970703125e-02,66,0,'S0',NULL,NULL,0,'D',2,122,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047517315367889406e+09,'sw1bKJOMjuk','141.142.220.238',56641,'141.142.220.255',137,'udp','dns',NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,78,0,0,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516872400689127e+09,'NPHCuyWykE7','141.142.220.118',48649,'208.80.152.118',80,'tcp','http',1.19904994964599609375e-01,525,232,'S1',NULL,NULL,0,'ShADad',4,741,3,396,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516889293599126e+09,'VapPqRhPgJ4','141.142.220.118',50000,'208.80.152.3',80,'tcp','http',2.29603052139282226562e-01,1148,734,'S1',NULL,NULL,0,'ShADad',6,1468,4,950,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516885916304588e+09,'3607hh8C3bc','141.142.220.118',49998,'208.80.152.3',80,'tcp','http',2.15893030166625976562e-01,1130,734,'S1',NULL,NULL,0,'ShADad',6,1450,4,950,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516885530495647e+09,'tgYMrIvzDSg','141.142.220.118',49996,'208.80.152.3',80,'tcp','http',2.1850109100341796875e-01,1171,733,'S1',NULL,NULL,0,'ShADad',6,1491,4,949,'(empty)'); -INSERT INTO "conn" 
VALUES(1.30047516889526700977e+09,'xQsjPwNBrXd','141.142.220.118',50001,'208.80.152.3',80,'tcp','http',2.27283954620361328125e-01,1178,734,'S1',NULL,NULL,0,'ShADad',6,1498,4,950,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516890263509747e+09,'Ap3GzMI1vM9','141.142.220.118',35642,'208.80.152.2',80,'tcp','http',1.200408935546875e-01,534,412,'S1',NULL,NULL,0,'ShADad',4,750,3,576,'(empty)'); -INSERT INTO "conn" VALUES(1300475168.85533,'FTVcgrmNy52','141.142.220.118',49997,'208.80.152.3',80,'tcp','http',2.19720125198364257812e-01,1125,734,'S1',NULL,NULL,0,'ShADad',6,1445,4,950,'(empty)'); -INSERT INTO "conn" VALUES(1.30047516978033089643e+09,'1xFx4PGdeq5','141.142.220.235',6705,'173.192.163.128',80,'tcp',NULL,NULL,NULL,NULL,'OTH',NULL,NULL,0,'h',0,0,1,48,'(empty)'); -INSERT INTO "conn" VALUES(1.3004751686520030498e+09,'WIG1ud65z22','141.142.220.118',35634,'208.80.152.2',80,'tcp',NULL,6.1328887939453125e-02,463,350,'OTH',NULL,NULL,0,'DdA',2,567,1,402,'(empty)'); -INSERT INTO "conn" VALUES(1.3004751688929131031e+09,'o2gAkl4V7sa','141.142.220.118',49999,'208.80.152.3',80,'tcp','http',2.20960855484008789062e-01,1137,733,'S1',NULL,NULL,0,'ShADad',6,1457,4,949,'(empty)'); -COMMIT; -@TEST-END-FILE - -@load base/protocols/conn - -redef exit_only_after_terminate = T; -redef Input::accept_unsupported_types = T; - -global outfile: file; - -module A; - -event line(description: Input::EventDescription, tpe: Input::Event, r: Conn::Info) - { - print outfile, r; - print outfile, |r$tunnel_parents|; # to make sure I got empty right - } - -event bro_init() - { - local config_strings: table[string] of string = { - ["query"] = "select * from conn;", - }; - - outfile = open("../out"); - Input::add_event([$source="../conn", $name="conn", $fields=Conn::Info, $ev=line, $want_record=T, $reader=Input::READER_SQLITE, $config=config_strings]); - } - -event Input::end_of_data(name: string, source:string) - { - print outfile, "End of data"; - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/sqlite/basic.zeek b/testing/btest/scripts/base/frameworks/input/sqlite/basic.zeek new file mode 100644 index 0000000000..d7c66f67ee --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/sqlite/basic.zeek @@ -0,0 +1,104 @@ +# +# @TEST-GROUP: sqlite +# +# @TEST-REQUIRES: which sqlite3 +# +# @TEST-EXEC: cat conn.sql | sqlite3 conn.sqlite +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE conn.sql +PRAGMA foreign_keys=OFF; +BEGIN TRANSACTION; +CREATE TABLE conn ( +'ts' double precision, +'uid' text, +'id.orig_h' text, +'id.orig_p' integer, +'id.resp_h' text, +'id.resp_p' integer, +'proto' text, +'service' text, +'duration' double precision, +'orig_bytes' integer, +'resp_bytes' integer, +'conn_state' text, +'local_orig' boolean, +'local_resp' boolean, +'missed_bytes' integer, +'history' text, +'orig_pkts' integer, +'orig_ip_bytes' integer, +'resp_pkts' integer, +'resp_ip_bytes' integer, +'tunnel_parents' text +); +INSERT INTO "conn" VALUES(1.30047516709653496744e+09,'dnGM1AdIVyh','141.142.220.202',5353,'224.0.0.251',5353,'udp','dns',NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,73,0,0,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516709701204296e+09,'fv9q7WjEgp1','fe80::217:f2ff:fed7:cf65',5353,'ff02::fb',5353,'udp',NULL,NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,199,0,0,'(empty)'); +INSERT INTO "conn" 
VALUES(1.30047516709981608392e+09,'0Ox0H56yl88','141.142.220.50',5353,'224.0.0.251',5353,'udp',NULL,NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,179,0,0,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516885389900212e+09,'rvmSc7rDQub','141.142.220.118',43927,'141.142.2.2',53,'udp','dns',4.351139068603515625e-04,38,89,'SF',NULL,NULL,0,'Dd',1,66,1,117,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516885437798497e+09,'ogkztouSArh','141.142.220.118',37676,'141.142.2.2',53,'udp','dns',4.20093536376953125e-04,52,99,'SF',NULL,NULL,0,'Dd',1,80,1,127,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516885483694076e+09,'0UIDdXFt7Tb','141.142.220.118',40526,'141.142.2.2',53,'udp','dns',3.9196014404296875e-04,38,183,'SF',NULL,NULL,0,'Dd',1,66,1,211,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516885795593258e+09,'WqFYV51UIq7','141.142.220.118',32902,'141.142.2.2',53,'udp','dns',3.17096710205078125e-04,38,89,'SF',NULL,NULL,0,'Dd',1,66,1,117,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516885830593104e+09,'ylcqZpbz6K2','141.142.220.118',59816,'141.142.2.2',53,'udp','dns',3.430843353271484375e-04,52,99,'SF',NULL,NULL,0,'Dd',1,80,1,127,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516885871291159e+09,'blhldTzA7Y6','141.142.220.118',59714,'141.142.2.2',53,'udp','dns',3.750324249267578125e-04,38,183,'SF',NULL,NULL,0,'Dd',1,66,1,211,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516889164400098e+09,'Sc34cGJo3Kg','141.142.220.118',58206,'141.142.2.2',53,'udp','dns',3.39031219482421875e-04,38,89,'SF',NULL,NULL,0,'Dd',1,66,1,117,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516889203691487e+09,'RzvFrfXSRfk','141.142.220.118',38911,'141.142.2.2',53,'udp','dns',3.349781036376953125e-04,52,99,'SF',NULL,NULL,0,'Dd',1,80,1,127,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516889241409298e+09,'GaaFI58mpbe','141.142.220.118',59746,'141.142.2.2',53,'udp','dns',4.208087921142578125e-04,38,183,'SF',NULL,NULL,0,'Dd',1,66,1,211,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516889398789407e+09,'tr7M6tvAIQa','141.142.220.118',45000,'141.142.2.2',53,'udp','dns',3.840923309326171875e-04,38,89,'SF',NULL,NULL,0,'Dd',1,66,1,117,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516889442205426e+09,'gV0TcSc2pb4','141.142.220.118',48479,'141.142.2.2',53,'udp','dns',3.168582916259765625e-04,52,99,'SF',NULL,NULL,0,'Dd',1,80,1,127,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516889478707315e+09,'MOG0z4PYOhk','141.142.220.118',48128,'141.142.2.2',53,'udp','dns',4.22954559326171875e-04,38,183,'SF',NULL,NULL,0,'Dd',1,66,1,211,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516890174889565e+09,'PlehgEduUyj','141.142.220.118',56056,'141.142.2.2',53,'udp','dns',4.022121429443359375e-04,36,131,'SF',NULL,NULL,0,'Dd',1,64,1,159,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516890219497676e+09,'4eZgk09f2Re','141.142.220.118',55092,'141.142.2.2',53,'udp','dns',3.740787506103515625e-04,36,198,'SF',NULL,NULL,0,'Dd',1,64,1,226,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516989943790432e+09,'3xwJPc7mQ9a','141.142.220.44',5353,'224.0.0.251',5353,'udp','dns',NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,85,0,0,'(empty)'); +INSERT INTO "conn" VALUES(1.30047517086238408089e+09,'yxTcvvTKWQ4','141.142.220.226',137,'141.142.220.255',137,'udp','dns',2.61301684379577636718e+00,350,0,'S0',NULL,NULL,0,'D',7,546,0,0,'(empty)'); +INSERT INTO "conn" VALUES(1.30047517167537188525e+09,'8bLW3XNfhCj','fe80::3074:17d5:2052:c324',65373,'ff02::1:3',5355,'udp','dns',1.00096225738525390625e-01,66,0,'S0',NULL,NULL,0,'D',2,162,0,0,'(empty)'); +INSERT INTO "conn" 
VALUES(1.30047517167708110807e+09,'rqjhiiRPjEe','141.142.220.226',55131,'224.0.0.252',5355,'udp','dns',1.00020885467529296875e-01,66,0,'S0',NULL,NULL,0,'D',2,122,0,0,'(empty)'); +INSERT INTO "conn" VALUES(1.30047517311674904827e+09,'hTPyfL3QSGa','fe80::3074:17d5:2052:c324',54213,'ff02::1:3',5355,'udp','dns',9.980106353759765625e-02,66,0,'S0',NULL,NULL,0,'D',2,162,0,0,'(empty)'); +INSERT INTO "conn" VALUES(1.30047517311736202235e+09,'EruUQ9AJRj4','141.142.220.226',55671,'224.0.0.252',5355,'udp','dns',9.98489856719970703125e-02,66,0,'S0',NULL,NULL,0,'D',2,122,0,0,'(empty)'); +INSERT INTO "conn" VALUES(1.30047517315367889406e+09,'sw1bKJOMjuk','141.142.220.238',56641,'141.142.220.255',137,'udp','dns',NULL,NULL,NULL,'S0',NULL,NULL,0,'D',1,78,0,0,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516872400689127e+09,'NPHCuyWykE7','141.142.220.118',48649,'208.80.152.118',80,'tcp','http',1.19904994964599609375e-01,525,232,'S1',NULL,NULL,0,'ShADad',4,741,3,396,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516889293599126e+09,'VapPqRhPgJ4','141.142.220.118',50000,'208.80.152.3',80,'tcp','http',2.29603052139282226562e-01,1148,734,'S1',NULL,NULL,0,'ShADad',6,1468,4,950,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516885916304588e+09,'3607hh8C3bc','141.142.220.118',49998,'208.80.152.3',80,'tcp','http',2.15893030166625976562e-01,1130,734,'S1',NULL,NULL,0,'ShADad',6,1450,4,950,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516885530495647e+09,'tgYMrIvzDSg','141.142.220.118',49996,'208.80.152.3',80,'tcp','http',2.1850109100341796875e-01,1171,733,'S1',NULL,NULL,0,'ShADad',6,1491,4,949,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516889526700977e+09,'xQsjPwNBrXd','141.142.220.118',50001,'208.80.152.3',80,'tcp','http',2.27283954620361328125e-01,1178,734,'S1',NULL,NULL,0,'ShADad',6,1498,4,950,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516890263509747e+09,'Ap3GzMI1vM9','141.142.220.118',35642,'208.80.152.2',80,'tcp','http',1.200408935546875e-01,534,412,'S1',NULL,NULL,0,'ShADad',4,750,3,576,'(empty)'); +INSERT INTO "conn" VALUES(1300475168.85533,'FTVcgrmNy52','141.142.220.118',49997,'208.80.152.3',80,'tcp','http',2.19720125198364257812e-01,1125,734,'S1',NULL,NULL,0,'ShADad',6,1445,4,950,'(empty)'); +INSERT INTO "conn" VALUES(1.30047516978033089643e+09,'1xFx4PGdeq5','141.142.220.235',6705,'173.192.163.128',80,'tcp',NULL,NULL,NULL,NULL,'OTH',NULL,NULL,0,'h',0,0,1,48,'(empty)'); +INSERT INTO "conn" VALUES(1.3004751686520030498e+09,'WIG1ud65z22','141.142.220.118',35634,'208.80.152.2',80,'tcp',NULL,6.1328887939453125e-02,463,350,'OTH',NULL,NULL,0,'DdA',2,567,1,402,'(empty)'); +INSERT INTO "conn" VALUES(1.3004751688929131031e+09,'o2gAkl4V7sa','141.142.220.118',49999,'208.80.152.3',80,'tcp','http',2.20960855484008789062e-01,1137,733,'S1',NULL,NULL,0,'ShADad',6,1457,4,949,'(empty)'); +COMMIT; +@TEST-END-FILE + +@load base/protocols/conn + +redef exit_only_after_terminate = T; +redef Input::accept_unsupported_types = T; + +global outfile: file; + +module A; + +event line(description: Input::EventDescription, tpe: Input::Event, r: Conn::Info) + { + print outfile, r; + print outfile, |r$tunnel_parents|; # to make sure I got empty right + } + +event zeek_init() + { + local config_strings: table[string] of string = { + ["query"] = "select * from conn;", + }; + + outfile = open("../out"); + Input::add_event([$source="../conn", $name="conn", $fields=Conn::Info, $ev=line, $want_record=T, $reader=Input::READER_SQLITE, $config=config_strings]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, "End 
of data"; + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/sqlite/error.bro b/testing/btest/scripts/base/frameworks/input/sqlite/error.bro deleted file mode 100644 index 08938e6df5..0000000000 --- a/testing/btest/scripts/base/frameworks/input/sqlite/error.bro +++ /dev/null @@ -1,98 +0,0 @@ -# @TEST-REQUIRES: which sqlite3 -# -# @TEST-EXEC: cat ssh.sql | sqlite3 ssh.sqlite -# -# @TEST-GROUP: sqlite -# -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: sed '1d' .stderr | sort > cmpfile -# @TEST-EXEC: btest-diff cmpfile - -@TEST-START-FILE ssh.sql -PRAGMA foreign_keys=OFF; -BEGIN TRANSACTION; -CREATE TABLE ssh ( -'b' boolean, -'i' integer, -'e' text, -'c' integer, -'p' integer, -'sn' text, -'a' text, -'d' double precision, -'t' double precision, -'iv' double precision, -'s' text, -'sc' text, -'ss' text, -'se' text, -'vc' text, -'vs' text, -'vn' text -); -INSERT INTO "ssh" VALUES(1,-42,'SSH::LOG',21,123,'10.0.0.0/24','1.2.3.4',3.14,1.35837684939385390286e+09,100.0,'hurz','2,4,1,3','CC,AA,BB','(empty)','10,20,30','', null); -COMMIT; -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - b: bool; - i: int; - e: Log::ID; - c: count; - p: port; - sn: subnet; - a: addr; - d: double; - t: time; - iv: interval; - s: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of count; - vs: vector of string; - vh: vector of string &optional; - } &log; -} - - -global outfile: file; - -event line(description: Input::EventDescription, tpe: Input::Event, p: SSH::Log) - { - print outfile, p; - - print outfile, |p$se|; - print outfile, |p$vs|; - } - -event term_me() - { - terminate(); - } - -event bro_init() - { - local config_strings: table[string] of string = { - ["query"] = "select * from ssh;", - }; - - local config_strings2: table[string] of string = { - ["query"] = "select b, g, h from ssh;", - }; - - outfile = open("../out"); - Input::add_event([$source="../ssh", $name="ssh", $fields=SSH::Log, $ev=line, $reader=Input::READER_SQLITE, $want_record=T, $config=config_strings]); - Input::add_event([$source="../ssh", $name="ssh2", $fields=SSH::Log, $ev=line, $reader=Input::READER_SQLITE, $want_record=T, $config=config_strings2]); - - schedule +3secs { term_me() }; - - } diff --git a/testing/btest/scripts/base/frameworks/input/sqlite/error.zeek b/testing/btest/scripts/base/frameworks/input/sqlite/error.zeek new file mode 100644 index 0000000000..b6c2b46bbb --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/sqlite/error.zeek @@ -0,0 +1,98 @@ +# @TEST-REQUIRES: which sqlite3 +# +# @TEST-EXEC: cat ssh.sql | sqlite3 ssh.sqlite +# +# @TEST-GROUP: sqlite +# +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: sed '1d' .stderr | sort > cmpfile +# @TEST-EXEC: btest-diff cmpfile + +@TEST-START-FILE ssh.sql +PRAGMA foreign_keys=OFF; +BEGIN TRANSACTION; +CREATE TABLE ssh ( +'b' boolean, +'i' integer, +'e' text, +'c' integer, +'p' integer, +'sn' text, +'a' text, +'d' double precision, +'t' double precision, +'iv' double precision, +'s' text, +'sc' text, +'ss' text, +'se' text, +'vc' text, +'vs' text, +'vn' text +); +INSERT INTO "ssh" VALUES(1,-42,'SSH::LOG',21,123,'10.0.0.0/24','1.2.3.4',3.14,1.35837684939385390286e+09,100.0,'hurz','2,4,1,3','CC,AA,BB','(empty)','10,20,30','', null); +COMMIT; +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +module SSH; + +export { + 
redef enum Log::ID += { LOG }; + + type Log: record { + b: bool; + i: int; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of count; + vs: vector of string; + vh: vector of string &optional; + } &log; +} + + +global outfile: file; + +event line(description: Input::EventDescription, tpe: Input::Event, p: SSH::Log) + { + print outfile, p; + + print outfile, |p$se|; + print outfile, |p$vs|; + } + +event term_me() + { + terminate(); + } + +event zeek_init() + { + local config_strings: table[string] of string = { + ["query"] = "select * from ssh;", + }; + + local config_strings2: table[string] of string = { + ["query"] = "select b, g, h from ssh;", + }; + + outfile = open("../out"); + Input::add_event([$source="../ssh", $name="ssh", $fields=SSH::Log, $ev=line, $reader=Input::READER_SQLITE, $want_record=T, $config=config_strings]); + Input::add_event([$source="../ssh", $name="ssh2", $fields=SSH::Log, $ev=line, $reader=Input::READER_SQLITE, $want_record=T, $config=config_strings2]); + + schedule +3secs { term_me() }; + + } diff --git a/testing/btest/scripts/base/frameworks/input/sqlite/port.bro b/testing/btest/scripts/base/frameworks/input/sqlite/port.bro deleted file mode 100644 index 6fc18139fe..0000000000 --- a/testing/btest/scripts/base/frameworks/input/sqlite/port.bro +++ /dev/null @@ -1,53 +0,0 @@ -# -# @TEST-GROUP: sqlite -# -# @TEST-REQUIRES: which sqlite3 -# -# @TEST-EXEC: cat port.sql | sqlite3 port.sqlite -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE port.sql -PRAGMA foreign_keys=OFF; -BEGIN TRANSACTION; -CREATE TABLE port ( -'port' integer, -'proto' text -); -INSERT INTO "port" VALUES(5353,'udp'); -INSERT INTO "port" VALUES(6162,'tcp'); -COMMIT; -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; - -module A; - -type Val: record { - p: port &type_column="proto"; -}; - -event line(description: Input::EventDescription, tpe: Input::Event, p: port) - { - print outfile, p; - } - -event bro_init() - { - local config_strings: table[string] of string = { - ["query"] = "select port as p, proto from port;", - }; - - outfile = open("../out"); - Input::add_event([$source="../port", $name="port", $fields=Val, $ev=line, $reader=Input::READER_SQLITE, $want_record=F, $config=config_strings]); - } - -event Input::end_of_data(name: string, source:string) - { - print outfile, "End of data"; - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/sqlite/port.zeek b/testing/btest/scripts/base/frameworks/input/sqlite/port.zeek new file mode 100644 index 0000000000..ec0e9bd428 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/sqlite/port.zeek @@ -0,0 +1,53 @@ +# +# @TEST-GROUP: sqlite +# +# @TEST-REQUIRES: which sqlite3 +# +# @TEST-EXEC: cat port.sql | sqlite3 port.sqlite +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE port.sql +PRAGMA foreign_keys=OFF; +BEGIN TRANSACTION; +CREATE TABLE port ( +'port' integer, +'proto' text +); +INSERT INTO "port" VALUES(5353,'udp'); +INSERT INTO "port" VALUES(6162,'tcp'); +COMMIT; +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; + +module A; + +type Val: record { + p: port &type_column="proto"; +}; + +event line(description: Input::EventDescription, tpe: Input::Event, 
p: port) + { + print outfile, p; + } + +event zeek_init() + { + local config_strings: table[string] of string = { + ["query"] = "select port as p, proto from port;", + }; + + outfile = open("../out"); + Input::add_event([$source="../port", $name="port", $fields=Val, $ev=line, $reader=Input::READER_SQLITE, $want_record=F, $config=config_strings]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, "End of data"; + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/sqlite/types.bro b/testing/btest/scripts/base/frameworks/input/sqlite/types.bro deleted file mode 100644 index 42f8717c12..0000000000 --- a/testing/btest/scripts/base/frameworks/input/sqlite/types.bro +++ /dev/null @@ -1,91 +0,0 @@ -# @TEST-REQUIRES: which sqlite3 -# -# @TEST-EXEC: cat ssh.sql | sqlite3 ssh.sqlite -# -# @TEST-GROUP: sqlite -# -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE ssh.sql -PRAGMA foreign_keys=OFF; -BEGIN TRANSACTION; -CREATE TABLE ssh ( -'b' boolean, -'i' integer, -'e' text, -'c' integer, -'p' integer, -'sn' text, -'a' text, -'d' double precision, -'t' double precision, -'iv' double precision, -'s' text, -'sc' text, -'ss' text, -'se' text, -'vc' text, -'vs' text, -'vn' text -); -INSERT INTO "ssh" VALUES(1,-42,'SSH::LOG',21,123,'10.0.0.0/24','1.2.3.4',3.14,1.35837684939385390286e+09,100.0,'hurz','2,4,1,3','CC,AA,BB','(empty)','10,20,30','', null); -COMMIT; -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - b: bool; - i: int; - e: Log::ID; - c: count; - p: port; - sn: subnet; - a: addr; - d: double; - t: time; - iv: interval; - s: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of count; - vs: vector of string; - vn: vector of string &optional; - } &log; -} - - -global outfile: file; - -event line(description: Input::EventDescription, tpe: Input::Event, p: SSH::Log) - { - print outfile, p; - - print outfile, |p$se|; - print outfile, |p$vs|; - } - -event bro_init() - { - local config_strings: table[string] of string = { - ["query"] = "select * from ssh;", - }; - - outfile = open("../out"); - Input::add_event([$source="../ssh", $name="ssh", $fields=SSH::Log, $ev=line, $reader=Input::READER_SQLITE, $want_record=T, $config=config_strings]); - } - -event Input::end_of_data(name: string, source:string) - { - print outfile, "End of data"; - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/sqlite/types.zeek b/testing/btest/scripts/base/frameworks/input/sqlite/types.zeek new file mode 100644 index 0000000000..6da0bef528 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/sqlite/types.zeek @@ -0,0 +1,91 @@ +# @TEST-REQUIRES: which sqlite3 +# +# @TEST-EXEC: cat ssh.sql | sqlite3 ssh.sqlite +# +# @TEST-GROUP: sqlite +# +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE ssh.sql +PRAGMA foreign_keys=OFF; +BEGIN TRANSACTION; +CREATE TABLE ssh ( +'b' boolean, +'i' integer, +'e' text, +'c' integer, +'p' integer, +'sn' text, +'a' text, +'d' double precision, +'t' double precision, +'iv' double precision, +'s' text, +'sc' text, +'ss' text, +'se' text, +'vc' text, +'vs' text, +'vn' text +); +INSERT INTO "ssh" 
VALUES(1,-42,'SSH::LOG',21,123,'10.0.0.0/24','1.2.3.4',3.14,1.35837684939385390286e+09,100.0,'hurz','2,4,1,3','CC,AA,BB','(empty)','10,20,30','', null); +COMMIT; +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + b: bool; + i: int; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of count; + vs: vector of string; + vn: vector of string &optional; + } &log; +} + + +global outfile: file; + +event line(description: Input::EventDescription, tpe: Input::Event, p: SSH::Log) + { + print outfile, p; + + print outfile, |p$se|; + print outfile, |p$vs|; + } + +event zeek_init() + { + local config_strings: table[string] of string = { + ["query"] = "select * from ssh;", + }; + + outfile = open("../out"); + Input::add_event([$source="../ssh", $name="ssh", $fields=SSH::Log, $ev=line, $reader=Input::READER_SQLITE, $want_record=T, $config=config_strings]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, "End of data"; + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/stream.bro b/testing/btest/scripts/base/frameworks/input/stream.bro deleted file mode 100644 index 8ed498f074..0000000000 --- a/testing/btest/scripts/base/frameworks/input/stream.bro +++ /dev/null @@ -1,89 +0,0 @@ -# @TEST-EXEC: cp input1.log input.log -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got1 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: cat input2.log >> input.log -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got2 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: cat input3.log >> input.log -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input1.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc ss se vc ve f -#types bool int enum count port subnet addr double time interval string table table table vector vector func -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -@TEST-END-FILE -@TEST-START-FILE input2.log -T -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -@TEST-END-FILE -@TEST-START-FILE input3.log -F -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -@TEST-END-FILE - -@load base/protocols/ssh -redef exit_only_after_terminate = T; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - b: bool; - e: Log::ID; - c: count; - p: port; - sn: subnet; - a: addr; - d: double; - t: time; - iv: interval; - s: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of int; - ve: vector of int; -}; - -global servers: table[int] of Val = table(); - -global outfile: file; - -global try: count; - -event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) - { - print outfile, "============EVENT============"; - print outfile, tpe; - print outfile, left; - print 
outfile, right; - print outfile, "============SERVERS============"; - print outfile, servers; - - try = try + 1; - - if ( try == 1 ) - system("touch got1"); - else if ( try == 2 ) - system("touch got2"); - else if ( try == 3 ) - { - print outfile, "done"; - close(outfile); - Input::remove("input"); - terminate(); - } - } - -event bro_init() - { - outfile = open("../out"); - try = 0; - # first read in the old stuff into the table... - Input::add_table([$source="../input.log", $mode=Input::STREAM, $name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line]); - } diff --git a/testing/btest/scripts/base/frameworks/input/stream.zeek b/testing/btest/scripts/base/frameworks/input/stream.zeek new file mode 100644 index 0000000000..b9064ef46b --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/stream.zeek @@ -0,0 +1,89 @@ +# @TEST-EXEC: cp input1.log input.log +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got1 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: cat input2.log >> input.log +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got2 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: cat input3.log >> input.log +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input1.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE +@TEST-START-FILE input2.log +T -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE +@TEST-START-FILE input3.log +F -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE + +@load base/protocols/ssh +redef exit_only_after_terminate = T; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of int; + ve: vector of int; +}; + +global servers: table[int] of Val = table(); + +global outfile: file; + +global try: count; + +event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) + { + print outfile, "============EVENT============"; + print outfile, tpe; + print outfile, left; + print outfile, right; + print outfile, "============SERVERS============"; + print outfile, servers; + + try = try + 1; + + if ( try == 1 ) + system("touch got1"); + else if ( try == 2 ) + system("touch got2"); + else if ( try == 3 ) + { + print outfile, "done"; + close(outfile); + Input::remove("input"); + terminate(); + } + } + +event zeek_init() + { + outfile = open("../out"); + try = 0; + # first read in the old stuff into the table... 
+ Input::add_table([$source="../input.log", $mode=Input::STREAM, $name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line]); + } diff --git a/testing/btest/scripts/base/frameworks/input/subrecord-event.bro b/testing/btest/scripts/base/frameworks/input/subrecord-event.bro deleted file mode 100644 index ec1cc37efc..0000000000 --- a/testing/btest/scripts/base/frameworks/input/subrecord-event.bro +++ /dev/null @@ -1,72 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields sub.b i sub.e sub.c sub.p sub.sn sub.two.a sub.two.d t iv s sc ss se vc ve f -#types bool int enum count port subnet addr double time interval string table table table vector vector func -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -@TEST-END-FILE - -@load base/protocols/ssh -redef exit_only_after_terminate = T; - -global outfile: file; -global try: count; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: int; -}; - -type SubVal2: record { - a: addr; - d: double; -}; - -type SubVal: record { - b: bool; - e: Log::ID; - c: count; - p: port; - sn: subnet; - two: SubVal2; -}; - -type Val: record { - sub: SubVal; - t: time; - iv: interval; - s: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of int; - ve: vector of int; -}; - - - -event line(description: Input::EventDescription, tpe: Input::Event, value: Val) - { - print outfile, value; - try = try + 1; - if ( try == 1 ) - { - Input::remove("ssh"); - close(outfile); - terminate(); - } - } - -event bro_init() - { - try = 0; - outfile = open("../out"); - Input::add_event([$source="../input.log", $name="ssh", $fields=Val, $ev=line, $want_record=T]); - } diff --git a/testing/btest/scripts/base/frameworks/input/subrecord-event.zeek b/testing/btest/scripts/base/frameworks/input/subrecord-event.zeek new file mode 100644 index 0000000000..9f303fbb5a --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/subrecord-event.zeek @@ -0,0 +1,72 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields sub.b i sub.e sub.c sub.p sub.sn sub.two.a sub.two.d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE + +@load base/protocols/ssh +redef exit_only_after_terminate = T; + +global outfile: file; +global try: count; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type SubVal2: record { + a: addr; + d: double; +}; + +type SubVal: record { + b: bool; + e: Log::ID; + c: count; + p: port; + sn: subnet; + two: SubVal2; +}; + +type Val: record { + sub: SubVal; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of int; + ve: vector of int; +}; + + + +event line(description: Input::EventDescription, tpe: Input::Event, value: Val) + { + print outfile, value; + try = try + 1; + if ( try == 1 ) + { + 
Input::remove("ssh"); + close(outfile); + terminate(); + } + } + +event zeek_init() + { + try = 0; + outfile = open("../out"); + Input::add_event([$source="../input.log", $name="ssh", $fields=Val, $ev=line, $want_record=T]); + } diff --git a/testing/btest/scripts/base/frameworks/input/subrecord.bro b/testing/btest/scripts/base/frameworks/input/subrecord.bro deleted file mode 100644 index 0f960c6d3c..0000000000 --- a/testing/btest/scripts/base/frameworks/input/subrecord.bro +++ /dev/null @@ -1,67 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields sub.b i sub.e sub.c sub.p sub.sn sub.two.a sub.two.d t iv s sc ss se vc ve f -#types bool int enum count port subnet addr double time interval string table table table vector vector func -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -@TEST-END-FILE - -@load base/protocols/ssh -redef exit_only_after_terminate = T; - -global outfile: file; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: int; -}; - -type SubVal2: record { - a: addr; - d: double; -}; - -type SubVal: record { - b: bool; - e: Log::ID; - c: count; - p: port; - sn: subnet; - two: SubVal2; -}; - -type Val: record { - sub: SubVal; - t: time; - iv: interval; - s: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of int; - ve: vector of int; -}; - -global servers: table[int] of Val = table(); - -event bro_init() - { - outfile = open("../out"); - # first read in the old stuff into the table... - Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); - } - -event Input::end_of_data(name: string, source:string) - { - print outfile, servers; - Input::remove("ssh"); - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/subrecord.zeek b/testing/btest/scripts/base/frameworks/input/subrecord.zeek new file mode 100644 index 0000000000..c01ce24158 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/subrecord.zeek @@ -0,0 +1,67 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields sub.b i sub.e sub.c sub.p sub.sn sub.two.a sub.two.d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE + +@load base/protocols/ssh +redef exit_only_after_terminate = T; + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type SubVal2: record { + a: addr; + d: double; +}; + +type SubVal: record { + b: bool; + e: Log::ID; + c: count; + p: port; + sn: subnet; + two: SubVal2; +}; + +type Val: record { + sub: SubVal; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of int; + ve: vector of int; +}; + +global servers: table[int] of Val = table(); + +event zeek_init() + { + outfile = open("../out"); + # first read in the old 
stuff into the table... + Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, servers; + Input::remove("ssh"); + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/tableevent.bro b/testing/btest/scripts/base/frameworks/input/tableevent.bro deleted file mode 100644 index 760b19d24f..0000000000 --- a/testing/btest/scripts/base/frameworks/input/tableevent.bro +++ /dev/null @@ -1,55 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields i b -#types int bool -1 T -2 T -3 F -4 F -5 F -6 F -7 T -@TEST-END-FILE - -redef exit_only_after_terminate = T; - -global outfile: file; -global try: count; - -redef InputAscii::empty_field = "EMPTY"; - -type Idx: record { - i: int; -}; - -type Val: record { - b: bool; -}; - -global destination: table[int] of bool = table(); - -event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: bool) - { - print outfile, tpe; - print outfile, left; - print outfile, right; - try = try + 1; - if ( try == 7 ) - { - Input::remove("input"); - close(outfile); - terminate(); - } - } - -event bro_init() - { - try = 0; - outfile = open("../out"); - Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=destination, $want_record=F, $ev=line]); - } diff --git a/testing/btest/scripts/base/frameworks/input/tableevent.zeek b/testing/btest/scripts/base/frameworks/input/tableevent.zeek new file mode 100644 index 0000000000..680a412c27 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/tableevent.zeek @@ -0,0 +1,55 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields i b +#types int bool +1 T +2 T +3 F +4 F +5 F +6 F +7 T +@TEST-END-FILE + +redef exit_only_after_terminate = T; + +global outfile: file; +global try: count; + +redef InputAscii::empty_field = "EMPTY"; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; +}; + +global destination: table[int] of bool = table(); + +event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: bool) + { + print outfile, tpe; + print outfile, left; + print outfile, right; + try = try + 1; + if ( try == 7 ) + { + Input::remove("input"); + close(outfile); + terminate(); + } + } + +event zeek_init() + { + try = 0; + outfile = open("../out"); + Input::add_table([$source="../input.log", $name="input", $idx=Idx, $val=Val, $destination=destination, $want_record=F, $ev=line]); + } diff --git a/testing/btest/scripts/base/frameworks/input/twotables.bro b/testing/btest/scripts/base/frameworks/input/twotables.bro deleted file mode 100644 index 5b6d833da3..0000000000 --- a/testing/btest/scripts/base/frameworks/input/twotables.bro +++ /dev/null @@ -1,133 +0,0 @@ -# @TEST-EXEC: mv input1.log input.log -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/got2 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: mv input3.log input.log -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff event.out -# @TEST-EXEC: btest-diff pred1.out -# @TEST-EXEC: btest-diff pred2.out -# @TEST-EXEC: btest-diff fin.out - -@TEST-START-FILE input1.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc 
ss se vc ve f -#types bool int enum count port subnet addr double time interval string table table table vector vector func -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -@TEST-END-FILE -@TEST-START-FILE input2.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc ss se vc ve f -#types bool int enum count port subnet addr double time interval string table table table vector vector func -T -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -@TEST-END-FILE -@TEST-START-FILE input3.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc ss se vc ve f -#types bool int enum count port subnet addr double time interval string table table table vector vector func -F -44 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -@TEST-END-FILE - -@load base/protocols/ssh -redef exit_only_after_terminate = T; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - b: bool; - e: Log::ID; - c: count; - p: port; - sn: subnet; - a: addr; - d: double; - t: time; - iv: interval; - s: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of int; - ve: vector of int; -}; - -global servers: table[int] of Val = table(); - -global event_out: file; -global pred1_out: file; -global pred2_out: file; -global fin_out: file; - -global try: count; - -event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) - { - print event_out, "============EVENT============"; -# print event_out, "Description"; -# print event_out, description; -# print event_out, "Type"; -# print event_out, tpe; -# print event_out, "Left"; -# print event_out, left; -# print event_out, "Right"; -# print event_out, right; - } - -event bro_init() - { - event_out = open ("../event.out"); - pred1_out = open ("../pred1.out"); - pred2_out = open ("../pred2.out"); - fin_out = open ("../fin.out"); - try = 0; - # first read in the old stuff into the table... 
- Input::add_table([$source="../input.log", $mode=Input::REREAD, $name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line, - $pred(typ: Input::Event, left: Idx, right: Val) = { - print pred1_out, "============PREDICATE============"; - print pred1_out, typ; - print pred1_out, left; - print pred1_out, right; - return T; - } - ]); - Input::add_table([$source="../input2.log", $mode=Input::REREAD, $name="ssh2", $idx=Idx, $val=Val, $destination=servers, $ev=line, - $pred(typ: Input::Event, left: Idx, right: Val) = { - print pred2_out, "============PREDICATE 2============"; - print pred2_out, typ; - print pred2_out, left; - print pred2_out, right; - return T; - } - ]); - } - - -event Input::end_of_data(name: string, source: string) - { - print fin_out, "==========SERVERS============"; - #print fin_out, servers; - - try = try + 1; - if ( try == 2 ) - system("touch got2"); - else if ( try == 3 ) - { - print fin_out, "done"; - print fin_out, servers; - close(event_out); - close(pred1_out); - close(pred2_out); - close(fin_out); - Input::remove("input"); - Input::remove("input2"); - terminate(); - } - } diff --git a/testing/btest/scripts/base/frameworks/input/twotables.zeek b/testing/btest/scripts/base/frameworks/input/twotables.zeek new file mode 100644 index 0000000000..6ff57f9666 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/twotables.zeek @@ -0,0 +1,133 @@ +# @TEST-EXEC: mv input1.log input.log +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/got2 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: mv input3.log input.log +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff event.out +# @TEST-EXEC: btest-diff pred1.out +# @TEST-EXEC: btest-diff pred2.out +# @TEST-EXEC: btest-diff fin.out + +@TEST-START-FILE input1.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE +@TEST-START-FILE input2.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +T -43 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE +@TEST-START-FILE input3.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve f +#types bool int enum count port subnet addr double time interval string table table table vector vector func +F -44 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE + +@load base/protocols/ssh +redef exit_only_after_terminate = T; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of int; + ve: vector of int; +}; + +global servers: 
table[int] of Val = table(); + +global event_out: file; +global pred1_out: file; +global pred2_out: file; +global fin_out: file; + +global try: count; + +event line(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) + { + print event_out, "============EVENT============"; +# print event_out, "Description"; +# print event_out, description; +# print event_out, "Type"; +# print event_out, tpe; +# print event_out, "Left"; +# print event_out, left; +# print event_out, "Right"; +# print event_out, right; + } + +event zeek_init() + { + event_out = open ("../event.out"); + pred1_out = open ("../pred1.out"); + pred2_out = open ("../pred2.out"); + fin_out = open ("../fin.out"); + try = 0; + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $mode=Input::REREAD, $name="ssh", $idx=Idx, $val=Val, $destination=servers, $ev=line, + $pred(typ: Input::Event, left: Idx, right: Val) = { + print pred1_out, "============PREDICATE============"; + print pred1_out, typ; + print pred1_out, left; + print pred1_out, right; + return T; + } + ]); + Input::add_table([$source="../input2.log", $mode=Input::REREAD, $name="ssh2", $idx=Idx, $val=Val, $destination=servers, $ev=line, + $pred(typ: Input::Event, left: Idx, right: Val) = { + print pred2_out, "============PREDICATE 2============"; + print pred2_out, typ; + print pred2_out, left; + print pred2_out, right; + return T; + } + ]); + } + + +event Input::end_of_data(name: string, source: string) + { + print fin_out, "==========SERVERS============"; + #print fin_out, servers; + + try = try + 1; + if ( try == 2 ) + system("touch got2"); + else if ( try == 3 ) + { + print fin_out, "done"; + print fin_out, servers; + close(event_out); + close(pred1_out); + close(pred2_out); + close(fin_out); + Input::remove("input"); + Input::remove("input2"); + terminate(); + } + } diff --git a/testing/btest/scripts/base/frameworks/input/unsupported_types.bro b/testing/btest/scripts/base/frameworks/input/unsupported_types.bro deleted file mode 100644 index beedc0a633..0000000000 --- a/testing/btest/scripts/base/frameworks/input/unsupported_types.bro +++ /dev/null @@ -1,61 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields fi b i e c p sn a d t iv s sc ss se vc ve f -#types file bool int enum count port subnet addr double time interval string table table table vector vector func -whatever T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} -@TEST-END-FILE - -@load base/protocols/ssh -redef exit_only_after_terminate = T; - -global outfile: file; - -redef InputAscii::empty_field = "EMPTY"; -redef Input::accept_unsupported_types = T; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - fi: file &optional; - b: bool; - e: Log::ID; - c: count; - p: port; - sn: subnet; - a: addr; - d: double; - t: time; - iv: interval; - s: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of int; - ve: vector of int; -}; - -global servers: table[int] of Val = table(); - -event bro_init() - { - outfile = open("../out"); - # first read in the old stuff into the table... 
- Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); - } - -event Input::end_of_data(name: string, source:string) - { - print outfile, servers; - Input::remove("ssh"); - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/unsupported_types.zeek b/testing/btest/scripts/base/frameworks/input/unsupported_types.zeek new file mode 100644 index 0000000000..e4e93f7164 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/unsupported_types.zeek @@ -0,0 +1,61 @@ +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields fi b i e c p sn a d t iv s sc ss se vc ve f +#types file bool int enum count port subnet addr double time interval string table table table vector vector func +whatever T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY SSH::foo\x0a{ \x0aif (0 < SSH::i) \x0a\x09return (Foo);\x0aelse\x0a\x09return (Bar);\x0a\x0a} +@TEST-END-FILE + +@load base/protocols/ssh +redef exit_only_after_terminate = T; + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; +redef Input::accept_unsupported_types = T; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + fi: file &optional; + b: bool; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of int; + ve: vector of int; +}; + +global servers: table[int] of Val = table(); + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, servers; + Input::remove("ssh"); + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/input/windows.bro b/testing/btest/scripts/base/frameworks/input/windows.bro deleted file mode 100644 index 275f5e0713..0000000000 --- a/testing/btest/scripts/base/frameworks/input/windows.bro +++ /dev/null @@ -1,64 +0,0 @@ -# Test windows linebreaks - -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff out - -redef exit_only_after_terminate = T; - -@TEST-START-FILE input.log -#separator \x09 -#path ssh -#fields b i e c p sn a d t iv s sc ss se vc ve ns -#types bool int enum count port subnet addr double time interval string table table table vector vector string -T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY 4242 -@TEST-END-FILE - -@load base/protocols/ssh - -global outfile: file; - -redef InputAscii::empty_field = "EMPTY"; - -module A; - -type Idx: record { - i: int; -}; - -type Val: record { - b: bool; - e: Log::ID; - c: count; - p: port; - sn: subnet; - a: addr; - d: double; - t: time; - iv: interval; - s: string; - ns: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of int; - ve: vector of int; -}; - -global servers: table[int] of Val = table(); - -event bro_init() - { - outfile = open("../out"); - # first read in the old stuff into the table... 
- Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); - } - -event Input::end_of_data(name: string, source:string) - { - print outfile, servers; - print outfile, to_count(servers[-42]$ns); # try to actually use a string. If null-termination is wrong this will fail. - Input::remove("ssh"); - close(outfile); - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/input/windows.zeek b/testing/btest/scripts/base/frameworks/input/windows.zeek new file mode 100644 index 0000000000..2615acb197 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/input/windows.zeek @@ -0,0 +1,64 @@ +# Test windows linebreaks + +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff out + +redef exit_only_after_terminate = T; + +@TEST-START-FILE input.log +#separator \x09 +#path ssh +#fields b i e c p sn a d t iv s sc ss se vc ve ns +#types bool int enum count port subnet addr double time interval string table table table vector vector string +T -42 SSH::LOG 21 123 10.0.0.0/24 1.2.3.4 3.14 1315801931.273616 100.000000 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY 4242 +@TEST-END-FILE + +@load base/protocols/ssh + +global outfile: file; + +redef InputAscii::empty_field = "EMPTY"; + +module A; + +type Idx: record { + i: int; +}; + +type Val: record { + b: bool; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + ns: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of int; + ve: vector of int; +}; + +global servers: table[int] of Val = table(); + +event zeek_init() + { + outfile = open("../out"); + # first read in the old stuff into the table... + Input::add_table([$source="../input.log", $name="ssh", $idx=Idx, $val=Val, $destination=servers]); + } + +event Input::end_of_data(name: string, source:string) + { + print outfile, servers; + print outfile, to_count(servers[-42]$ns); # try to actually use a string. If null-termination is wrong this will fail. + Input::remove("ssh"); + close(outfile); + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/intel/cluster-transparency-with-proxy.bro b/testing/btest/scripts/base/frameworks/intel/cluster-transparency-with-proxy.bro deleted file mode 100644 index b81cac9bac..0000000000 --- a/testing/btest/scripts/base/frameworks/intel/cluster-transparency-with-proxy.bro +++ /dev/null @@ -1,93 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# @TEST-PORT: BROKER_PORT4 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. 
CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait -k 10 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff manager-1/.stdout -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff worker-1/.stdout -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff worker-2/.stdout -# @TEST-EXEC: btest-diff manager-1/intel.log - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], - ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager-1"], -}; -@TEST-END-FILE - -module Intel; - -redef Log::default_rotation_interval=0sec; - -event Cluster::node_up(name: string, id: string) - { - # Insert the data once both workers are connected. - if ( Cluster::local_node_type() == Cluster::MANAGER && Cluster::worker_count == 2 && Cluster::proxy_pool$alive_count == 1 ) - { - Intel::insert([$indicator="1.2.3.4", $indicator_type=Intel::ADDR, $meta=[$source="manager"]]); - } - } - -global worker2_data = 0; -global sent_data = F; -# Watch for new indicators send to workers. -event Intel::insert_indicator(item: Intel::Item) - { - print fmt("new_indicator: %s inserted by %s", item$indicator, item$meta$source); - - if ( ! sent_data ) - { - # We wait to insert data here because we can now be sure the - # full cluster is constructed. - sent_data = T; - if ( Cluster::node == "worker-1" ) - Intel::insert([$indicator="123.123.123.123", $indicator_type=Intel::ADDR, $meta=[$source="worker-1"]]); - if ( Cluster::node == "worker-2" ) - Intel::insert([$indicator="4.3.2.1", $indicator_type=Intel::ADDR, $meta=[$source="worker-2"]]); - } - - # We're forcing worker-2 to do a lookup when it has three intelligence items - # which were distributed over the cluster (data inserted locally is resent). - if ( Cluster::node == "worker-2" ) - { - ++worker2_data; - if ( worker2_data == 3 ) - { - # Now that everything is inserted, see if we can match on the data inserted - # by worker-1. - print "Doing a lookup"; - Intel::seen([$host=123.123.123.123, $where=Intel::IN_ANYWHERE]); - } - } - } - -# Watch for remote inserts sent to the manager. -event Intel::insert_item(item: Intel::Item) - { - print fmt("insert_item: %s inserted by %s", item$indicator, item$meta$source); - } - -# Watch for new items. -event Intel::new_item(item: Intel::Item) - { - print fmt("new_item triggered for %s by %s on %s", item$indicator, - item$meta$source, Cluster::node); - } - -event Intel::log_intel(rec: Intel::Info) - { - terminate(); - } - -event Cluster::node_down(name: string, id: string) - { - # Cascading termination - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/intel/cluster-transparency-with-proxy.zeek b/testing/btest/scripts/base/frameworks/intel/cluster-transparency-with-proxy.zeek new file mode 100644 index 0000000000..174f07b478 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/intel/cluster-transparency-with-proxy.zeek @@ -0,0 +1,93 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# @TEST-PORT: BROKER_PORT4 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. 
CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run proxy-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=proxy-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-2 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-2 zeek %INPUT +# @TEST-EXEC: btest-bg-wait -k 10 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff manager-1/.stdout +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff worker-1/.stdout +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff worker-2/.stdout +# @TEST-EXEC: btest-diff manager-1/intel.log + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], + ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager-1"], +}; +@TEST-END-FILE + +module Intel; + +redef Log::default_rotation_interval=0sec; + +event Cluster::node_up(name: string, id: string) + { + # Insert the data once both workers are connected. + if ( Cluster::local_node_type() == Cluster::MANAGER && Cluster::worker_count == 2 && Cluster::proxy_pool$alive_count == 1 ) + { + Intel::insert([$indicator="1.2.3.4", $indicator_type=Intel::ADDR, $meta=[$source="manager"]]); + } + } + +global worker2_data = 0; +global sent_data = F; +# Watch for new indicators send to workers. +event Intel::insert_indicator(item: Intel::Item) + { + print fmt("new_indicator: %s inserted by %s", item$indicator, item$meta$source); + + if ( ! sent_data ) + { + # We wait to insert data here because we can now be sure the + # full cluster is constructed. + sent_data = T; + if ( Cluster::node == "worker-1" ) + Intel::insert([$indicator="123.123.123.123", $indicator_type=Intel::ADDR, $meta=[$source="worker-1"]]); + if ( Cluster::node == "worker-2" ) + Intel::insert([$indicator="4.3.2.1", $indicator_type=Intel::ADDR, $meta=[$source="worker-2"]]); + } + + # We're forcing worker-2 to do a lookup when it has three intelligence items + # which were distributed over the cluster (data inserted locally is resent). + if ( Cluster::node == "worker-2" ) + { + ++worker2_data; + if ( worker2_data == 3 ) + { + # Now that everything is inserted, see if we can match on the data inserted + # by worker-1. + print "Doing a lookup"; + Intel::seen([$host=123.123.123.123, $where=Intel::IN_ANYWHERE]); + } + } + } + +# Watch for remote inserts sent to the manager. +event Intel::insert_item(item: Intel::Item) + { + print fmt("insert_item: %s inserted by %s", item$indicator, item$meta$source); + } + +# Watch for new items. 
+event Intel::new_item(item: Intel::Item) + { + print fmt("new_item triggered for %s by %s on %s", item$indicator, + item$meta$source, Cluster::node); + } + +event Intel::log_intel(rec: Intel::Info) + { + terminate(); + } + +event Cluster::node_down(name: string, id: string) + { + # Cascading termination + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/intel/cluster-transparency.bro b/testing/btest/scripts/base/frameworks/intel/cluster-transparency.bro deleted file mode 100644 index 5362886cd7..0000000000 --- a/testing/btest/scripts/base/frameworks/intel/cluster-transparency.bro +++ /dev/null @@ -1,90 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait -k 10 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff manager-1/.stdout -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff worker-1/.stdout -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff worker-2/.stdout -# @TEST-EXEC: btest-diff manager-1/intel.log - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], -}; -@TEST-END-FILE - -module Intel; - -redef Log::default_rotation_interval=0sec; - -event Cluster::node_up(name: string, id: string) - { - # Insert the data once both workers are connected. - if ( Cluster::local_node_type() == Cluster::MANAGER && Cluster::worker_count == 2 ) - { - Intel::insert([$indicator="1.2.3.4", $indicator_type=Intel::ADDR, $meta=[$source="manager"]]); - } - } - -global worker2_data = 0; -global sent_data = F; -# Watch for new indicators send to workers. -event Intel::insert_indicator(item: Intel::Item) - { - print fmt("new_indicator: %s inserted by %s", item$indicator, item$meta$source); - - if ( ! sent_data ) - { - # We wait to insert data here because we can now be sure the - # full cluster is constructed. - sent_data = T; - if ( Cluster::node == "worker-1" ) - Intel::insert([$indicator="123.123.123.123", $indicator_type=Intel::ADDR, $meta=[$source="worker-1"]]); - if ( Cluster::node == "worker-2" ) - Intel::insert([$indicator="4.3.2.1", $indicator_type=Intel::ADDR, $meta=[$source="worker-2"]]); - } - - # We're forcing worker-2 to do a lookup when it has three intelligence items - # which were distributed over the cluster (data inserted locally is resent). - if ( Cluster::node == "worker-2" ) - { - ++worker2_data; - if ( worker2_data == 3 ) - { - # Now that everything is inserted, see if we can match on the data inserted - # by worker-1. - print "Doing a lookup"; - Intel::seen([$host=123.123.123.123, $where=Intel::IN_ANYWHERE]); - } - } - } - -# Watch for remote inserts sent to the manager. -event Intel::insert_item(item: Intel::Item) - { - print fmt("insert_item: %s inserted by %s", item$indicator, item$meta$source); - } - -# Watch for new items. 
-event Intel::new_item(item: Intel::Item) - { - print fmt("new_item triggered for %s by %s on %s", item$indicator, - item$meta$source, Cluster::node); - } - -event Intel::log_intel(rec: Intel::Info) - { - terminate(); - } - -event Cluster::node_down(name: string, id: string) - { - # Cascading termination - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/intel/cluster-transparency.zeek b/testing/btest/scripts/base/frameworks/intel/cluster-transparency.zeek new file mode 100644 index 0000000000..8e093330c2 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/intel/cluster-transparency.zeek @@ -0,0 +1,90 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-2 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-2 zeek %INPUT +# @TEST-EXEC: btest-bg-wait -k 10 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff manager-1/.stdout +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff worker-1/.stdout +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff worker-2/.stdout +# @TEST-EXEC: btest-diff manager-1/intel.log + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], +}; +@TEST-END-FILE + +module Intel; + +redef Log::default_rotation_interval=0sec; + +event Cluster::node_up(name: string, id: string) + { + # Insert the data once both workers are connected. + if ( Cluster::local_node_type() == Cluster::MANAGER && Cluster::worker_count == 2 ) + { + Intel::insert([$indicator="1.2.3.4", $indicator_type=Intel::ADDR, $meta=[$source="manager"]]); + } + } + +global worker2_data = 0; +global sent_data = F; +# Watch for new indicators send to workers. +event Intel::insert_indicator(item: Intel::Item) + { + print fmt("new_indicator: %s inserted by %s", item$indicator, item$meta$source); + + if ( ! sent_data ) + { + # We wait to insert data here because we can now be sure the + # full cluster is constructed. + sent_data = T; + if ( Cluster::node == "worker-1" ) + Intel::insert([$indicator="123.123.123.123", $indicator_type=Intel::ADDR, $meta=[$source="worker-1"]]); + if ( Cluster::node == "worker-2" ) + Intel::insert([$indicator="4.3.2.1", $indicator_type=Intel::ADDR, $meta=[$source="worker-2"]]); + } + + # We're forcing worker-2 to do a lookup when it has three intelligence items + # which were distributed over the cluster (data inserted locally is resent). + if ( Cluster::node == "worker-2" ) + { + ++worker2_data; + if ( worker2_data == 3 ) + { + # Now that everything is inserted, see if we can match on the data inserted + # by worker-1. + print "Doing a lookup"; + Intel::seen([$host=123.123.123.123, $where=Intel::IN_ANYWHERE]); + } + } + } + +# Watch for remote inserts sent to the manager. +event Intel::insert_item(item: Intel::Item) + { + print fmt("insert_item: %s inserted by %s", item$indicator, item$meta$source); + } + +# Watch for new items. 
+event Intel::new_item(item: Intel::Item) + { + print fmt("new_item triggered for %s by %s on %s", item$indicator, + item$meta$source, Cluster::node); + } + +event Intel::log_intel(rec: Intel::Info) + { + terminate(); + } + +event Cluster::node_down(name: string, id: string) + { + # Cascading termination + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/intel/expire-item.bro b/testing/btest/scripts/base/frameworks/intel/expire-item.bro deleted file mode 100644 index 08d80714bc..0000000000 --- a/testing/btest/scripts/base/frameworks/intel/expire-item.bro +++ /dev/null @@ -1,67 +0,0 @@ -# @TEST-EXEC: btest-bg-run broproc bro %INPUT -# @TEST-EXEC: btest-bg-wait -k 21 -# @TEST-EXEC: cat broproc/intel.log > output -# @TEST-EXEC: cat broproc/.stdout >> output -# @TEST-EXEC: btest-diff output - -# @TEST-START-FILE intel.dat -#fields indicator indicator_type meta.source meta.desc meta.url -1.2.3.4 Intel::ADDR source1 this host is bad http://some-data-distributor.com/1 -192.168.0.0/16 Intel::SUBNET source1 this network is bad http://some-data-distributor.com/2 -# @TEST-END-FILE - -@load frameworks/intel/do_expire - -redef exit_only_after_terminate = T; - -redef Intel::read_files += { "../intel.dat" }; -redef enum Intel::Where += { SOMEWHERE }; -redef Intel::item_expiration = 9sec; -redef table_expire_interval = 3sec; - -global runs = 0; -event do_it() - { - ++runs; - print fmt("-- Run %s --", runs); - - print "Trigger: 1.2.3.4"; - Intel::seen([$host=1.2.3.4, - $where=SOMEWHERE]); - - if ( runs == 2 ) - { - # Reinserting the indicator should reset the expiration - print "Reinsert: 1.2.3.4"; - local item = [ - $indicator="1.2.3.4", - $indicator_type=Intel::ADDR, - $meta=[ - $source="source2", - $desc="this host is still bad", - $url="http://some-data-distributor.com/2"] - ]; - Intel::insert(item); - } - - if ( runs < 6 ) - schedule 3sec { do_it() }; - else - terminate(); - } - -event Intel::match(s: Intel::Seen, items: set[Intel::Item]) - { - print fmt("Seen: %s", s$indicator); - } - -hook Intel::item_expired(indicator: string, indicator_type: Intel::Type, - metas: set[Intel::MetaData]) - { - print fmt("Expired: %s", indicator); - } - -event bro_init() &priority=-10 - { - schedule 1.5sec { do_it() }; - } diff --git a/testing/btest/scripts/base/frameworks/intel/expire-item.zeek b/testing/btest/scripts/base/frameworks/intel/expire-item.zeek new file mode 100644 index 0000000000..8f493947fa --- /dev/null +++ b/testing/btest/scripts/base/frameworks/intel/expire-item.zeek @@ -0,0 +1,67 @@ +# @TEST-EXEC: btest-bg-run zeekproc zeek %INPUT +# @TEST-EXEC: btest-bg-wait -k 21 +# @TEST-EXEC: cat zeekproc/intel.log > output +# @TEST-EXEC: cat zeekproc/.stdout >> output +# @TEST-EXEC: btest-diff output + +# @TEST-START-FILE intel.dat +#fields indicator indicator_type meta.source meta.desc meta.url +1.2.3.4 Intel::ADDR source1 this host is bad http://some-data-distributor.com/1 +192.168.0.0/16 Intel::SUBNET source1 this network is bad http://some-data-distributor.com/2 +# @TEST-END-FILE + +@load frameworks/intel/do_expire + +redef exit_only_after_terminate = T; + +redef Intel::read_files += { "../intel.dat" }; +redef enum Intel::Where += { SOMEWHERE }; +redef Intel::item_expiration = 9sec; +redef table_expire_interval = 3sec; + +global runs = 0; +event do_it() + { + ++runs; + print fmt("-- Run %s --", runs); + + print "Trigger: 1.2.3.4"; + Intel::seen([$host=1.2.3.4, + $where=SOMEWHERE]); + + if ( runs == 2 ) + { + # Reinserting the indicator should reset the expiration + print "Reinsert: 
1.2.3.4"; + local item = [ + $indicator="1.2.3.4", + $indicator_type=Intel::ADDR, + $meta=[ + $source="source2", + $desc="this host is still bad", + $url="http://some-data-distributor.com/2"] + ]; + Intel::insert(item); + } + + if ( runs < 6 ) + schedule 3sec { do_it() }; + else + terminate(); + } + +event Intel::match(s: Intel::Seen, items: set[Intel::Item]) + { + print fmt("Seen: %s", s$indicator); + } + +hook Intel::item_expired(indicator: string, indicator_type: Intel::Type, + metas: set[Intel::MetaData]) + { + print fmt("Expired: %s", indicator); + } + +event zeek_init() &priority=-10 + { + schedule 1.5sec { do_it() }; + } diff --git a/testing/btest/scripts/base/frameworks/intel/filter-item.bro b/testing/btest/scripts/base/frameworks/intel/filter-item.bro deleted file mode 100644 index c598664996..0000000000 --- a/testing/btest/scripts/base/frameworks/intel/filter-item.bro +++ /dev/null @@ -1,43 +0,0 @@ - -# @TEST-EXEC: btest-bg-run broproc bro %INPUT -# @TEST-EXEC: btest-bg-wait -k 5 -# @TEST-EXEC: btest-diff broproc/intel.log - -@TEST-START-FILE intel.dat -#fields indicator indicator_type meta.source meta.desc meta.url -1.2.3.42 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/1234 -10.0.0.1 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/1234 -@TEST-END-FILE - -redef exit_only_after_terminate = T; -redef Site::local_nets += { 10.0.0.0/8 }; -redef Intel::read_files += { "../intel.dat" }; -redef enum Intel::Where += { SOMEWHERE }; - -hook Intel::filter_item(item: Intel::Item) - { - if ( item$indicator_type == Intel::ADDR && - Site::is_local_addr(to_addr(item$indicator)) ) - break; - } - -event do_it() - { - Intel::seen([$host=10.0.0.1, - $where=SOMEWHERE]); - Intel::seen([$host=1.2.3.42, - $where=SOMEWHERE]); - } - -global log_lines = 0; -event Intel::log_intel(rec: Intel::Info) - { - ++log_lines; - if ( log_lines == 1 ) - terminate(); - } - -event bro_init() &priority=-10 - { - schedule 1sec { do_it() }; - } diff --git a/testing/btest/scripts/base/frameworks/intel/filter-item.zeek b/testing/btest/scripts/base/frameworks/intel/filter-item.zeek new file mode 100644 index 0000000000..3c5db1147e --- /dev/null +++ b/testing/btest/scripts/base/frameworks/intel/filter-item.zeek @@ -0,0 +1,43 @@ + +# @TEST-EXEC: btest-bg-run zeekproc zeek %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: btest-diff zeekproc/intel.log + +@TEST-START-FILE intel.dat +#fields indicator indicator_type meta.source meta.desc meta.url +1.2.3.42 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/1234 +10.0.0.1 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/1234 +@TEST-END-FILE + +redef exit_only_after_terminate = T; +redef Site::local_nets += { 10.0.0.0/8 }; +redef Intel::read_files += { "../intel.dat" }; +redef enum Intel::Where += { SOMEWHERE }; + +hook Intel::filter_item(item: Intel::Item) + { + if ( item$indicator_type == Intel::ADDR && + Site::is_local_addr(to_addr(item$indicator)) ) + break; + } + +event do_it() + { + Intel::seen([$host=10.0.0.1, + $where=SOMEWHERE]); + Intel::seen([$host=1.2.3.42, + $where=SOMEWHERE]); + } + +global log_lines = 0; +event Intel::log_intel(rec: Intel::Info) + { + ++log_lines; + if ( log_lines == 1 ) + terminate(); + } + +event zeek_init() &priority=-10 + { + schedule 1sec { do_it() }; + } diff --git a/testing/btest/scripts/base/frameworks/intel/input-and-match.bro b/testing/btest/scripts/base/frameworks/intel/input-and-match.bro deleted 
file mode 100644 index 8f74117201..0000000000 --- a/testing/btest/scripts/base/frameworks/intel/input-and-match.bro +++ /dev/null @@ -1,38 +0,0 @@ - -# @TEST-EXEC: btest-bg-run broproc bro %INPUT -# @TEST-EXEC: btest-bg-wait -k 5 -# @TEST-EXEC: btest-diff broproc/intel.log - -@TEST-START-FILE intel.dat -#fields indicator indicator_type meta.source meta.desc meta.url -1.2.3.4 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/1234 -1.2.3.4 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/1234 -e@mail.com Intel::EMAIL source1 Phishing email source http://some-data-distributor.com/100000 -@TEST-END-FILE - -redef exit_only_after_terminate = T; -redef Intel::read_files += { "../intel.dat" }; -redef enum Intel::Where += { SOMEWHERE }; - -event do_it() - { - Intel::seen([$indicator="e@mail.com", - $indicator_type=Intel::EMAIL, - $where=SOMEWHERE]); - - Intel::seen([$host=1.2.3.4, - $where=SOMEWHERE]); - } - -global log_lines = 0; -event Intel::log_intel(rec: Intel::Info) - { - ++log_lines; - if ( log_lines == 2 ) - terminate(); - } - -event bro_init() &priority=-10 - { - schedule 1sec { do_it() }; - } diff --git a/testing/btest/scripts/base/frameworks/intel/input-and-match.zeek b/testing/btest/scripts/base/frameworks/intel/input-and-match.zeek new file mode 100644 index 0000000000..f0f5e59511 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/intel/input-and-match.zeek @@ -0,0 +1,38 @@ + +# @TEST-EXEC: btest-bg-run zeekproc zeek %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: btest-diff zeekproc/intel.log + +@TEST-START-FILE intel.dat +#fields indicator indicator_type meta.source meta.desc meta.url +1.2.3.4 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/1234 +1.2.3.4 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/1234 +e@mail.com Intel::EMAIL source1 Phishing email source http://some-data-distributor.com/100000 +@TEST-END-FILE + +redef exit_only_after_terminate = T; +redef Intel::read_files += { "../intel.dat" }; +redef enum Intel::Where += { SOMEWHERE }; + +event do_it() + { + Intel::seen([$indicator="e@mail.com", + $indicator_type=Intel::EMAIL, + $where=SOMEWHERE]); + + Intel::seen([$host=1.2.3.4, + $where=SOMEWHERE]); + } + +global log_lines = 0; +event Intel::log_intel(rec: Intel::Info) + { + ++log_lines; + if ( log_lines == 2 ) + terminate(); + } + +event zeek_init() &priority=-10 + { + schedule 1sec { do_it() }; + } diff --git a/testing/btest/scripts/base/frameworks/intel/match-subnet.bro b/testing/btest/scripts/base/frameworks/intel/match-subnet.bro deleted file mode 100644 index 8e3fe74116..0000000000 --- a/testing/btest/scripts/base/frameworks/intel/match-subnet.bro +++ /dev/null @@ -1,51 +0,0 @@ -# @TEST-EXEC: btest-bg-run broproc bro %INPUT -# @TEST-EXEC: btest-bg-wait -k 5 -# @TEST-EXEC: cat broproc/intel.log > output -# @TEST-EXEC: cat broproc/.stdout >> output -# @TEST-EXEC: btest-diff output - -# @TEST-START-FILE intel.dat -#fields indicator indicator_type meta.source meta.desc meta.url -192.168.1.1 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/1 -192.168.2.0/24 Intel::SUBNET source1 this subnetwork is just plain baaad http://some-data-distributor.com/2 -192.168.142.1 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/3 -192.168.142.0/24 Intel::SUBNET source1 this subnetwork is baaad http://some-data-distributor.com/4 -192.168.142.0/26 Intel::SUBNET source1 this 
subnetwork is inside http://some-data-distributor.com/4 -192.168.128.0/18 Intel::SUBNET source1 this subnetwork might be baaad http://some-data-distributor.com/5 -# @TEST-END-FILE - -redef exit_only_after_terminate = T; - -redef Intel::read_files += { "../intel.dat" }; -redef enum Intel::Where += { SOMEWHERE }; - -event do_it() - { - Intel::seen([$host=192.168.1.1, - $where=SOMEWHERE]); - Intel::seen([$host=192.168.2.1, - $where=SOMEWHERE]); - Intel::seen([$host=192.168.142.1, - $where=SOMEWHERE]); - } - -event bro_init() &priority=-10 - { - schedule 1sec { do_it() }; - } - -global log_lines = 0; -event Intel::log_intel(rec: Intel::Info) - { - ++log_lines; - if ( log_lines == 2 ) - terminate(); - } - -event Intel::match(s: Intel::Seen, items: set[Intel::Item]) - { - print ""; - print fmt("Seen: %s", s); - for ( item in items ) - print fmt("Item: %s", item); - } diff --git a/testing/btest/scripts/base/frameworks/intel/match-subnet.zeek b/testing/btest/scripts/base/frameworks/intel/match-subnet.zeek new file mode 100644 index 0000000000..ab6399f45b --- /dev/null +++ b/testing/btest/scripts/base/frameworks/intel/match-subnet.zeek @@ -0,0 +1,51 @@ +# @TEST-EXEC: btest-bg-run zeekproc zeek %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: cat zeekproc/intel.log > output +# @TEST-EXEC: cat zeekproc/.stdout >> output +# @TEST-EXEC: btest-diff output + +# @TEST-START-FILE intel.dat +#fields indicator indicator_type meta.source meta.desc meta.url +192.168.1.1 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/1 +192.168.2.0/24 Intel::SUBNET source1 this subnetwork is just plain baaad http://some-data-distributor.com/2 +192.168.142.1 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/3 +192.168.142.0/24 Intel::SUBNET source1 this subnetwork is baaad http://some-data-distributor.com/4 +192.168.142.0/26 Intel::SUBNET source1 this subnetwork is inside http://some-data-distributor.com/4 +192.168.128.0/18 Intel::SUBNET source1 this subnetwork might be baaad http://some-data-distributor.com/5 +# @TEST-END-FILE + +redef exit_only_after_terminate = T; + +redef Intel::read_files += { "../intel.dat" }; +redef enum Intel::Where += { SOMEWHERE }; + +event do_it() + { + Intel::seen([$host=192.168.1.1, + $where=SOMEWHERE]); + Intel::seen([$host=192.168.2.1, + $where=SOMEWHERE]); + Intel::seen([$host=192.168.142.1, + $where=SOMEWHERE]); + } + +event zeek_init() &priority=-10 + { + schedule 1sec { do_it() }; + } + +global log_lines = 0; +event Intel::log_intel(rec: Intel::Info) + { + ++log_lines; + if ( log_lines == 2 ) + terminate(); + } + +event Intel::match(s: Intel::Seen, items: set[Intel::Item]) + { + print ""; + print fmt("Seen: %s", s); + for ( item in items ) + print fmt("Item: %s", item); + } diff --git a/testing/btest/scripts/base/frameworks/intel/path-prefix/input-intel-absolute-prefixes.bro b/testing/btest/scripts/base/frameworks/intel/path-prefix/input-intel-absolute-prefixes.bro deleted file mode 100644 index 14ce01d32e..0000000000 --- a/testing/btest/scripts/base/frameworks/intel/path-prefix/input-intel-absolute-prefixes.bro +++ /dev/null @@ -1,23 +0,0 @@ -# This test verifies that an absolute Intel::path_prefix overrides any -# set for the Input framework. We still want the Intel framework to -# "break out" of any file system location specified for the input -# framework, e.g. when their paths live side-by-side (/foo/bar/input, -# /foo/bar/intel). 
-# -# @TEST-EXEC: mkdir -p intel -# @TEST-EXEC: cat %INPUT | sed "s|@path_prefix@|$PWD/intel|" >input.bro -# @TEST-EXEC: BROPATH=$BROPATH:$TEST_BASE/scripts/base/frameworks/intel/path-prefix bro -b input.bro >output -# @TEST-EXEC: btest-diff output - -@TEST-START-FILE intel/test.data -#fields indicator indicator_type meta.source -127.0.2.1 Intel::ADDR this btest -127.0.2.2 Intel::ADDR this btest -127.0.2.3 Intel::ADDR this btest -@TEST-END-FILE - -@load path-prefix-common.bro - -redef Intel::read_files += { "test.data" }; -redef InputAscii::path_prefix = "/this/does/not/exist"; -redef Intel::path_prefix = "@path_prefix@"; diff --git a/testing/btest/scripts/base/frameworks/intel/path-prefix/input-intel-absolute-prefixes.zeek b/testing/btest/scripts/base/frameworks/intel/path-prefix/input-intel-absolute-prefixes.zeek new file mode 100644 index 0000000000..ec27e998a7 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/intel/path-prefix/input-intel-absolute-prefixes.zeek @@ -0,0 +1,23 @@ +# This test verifies that an absolute Intel::path_prefix overrides any +# set for the Input framework. We still want the Intel framework to +# "break out" of any file system location specified for the input +# framework, e.g. when their paths live side-by-side (/foo/bar/input, +# /foo/bar/intel). +# +# @TEST-EXEC: mkdir -p intel +# @TEST-EXEC: cat %INPUT | sed "s|@path_prefix@|$PWD/intel|" >input.zeek +# @TEST-EXEC: ZEEKPATH=$ZEEKPATH:$TEST_BASE/scripts/base/frameworks/intel/path-prefix zeek -b input.zeek >output +# @TEST-EXEC: btest-diff output + +@TEST-START-FILE intel/test.data +#fields indicator indicator_type meta.source +127.0.2.1 Intel::ADDR this btest +127.0.2.2 Intel::ADDR this btest +127.0.2.3 Intel::ADDR this btest +@TEST-END-FILE + +@load path-prefix-common.zeek + +redef Intel::read_files += { "test.data" }; +redef InputAscii::path_prefix = "/this/does/not/exist"; +redef Intel::path_prefix = "@path_prefix@"; diff --git a/testing/btest/scripts/base/frameworks/intel/path-prefix/input-intel-relative-prefixes.bro b/testing/btest/scripts/base/frameworks/intel/path-prefix/input-intel-relative-prefixes.bro deleted file mode 100644 index 346f3bad81..0000000000 --- a/testing/btest/scripts/base/frameworks/intel/path-prefix/input-intel-relative-prefixes.bro +++ /dev/null @@ -1,20 +0,0 @@ -# This test verifies that combining Input::path_prefix and -# Intel::path_prefix works as intended: the intel path gets -# prepended first, then the input framework one. 
-# -# @TEST-EXEC: mkdir -p input/intel -# @TEST-EXEC: BROPATH=$BROPATH:$TEST_BASE/scripts/base/frameworks/intel/path-prefix bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -@TEST-START-FILE input/intel/test.data -#fields indicator indicator_type meta.source -127.0.1.1 Intel::ADDR this btest -127.0.1.2 Intel::ADDR this btest -127.0.1.3 Intel::ADDR this btest -@TEST-END-FILE - -@load path-prefix-common.bro - -redef Intel::read_files += { "test.data" }; -redef InputAscii::path_prefix = "input"; -redef Intel::path_prefix = "intel"; diff --git a/testing/btest/scripts/base/frameworks/intel/path-prefix/input-intel-relative-prefixes.zeek b/testing/btest/scripts/base/frameworks/intel/path-prefix/input-intel-relative-prefixes.zeek new file mode 100644 index 0000000000..ecb74cc777 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/intel/path-prefix/input-intel-relative-prefixes.zeek @@ -0,0 +1,20 @@ +# This test verifies that combining Input::path_prefix and +# Intel::path_prefix works as intended: the intel path gets +# prepended first, then the input framework one. +# +# @TEST-EXEC: mkdir -p input/intel +# @TEST-EXEC: ZEEKPATH=$ZEEKPATH:$TEST_BASE/scripts/base/frameworks/intel/path-prefix zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +@TEST-START-FILE input/intel/test.data +#fields indicator indicator_type meta.source +127.0.1.1 Intel::ADDR this btest +127.0.1.2 Intel::ADDR this btest +127.0.1.3 Intel::ADDR this btest +@TEST-END-FILE + +@load path-prefix-common.zeek + +redef Intel::read_files += { "test.data" }; +redef InputAscii::path_prefix = "input"; +redef Intel::path_prefix = "intel"; diff --git a/testing/btest/scripts/base/frameworks/intel/path-prefix/input-prefix.bro b/testing/btest/scripts/base/frameworks/intel/path-prefix/input-prefix.bro deleted file mode 100644 index 19828ea8af..0000000000 --- a/testing/btest/scripts/base/frameworks/intel/path-prefix/input-prefix.bro +++ /dev/null @@ -1,20 +0,0 @@ -# This test verifies that specifying an Input::path_prefix -# also affects the Intel framework since it relies on the -# former for loading data. (Note that this also tests the -# Input::REREAD ingestion mode.) -# -# @TEST-EXEC: mkdir -p alternative -# @TEST-EXEC: BROPATH=$BROPATH:$TEST_BASE/scripts/base/frameworks/intel/path-prefix bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -@TEST-START-FILE alternative/test.data -#fields indicator indicator_type meta.source -127.0.0.1 Intel::ADDR this btest -127.0.0.2 Intel::ADDR this btest -127.0.0.3 Intel::ADDR this btest -@TEST-END-FILE - -@load path-prefix-common.bro - -redef Intel::read_files += { "test.data" }; -redef InputAscii::path_prefix = "alternative"; diff --git a/testing/btest/scripts/base/frameworks/intel/path-prefix/input-prefix.zeek b/testing/btest/scripts/base/frameworks/intel/path-prefix/input-prefix.zeek new file mode 100644 index 0000000000..f381c690b3 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/intel/path-prefix/input-prefix.zeek @@ -0,0 +1,20 @@ +# This test verifies that specifying an Input::path_prefix +# also affects the Intel framework since it relies on the +# former for loading data. (Note that this also tests the +# Input::REREAD ingestion mode.) 
+# +# @TEST-EXEC: mkdir -p alternative +# @TEST-EXEC: ZEEKPATH=$ZEEKPATH:$TEST_BASE/scripts/base/frameworks/intel/path-prefix zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +@TEST-START-FILE alternative/test.data +#fields indicator indicator_type meta.source +127.0.0.1 Intel::ADDR this btest +127.0.0.2 Intel::ADDR this btest +127.0.0.3 Intel::ADDR this btest +@TEST-END-FILE + +@load path-prefix-common.zeek + +redef Intel::read_files += { "test.data" }; +redef InputAscii::path_prefix = "alternative"; diff --git a/testing/btest/scripts/base/frameworks/intel/path-prefix/no-paths.bro b/testing/btest/scripts/base/frameworks/intel/path-prefix/no-paths.bro deleted file mode 100644 index 7148c1e857..0000000000 --- a/testing/btest/scripts/base/frameworks/intel/path-prefix/no-paths.bro +++ /dev/null @@ -1,16 +0,0 @@ -# This test verifies that when setting neither InputAscii::path_prefix -# nor Intel::path_prefix, Zeek correctly locates local intel files. -# -# @TEST-EXEC: BROPATH=$BROPATH:$TEST_BASE/scripts/base/frameworks/intel/path-prefix bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -@TEST-START-FILE test.data -#fields indicator indicator_type meta.source -127.0.0.1 Intel::ADDR this btest -127.0.0.2 Intel::ADDR this btest -127.0.0.3 Intel::ADDR this btest -@TEST-END-FILE - -@load path-prefix-common.bro - -redef Intel::read_files += { "test.data" }; diff --git a/testing/btest/scripts/base/frameworks/intel/path-prefix/no-paths.zeek b/testing/btest/scripts/base/frameworks/intel/path-prefix/no-paths.zeek new file mode 100644 index 0000000000..2fadc01c24 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/intel/path-prefix/no-paths.zeek @@ -0,0 +1,16 @@ +# This test verifies that when setting neither InputAscii::path_prefix +# nor Intel::path_prefix, Zeek correctly locates local intel files. +# +# @TEST-EXEC: ZEEKPATH=$ZEEKPATH:$TEST_BASE/scripts/base/frameworks/intel/path-prefix zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +@TEST-START-FILE test.data +#fields indicator indicator_type meta.source +127.0.0.1 Intel::ADDR this btest +127.0.0.2 Intel::ADDR this btest +127.0.0.3 Intel::ADDR this btest +@TEST-END-FILE + +@load path-prefix-common.zeek + +redef Intel::read_files += { "test.data" }; diff --git a/testing/btest/scripts/base/frameworks/intel/path-prefix/path-prefix-common.bro b/testing/btest/scripts/base/frameworks/intel/path-prefix/path-prefix-common.zeek similarity index 100% rename from testing/btest/scripts/base/frameworks/intel/path-prefix/path-prefix-common.bro rename to testing/btest/scripts/base/frameworks/intel/path-prefix/path-prefix-common.zeek diff --git a/testing/btest/scripts/base/frameworks/intel/read-file-dist-cluster.bro b/testing/btest/scripts/base/frameworks/intel/read-file-dist-cluster.bro deleted file mode 100644 index a4becfb2b3..0000000000 --- a/testing/btest/scripts/base/frameworks/intel/read-file-dist-cluster.bro +++ /dev/null @@ -1,67 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. 
CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait -k 10 -# @TEST-EXEC: btest-diff manager-1/.stdout -# @TEST-EXEC: btest-diff manager-1/intel.log -# @TEST-EXEC: btest-diff worker-1/.stdout -# @TEST-EXEC: btest-diff worker-2/.stdout - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], -}; -@TEST-END-FILE - -@TEST-START-FILE intel.dat -#fields indicator indicator_type meta.source meta.desc meta.url -1.2.3.4 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/1234 -1.2.3.4 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/1234 -e@mail.com Intel::EMAIL source1 Phishing email source http://some-data-distributor.com/100000 -@TEST-END-FILE - -@load base/frameworks/control -redef Log::default_rotation_interval=0sec; - -module Intel; - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) -redef Intel::read_files += { "../intel.dat" }; -@endif - -redef enum Intel::Where += { - Intel::IN_A_TEST, -}; - -event do_it() - { - Intel::seen([$host=1.2.3.4, $where=Intel::IN_A_TEST]); - Intel::seen([$indicator="e@mail.com", $indicator_type=Intel::EMAIL, $where=Intel::IN_A_TEST]); - } - -event bro_init() - { - # Delay the workers searching for hits briefly to allow for the data distribution - # mechanism to distribute the data to the workers. - if ( Cluster::local_node_type() == Cluster::WORKER ) - schedule 2sec { do_it() }; - } - -global intel_hits=0; -event Intel::log_intel(rec: Intel::Info) - { - ++intel_hits; - # There should be 4 hits since each worker is "seeing" 2 things. - if ( intel_hits == 4 ) - { - # We're delaying shutdown for a second here to make sure that no other - # matches happen (which would be wrong!). - schedule 1sec { Control::shutdown_request() }; - } - } diff --git a/testing/btest/scripts/base/frameworks/intel/read-file-dist-cluster.zeek b/testing/btest/scripts/base/frameworks/intel/read-file-dist-cluster.zeek new file mode 100644 index 0000000000..a0140aa316 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/intel/read-file-dist-cluster.zeek @@ -0,0 +1,67 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-2 ZEEKPATH=$ZEEKPATH:.. 
CLUSTER_NODE=worker-2 zeek %INPUT +# @TEST-EXEC: btest-bg-wait -k 10 +# @TEST-EXEC: btest-diff manager-1/.stdout +# @TEST-EXEC: btest-diff manager-1/intel.log +# @TEST-EXEC: btest-diff worker-1/.stdout +# @TEST-EXEC: btest-diff worker-2/.stdout + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], +}; +@TEST-END-FILE + +@TEST-START-FILE intel.dat +#fields indicator indicator_type meta.source meta.desc meta.url +1.2.3.4 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/1234 +1.2.3.4 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/1234 +e@mail.com Intel::EMAIL source1 Phishing email source http://some-data-distributor.com/100000 +@TEST-END-FILE + +@load base/frameworks/control +redef Log::default_rotation_interval=0sec; + +module Intel; + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) +redef Intel::read_files += { "../intel.dat" }; +@endif + +redef enum Intel::Where += { + Intel::IN_A_TEST, +}; + +event do_it() + { + Intel::seen([$host=1.2.3.4, $where=Intel::IN_A_TEST]); + Intel::seen([$indicator="e@mail.com", $indicator_type=Intel::EMAIL, $where=Intel::IN_A_TEST]); + } + +event zeek_init() + { + # Delay the workers searching for hits briefly to allow for the data distribution + # mechanism to distribute the data to the workers. + if ( Cluster::local_node_type() == Cluster::WORKER ) + schedule 2sec { do_it() }; + } + +global intel_hits=0; +event Intel::log_intel(rec: Intel::Info) + { + ++intel_hits; + # There should be 4 hits since each worker is "seeing" 2 things. + if ( intel_hits == 4 ) + { + # We're delaying shutdown for a second here to make sure that no other + # matches happen (which would be wrong!). + schedule 1sec { Control::shutdown_request() }; + } + } diff --git a/testing/btest/scripts/base/frameworks/intel/remove-item-cluster.bro b/testing/btest/scripts/base/frameworks/intel/remove-item-cluster.bro deleted file mode 100644 index 5241231e1f..0000000000 --- a/testing/btest/scripts/base/frameworks/intel/remove-item-cluster.bro +++ /dev/null @@ -1,92 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. 
CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-wait -k 13 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff manager-1/.stdout -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff worker-1/.stdout -# @TEST-EXEC: btest-diff manager-1/intel.log - -# @TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], -}; -# @TEST-END-FILE - -module Intel; - -redef Log::default_rotation_interval=0sec; - -event test_manager() - { - Intel::remove([$indicator="192.168.0.1", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]); - Intel::seen([$host=192.168.0.1, $where=Intel::IN_ANYWHERE]); - Intel::remove([$indicator="192.168.0.2", $indicator_type=Intel::ADDR, $meta=[$source="source1"]], T); - Intel::seen([$host=192.168.0.2, $where=Intel::IN_ANYWHERE]); - } - -event test_worker() - { - Intel::remove([$indicator="192.168.1.2", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]); - Intel::remove([$indicator="192.168.1.2", $indicator_type=Intel::ADDR, $meta=[$source="source2"]]); - Intel::seen([$host=192.168.1.2, $where=Intel::IN_ANYWHERE]); - # Trigger shutdown by matching data that should be present - Intel::seen([$host=10.10.10.10, $where=Intel::IN_ANYWHERE]); - } - -event Cluster::node_up(name: string, id: string) - { - # Insert the data once all workers are connected. - if ( Cluster::local_node_type() == Cluster::MANAGER && Cluster::worker_count == 1 ) - { - Intel::insert([$indicator="192.168.0.1", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]); - Intel::insert([$indicator="192.168.0.2", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]); - Intel::insert([$indicator="192.168.0.2", $indicator_type=Intel::ADDR, $meta=[$source="source2"]]); - Intel::insert([$indicator="192.168.1.2", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]); - Intel::insert([$indicator="192.168.1.2", $indicator_type=Intel::ADDR, $meta=[$source="source2"]]); - Intel::insert([$indicator="10.10.10.10", $indicator_type=Intel::ADDR, $meta=[$source="end"]]); - - event test_manager(); - } - } - -global worker_data = 0; -event Intel::insert_indicator(item: Intel::Item) - { - # Run test on worker-1 when all items have been inserted - if ( Cluster::node == "worker-1" ) - { - ++worker_data; - if ( worker_data == 4 ) - event test_worker(); - } - } - -event Intel::remove_item(item: Item, purge_indicator: bool) - { - print fmt("Removing %s (source: %s).", item$indicator, item$meta$source); - } - -event remove_indicator(item: Item) - { - print fmt("Purging %s.", item$indicator); - } - -event die() - { - terminate(); - } - -event Intel::log_intel(rec: Intel::Info) - { - print "Logging intel hit!"; - schedule 2sec { die() }; - } - -event Cluster::node_down(name: string, id: string) - { - # Cascading termination - schedule 2sec { die() }; - } diff --git a/testing/btest/scripts/base/frameworks/intel/remove-item-cluster.zeek b/testing/btest/scripts/base/frameworks/intel/remove-item-cluster.zeek new file mode 100644 index 0000000000..98c8b55736 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/intel/remove-item-cluster.zeek @@ -0,0 +1,92 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-1 ZEEKPATH=$ZEEKPATH:.. 
CLUSTER_NODE=worker-1 zeek %INPUT +# @TEST-EXEC: btest-bg-wait -k 13 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff manager-1/.stdout +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff worker-1/.stdout +# @TEST-EXEC: btest-diff manager-1/intel.log + +# @TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], +}; +# @TEST-END-FILE + +module Intel; + +redef Log::default_rotation_interval=0sec; + +event test_manager() + { + Intel::remove([$indicator="192.168.0.1", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]); + Intel::seen([$host=192.168.0.1, $where=Intel::IN_ANYWHERE]); + Intel::remove([$indicator="192.168.0.2", $indicator_type=Intel::ADDR, $meta=[$source="source1"]], T); + Intel::seen([$host=192.168.0.2, $where=Intel::IN_ANYWHERE]); + } + +event test_worker() + { + Intel::remove([$indicator="192.168.1.2", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]); + Intel::remove([$indicator="192.168.1.2", $indicator_type=Intel::ADDR, $meta=[$source="source2"]]); + Intel::seen([$host=192.168.1.2, $where=Intel::IN_ANYWHERE]); + # Trigger shutdown by matching data that should be present + Intel::seen([$host=10.10.10.10, $where=Intel::IN_ANYWHERE]); + } + +event Cluster::node_up(name: string, id: string) + { + # Insert the data once all workers are connected. + if ( Cluster::local_node_type() == Cluster::MANAGER && Cluster::worker_count == 1 ) + { + Intel::insert([$indicator="192.168.0.1", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]); + Intel::insert([$indicator="192.168.0.2", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]); + Intel::insert([$indicator="192.168.0.2", $indicator_type=Intel::ADDR, $meta=[$source="source2"]]); + Intel::insert([$indicator="192.168.1.2", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]); + Intel::insert([$indicator="192.168.1.2", $indicator_type=Intel::ADDR, $meta=[$source="source2"]]); + Intel::insert([$indicator="10.10.10.10", $indicator_type=Intel::ADDR, $meta=[$source="end"]]); + + event test_manager(); + } + } + +global worker_data = 0; +event Intel::insert_indicator(item: Intel::Item) + { + # Run test on worker-1 when all items have been inserted + if ( Cluster::node == "worker-1" ) + { + ++worker_data; + if ( worker_data == 4 ) + event test_worker(); + } + } + +event Intel::remove_item(item: Item, purge_indicator: bool) + { + print fmt("Removing %s (source: %s).", item$indicator, item$meta$source); + } + +event remove_indicator(item: Item) + { + print fmt("Purging %s.", item$indicator); + } + +event die() + { + terminate(); + } + +event Intel::log_intel(rec: Intel::Info) + { + print "Logging intel hit!"; + schedule 2sec { die() }; + } + +event Cluster::node_down(name: string, id: string) + { + # Cascading termination + schedule 2sec { die() }; + } diff --git a/testing/btest/scripts/base/frameworks/intel/remove-non-existing.bro b/testing/btest/scripts/base/frameworks/intel/remove-non-existing.bro deleted file mode 100644 index 1885f5bcf8..0000000000 --- a/testing/btest/scripts/base/frameworks/intel/remove-non-existing.bro +++ /dev/null @@ -1,31 +0,0 @@ -# @TEST-EXEC: btest-bg-run broproc bro %INPUT -# @TEST-EXEC: btest-bg-wait -k 5 -# @TEST-EXEC: cat broproc/reporter.log > output -# @TEST-EXEC: cat broproc/.stdout >> output -# @TEST-EXEC: 
TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath | $SCRIPTS/diff-remove-timestamps" btest-diff output - -# @TEST-START-FILE intel.dat -#fields indicator indicator_type meta.source meta.desc meta.url -192.168.1.1 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/1 -# @TEST-END-FILE - -redef exit_only_after_terminate = T; - -redef Intel::read_files += { "../intel.dat" }; -redef enum Intel::Where += { SOMEWHERE }; - -event do_it() - { - # not existing meta data: - Intel::remove([$indicator="192.168.1.1", $indicator_type=Intel::ADDR, $meta=[$source="source23"]]); - # existing: - Intel::remove([$indicator="192.168.1.1", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]); - # not existing item: - Intel::remove([$indicator="192.168.1.1", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]); - terminate(); - } - -event bro_init() &priority=-10 - { - schedule 1sec { do_it() }; - } diff --git a/testing/btest/scripts/base/frameworks/intel/remove-non-existing.zeek b/testing/btest/scripts/base/frameworks/intel/remove-non-existing.zeek new file mode 100644 index 0000000000..3dfcb9e334 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/intel/remove-non-existing.zeek @@ -0,0 +1,31 @@ +# @TEST-EXEC: btest-bg-run zeekproc zeek %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: cat zeekproc/reporter.log > output +# @TEST-EXEC: cat zeekproc/.stdout >> output +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath | $SCRIPTS/diff-remove-timestamps" btest-diff output + +# @TEST-START-FILE intel.dat +#fields indicator indicator_type meta.source meta.desc meta.url +192.168.1.1 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/1 +# @TEST-END-FILE + +redef exit_only_after_terminate = T; + +redef Intel::read_files += { "../intel.dat" }; +redef enum Intel::Where += { SOMEWHERE }; + +event do_it() + { + # not existing meta data: + Intel::remove([$indicator="192.168.1.1", $indicator_type=Intel::ADDR, $meta=[$source="source23"]]); + # existing: + Intel::remove([$indicator="192.168.1.1", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]); + # not existing item: + Intel::remove([$indicator="192.168.1.1", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]); + terminate(); + } + +event zeek_init() &priority=-10 + { + schedule 1sec { do_it() }; + } diff --git a/testing/btest/scripts/base/frameworks/intel/updated-match.bro b/testing/btest/scripts/base/frameworks/intel/updated-match.bro deleted file mode 100644 index 5cace1741e..0000000000 --- a/testing/btest/scripts/base/frameworks/intel/updated-match.bro +++ /dev/null @@ -1,74 +0,0 @@ -# @TEST-EXEC: cp intel1.dat intel.dat -# @TEST-EXEC: btest-bg-run broproc bro %INPUT -# @TEST-EXEC: $SCRIPTS/wait-for-file broproc/got1 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: cp intel2.dat intel.dat -# @TEST-EXEC: $SCRIPTS/wait-for-file broproc/got2 5 || (btest-bg-wait -k 1 && false) -# @TEST-EXEC: cp intel3.dat intel.dat -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: cat broproc/intel.log > output -# @TEST-EXEC: cat broproc/notice.log >> output -# @TEST-EXEC: btest-diff output - -# @TEST-START-FILE intel1.dat -#fields indicator indicator_type meta.source meta.desc meta.url meta.do_notice -1.2.3.4 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/1234 F -# @TEST-END-FILE - -# @TEST-START-FILE intel2.dat -#fields indicator indicator_type meta.source meta.desc meta.url meta.do_notice -1.2.3.4 Intel::ADDR source2 this host is just plain baaad 
http://some-data-distributor.com/1234 F -4.3.2.1 Intel::ADDR source2 this host might also be baaad http://some-data-distributor.com/4321 F -# @TEST-END-FILE - -# @TEST-START-FILE intel3.dat -#fields indicator indicator_type meta.source meta.desc meta.url meta.do_notice -1.2.3.4 Intel::ADDR source2 this host is just plain baaad http://some-data-distributor.com/1234 T -4.3.2.1 Intel::ADDR source2 this host might also be baaad http://some-data-distributor.com/4321 T -# @TEST-END-FILE - -@load frameworks/intel/do_notice - -redef exit_only_after_terminate = T; -redef Intel::read_files += { "../intel.dat" }; -redef enum Intel::Where += { SOMEWHERE }; - -global runs = 0; -global entries_read = 0; - -event do_it() - { - Intel::seen([$host=1.2.3.4, - $where=SOMEWHERE]); - Intel::seen([$host=4.3.2.1, - $where=SOMEWHERE]); - - ++runs; - - if ( runs == 1 ) - system("touch got1"); - if ( runs == 2 ) - system("touch got2"); - } - -global log_lines = 0; -event Intel::log_intel(rec: Intel::Info) - { - ++log_lines; - if ( log_lines == 5 ) - terminate(); - } - -module Intel; - -event Intel::read_entry(desc: Input::EventDescription, tpe: Input::Event, item: Intel::Item) - { - ++entries_read; - print entries_read; - - if ( entries_read == 1 ) - event do_it(); - else if ( entries_read == 3 ) - event do_it(); - else if ( entries_read == 5 ) - event do_it(); - } diff --git a/testing/btest/scripts/base/frameworks/intel/updated-match.zeek b/testing/btest/scripts/base/frameworks/intel/updated-match.zeek new file mode 100644 index 0000000000..75a272773d --- /dev/null +++ b/testing/btest/scripts/base/frameworks/intel/updated-match.zeek @@ -0,0 +1,74 @@ +# @TEST-EXEC: cp intel1.dat intel.dat +# @TEST-EXEC: btest-bg-run zeekproc zeek %INPUT +# @TEST-EXEC: $SCRIPTS/wait-for-file zeekproc/got1 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: cp intel2.dat intel.dat +# @TEST-EXEC: $SCRIPTS/wait-for-file zeekproc/got2 5 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: cp intel3.dat intel.dat +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: cat zeekproc/intel.log > output +# @TEST-EXEC: cat zeekproc/notice.log >> output +# @TEST-EXEC: btest-diff output + +# @TEST-START-FILE intel1.dat +#fields indicator indicator_type meta.source meta.desc meta.url meta.do_notice +1.2.3.4 Intel::ADDR source1 this host is just plain baaad http://some-data-distributor.com/1234 F +# @TEST-END-FILE + +# @TEST-START-FILE intel2.dat +#fields indicator indicator_type meta.source meta.desc meta.url meta.do_notice +1.2.3.4 Intel::ADDR source2 this host is just plain baaad http://some-data-distributor.com/1234 F +4.3.2.1 Intel::ADDR source2 this host might also be baaad http://some-data-distributor.com/4321 F +# @TEST-END-FILE + +# @TEST-START-FILE intel3.dat +#fields indicator indicator_type meta.source meta.desc meta.url meta.do_notice +1.2.3.4 Intel::ADDR source2 this host is just plain baaad http://some-data-distributor.com/1234 T +4.3.2.1 Intel::ADDR source2 this host might also be baaad http://some-data-distributor.com/4321 T +# @TEST-END-FILE + +@load frameworks/intel/do_notice + +redef exit_only_after_terminate = T; +redef Intel::read_files += { "../intel.dat" }; +redef enum Intel::Where += { SOMEWHERE }; + +global runs = 0; +global entries_read = 0; + +event do_it() + { + Intel::seen([$host=1.2.3.4, + $where=SOMEWHERE]); + Intel::seen([$host=4.3.2.1, + $where=SOMEWHERE]); + + ++runs; + + if ( runs == 1 ) + system("touch got1"); + if ( runs == 2 ) + system("touch got2"); + } + +global log_lines = 0; +event Intel::log_intel(rec: Intel::Info) + { 
+ ++log_lines; + if ( log_lines == 5 ) + terminate(); + } + +module Intel; + +event Intel::read_entry(desc: Input::EventDescription, tpe: Input::Event, item: Intel::Item) + { + ++entries_read; + print entries_read; + + if ( entries_read == 1 ) + event do_it(); + else if ( entries_read == 3 ) + event do_it(); + else if ( entries_read == 5 ) + event do_it(); + } diff --git a/testing/btest/scripts/base/frameworks/logging/adapt-filter.bro b/testing/btest/scripts/base/frameworks/logging/adapt-filter.bro deleted file mode 100644 index 2db881deea..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/adapt-filter.bro +++ /dev/null @@ -1,33 +0,0 @@ - -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff ssh-new-default.log -# @TEST-EXEC: test '!' -e ssh.log - -module SSH; - -export { - # Create a new ID for our log stream - redef enum Log::ID += { LOG }; - - # Define a record with all the columns the log file can have. - # (I'm using a subset of fields from ssh-ext for demonstration.) - type Info: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. - status: string &optional; - country: string &default="unknown"; - } &log; -} - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Info]); - - local filter = Log::get_filter(SSH::LOG, "default"); - filter$path= "ssh-new-default"; - Log::add_filter(SSH::LOG, filter); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); -} diff --git a/testing/btest/scripts/base/frameworks/logging/adapt-filter.zeek b/testing/btest/scripts/base/frameworks/logging/adapt-filter.zeek new file mode 100644 index 0000000000..a5aed0c018 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/adapt-filter.zeek @@ -0,0 +1,33 @@ + +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff ssh-new-default.log +# @TEST-EXEC: test '!' -e ssh.log + +module SSH; + +export { + # Create a new ID for our log stream + redef enum Log::ID += { LOG }; + + # Define a record with all the columns the log file can have. + # (I'm using a subset of fields from ssh-ext for demonstration.) + type Info: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + } &log; +} + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Info]); + + local filter = Log::get_filter(SSH::LOG, "default"); + filter$path= "ssh-new-default"; + Log::add_filter(SSH::LOG, filter); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); +} diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-binary.bro b/testing/btest/scripts/base/frameworks/logging/ascii-binary.bro deleted file mode 100644 index fcbac3be58..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/ascii-binary.bro +++ /dev/null @@ -1,25 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff ssh.log - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Info: record { - data: string; - data2: string; - } &log; -} - -redef LogAscii::separator = "|"; - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Info]); - Log::write(SSH::LOG, [$data="abc\n\xffdef", $data2="DATA2"]); - Log::write(SSH::LOG, [$data="abc|\xffdef", $data2="DATA2"]); - Log::write(SSH::LOG, [$data="abc\xff|def", $data2="DATA2"]); -} - diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-binary.zeek b/testing/btest/scripts/base/frameworks/logging/ascii-binary.zeek new file mode 100644 index 0000000000..74d3ea9267 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/ascii-binary.zeek @@ -0,0 +1,25 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff ssh.log + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + data: string; + data2: string; + } &log; +} + +redef LogAscii::separator = "|"; + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Info]); + Log::write(SSH::LOG, [$data="abc\n\xffdef", $data2="DATA2"]); + Log::write(SSH::LOG, [$data="abc|\xffdef", $data2="DATA2"]); + Log::write(SSH::LOG, [$data="abc\xff|def", $data2="DATA2"]); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-double.bro b/testing/btest/scripts/base/frameworks/logging/ascii-double.bro deleted file mode 100644 index b824d93676..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/ascii-double.bro +++ /dev/null @@ -1,85 +0,0 @@ -# @TEST-DOC: Test that the ASCII writer logs values of type "double" correctly. -# -# @TEST-EXEC: bro -b %INPUT test-json.bro -# @TEST-EXEC: mv test.log json.log -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff test.log -# @TEST-EXEC: btest-diff json.log -# -# Make sure we do not write out scientific notation for doubles. 
- -module Test; - -export { - redef enum Log::ID += { LOG }; - - type Info: record { - d: double &log; - }; -} - -function logwrite(val: double) -{ - Log::write(Test::LOG, [$d=val]); -} - -event bro_init() -{ - local d: double; - local dmax: double = 1.79e308; - local dmin: double = 2.23e-308; - - Log::create_stream(Test::LOG, [$columns=Info]); - - # relatively large values - logwrite(2153226000.0); - logwrite(2153226000.1); - logwrite(2153226000.123456789); - - # relatively small values - logwrite(1.0); - logwrite(1.1); - logwrite(1.123456789); - logwrite(-1.123456789); - logwrite(1.1234); - logwrite(.1234); - - # scientific notation (positive exponents) - logwrite(5e4); - logwrite(-5e4); - logwrite(3.14e15); - logwrite(-3.14e15); - logwrite(dmax); - logwrite(-dmax); - - # scientific notation (negative exponents) - logwrite(1.23456789e-5); - logwrite(dmin); - logwrite(-dmin); - - # inf - d = dmax; # ok - d = d * 2.0; # inf - logwrite(d); - - # -inf - d = -dmax; # ok - d = d * 2.0; # -inf - logwrite(d); - - # negative zero (compares equal to 0.0, but has different representation) - d = -0.0; - logwrite(d); - - # nan - d = dmax; # ok - d = d * 2.0; # inf - d = d * 0.0; # nan - logwrite(d); -} - -# @TEST-START-FILE test-json.bro - -redef LogAscii::use_json = T; - -# @TEST-END-FILE diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-double.zeek b/testing/btest/scripts/base/frameworks/logging/ascii-double.zeek new file mode 100644 index 0000000000..676f69600f --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/ascii-double.zeek @@ -0,0 +1,85 @@ +# @TEST-DOC: Test that the ASCII writer logs values of type "double" correctly. +# +# @TEST-EXEC: zeek -b %INPUT test-json.zeek +# @TEST-EXEC: mv test.log json.log +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff test.log +# @TEST-EXEC: btest-diff json.log +# +# Make sure we do not write out scientific notation for doubles. 
+ +module Test; + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + d: double &log; + }; +} + +function logwrite(val: double) +{ + Log::write(Test::LOG, [$d=val]); +} + +event zeek_init() +{ + local d: double; + local dmax: double = 1.79e308; + local dmin: double = 2.23e-308; + + Log::create_stream(Test::LOG, [$columns=Info]); + + # relatively large values + logwrite(2153226000.0); + logwrite(2153226000.1); + logwrite(2153226000.123456789); + + # relatively small values + logwrite(1.0); + logwrite(1.1); + logwrite(1.123456789); + logwrite(-1.123456789); + logwrite(1.1234); + logwrite(.1234); + + # scientific notation (positive exponents) + logwrite(5e4); + logwrite(-5e4); + logwrite(3.14e15); + logwrite(-3.14e15); + logwrite(dmax); + logwrite(-dmax); + + # scientific notation (negative exponents) + logwrite(1.23456789e-5); + logwrite(dmin); + logwrite(-dmin); + + # inf + d = dmax; # ok + d = d * 2.0; # inf + logwrite(d); + + # -inf + d = -dmax; # ok + d = d * 2.0; # -inf + logwrite(d); + + # negative zero (compares equal to 0.0, but has different representation) + d = -0.0; + logwrite(d); + + # nan + d = dmax; # ok + d = d * 2.0; # inf + d = d * 0.0; # nan + logwrite(d); +} + +# @TEST-START-FILE test-json.zeek + +redef LogAscii::use_json = T; + +# @TEST-END-FILE diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-empty.bro b/testing/btest/scripts/base/frameworks/logging/ascii-empty.bro deleted file mode 100644 index 0bb5900e30..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/ascii-empty.bro +++ /dev/null @@ -1,39 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: cat ssh.log | grep -v PREFIX.*20..- >ssh-filtered.log -# @TEST-EXEC: btest-diff ssh-filtered.log - -redef LogAscii::output_to_stdout = F; -redef LogAscii::separator = "|"; -redef LogAscii::empty_field = "EMPTY"; -redef LogAscii::unset_field = "NOT-SET"; -redef LogAscii::meta_prefix = "PREFIX<>"; - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. - status: string &optional; - country: string &default="unknown"; - b: bool &optional; - } &log; -} - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $country="US"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $country="BR"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $b=T, $status="failure", $country=""]); - -} - diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-empty.zeek b/testing/btest/scripts/base/frameworks/logging/ascii-empty.zeek new file mode 100644 index 0000000000..515bd9aab3 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/ascii-empty.zeek @@ -0,0 +1,39 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: cat ssh.log | grep -v PREFIX.*20..- >ssh-filtered.log +# @TEST-EXEC: btest-diff ssh-filtered.log + +redef LogAscii::output_to_stdout = F; +redef LogAscii::separator = "|"; +redef LogAscii::empty_field = "EMPTY"; +redef LogAscii::unset_field = "NOT-SET"; +redef LogAscii::meta_prefix = "PREFIX<>"; + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + b: bool &optional; + } &log; +} + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $country="US"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $country="BR"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $b=T, $status="failure", $country=""]); + +} + diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-escape-binary.bro b/testing/btest/scripts/base/frameworks/logging/ascii-escape-binary.bro deleted file mode 100644 index 3df3ea1d25..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/ascii-escape-binary.bro +++ /dev/null @@ -1,48 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff test.log -# @TEST-EXEC: btest-diff output - -module Test; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - s: string; - } &log; -} - -event bro_init() -{ - local a = "abc\0def"; - local b = escape_string(a); - local c = fmt("%s", a); - - Log::create_stream(Test::LOG, [$columns=Log]); - Log::write(Test::LOG, [$s="AB\0CD\0"]); - Log::write(Test::LOG, [$s="AB\xffCD\0"]); - Log::write(Test::LOG, [$s="AB\\xffCD\0"]); - Log::write(Test::LOG, [$s=" "]); - Log::write(Test::LOG, [$s=b]); - Log::write(Test::LOG, [$s=" "]); - Log::write(Test::LOG, [$s=c]); - Log::write(Test::LOG, [$s=" "]); - Log::write(Test::LOG, [$s="foo \xc2\xae bar \\xc2\\xae baz"]); - Log::write(Test::LOG, [$s="foo\x00bar\\0baz"]); - Log::write(Test::LOG, [$s="foo \16 bar ^N baz"]); - - print "AB\0CD\0"; - print "AB\xffCD\0"; - print "AB\\xffCD\0"; - print ""; - print b; - print ""; - print c; - print ""; - print "foo \xc2\xae bar \\xc2\\xae baz"; - print "foo\x00bar\\0baz"; - print "foo \16 bar ^N baz"; - - print ""; -} - diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-escape-binary.zeek b/testing/btest/scripts/base/frameworks/logging/ascii-escape-binary.zeek new file mode 100644 index 0000000000..5535f83276 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/ascii-escape-binary.zeek @@ -0,0 +1,48 @@ +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff test.log +# @TEST-EXEC: btest-diff output + +module Test; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + s: string; + } &log; +} + +event zeek_init() +{ + local a = "abc\0def"; + local b = escape_string(a); + local c = fmt("%s", a); + + Log::create_stream(Test::LOG, [$columns=Log]); + Log::write(Test::LOG, [$s="AB\0CD\0"]); + Log::write(Test::LOG, [$s="AB\xffCD\0"]); + Log::write(Test::LOG, [$s="AB\\xffCD\0"]); + Log::write(Test::LOG, [$s=" "]); + Log::write(Test::LOG, [$s=b]); + Log::write(Test::LOG, [$s=" "]); + Log::write(Test::LOG, [$s=c]); + Log::write(Test::LOG, [$s=" "]); + Log::write(Test::LOG, [$s="foo \xc2\xae bar \\xc2\\xae baz"]); + Log::write(Test::LOG, [$s="foo\x00bar\\0baz"]); + Log::write(Test::LOG, [$s="foo \16 bar ^N baz"]); + + print "AB\0CD\0"; + print "AB\xffCD\0"; + print "AB\\xffCD\0"; + print ""; + print b; + print ""; + print c; + print ""; + print "foo \xc2\xae bar \\xc2\\xae baz"; + print "foo\x00bar\\0baz"; + print "foo \16 bar ^N baz"; + + print ""; +} + diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-escape-empty-str.bro 
b/testing/btest/scripts/base/frameworks/logging/ascii-escape-empty-str.bro deleted file mode 100644 index e18926a194..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/ascii-escape-empty-str.bro +++ /dev/null @@ -1,24 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff test.log - -redef LogAscii::empty_field = "EMPTY"; - -module test; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - ss: set[string]; - } &log; -} - -event bro_init() -{ - Log::create_stream(test::LOG, [$columns=Log]); - - Log::write(test::LOG, [ - $ss=set("EMPTY") - ]); -} diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-escape-empty-str.zeek b/testing/btest/scripts/base/frameworks/logging/ascii-escape-empty-str.zeek new file mode 100644 index 0000000000..2c66593250 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/ascii-escape-empty-str.zeek @@ -0,0 +1,24 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff test.log + +redef LogAscii::empty_field = "EMPTY"; + +module test; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + ss: set[string]; + } &log; +} + +event zeek_init() +{ + Log::create_stream(test::LOG, [$columns=Log]); + + Log::write(test::LOG, [ + $ss=set("EMPTY") + ]); +} diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-escape-notset-str.bro b/testing/btest/scripts/base/frameworks/logging/ascii-escape-notset-str.bro deleted file mode 100644 index 8c1401b179..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/ascii-escape-notset-str.bro +++ /dev/null @@ -1,23 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff test.log - -module Test; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - x: string &optional; - y: string &optional; - z: string &optional; - } &log; -} - -event bro_init() -{ - Log::create_stream(Test::LOG, [$columns=Log]); - Log::write(Test::LOG, [$x=LogAscii::unset_field, $z=""]); -} - - diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-escape-notset-str.zeek b/testing/btest/scripts/base/frameworks/logging/ascii-escape-notset-str.zeek new file mode 100644 index 0000000000..3c1cb2cd10 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/ascii-escape-notset-str.zeek @@ -0,0 +1,23 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff test.log + +module Test; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + x: string &optional; + y: string &optional; + z: string &optional; + } &log; +} + +event zeek_init() +{ + Log::create_stream(Test::LOG, [$columns=Log]); + Log::write(Test::LOG, [$x=LogAscii::unset_field, $z=""]); +} + + diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-escape-odd-url.bro b/testing/btest/scripts/base/frameworks/logging/ascii-escape-odd-url.bro deleted file mode 100644 index 9df48edbb6..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/ascii-escape-odd-url.bro +++ /dev/null @@ -1,4 +0,0 @@ -# -# @TEST-EXEC: bro -C -r $TRACES/www-odd-url.trace -# @TEST-EXEC: btest-diff http.log - diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-escape-odd-url.zeek b/testing/btest/scripts/base/frameworks/logging/ascii-escape-odd-url.zeek new file mode 100644 index 0000000000..f64f00f857 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/ascii-escape-odd-url.zeek @@ -0,0 +1,4 @@ +# +# @TEST-EXEC: zeek -C -r $TRACES/www-odd-url.trace +# @TEST-EXEC: btest-diff http.log + diff --git 
a/testing/btest/scripts/base/frameworks/logging/ascii-escape-set-separator.bro b/testing/btest/scripts/base/frameworks/logging/ascii-escape-set-separator.bro deleted file mode 100644 index f5fb7a6259..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/ascii-escape-set-separator.bro +++ /dev/null @@ -1,21 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff test.log - -module Test; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - ss: set[string]; - } &log; -} - -event bro_init() -{ - Log::create_stream(Test::LOG, [$columns=Log]); - - - Log::write(Test::LOG, [$ss=set("AA", ",", ",,", "CC")]); -} - diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-escape-set-separator.zeek b/testing/btest/scripts/base/frameworks/logging/ascii-escape-set-separator.zeek new file mode 100644 index 0000000000..5170718d9e --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/ascii-escape-set-separator.zeek @@ -0,0 +1,21 @@ +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff test.log + +module Test; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + ss: set[string]; + } &log; +} + +event zeek_init() +{ + Log::create_stream(Test::LOG, [$columns=Log]); + + + Log::write(Test::LOG, [$ss=set("AA", ",", ",,", "CC")]); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-escape.bro b/testing/btest/scripts/base/frameworks/logging/ascii-escape.bro deleted file mode 100644 index d73464777a..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/ascii-escape.bro +++ /dev/null @@ -1,33 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: cat ssh.log | egrep -v '#open|#close' >ssh.log.tmp && mv ssh.log.tmp ssh.log -# @TEST-EXEC: btest-diff ssh.log - -redef LogAscii::separator = "||"; - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. - status: string &optional; - country: string &default="unknown"; - } &log; -} - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="fa||ure", $country="UK"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="su||ess", $country="BR"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); -} - diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-escape.zeek b/testing/btest/scripts/base/frameworks/logging/ascii-escape.zeek new file mode 100644 index 0000000000..85c309ca98 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/ascii-escape.zeek @@ -0,0 +1,33 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: cat ssh.log | egrep -v '#open|#close' >ssh.log.tmp && mv ssh.log.tmp ssh.log +# @TEST-EXEC: btest-diff ssh.log + +redef LogAscii::separator = "||"; + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + } &log; +} + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="fa||ure", $country="UK"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="su||ess", $country="BR"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-gz-rotate.bro b/testing/btest/scripts/base/frameworks/logging/ascii-gz-rotate.bro deleted file mode 100644 index 2a1c388322..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/ascii-gz-rotate.bro +++ /dev/null @@ -1,25 +0,0 @@ -# Test that log rotation works with compressed logs. -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: gunzip test.*.log.gz -# - -module Test; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - s: string; - } &log; -} - -redef Log::default_rotation_interval = 1hr; -redef LogAscii::gzip_level = 1; - -event bro_init() -{ - Log::create_stream(Test::LOG, [$columns=Log]); - - Log::write(Test::LOG, [$s="testing"]); -} diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-gz-rotate.zeek b/testing/btest/scripts/base/frameworks/logging/ascii-gz-rotate.zeek new file mode 100644 index 0000000000..874715dce7 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/ascii-gz-rotate.zeek @@ -0,0 +1,25 @@ +# Test that log rotation works with compressed logs. +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: gunzip test.*.log.gz +# + +module Test; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + s: string; + } &log; +} + +redef Log::default_rotation_interval = 1hr; +redef LogAscii::gzip_level = 1; + +event zeek_init() +{ + Log::create_stream(Test::LOG, [$columns=Log]); + + Log::write(Test::LOG, [$s="testing"]); +} diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-gz.bro b/testing/btest/scripts/base/frameworks/logging/ascii-gz.bro deleted file mode 100644 index 9563f42c40..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/ascii-gz.bro +++ /dev/null @@ -1,75 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: gunzip ssh.log.gz -# @TEST-EXEC: btest-diff ssh.log -# @TEST-EXEC: btest-diff ssh-uncompressed.log -# -# Testing all possible types. 
- -redef LogAscii::gzip_level = 9; - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - b: bool; - i: int; - e: Log::ID; - c: count; - p: port; - sn: subnet; - a: addr; - d: double; - t: time; - iv: interval; - s: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of count; - ve: vector of string; - f: function(i: count) : string; - } &log; -} - -function foo(i : count) : string - { - if ( i > 0 ) - return "Foo"; - else - return "Bar"; - } - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - local filter = Log::Filter($name="ssh-uncompressed", $path="ssh-uncompressed", - $config = table(["gzip_level"] = "0")); - Log::add_filter(SSH::LOG, filter); - - local empty_set: set[string]; - local empty_vector: vector of string; - - Log::write(SSH::LOG, [ - $b=T, - $i=-42, - $e=SSH::LOG, - $c=21, - $p=123/tcp, - $sn=10.0.0.1/24, - $a=1.2.3.4, - $d=3.14, - $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "2008-07-09T16:13:30Z") + 0.543210 secs), - $iv=100secs, - $s="hurz", - $sc=set(1,2,3,4), - $ss=set("AA", "BB", "CC"), - $se=empty_set, - $vc=vector(10, 20, 30), - $ve=empty_vector, - $f=foo - ]); -} - diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-gz.zeek b/testing/btest/scripts/base/frameworks/logging/ascii-gz.zeek new file mode 100644 index 0000000000..c240df96e5 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/ascii-gz.zeek @@ -0,0 +1,75 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: gunzip ssh.log.gz +# @TEST-EXEC: btest-diff ssh.log +# @TEST-EXEC: btest-diff ssh-uncompressed.log +# +# Testing all possible types. + +redef LogAscii::gzip_level = 9; + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + b: bool; + i: int; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of count; + ve: vector of string; + f: function(i: count) : string; + } &log; +} + +function foo(i : count) : string + { + if ( i > 0 ) + return "Foo"; + else + return "Bar"; + } + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + local filter = Log::Filter($name="ssh-uncompressed", $path="ssh-uncompressed", + $config = table(["gzip_level"] = "0")); + Log::add_filter(SSH::LOG, filter); + + local empty_set: set[string]; + local empty_vector: vector of string; + + Log::write(SSH::LOG, [ + $b=T, + $i=-42, + $e=SSH::LOG, + $c=21, + $p=123/tcp, + $sn=10.0.0.1/24, + $a=1.2.3.4, + $d=3.14, + $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "2008-07-09T16:13:30Z") + 0.543210 secs), + $iv=100secs, + $s="hurz", + $sc=set(1,2,3,4), + $ss=set("AA", "BB", "CC"), + $se=empty_set, + $vc=vector(10, 20, 30), + $ve=empty_vector, + $f=foo + ]); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-json-iso-timestamps.bro b/testing/btest/scripts/base/frameworks/logging/ascii-json-iso-timestamps.bro deleted file mode 100644 index 8cb1210a68..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/ascii-json-iso-timestamps.bro +++ /dev/null @@ -1,56 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff ssh.log -# -# Testing all possible types. 
- -redef LogAscii::use_json = T; -redef LogAscii::json_timestamps = JSON::TS_ISO8601; - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - t: time; - } &log; -} - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - Log::write(SSH::LOG, [ - $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "2008-07-09T16:13:30Z") + 0.00543210 secs) - ]); - Log::write(SSH::LOG, [ - $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "1986-12-01T01:01:01Z") + 0.90 secs) - ]); - - Log::write(SSH::LOG, [ - $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "1970-01-01T00:00:00Z") - 0.4 secs) - ]); - Log::write(SSH::LOG, [ - $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "1970-01-01T00:00:00Z") - 0.5 secs) - ]); - Log::write(SSH::LOG, [ - $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "1970-01-01T00:00:00Z") - 0.6 secs) - ]); - Log::write(SSH::LOG, [ - $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "1970-01-01T00:00:00Z") - 1.0 secs) - ]); - Log::write(SSH::LOG, [ - $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "1970-01-01T00:00:00Z") - 1.4 secs) - ]); - Log::write(SSH::LOG, [ - $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "1970-01-01T00:00:00Z") - 1.5 secs) - ]); - Log::write(SSH::LOG, [ - $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "1970-01-01T00:00:00Z") - 1.6 secs) - ]); - Log::write(SSH::LOG, [ - $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "1970-01-01T00:00:00Z") - 99 secs) - ]); - -} - diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-json-iso-timestamps.zeek b/testing/btest/scripts/base/frameworks/logging/ascii-json-iso-timestamps.zeek new file mode 100644 index 0000000000..6055989e70 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/ascii-json-iso-timestamps.zeek @@ -0,0 +1,56 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff ssh.log +# +# Testing all possible types. + +redef LogAscii::use_json = T; +redef LogAscii::json_timestamps = JSON::TS_ISO8601; + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + } &log; +} + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + Log::write(SSH::LOG, [ + $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "2008-07-09T16:13:30Z") + 0.00543210 secs) + ]); + Log::write(SSH::LOG, [ + $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "1986-12-01T01:01:01Z") + 0.90 secs) + ]); + + Log::write(SSH::LOG, [ + $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "1970-01-01T00:00:00Z") - 0.4 secs) + ]); + Log::write(SSH::LOG, [ + $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "1970-01-01T00:00:00Z") - 0.5 secs) + ]); + Log::write(SSH::LOG, [ + $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "1970-01-01T00:00:00Z") - 0.6 secs) + ]); + Log::write(SSH::LOG, [ + $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "1970-01-01T00:00:00Z") - 1.0 secs) + ]); + Log::write(SSH::LOG, [ + $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "1970-01-01T00:00:00Z") - 1.4 secs) + ]); + Log::write(SSH::LOG, [ + $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "1970-01-01T00:00:00Z") - 1.5 secs) + ]); + Log::write(SSH::LOG, [ + $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "1970-01-01T00:00:00Z") - 1.6 secs) + ]); + Log::write(SSH::LOG, [ + $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "1970-01-01T00:00:00Z") - 99 secs) + ]); + +} + diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-json-optional.bro b/testing/btest/scripts/base/frameworks/logging/ascii-json-optional.bro deleted file mode 100644 index c26683a338..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/ascii-json-optional.bro +++ /dev/null @@ -1,27 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff testing.log - -@load tuning/json-logs.bro - -module testing; - -export { - redef enum Log::ID += { LOG }; 
- - type Info: record { - ts: time &log &optional; - msg: string &log &optional; - }; - - global log_test: event(rec: Info); -} - -event bro_init() &priority=5 -{ - Log::create_stream(testing::LOG, [$columns=testing::Info, $ev=log_test]); - local info: Info; - info$msg = "Testing 1 2 3 "; - Log::write(testing::LOG, info); -} - diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-json-optional.zeek b/testing/btest/scripts/base/frameworks/logging/ascii-json-optional.zeek new file mode 100644 index 0000000000..ec86557c4a --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/ascii-json-optional.zeek @@ -0,0 +1,27 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff testing.log + +@load tuning/json-logs + +module testing; + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + ts: time &log &optional; + msg: string &log &optional; + }; + + global log_test: event(rec: Info); +} + +event zeek_init() &priority=5 +{ + Log::create_stream(testing::LOG, [$columns=testing::Info, $ev=log_test]); + local info: Info; + info$msg = "Testing 1 2 3 "; + Log::write(testing::LOG, info); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-json.bro b/testing/btest/scripts/base/frameworks/logging/ascii-json.bro deleted file mode 100644 index 2b6055930f..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/ascii-json.bro +++ /dev/null @@ -1,70 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff ssh.log -# -# Testing all possible types. - -redef LogAscii::use_json = T; - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - b: bool; - i: int; - e: Log::ID; - c: count; - p: port; - sn: subnet; - a: addr; - d: double; - t: time; - iv: interval; - s: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of count; - ve: vector of string; - f: function(i: count) : string; - } &log; -} - -function foo(i : count) : string - { - if ( i > 0 ) - return "Foo"; - else - return "Bar"; - } - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - - local empty_set: set[string]; - local empty_vector: vector of string; - - Log::write(SSH::LOG, [ - $b=T, - $i=-42, - $e=SSH::LOG, - $c=21, - $p=123/tcp, - $sn=10.0.0.1/24, - $a=1.2.3.4, - $d=3.14, - $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "2008-07-09T16:13:30Z") + 0.543210 secs), - $iv=100secs, - $s="hurz", - $sc=set(1,2,3,4), - $ss=set("AA", "BB", "CC"), - $se=empty_set, - $vc=vector(10, 20, 30), - $ve=empty_vector, - $f=foo - ]); -} - diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-json.zeek b/testing/btest/scripts/base/frameworks/logging/ascii-json.zeek new file mode 100644 index 0000000000..ab88225d97 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/ascii-json.zeek @@ -0,0 +1,70 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff ssh.log +# +# Testing all possible types. 
+ +redef LogAscii::use_json = T; + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + b: bool; + i: int; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of count; + ve: vector of string; + f: function(i: count) : string; + } &log; +} + +function foo(i : count) : string + { + if ( i > 0 ) + return "Foo"; + else + return "Bar"; + } + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + + local empty_set: set[string]; + local empty_vector: vector of string; + + Log::write(SSH::LOG, [ + $b=T, + $i=-42, + $e=SSH::LOG, + $c=21, + $p=123/tcp, + $sn=10.0.0.1/24, + $a=1.2.3.4, + $d=3.14, + $t=(strptime("%Y-%m-%dT%H:%M:%SZ", "2008-07-09T16:13:30Z") + 0.543210 secs), + $iv=100secs, + $s="hurz", + $sc=set(1,2,3,4), + $ss=set("AA", "BB", "CC"), + $se=empty_set, + $vc=vector(10, 20, 30), + $ve=empty_vector, + $f=foo + ]); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-line-like-comment.bro b/testing/btest/scripts/base/frameworks/logging/ascii-line-like-comment.bro deleted file mode 100644 index 4670811b2a..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/ascii-line-like-comment.bro +++ /dev/null @@ -1,23 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff test.log - -module Test; - -export { - redef enum Log::ID += { LOG }; - - type Info: record { - data: string &log; - c: count &log &default=42; - }; -} - -event bro_init() -{ - Log::create_stream(Test::LOG, [$columns=Info]); - Log::write(Test::LOG, [$data="Test1"]); - Log::write(Test::LOG, [$data="#Kaputt"]); - Log::write(Test::LOG, [$data="Test2"]); -} - diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-line-like-comment.zeek b/testing/btest/scripts/base/frameworks/logging/ascii-line-like-comment.zeek new file mode 100644 index 0000000000..caaf123633 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/ascii-line-like-comment.zeek @@ -0,0 +1,23 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff test.log + +module Test; + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + data: string &log; + c: count &log &default=42; + }; +} + +event zeek_init() +{ + Log::create_stream(Test::LOG, [$columns=Info]); + Log::write(Test::LOG, [$data="Test1"]); + Log::write(Test::LOG, [$data="#Kaputt"]); + Log::write(Test::LOG, [$data="Test2"]); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-options.bro b/testing/btest/scripts/base/frameworks/logging/ascii-options.bro deleted file mode 100644 index 474b179536..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/ascii-options.bro +++ /dev/null @@ -1,35 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff ssh.log - -redef LogAscii::output_to_stdout = F; -redef LogAscii::separator = "|"; -redef LogAscii::include_meta = F; - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. 
- status: string &optional; - country: string &default="unknown"; - } &log; -} - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); - -} - diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-options.zeek b/testing/btest/scripts/base/frameworks/logging/ascii-options.zeek new file mode 100644 index 0000000000..11a69a0086 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/ascii-options.zeek @@ -0,0 +1,35 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff ssh.log + +redef LogAscii::output_to_stdout = F; +redef LogAscii::separator = "|"; +redef LogAscii::include_meta = F; + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. + status: string &optional; + country: string &default="unknown"; + } &log; +} + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); + +} + diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-timestamps.bro b/testing/btest/scripts/base/frameworks/logging/ascii-timestamps.bro deleted file mode 100644 index e63e30f6c6..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/ascii-timestamps.bro +++ /dev/null @@ -1,27 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff test.log - -module Test; - -export { - redef enum Log::ID += { LOG }; - - type Info: record { - data: time &log; - }; -} - -event bro_init() -{ - Log::create_stream(Test::LOG, [$columns=Info]); - Log::write(Test::LOG, [$data=double_to_time(1234567890)]); - Log::write(Test::LOG, [$data=double_to_time(1234567890.0)]); - Log::write(Test::LOG, [$data=double_to_time(1234567890.01)]); - Log::write(Test::LOG, [$data=double_to_time(1234567890.001)]); - Log::write(Test::LOG, [$data=double_to_time(1234567890.0001)]); - Log::write(Test::LOG, [$data=double_to_time(1234567890.00001)]); - Log::write(Test::LOG, [$data=double_to_time(1234567890.000001)]); - Log::write(Test::LOG, [$data=double_to_time(1234567890.0000001)]); -} - diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-timestamps.zeek b/testing/btest/scripts/base/frameworks/logging/ascii-timestamps.zeek new file mode 100644 index 0000000000..ab7269c16c --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/ascii-timestamps.zeek @@ -0,0 +1,27 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff test.log + +module Test; + +export { + redef enum Log::ID += { LOG }; + + type Info: record { + data: time &log; + }; +} + +event 
zeek_init() +{ + Log::create_stream(Test::LOG, [$columns=Info]); + Log::write(Test::LOG, [$data=double_to_time(1234567890)]); + Log::write(Test::LOG, [$data=double_to_time(1234567890.0)]); + Log::write(Test::LOG, [$data=double_to_time(1234567890.01)]); + Log::write(Test::LOG, [$data=double_to_time(1234567890.001)]); + Log::write(Test::LOG, [$data=double_to_time(1234567890.0001)]); + Log::write(Test::LOG, [$data=double_to_time(1234567890.00001)]); + Log::write(Test::LOG, [$data=double_to_time(1234567890.000001)]); + Log::write(Test::LOG, [$data=double_to_time(1234567890.0000001)]); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-tsv.bro b/testing/btest/scripts/base/frameworks/logging/ascii-tsv.bro deleted file mode 100644 index 09276a08fd..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/ascii-tsv.bro +++ /dev/null @@ -1,37 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: cat ssh.log | grep -v PREFIX.*20..- >ssh-filtered.log -# @TEST-EXEC: btest-diff ssh-filtered.log - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. - status: string &optional; - country: string &default="unknown"; - b: bool &optional; - } &log; -} - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - - local filter = Log::get_filter(SSH::LOG, "default"); - filter$config = table(["tsv"] = "T"); - Log::add_filter(SSH::LOG, filter); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $country="US"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $country="BR"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $b=T, $status="failure", $country=""]); - -} - diff --git a/testing/btest/scripts/base/frameworks/logging/ascii-tsv.zeek b/testing/btest/scripts/base/frameworks/logging/ascii-tsv.zeek new file mode 100644 index 0000000000..67d407bb91 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/ascii-tsv.zeek @@ -0,0 +1,37 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: cat ssh.log | grep -v PREFIX.*20..- >ssh-filtered.log +# @TEST-EXEC: btest-diff ssh-filtered.log + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + b: bool &optional; + } &log; +} + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + + local filter = Log::get_filter(SSH::LOG, "default"); + filter$config = table(["tsv"] = "T"); + Log::add_filter(SSH::LOG, filter); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $country="US"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $country="BR"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $b=T, $status="failure", $country=""]); + +} + diff --git a/testing/btest/scripts/base/frameworks/logging/attr-extend.bro b/testing/btest/scripts/base/frameworks/logging/attr-extend.bro deleted file mode 100644 index 7f58f3f8c1..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/attr-extend.bro +++ /dev/null @@ -1,37 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff ssh.log - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - t: time; - id: conn_id; - status: string &optional &log; - country: string &default="unknown" &log; - }; -} - -redef record Log += { - a1: count &log &optional; - a2: count &optional; -}; - -redef record Log += { - b1: count &optional; - b2: count &optional; -} &log; - - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $a1=1, $a2=2, $b1=3, $b2=4]); -} - diff --git a/testing/btest/scripts/base/frameworks/logging/attr-extend.zeek b/testing/btest/scripts/base/frameworks/logging/attr-extend.zeek new file mode 100644 index 0000000000..203f5a5343 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/attr-extend.zeek @@ -0,0 +1,37 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff ssh.log + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + id: conn_id; + status: string &optional &log; + country: string &default="unknown" &log; + }; +} + +redef record Log += { + a1: count &log &optional; + a2: count &optional; +}; + +redef record Log += { + b1: count &optional; + b2: count &optional; +} &log; + + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $a1=1, $a2=2, $b1=3, $b2=4]); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/attr.bro b/testing/btest/scripts/base/frameworks/logging/attr.bro deleted file mode 100644 index 8ec3d1c385..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/attr.bro +++ /dev/null @@ -1,31 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff ssh.log - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - t: time; - id: conn_id; - status: string &optional &log; - country: string &default="unknown" &log; - }; -} - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); - Log::write(SSH::LOG, [$t=network_time(), 
$id=cid, $status="failure", $country="US"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); - -} - diff --git a/testing/btest/scripts/base/frameworks/logging/attr.zeek b/testing/btest/scripts/base/frameworks/logging/attr.zeek new file mode 100644 index 0000000000..f0e65aa818 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/attr.zeek @@ -0,0 +1,31 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff ssh.log + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + id: conn_id; + status: string &optional &log; + country: string &default="unknown" &log; + }; +} + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); + +} + diff --git a/testing/btest/scripts/base/frameworks/logging/disable-stream.bro b/testing/btest/scripts/base/frameworks/logging/disable-stream.bro deleted file mode 100644 index c2f64da8e6..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/disable-stream.bro +++ /dev/null @@ -1,32 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: test '!' -e ssh.log - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. - status: string &optional; - country: string &default="unknown"; - } &log; -} - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - - Log::disable_stream(SSH::LOG); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); -} - diff --git a/testing/btest/scripts/base/frameworks/logging/disable-stream.zeek b/testing/btest/scripts/base/frameworks/logging/disable-stream.zeek new file mode 100644 index 0000000000..da6f9f0dd5 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/disable-stream.zeek @@ -0,0 +1,32 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: test '!' -e ssh.log + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + } &log; +} + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + + Log::disable_stream(SSH::LOG); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/empty-event.bro b/testing/btest/scripts/base/frameworks/logging/empty-event.bro deleted file mode 100644 index 6aa867220f..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/empty-event.bro +++ /dev/null @@ -1,33 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff ssh.log - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. - status: string &optional; - country: string &default="unknown"; - } &log; -} - -global log_ssh: event(rec: Log); - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log, $ev=log_ssh]); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); - -} - diff --git a/testing/btest/scripts/base/frameworks/logging/empty-event.zeek b/testing/btest/scripts/base/frameworks/logging/empty-event.zeek new file mode 100644 index 0000000000..404b35cec8 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/empty-event.zeek @@ -0,0 +1,33 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff ssh.log + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + } &log; +} + +global log_ssh: event(rec: Log); + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log, $ev=log_ssh]); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); + +} + diff --git a/testing/btest/scripts/base/frameworks/logging/enable-stream.bro b/testing/btest/scripts/base/frameworks/logging/enable-stream.bro deleted file mode 100644 index 0f525eced1..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/enable-stream.bro +++ /dev/null @@ -1,33 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff ssh.log - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. - status: string &optional; - country: string &default="unknown"; - } &log; -} - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - - Log::disable_stream(SSH::LOG); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); - Log::enable_stream(SSH::LOG); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); -} - diff --git a/testing/btest/scripts/base/frameworks/logging/enable-stream.zeek b/testing/btest/scripts/base/frameworks/logging/enable-stream.zeek new file mode 100644 index 0000000000..6da68c66fa --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/enable-stream.zeek @@ -0,0 +1,33 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff ssh.log + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + } &log; +} + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + + Log::disable_stream(SSH::LOG); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); + Log::enable_stream(SSH::LOG); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/env-ext.test b/testing/btest/scripts/base/frameworks/logging/env-ext.test index e9f690caa4..f7539ea7b4 100644 --- a/testing/btest/scripts/base/frameworks/logging/env-ext.test +++ b/testing/btest/scripts/base/frameworks/logging/env-ext.test @@ -1,2 +1,2 @@ -# @TEST-EXEC: BRO_LOG_SUFFIX=txt bro -r $TRACES/wikipedia.trace +# @TEST-EXEC: ZEEK_LOG_SUFFIX=txt zeek -r $TRACES/wikipedia.trace # @TEST-EXEC: test -f conn.txt diff --git a/testing/btest/scripts/base/frameworks/logging/events.bro b/testing/btest/scripts/base/frameworks/logging/events.bro deleted file mode 100644 index bf156e6d60..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/events.bro +++ /dev/null @@ -1,37 +0,0 @@ - -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -module SSH; - -export { - # Create a new ID for our log stream - redef enum Log::ID += { LOG }; - - # Define a record with all the columns the log file can have. - # (I'm using a subset of fields from ssh-ext for demonstration.) - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. - status: string &optional; - country: string &default="unknown"; - } &log; -} - -global ssh_log: event(rec: Log); - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log, $ev=ssh_log]); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - local r: Log = [$t=network_time(), $id=cid, $status="success"]; - Log::write(SSH::LOG, r); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); - -} - -event ssh_log(rec: Log) - { - print rec; - } diff --git a/testing/btest/scripts/base/frameworks/logging/events.zeek b/testing/btest/scripts/base/frameworks/logging/events.zeek new file mode 100644 index 0000000000..321a702002 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/events.zeek @@ -0,0 +1,37 @@ + +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +module SSH; + +export { + # Create a new ID for our log stream + redef enum Log::ID += { LOG }; + + # Define a record with all the columns the log file can have. + # (I'm using a subset of fields from ssh-ext for demonstration.) + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + } &log; +} + +global ssh_log: event(rec: Log); + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log, $ev=ssh_log]); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + local r: Log = [$t=network_time(), $id=cid, $status="success"]; + Log::write(SSH::LOG, r); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); + +} + +event ssh_log(rec: Log) + { + print rec; + } diff --git a/testing/btest/scripts/base/frameworks/logging/exclude.bro b/testing/btest/scripts/base/frameworks/logging/exclude.bro deleted file mode 100644 index 7b245541ab..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/exclude.bro +++ /dev/null @@ -1,34 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff ssh.log - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. - status: string &optional; - country: string &default="unknown"; - } &log; -} - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - - Log::remove_default_filter(SSH::LOG); - Log::add_filter(SSH::LOG, [$name="f1", $exclude=set("t", "id.orig_h")]); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); - -} - diff --git a/testing/btest/scripts/base/frameworks/logging/exclude.zeek b/testing/btest/scripts/base/frameworks/logging/exclude.zeek new file mode 100644 index 0000000000..0f1e1b72d1 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/exclude.zeek @@ -0,0 +1,34 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff ssh.log + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + } &log; +} + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + + Log::remove_default_filter(SSH::LOG); + Log::add_filter(SSH::LOG, [$name="f1", $exclude=set("t", "id.orig_h")]); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); + +} + diff --git a/testing/btest/scripts/base/frameworks/logging/field-extension-cluster-error.bro b/testing/btest/scripts/base/frameworks/logging/field-extension-cluster-error.bro deleted file mode 100644 index dd30ad4c6f..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/field-extension-cluster-error.bro +++ /dev/null @@ -1,91 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# -# @TEST-EXEC: btest-bg-run manager-1 "cp ../cluster-layout.bro . && CLUSTER_NODE=manager-1 bro %INPUT" -# @TEST-EXEC: btest-bg-run worker-1 "cp ../cluster-layout.bro . && CLUSTER_NODE=worker-1 bro --pseudo-realtime -C -r $TRACES/wikipedia.trace %INPUT" -# @TEST-EXEC: btest-bg-wait 20 -# @TEST-EXEC: grep qux manager-1/reporter.log | sed 's#line ..#line XX#g' > manager-reporter.log -# @TEST-EXEC: grep qux manager-1/reporter-2.log | sed 's#line ..*#line XX#g' >> manager-reporter.log -# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-canonifier | $SCRIPTS/diff-remove-abspath | grep -v ^# | $SCRIPTS/diff-sort" btest-diff manager-reporter.log - - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], -}; -@TEST-END-FILE - -@load base/protocols/conn - -@if ( Cluster::node == "worker-1" ) -redef exit_only_after_terminate = T; -@endif - -redef Log::default_rotation_interval = 0secs; - -redef Log::default_scope_sep="_"; - -type Extension: record { - write_ts: time &log; - stream: string &log; - system_name: string &log; -}; - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) - -function add_extension(path: string): Extension - { - return Extension($write_ts = network_time(), - $stream = "bah", - $system_name = peer_description); - } - -redef Log::default_ext_func = add_extension; - -@endif - -event die() - { - terminate(); - } - -event slow_death() - { - Broker::flush_logs(); - schedule 2sec { die() }; - } - -event kill_worker() - { - Reporter::info("qux"); - Broker::publish("death", slow_death); - } - -event bro_init() - { - if ( Cluster::node == "worker-1" ) - { - Broker::subscribe("death"); - suspend_processing(); - } - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - if ( Cluster::node == "manager-1" ) - { - schedule 2sec { kill_worker() }; - } - - if ( Cluster::node == "worker-1" ) - { - continue_processing(); - Reporter::info("qux"); - } - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - schedule 2sec { die() }; - } diff --git a/testing/btest/scripts/base/frameworks/logging/field-extension-cluster-error.zeek 
b/testing/btest/scripts/base/frameworks/logging/field-extension-cluster-error.zeek new file mode 100644 index 0000000000..6e66d56bb5 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/field-extension-cluster-error.zeek @@ -0,0 +1,91 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# +# @TEST-EXEC: btest-bg-run manager-1 "cp ../cluster-layout.zeek . && CLUSTER_NODE=manager-1 zeek %INPUT" +# @TEST-EXEC: btest-bg-run worker-1 "cp ../cluster-layout.zeek . && CLUSTER_NODE=worker-1 zeek --pseudo-realtime -C -r $TRACES/wikipedia.trace %INPUT" +# @TEST-EXEC: btest-bg-wait 20 +# @TEST-EXEC: grep qux manager-1/reporter.log | sed 's#line ..#line XX#g' > manager-reporter.log +# @TEST-EXEC: grep qux manager-1/reporter-2.log | sed 's#line ..*#line XX#g' >> manager-reporter.log +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-canonifier | $SCRIPTS/diff-remove-abspath | grep -v ^# | $SCRIPTS/diff-sort" btest-diff manager-reporter.log + + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], +}; +@TEST-END-FILE + +@load base/protocols/conn + +@if ( Cluster::node == "worker-1" ) +redef exit_only_after_terminate = T; +@endif + +redef Log::default_rotation_interval = 0secs; + +redef Log::default_scope_sep="_"; + +type Extension: record { + write_ts: time &log; + stream: string &log; + system_name: string &log; +}; + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + +function add_extension(path: string): Extension + { + return Extension($write_ts = network_time(), + $stream = "bah", + $system_name = peer_description); + } + +redef Log::default_ext_func = add_extension; + +@endif + +event die() + { + terminate(); + } + +event slow_death() + { + Broker::flush_logs(); + schedule 2sec { die() }; + } + +event kill_worker() + { + Reporter::info("qux"); + Broker::publish("death", slow_death); + } + +event zeek_init() + { + if ( Cluster::node == "worker-1" ) + { + Broker::subscribe("death"); + suspend_processing(); + } + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + if ( Cluster::node == "manager-1" ) + { + schedule 2sec { kill_worker() }; + } + + if ( Cluster::node == "worker-1" ) + { + continue_processing(); + Reporter::info("qux"); + } + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + schedule 2sec { die() }; + } diff --git a/testing/btest/scripts/base/frameworks/logging/field-extension-cluster.bro b/testing/btest/scripts/base/frameworks/logging/field-extension-cluster.bro deleted file mode 100644 index d38b5b744b..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/field-extension-cluster.bro +++ /dev/null @@ -1,79 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# -# @TEST-EXEC: btest-bg-run manager-1 "cp ../cluster-layout.bro . && CLUSTER_NODE=manager-1 bro %INPUT" -# @TEST-EXEC: btest-bg-run worker-1 "cp ../cluster-layout.bro . 
&& CLUSTER_NODE=worker-1 bro --pseudo-realtime -C -r $TRACES/wikipedia.trace %INPUT" -# @TEST-EXEC: btest-bg-wait 20 -# @TEST-EXEC: btest-diff manager-1/http.log - - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], -}; -@TEST-END-FILE - -@load base/protocols/conn - -@if ( Cluster::node == "worker-1" ) -redef exit_only_after_terminate = T; -@endif - -redef Log::default_rotation_interval = 0secs; - -redef Log::default_scope_sep="_"; - -type Extension: record { - write_ts: time &log; - stream: string &log; - system_name: string &log; -}; - -function add_extension(path: string): Extension - { - return Extension($write_ts = network_time(), - $stream = path, - $system_name = peer_description); - } - -redef Log::default_ext_func = add_extension; - -event die() - { - terminate(); - } - -event slow_death() - { - Broker::flush_logs(); - schedule 2sec { die() }; - } - -event kill_worker() - { - Broker::publish("death", slow_death); - } - -event bro_init() - { - if ( Cluster::node == "worker-1" ) - { - suspend_processing(); - Broker::subscribe("death"); - } - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - if ( Cluster::node == "manager-1" ) - schedule 2sec { kill_worker() }; - - if ( Cluster::node == "worker-1" ) - continue_processing(); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - schedule 2sec { die() }; - } diff --git a/testing/btest/scripts/base/frameworks/logging/field-extension-cluster.zeek b/testing/btest/scripts/base/frameworks/logging/field-extension-cluster.zeek new file mode 100644 index 0000000000..14103cf816 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/field-extension-cluster.zeek @@ -0,0 +1,79 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# +# @TEST-EXEC: btest-bg-run manager-1 "cp ../cluster-layout.zeek . && CLUSTER_NODE=manager-1 zeek %INPUT" +# @TEST-EXEC: btest-bg-run worker-1 "cp ../cluster-layout.zeek . 
&& CLUSTER_NODE=worker-1 zeek --pseudo-realtime -C -r $TRACES/wikipedia.trace %INPUT" +# @TEST-EXEC: btest-bg-wait 20 +# @TEST-EXEC: btest-diff manager-1/http.log + + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], +}; +@TEST-END-FILE + +@load base/protocols/conn + +@if ( Cluster::node == "worker-1" ) +redef exit_only_after_terminate = T; +@endif + +redef Log::default_rotation_interval = 0secs; + +redef Log::default_scope_sep="_"; + +type Extension: record { + write_ts: time &log; + stream: string &log; + system_name: string &log; +}; + +function add_extension(path: string): Extension + { + return Extension($write_ts = network_time(), + $stream = path, + $system_name = peer_description); + } + +redef Log::default_ext_func = add_extension; + +event die() + { + terminate(); + } + +event slow_death() + { + Broker::flush_logs(); + schedule 2sec { die() }; + } + +event kill_worker() + { + Broker::publish("death", slow_death); + } + +event zeek_init() + { + if ( Cluster::node == "worker-1" ) + { + suspend_processing(); + Broker::subscribe("death"); + } + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + if ( Cluster::node == "manager-1" ) + schedule 2sec { kill_worker() }; + + if ( Cluster::node == "worker-1" ) + continue_processing(); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + schedule 2sec { die() }; + } diff --git a/testing/btest/scripts/base/frameworks/logging/field-extension-complex.bro b/testing/btest/scripts/base/frameworks/logging/field-extension-complex.bro deleted file mode 100644 index 7c1b448fee..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/field-extension-complex.bro +++ /dev/null @@ -1,37 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: btest-diff conn.log - -@load base/protocols/conn - -type InnerRecord: record { - a: count; - b: count; - c: count; -}; - -type InnerLoggedRecord: record { - a: count &log; - b: count; - c: count &log; - d: set[count] &log; -}; - -type Extension: record { - write_ts: time &log; - stream: string &log; - inner: InnerRecord; - innerLogged: InnerLoggedRecord &log; - system_name: string &log; -}; - -function add_extension(path: string): Extension - { - return Extension($write_ts = network_time(), - $stream = path, - $system_name = peer_description, - $inner = InnerRecord($a=1,$b=2,$c=3), - $innerLogged = InnerLoggedRecord($a=1,$b=2,$c=3,$d=set(1,2,3,4)) - ); - } - -redef Log::default_ext_func = add_extension; diff --git a/testing/btest/scripts/base/frameworks/logging/field-extension-complex.zeek b/testing/btest/scripts/base/frameworks/logging/field-extension-complex.zeek new file mode 100644 index 0000000000..5ac8e9220b --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/field-extension-complex.zeek @@ -0,0 +1,37 @@ +# @TEST-EXEC: zeek -b -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-diff conn.log + +@load base/protocols/conn + +type InnerRecord: record { + a: count; + b: count; + c: count; +}; + +type InnerLoggedRecord: record { + a: count &log; + b: count; + c: count &log; + d: set[count] &log; +}; + +type Extension: record { + write_ts: time &log; + stream: string &log; + inner: InnerRecord; + innerLogged: InnerLoggedRecord &log; + system_name: string &log; +}; + 
+function add_extension(path: string): Extension + { + return Extension($write_ts = network_time(), + $stream = path, + $system_name = peer_description, + $inner = InnerRecord($a=1,$b=2,$c=3), + $innerLogged = InnerLoggedRecord($a=1,$b=2,$c=3,$d=set(1,2,3,4)) + ); + } + +redef Log::default_ext_func = add_extension; diff --git a/testing/btest/scripts/base/frameworks/logging/field-extension-invalid.bro b/testing/btest/scripts/base/frameworks/logging/field-extension-invalid.bro deleted file mode 100644 index b06cec2f54..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/field-extension-invalid.bro +++ /dev/null @@ -1,17 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/http/get.trace %INPUT -# @TEST-EXEC: btest-diff conn.log -# @TEST-EXEC: btest-diff .stderr - -@load base/protocols/conn - -type Extension: record { - write_ts: time &log; - stream: string &log; - system_name: string &log; -}; - -function add_extension(path: string): Extension - { - } - -redef Log::default_ext_func = add_extension; diff --git a/testing/btest/scripts/base/frameworks/logging/field-extension-invalid.zeek b/testing/btest/scripts/base/frameworks/logging/field-extension-invalid.zeek new file mode 100644 index 0000000000..87a2caecbc --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/field-extension-invalid.zeek @@ -0,0 +1,17 @@ +# @TEST-EXEC: zeek -b -r $TRACES/http/get.trace %INPUT +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff .stderr + +@load base/protocols/conn + +type Extension: record { + write_ts: time &log; + stream: string &log; + system_name: string &log; +}; + +function add_extension(path: string): Extension + { + } + +redef Log::default_ext_func = add_extension; diff --git a/testing/btest/scripts/base/frameworks/logging/field-extension-optional.bro b/testing/btest/scripts/base/frameworks/logging/field-extension-optional.bro deleted file mode 100644 index 9b37a893bf..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/field-extension-optional.bro +++ /dev/null @@ -1,20 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: btest-diff conn.log - -@load base/protocols/conn - -type Extension: record { - write_ts: time &log; - stream: string; - system_name: string &log &optional; - undefined_string: string &log &optional; -}; - -function add_extension(path: string): Extension - { - return Extension($write_ts = network_time(), - $stream = path, - $system_name = peer_description); - } - -redef Log::default_ext_func = add_extension; diff --git a/testing/btest/scripts/base/frameworks/logging/field-extension-optional.zeek b/testing/btest/scripts/base/frameworks/logging/field-extension-optional.zeek new file mode 100644 index 0000000000..50d6f90515 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/field-extension-optional.zeek @@ -0,0 +1,20 @@ +# @TEST-EXEC: zeek -b -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-diff conn.log + +@load base/protocols/conn + +type Extension: record { + write_ts: time &log; + stream: string; + system_name: string &log &optional; + undefined_string: string &log &optional; +}; + +function add_extension(path: string): Extension + { + return Extension($write_ts = network_time(), + $stream = path, + $system_name = peer_description); + } + +redef Log::default_ext_func = add_extension; diff --git a/testing/btest/scripts/base/frameworks/logging/field-extension-table.bro b/testing/btest/scripts/base/frameworks/logging/field-extension-table.bro deleted file mode 100644 index 8a9f3ed5f2..0000000000 --- 
a/testing/btest/scripts/base/frameworks/logging/field-extension-table.bro +++ /dev/null @@ -1,22 +0,0 @@ -# @TEST-EXEC-FAIL: bro -b -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr - -@load base/protocols/conn - -type Extension: record { - write_ts: time &log; - stream: string &log; - tab: table[count] of count &log; - system_name: string &log; -}; - -function add_extension(path: string): Extension - { - return Extension($write_ts = network_time(), - $stream = path, - $system_name = peer_description, - $tab = { [1] = 2, [2] = 3} - ); - } - -redef Log::default_ext_func = add_extension; diff --git a/testing/btest/scripts/base/frameworks/logging/field-extension-table.zeek b/testing/btest/scripts/base/frameworks/logging/field-extension-table.zeek new file mode 100644 index 0000000000..ccf40899c8 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/field-extension-table.zeek @@ -0,0 +1,22 @@ +# @TEST-EXEC-FAIL: zeek -b -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr + +@load base/protocols/conn + +type Extension: record { + write_ts: time &log; + stream: string &log; + tab: table[count] of count &log; + system_name: string &log; +}; + +function add_extension(path: string): Extension + { + return Extension($write_ts = network_time(), + $stream = path, + $system_name = peer_description, + $tab = { [1] = 2, [2] = 3} + ); + } + +redef Log::default_ext_func = add_extension; diff --git a/testing/btest/scripts/base/frameworks/logging/field-extension.bro b/testing/btest/scripts/base/frameworks/logging/field-extension.bro deleted file mode 100644 index 609df1b467..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/field-extension.bro +++ /dev/null @@ -1,19 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: btest-diff conn.log - -@load base/protocols/conn - -type Extension: record { - write_ts: time &log; - stream: string &log; - system_name: string &log; -}; - -function add_extension(path: string): Extension - { - return Extension($write_ts = network_time(), - $stream = path, - $system_name = peer_description); - } - -redef Log::default_ext_func = add_extension; \ No newline at end of file diff --git a/testing/btest/scripts/base/frameworks/logging/field-extension.zeek b/testing/btest/scripts/base/frameworks/logging/field-extension.zeek new file mode 100644 index 0000000000..a53c202387 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/field-extension.zeek @@ -0,0 +1,19 @@ +# @TEST-EXEC: zeek -b -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-diff conn.log + +@load base/protocols/conn + +type Extension: record { + write_ts: time &log; + stream: string &log; + system_name: string &log; +}; + +function add_extension(path: string): Extension + { + return Extension($write_ts = network_time(), + $stream = path, + $system_name = peer_description); + } + +redef Log::default_ext_func = add_extension; \ No newline at end of file diff --git a/testing/btest/scripts/base/frameworks/logging/field-name-map.bro b/testing/btest/scripts/base/frameworks/logging/field-name-map.bro deleted file mode 100644 index e480180a0d..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/field-name-map.bro +++ /dev/null @@ -1,11 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: btest-diff conn.log - -@load base/protocols/conn - -redef Log::default_field_name_map = { - ["id.orig_h"] = 
"src", - ["id.orig_p"] = "src_port", - ["id.resp_h"] = "dst", - ["id.resp_p"] = "dst_port", -}; \ No newline at end of file diff --git a/testing/btest/scripts/base/frameworks/logging/field-name-map.zeek b/testing/btest/scripts/base/frameworks/logging/field-name-map.zeek new file mode 100644 index 0000000000..54af73374e --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/field-name-map.zeek @@ -0,0 +1,11 @@ +# @TEST-EXEC: zeek -b -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-diff conn.log + +@load base/protocols/conn + +redef Log::default_field_name_map = { + ["id.orig_h"] = "src", + ["id.orig_p"] = "src_port", + ["id.resp_h"] = "dst", + ["id.resp_p"] = "dst_port", +}; \ No newline at end of file diff --git a/testing/btest/scripts/base/frameworks/logging/field-name-map2.bro b/testing/btest/scripts/base/frameworks/logging/field-name-map2.bro deleted file mode 100644 index e51bcd6580..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/field-name-map2.bro +++ /dev/null @@ -1,15 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/auth_change_session_keys.pcap %INPUT -# @TEST-EXEC: btest-diff conn.log - -# The other tests of Log::default_field_name_map used to not catch an invalid -# memory free for some reason, but this test did reproduce a crash -# consistently (now fixed). - -@load base/protocols/conn - -redef Log::default_field_name_map = { - ["id.orig_h"] = "src_ip", - ["id.orig_p"] = "src_port", - ["id.resp_h"] = "dst_ip", - ["id.resp_p"] = "dst_port" -}; diff --git a/testing/btest/scripts/base/frameworks/logging/field-name-map2.zeek b/testing/btest/scripts/base/frameworks/logging/field-name-map2.zeek new file mode 100644 index 0000000000..60ebb5a1a4 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/field-name-map2.zeek @@ -0,0 +1,15 @@ +# @TEST-EXEC: zeek -b -r $TRACES/auth_change_session_keys.pcap %INPUT +# @TEST-EXEC: btest-diff conn.log + +# The other tests of Log::default_field_name_map used to not catch an invalid +# memory free for some reason, but this test did reproduce a crash +# consistently (now fixed). 
+ +@load base/protocols/conn + +redef Log::default_field_name_map = { + ["id.orig_h"] = "src_ip", + ["id.orig_p"] = "src_port", + ["id.resp_h"] = "dst_ip", + ["id.resp_p"] = "dst_port" +}; diff --git a/testing/btest/scripts/base/frameworks/logging/file.bro b/testing/btest/scripts/base/frameworks/logging/file.bro deleted file mode 100644 index 94bdad6b1b..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/file.bro +++ /dev/null @@ -1,23 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff ssh.log - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - t: time; - f: file; - } &log; -} - -const foo_log = open_log_file("Foo") &redef; - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - Log::write(SSH::LOG, [$t=network_time(), $f=foo_log]); -} - diff --git a/testing/btest/scripts/base/frameworks/logging/file.zeek b/testing/btest/scripts/base/frameworks/logging/file.zeek new file mode 100644 index 0000000000..6aa07f1699 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/file.zeek @@ -0,0 +1,23 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff ssh.log + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + f: file; + } &log; +} + +const foo_log = open_log_file("Foo") &redef; + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + Log::write(SSH::LOG, [$t=network_time(), $f=foo_log]); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/include.bro b/testing/btest/scripts/base/frameworks/logging/include.bro deleted file mode 100644 index d0fea93c99..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/include.bro +++ /dev/null @@ -1,34 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff ssh.log - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. - status: string &optional; - country: string &default="unknown"; - } &log; -} - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - - Log::remove_default_filter(SSH::LOG); - Log::add_filter(SSH::LOG, [$name="default", $include=set("t", "id.orig_h")]); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); - -} - diff --git a/testing/btest/scripts/base/frameworks/logging/include.zeek b/testing/btest/scripts/base/frameworks/logging/include.zeek new file mode 100644 index 0000000000..31f905d172 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/include.zeek @@ -0,0 +1,34 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff ssh.log + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + } &log; +} + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + + Log::remove_default_filter(SSH::LOG); + Log::add_filter(SSH::LOG, [$name="default", $include=set("t", "id.orig_h")]); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); + +} + diff --git a/testing/btest/scripts/base/frameworks/logging/no-local.bro b/testing/btest/scripts/base/frameworks/logging/no-local.bro deleted file mode 100644 index 9ae7d32d61..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/no-local.bro +++ /dev/null @@ -1,33 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: test '!' -e ssh.log - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. - status: string &optional; - country: string &default="unknown"; - } &log; -} - -redef Log::enable_local_logging = F; - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); - -} - diff --git a/testing/btest/scripts/base/frameworks/logging/no-local.zeek b/testing/btest/scripts/base/frameworks/logging/no-local.zeek new file mode 100644 index 0000000000..38e395afac --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/no-local.zeek @@ -0,0 +1,33 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: test '!' -e ssh.log + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + } &log; +} + +redef Log::enable_local_logging = F; + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); + +} + diff --git a/testing/btest/scripts/base/frameworks/logging/none-debug.bro b/testing/btest/scripts/base/frameworks/logging/none-debug.bro deleted file mode 100644 index 5d2e98323a..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/none-debug.bro +++ /dev/null @@ -1,37 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output - -redef Log::default_writer = Log::WRITER_NONE; -redef LogNone::debug = T; -redef Log::default_rotation_interval= 1hr; -redef log_rotate_base_time = "00:05"; - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. - status: string &optional; - country: string &default="unknown"; - } &log; -} - -event bro_init() -{ - local config: table[string] of string; - config["foo"]="bar"; - config["foo2"]="bar2"; - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - - Log::create_stream(SSH::LOG, [$columns=Log]); - - Log::remove_default_filter(SSH::LOG); - Log::add_filter(SSH::LOG, [$name="f1", $exclude=set("t", "id.orig_h"), $config=config]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); -} - diff --git a/testing/btest/scripts/base/frameworks/logging/none-debug.zeek b/testing/btest/scripts/base/frameworks/logging/none-debug.zeek new file mode 100644 index 0000000000..43b1daa187 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/none-debug.zeek @@ -0,0 +1,37 @@ +# +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output + +redef Log::default_writer = Log::WRITER_NONE; +redef LogNone::debug = T; +redef Log::default_rotation_interval= 1hr; +redef log_rotate_base_time = "00:05"; + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + } &log; +} + +event zeek_init() +{ + local config: table[string] of string; + config["foo"]="bar"; + config["foo2"]="bar2"; + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + Log::create_stream(SSH::LOG, [$columns=Log]); + + Log::remove_default_filter(SSH::LOG); + Log::add_filter(SSH::LOG, [$name="f1", $exclude=set("t", "id.orig_h"), $config=config]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/path-func-column-demote.bro b/testing/btest/scripts/base/frameworks/logging/path-func-column-demote.bro deleted file mode 100644 index aff886c2f4..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/path-func-column-demote.bro +++ /dev/null @@ -1,26 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: btest-diff local.log -# @TEST-EXEC: btest-diff remote.log -# -# The record value passed into the path_func should be allowed to contain a -# subset of the fields in the stream's columns. - -@load base/utils/site -@load base/protocols/conn -@load base/frameworks/notice - -redef Site::local_nets = {141.142.0.0/16}; - -function split_log(id: Log::ID, path: string, rec: record {id:conn_id;}): string -{ - return Site::is_local_addr(rec$id$orig_h) ? "local" : "remote"; -} - -event bro_init() -{ - # Add a new filter to the Conn::LOG stream that logs only - # timestamp and originator address. - local filter: Log::Filter = [$name="dst-only", $path_func=split_log, - $include=set("ts", "id.orig_h")]; - Log::add_filter(Conn::LOG, filter); -} diff --git a/testing/btest/scripts/base/frameworks/logging/path-func-column-demote.zeek b/testing/btest/scripts/base/frameworks/logging/path-func-column-demote.zeek new file mode 100644 index 0000000000..7b256da666 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/path-func-column-demote.zeek @@ -0,0 +1,26 @@ +# @TEST-EXEC: zeek -b -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-diff local.log +# @TEST-EXEC: btest-diff remote.log +# +# The record value passed into the path_func should be allowed to contain a +# subset of the fields in the stream's columns. + +@load base/utils/site +@load base/protocols/conn +@load base/frameworks/notice + +redef Site::local_nets = {141.142.0.0/16}; + +function split_log(id: Log::ID, path: string, rec: record {id:conn_id;}): string +{ + return Site::is_local_addr(rec$id$orig_h) ? "local" : "remote"; +} + +event zeek_init() +{ + # Add a new filter to the Conn::LOG stream that logs only + # timestamp and originator address. + local filter: Log::Filter = [$name="dst-only", $path_func=split_log, + $include=set("ts", "id.orig_h")]; + Log::add_filter(Conn::LOG, filter); +} diff --git a/testing/btest/scripts/base/frameworks/logging/path-func.bro b/testing/btest/scripts/base/frameworks/logging/path-func.bro deleted file mode 100644 index 684aa03ed6..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/path-func.bro +++ /dev/null @@ -1,48 +0,0 @@ - -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: ( ls static-*; cat static-* ) >output -# @TEST-EXEC: btest-diff output - -module SSH; - -export { - # Create a new ID for our log stream - redef enum Log::ID += { LOG }; - - # Define a record with all the columns the log file can have. - # (I'm using a subset of fields from ssh-ext for demonstration.) - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. 
- status: string &optional; - country: string &default="unknown"; - } &log; -} - -global c = -1; - -function path_func(id: Log::ID, path: string, rec: Log) : string - { - c = (c + 1) % 3; - - return fmt("%s-%d-%s", path, c, rec$country); - } - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - Log::remove_default_filter(SSH::LOG); - - Log::add_filter(SSH::LOG, [$name="dyn", $path="static-prefix", $path_func=path_func]); - - Log::set_buf(SSH::LOG, F); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX2"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX3"]); -} diff --git a/testing/btest/scripts/base/frameworks/logging/path-func.zeek b/testing/btest/scripts/base/frameworks/logging/path-func.zeek new file mode 100644 index 0000000000..80cb5e7918 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/path-func.zeek @@ -0,0 +1,48 @@ + +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: ( ls static-*; cat static-* ) >output +# @TEST-EXEC: btest-diff output + +module SSH; + +export { + # Create a new ID for our log stream + redef enum Log::ID += { LOG }; + + # Define a record with all the columns the log file can have. + # (I'm using a subset of fields from ssh-ext for demonstration.) + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + } &log; +} + +global c = -1; + +function path_func(id: Log::ID, path: string, rec: Log) : string + { + c = (c + 1) % 3; + + return fmt("%s-%d-%s", path, c, rec$country); + } + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + Log::remove_default_filter(SSH::LOG); + + Log::add_filter(SSH::LOG, [$name="dyn", $path="static-prefix", $path_func=path_func]); + + Log::set_buf(SSH::LOG, F); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX2"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX3"]); +} diff --git a/testing/btest/scripts/base/frameworks/logging/pred.bro b/testing/btest/scripts/base/frameworks/logging/pred.bro deleted file mode 100644 index e13c726656..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/pred.bro +++ /dev/null @@ -1,39 +0,0 @@ - -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff test.success.log -# @TEST-EXEC: btest-diff test.failure.log - -module Test; - -export { - # Create a new ID for our log stream - redef enum Log::ID += { LOG }; - - # Define a record with all the columns the log file can have. - # (I'm using a subset of fields from ssh for demonstration.) - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. - status: string &optional; - country: string &default="unknown"; - } &log; -} - -function fail(rec: Log): bool - { - return rec$status != "success"; - } - -event bro_init() -{ - Log::create_stream(Test::LOG, [$columns=Log]); - Log::remove_default_filter(Test::LOG); - Log::add_filter(Test::LOG, [$name="f1", $path="test.success", $pred=function(rec: Log): bool { return rec$status == "success"; }]); - Log::add_filter(Test::LOG, [$name="f2", $path="test.failure", $pred=fail]); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - local r: Log = [$t=network_time(), $id=cid, $status="success"]; - Log::write(Test::LOG, r); - Log::write(Test::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); - -} diff --git a/testing/btest/scripts/base/frameworks/logging/pred.zeek b/testing/btest/scripts/base/frameworks/logging/pred.zeek new file mode 100644 index 0000000000..aa89fdf504 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/pred.zeek @@ -0,0 +1,39 @@ + +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff test.success.log +# @TEST-EXEC: btest-diff test.failure.log + +module Test; + +export { + # Create a new ID for our log stream + redef enum Log::ID += { LOG }; + + # Define a record with all the columns the log file can have. + # (I'm using a subset of fields from ssh for demonstration.) + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + } &log; +} + +function fail(rec: Log): bool + { + return rec$status != "success"; + } + +event zeek_init() +{ + Log::create_stream(Test::LOG, [$columns=Log]); + Log::remove_default_filter(Test::LOG); + Log::add_filter(Test::LOG, [$name="f1", $path="test.success", $pred=function(rec: Log): bool { return rec$status == "success"; }]); + Log::add_filter(Test::LOG, [$name="f2", $path="test.failure", $pred=fail]); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + local r: Log = [$t=network_time(), $id=cid, $status="success"]; + Log::write(Test::LOG, r); + Log::write(Test::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); + +} diff --git a/testing/btest/scripts/base/frameworks/logging/remove.bro b/testing/btest/scripts/base/frameworks/logging/remove.bro deleted file mode 100644 index 3b80d24e9f..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/remove.bro +++ /dev/null @@ -1,44 +0,0 @@ -# -# @TEST-EXEC: bro -b -B logging %INPUT -# @TEST-EXEC: btest-diff ssh.log -# @TEST-EXEC: btest-diff ssh.failure.log -# @TEST-EXEC: btest-diff .stdout - -module SSH; - -export { - # Create a new ID for our log stream - redef enum Log::ID += { LOG }; - - # Define a record with all the columns the log file can have. - # (I'm using a subset of fields from ssh-ext for demonstration.) - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. - status: string &optional; - country: string &default="unknown"; - } &log; -} - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - Log::add_filter(SSH::LOG, [$name="f1", $path="ssh.failure", $pred=function(rec: Log): bool { return rec$status == "failure"; }]); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - - # Log something. - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); - print Log::get_filter_names(SSH::LOG); - - Log::remove_filter(SSH::LOG, "f1"); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="BR"]); - - Log::remove_filter(SSH::LOG, "default"); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); - - Log::remove_filter(SSH::LOG, "doesn-not-exist"); - print Log::get_filter_names(SSH::LOG); -} - diff --git a/testing/btest/scripts/base/frameworks/logging/remove.zeek b/testing/btest/scripts/base/frameworks/logging/remove.zeek new file mode 100644 index 0000000000..c4a626610e --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/remove.zeek @@ -0,0 +1,44 @@ +# +# @TEST-EXEC: zeek -b -B logging %INPUT +# @TEST-EXEC: btest-diff ssh.log +# @TEST-EXEC: btest-diff ssh.failure.log +# @TEST-EXEC: btest-diff .stdout + +module SSH; + +export { + # Create a new ID for our log stream + redef enum Log::ID += { LOG }; + + # Define a record with all the columns the log file can have. + # (I'm using a subset of fields from ssh-ext for demonstration.) + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + } &log; +} + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + Log::add_filter(SSH::LOG, [$name="f1", $path="ssh.failure", $pred=function(rec: Log): bool { return rec$status == "failure"; }]); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + # Log something. + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); + print Log::get_filter_names(SSH::LOG); + + Log::remove_filter(SSH::LOG, "f1"); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="BR"]); + + Log::remove_filter(SSH::LOG, "default"); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); + + Log::remove_filter(SSH::LOG, "doesn-not-exist"); + print Log::get_filter_names(SSH::LOG); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/rotate-custom.bro b/testing/btest/scripts/base/frameworks/logging/rotate-custom.bro deleted file mode 100644 index c0f0ef8643..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/rotate-custom.bro +++ /dev/null @@ -1,40 +0,0 @@ -# -# @TEST-EXEC: bro -b -r ${TRACES}/rotation.trace %INPUT | egrep "test|test2" | sort >out.tmp -# @TEST-EXEC: cat out.tmp pp.log | sort >out -# @TEST-EXEC: for i in `ls test*.log | sort`; do printf '> %s\n' $i; cat $i; done | sort | $SCRIPTS/diff-remove-timestamps | uniq >>out -# @TEST-EXEC: btest-diff out -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff .stderr - -module Test; - -export { - # Create a new ID for our log stream - redef enum Log::ID += { LOG }; - - # Define a record with all the columns the log file can have. - # (I'm using a subset of fields from ssh-ext for demonstration.) - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. - } &log; -} - -redef Log::default_rotation_interval = 1hr; -redef Log::default_rotation_postprocessor_cmd = "echo 1st >>pp.log"; - -function custom_rotate(info: Log::RotationInfo) : bool -{ - print "custom rotate", info; - return T; -} - -event bro_init() -{ - Log::create_stream(Test::LOG, [$columns=Log]); - Log::add_filter(Test::LOG, [$name="2nd", $path="test2", $interv=30mins, $postprocessor=custom_rotate]); -} - -event new_connection(c: connection) - { - Log::write(Test::LOG, [$t=network_time(), $id=c$id]); - } diff --git a/testing/btest/scripts/base/frameworks/logging/rotate-custom.zeek b/testing/btest/scripts/base/frameworks/logging/rotate-custom.zeek new file mode 100644 index 0000000000..4e6e38ebe9 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/rotate-custom.zeek @@ -0,0 +1,40 @@ +# +# @TEST-EXEC: zeek -b -r ${TRACES}/rotation.trace %INPUT | egrep "test|test2" | sort >out.tmp +# @TEST-EXEC: cat out.tmp pp.log | sort >out +# @TEST-EXEC: for i in `ls test*.log | sort`; do printf '> %s\n' $i; cat $i; done | sort | $SCRIPTS/diff-remove-timestamps | uniq >>out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff .stderr + +module Test; + +export { + # Create a new ID for our log stream + redef enum Log::ID += { LOG }; + + # Define a record with all the columns the log file can have. + # (I'm using a subset of fields from ssh-ext for demonstration.) + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ } &log; +} + +redef Log::default_rotation_interval = 1hr; +redef Log::default_rotation_postprocessor_cmd = "echo 1st >>pp.log"; + +function custom_rotate(info: Log::RotationInfo) : bool +{ + print "custom rotate", info; + return T; +} + +event zeek_init() +{ + Log::create_stream(Test::LOG, [$columns=Log]); + Log::add_filter(Test::LOG, [$name="2nd", $path="test2", $interv=30mins, $postprocessor=custom_rotate]); +} + +event new_connection(c: connection) + { + Log::write(Test::LOG, [$t=network_time(), $id=c$id]); + } diff --git a/testing/btest/scripts/base/frameworks/logging/rotate.bro b/testing/btest/scripts/base/frameworks/logging/rotate.bro deleted file mode 100644 index 501c0db8ea..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/rotate.bro +++ /dev/null @@ -1,32 +0,0 @@ -# -# @TEST-EXEC: bro -b -r ${TRACES}/rotation.trace %INPUT >bro.out 2>&1 -# @TEST-EXEC: grep "test" bro.out | sort >out -# @TEST-EXEC: for i in `ls test.*.log | sort`; do printf '> %s\n' $i; cat $i; done >>out -# @TEST-EXEC: btest-diff out - -module Test; - -export { - # Create a new ID for our log stream - redef enum Log::ID += { LOG }; - - # Define a record with all the columns the log file can have. - # (I'm using a subset of fields from ssh-ext for demonstration.) - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. - } &log; -} - -redef Log::default_rotation_interval = 1hr; -redef Log::default_rotation_postprocessor_cmd = "echo"; - -event bro_init() -{ - Log::create_stream(Test::LOG, [$columns=Log]); -} - -event new_connection(c: connection) - { - Log::write(Test::LOG, [$t=network_time(), $id=c$id]); - } diff --git a/testing/btest/scripts/base/frameworks/logging/rotate.zeek b/testing/btest/scripts/base/frameworks/logging/rotate.zeek new file mode 100644 index 0000000000..235bc3829f --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/rotate.zeek @@ -0,0 +1,32 @@ +# +# @TEST-EXEC: zeek -b -r ${TRACES}/rotation.trace %INPUT >zeek.out 2>&1 +# @TEST-EXEC: grep "test" zeek.out | sort >out +# @TEST-EXEC: for i in `ls test.*.log | sort`; do printf '> %s\n' $i; cat $i; done >>out +# @TEST-EXEC: btest-diff out + +module Test; + +export { + # Create a new ID for our log stream + redef enum Log::ID += { LOG }; + + # Define a record with all the columns the log file can have. + # (I'm using a subset of fields from ssh-ext for demonstration.) + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ } &log; +} + +redef Log::default_rotation_interval = 1hr; +redef Log::default_rotation_postprocessor_cmd = "echo"; + +event zeek_init() +{ + Log::create_stream(Test::LOG, [$columns=Log]); +} + +event new_connection(c: connection) + { + Log::write(Test::LOG, [$t=network_time(), $id=c$id]); + } diff --git a/testing/btest/scripts/base/frameworks/logging/scope_sep.bro b/testing/btest/scripts/base/frameworks/logging/scope_sep.bro deleted file mode 100644 index 9d58ef11c2..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/scope_sep.bro +++ /dev/null @@ -1,6 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: btest-diff conn.log - -@load base/protocols/conn - -redef Log::default_scope_sep = "_"; \ No newline at end of file diff --git a/testing/btest/scripts/base/frameworks/logging/scope_sep.zeek b/testing/btest/scripts/base/frameworks/logging/scope_sep.zeek new file mode 100644 index 0000000000..03936bbe17 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/scope_sep.zeek @@ -0,0 +1,6 @@ +# @TEST-EXEC: zeek -b -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-diff conn.log + +@load base/protocols/conn + +redef Log::default_scope_sep = "_"; \ No newline at end of file diff --git a/testing/btest/scripts/base/frameworks/logging/scope_sep_and_field_name_map.bro b/testing/btest/scripts/base/frameworks/logging/scope_sep_and_field_name_map.bro deleted file mode 100644 index 3c72b7a833..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/scope_sep_and_field_name_map.bro +++ /dev/null @@ -1,15 +0,0 @@ -# This tests the order in which the unrolling and field name -# renaming occurs. - -# @TEST-EXEC: bro -b -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: btest-diff conn.log - -@load base/protocols/conn - -redef Log::default_scope_sep = "*"; -redef Log::default_field_name_map = { - ["id*orig_h"] = "src", - ["id*orig_p"] = "src_port", - ["id*resp_h"] = "dst", - ["id*resp_p"] = "dst_port", -}; \ No newline at end of file diff --git a/testing/btest/scripts/base/frameworks/logging/scope_sep_and_field_name_map.zeek b/testing/btest/scripts/base/frameworks/logging/scope_sep_and_field_name_map.zeek new file mode 100644 index 0000000000..a67b260241 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/scope_sep_and_field_name_map.zeek @@ -0,0 +1,15 @@ +# This tests the order in which the unrolling and field name +# renaming occurs. + +# @TEST-EXEC: zeek -b -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-diff conn.log + +@load base/protocols/conn + +redef Log::default_scope_sep = "*"; +redef Log::default_field_name_map = { + ["id*orig_h"] = "src", + ["id*orig_p"] = "src_port", + ["id*resp_h"] = "dst", + ["id*resp_p"] = "dst_port", +}; \ No newline at end of file diff --git a/testing/btest/scripts/base/frameworks/logging/sqlite/error.bro b/testing/btest/scripts/base/frameworks/logging/sqlite/error.bro deleted file mode 100644 index e48e066c6c..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/sqlite/error.bro +++ /dev/null @@ -1,107 +0,0 @@ -# -# @TEST-REQUIRES: which sqlite3 -# @TEST-REQUIRES: has-writer Bro::SQLiteWriter -# @TEST-GROUP: sqlite -# -# @TEST-EXEC: cat ssh.sql | sqlite3 ssh.sqlite -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff .stderr -# -# Testing all possible types. 
-# - -@TEST-START-FILE ssh.sql -PRAGMA foreign_keys=OFF; -BEGIN TRANSACTION; -CREATE TABLE ssh ( -'b' boolean, -'i' integer, -'e' text, -'c' integer, -'p' integer, -'sn' text, -'a' text, -'d' double precision, -'t' double precision, -'iv' double precision, -'s' text, -'sc' text, -'ss' text, -'se' text, -'vc' text, -'ve' text -); -INSERT INTO "ssh" VALUES(1,-42,'SSH::LOG',21,123,'10.0.0.0/24','1.2.3.4',3.14,1.36859359634203600879e+09,100.0,'hurz','2,4,1,3','CC,AA,BB','(empty)','10,20,30','(empty)'); -COMMIT; -@TEST-END-FILE - - - -redef LogSQLite::unset_field = "(unset)"; - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - b: bool; - i: int; - e: Log::ID; - c: count; - p: port; - sn: subnet; - a: addr; - d: double; - t: time; - iv: interval; - s: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of count; - ve: vector of string; - f: function(i: count) : string; - } &log; -} - -function foo(i : count) : string - { - if ( i > 0 ) - return "Foo"; - else - return "Bar"; - } - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - Log::remove_filter(SSH::LOG, "default"); - - local filter: Log::Filter = [$name="sqlite", $path="ssh", $writer=Log::WRITER_SQLITE]; - Log::add_filter(SSH::LOG, filter); - - local empty_set: set[string]; - local empty_vector: vector of string; - - Log::write(SSH::LOG, [ - $b=T, - $i=-42, - $e=SSH::LOG, - $c=21, - $p=123/tcp, - $sn=10.0.0.1/24, - $a=1.2.3.4, - $d=3.14, - $t=network_time(), - $iv=100secs, - $s="hurz", - $sc=set(1,2,3,4), - $ss=set("AA", "BB", "CC"), - $se=empty_set, - $vc=vector(10, 20, 30), - $ve=empty_vector, - $f=foo - ]); -} - diff --git a/testing/btest/scripts/base/frameworks/logging/sqlite/error.zeek b/testing/btest/scripts/base/frameworks/logging/sqlite/error.zeek new file mode 100644 index 0000000000..3a1566b5a2 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/sqlite/error.zeek @@ -0,0 +1,107 @@ +# +# @TEST-REQUIRES: which sqlite3 +# @TEST-REQUIRES: has-writer Zeek::SQLiteWriter +# @TEST-GROUP: sqlite +# +# @TEST-EXEC: cat ssh.sql | sqlite3 ssh.sqlite +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff .stderr +# +# Testing all possible types. 
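# Illustrative sketch (not part of the patch): these tests route a stream to
# the SQLite writer through an explicit filter, and the simultaneous-writes
# test further below additionally overrides the target table name via the
# filter's $config table. The stream and path names are taken from the
# surrounding test and are only illustrative.
event zeek_init()
	{
	local f: Log::Filter = [$name="sqlite", $path="ssh",
	                        $writer=Log::WRITER_SQLITE,
	                        $config=table(["tablename"] = "ssh")];
	Log::add_filter(SSH::LOG, f);
	}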
+# + +@TEST-START-FILE ssh.sql +PRAGMA foreign_keys=OFF; +BEGIN TRANSACTION; +CREATE TABLE ssh ( +'b' boolean, +'i' integer, +'e' text, +'c' integer, +'p' integer, +'sn' text, +'a' text, +'d' double precision, +'t' double precision, +'iv' double precision, +'s' text, +'sc' text, +'ss' text, +'se' text, +'vc' text, +'ve' text +); +INSERT INTO "ssh" VALUES(1,-42,'SSH::LOG',21,123,'10.0.0.0/24','1.2.3.4',3.14,1.36859359634203600879e+09,100.0,'hurz','2,4,1,3','CC,AA,BB','(empty)','10,20,30','(empty)'); +COMMIT; +@TEST-END-FILE + + + +redef LogSQLite::unset_field = "(unset)"; + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + b: bool; + i: int; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of count; + ve: vector of string; + f: function(i: count) : string; + } &log; +} + +function foo(i : count) : string + { + if ( i > 0 ) + return "Foo"; + else + return "Bar"; + } + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + Log::remove_filter(SSH::LOG, "default"); + + local filter: Log::Filter = [$name="sqlite", $path="ssh", $writer=Log::WRITER_SQLITE]; + Log::add_filter(SSH::LOG, filter); + + local empty_set: set[string]; + local empty_vector: vector of string; + + Log::write(SSH::LOG, [ + $b=T, + $i=-42, + $e=SSH::LOG, + $c=21, + $p=123/tcp, + $sn=10.0.0.1/24, + $a=1.2.3.4, + $d=3.14, + $t=double_to_time(1559847346.10295), + $iv=100secs, + $s="hurz", + $sc=set(1,2,3,4), + $ss=set("AA", "BB", "CC"), + $se=empty_set, + $vc=vector(10, 20, 30), + $ve=empty_vector, + $f=foo + ]); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/sqlite/set.bro b/testing/btest/scripts/base/frameworks/logging/sqlite/set.bro deleted file mode 100644 index 0cceb7af08..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/sqlite/set.bro +++ /dev/null @@ -1,50 +0,0 @@ -# -# Check if set works in last position (the describe call in sqlite.cc has a good -# chance of being off by one if someone changes it). -# -# @TEST-REQUIRES: which sqlite3 -# @TEST-REQUIRES: has-writer Bro::SQLiteWriter -# @TEST-GROUP: sqlite -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: sqlite3 ssh.sqlite 'select * from ssh' > ssh.select -# @TEST-EXEC: btest-diff ssh.select -# -# Testing all possible types. - -redef LogSQLite::unset_field = "(unset)"; - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - ss: set[string]; - } &log; -} - -function foo(i : count) : string - { - if ( i > 0 ) - return "Foo"; - else - return "Bar"; - } - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - Log::remove_filter(SSH::LOG, "default"); - - local filter: Log::Filter = [$name="sqlite", $path="ssh", $writer=Log::WRITER_SQLITE]; - Log::add_filter(SSH::LOG, filter); - - local empty_set: set[string]; - local empty_vector: vector of string; - - Log::write(SSH::LOG, [ - $ss=set("AA", "BB", "CC") - ]); -} - diff --git a/testing/btest/scripts/base/frameworks/logging/sqlite/set.zeek b/testing/btest/scripts/base/frameworks/logging/sqlite/set.zeek new file mode 100644 index 0000000000..e597a74024 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/sqlite/set.zeek @@ -0,0 +1,50 @@ +# +# Check if set works in last position (the describe call in sqlite.cc has a good +# chance of being off by one if someone changes it). 
+# +# @TEST-REQUIRES: which sqlite3 +# @TEST-REQUIRES: has-writer Zeek::SQLiteWriter +# @TEST-GROUP: sqlite +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: sqlite3 ssh.sqlite 'select * from ssh' > ssh.select +# @TEST-EXEC: btest-diff ssh.select +# +# Testing all possible types. + +redef LogSQLite::unset_field = "(unset)"; + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + ss: set[string]; + } &log; +} + +function foo(i : count) : string + { + if ( i > 0 ) + return "Foo"; + else + return "Bar"; + } + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + Log::remove_filter(SSH::LOG, "default"); + + local filter: Log::Filter = [$name="sqlite", $path="ssh", $writer=Log::WRITER_SQLITE]; + Log::add_filter(SSH::LOG, filter); + + local empty_set: set[string]; + local empty_vector: vector of string; + + Log::write(SSH::LOG, [ + $ss=set("AA", "BB", "CC") + ]); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/sqlite/simultaneous-writes.bro b/testing/btest/scripts/base/frameworks/logging/sqlite/simultaneous-writes.bro deleted file mode 100644 index 2e864aa791..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/sqlite/simultaneous-writes.bro +++ /dev/null @@ -1,90 +0,0 @@ -# Test simultaneous writes to the same database file. -# -# @TEST-REQUIRES: which sqlite3 -# @TEST-REQUIRES: has-writer Bro::SQLiteWriter -# @TEST-GROUP: sqlite -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: sqlite3 ssh.sqlite 'select * from ssh' > ssh.select -# @TEST-EXEC: sqlite3 ssh.sqlite 'select * from sshtwo' >> ssh.select -# @TEST-EXEC: btest-diff ssh.select -# -# Testing all possible types. - -redef LogSQLite::unset_field = "(unset)"; - -module SSH; - -export { - redef enum Log::ID += { LOG, LOG2 }; - - type Log: record { - b: bool; - i: int; - e: Log::ID; - c: count; - p: port; - sn: subnet; - a: addr; - d: double; - t: time; - iv: interval; - s: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of count; - ve: vector of string; - f: function(i: count) : string; - } &log; -} - -function foo(i : count) : string - { - if ( i > 0 ) - return "Foo"; - else - return "Bar"; - } - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - Log::create_stream(SSH::LOG2, [$columns=Log]); - Log::remove_filter(SSH::LOG, "default"); - Log::remove_filter(SSH::LOG2, "default"); - - local filter: Log::Filter = [$name="sqlite", $path="ssh", $config=table(["tablename"] = "ssh"), $writer=Log::WRITER_SQLITE]; - Log::add_filter(SSH::LOG, filter); - local filter2 = copy(filter); - filter2$name = "sqlite2"; - filter2$config = table(["tablename"] = "sshtwo"); - Log::add_filter(SSH::LOG2, filter2); - - local empty_set: set[string]; - local empty_vector: vector of string; - - local out = [ - $b=T, - $i=-42, - $e=SSH::LOG, - $c=21, - $p=123/tcp, - $sn=10.0.0.1/24, - $a=1.2.3.4, - $d=3.14, - $t=network_time(), - $iv=100secs, - $s="hurz", - $sc=set(1,2,3,4), - $ss=set("AA", "BB", "CC"), - $se=empty_set, - $vc=vector(10, 20, 30), - $ve=empty_vector, - $f=foo - ]; - - Log::write(SSH::LOG, out); - Log::write(SSH::LOG2, out); -} - diff --git a/testing/btest/scripts/base/frameworks/logging/sqlite/simultaneous-writes.zeek b/testing/btest/scripts/base/frameworks/logging/sqlite/simultaneous-writes.zeek new file mode 100644 index 0000000000..fcdbd928ee --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/sqlite/simultaneous-writes.zeek @@ -0,0 +1,90 @@ +# Test simultaneous writes to the same database file. 
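# Illustrative sketch (not part of the patch): besides the .bro -> .zeek
# renames, the SQLite variants of these tests swap $t=network_time() for a
# fixed value so the sqlite3 SELECT output compared by btest-diff stays stable
# across runs. The constant below is the one used in the updated tests;
# double_to_time() converts a raw epoch double into a time value.
event zeek_init()
	{
	print double_to_time(1559847346.10295);
	}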
+# +# @TEST-REQUIRES: which sqlite3 +# @TEST-REQUIRES: has-writer Zeek::SQLiteWriter +# @TEST-GROUP: sqlite +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: sqlite3 ssh.sqlite 'select * from ssh' > ssh.select +# @TEST-EXEC: sqlite3 ssh.sqlite 'select * from sshtwo' >> ssh.select +# @TEST-EXEC: btest-diff ssh.select +# +# Testing all possible types. + +redef LogSQLite::unset_field = "(unset)"; + +module SSH; + +export { + redef enum Log::ID += { LOG, LOG2 }; + + type Log: record { + b: bool; + i: int; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of count; + ve: vector of string; + f: function(i: count) : string; + } &log; +} + +function foo(i : count) : string + { + if ( i > 0 ) + return "Foo"; + else + return "Bar"; + } + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + Log::create_stream(SSH::LOG2, [$columns=Log]); + Log::remove_filter(SSH::LOG, "default"); + Log::remove_filter(SSH::LOG2, "default"); + + local filter: Log::Filter = [$name="sqlite", $path="ssh", $config=table(["tablename"] = "ssh"), $writer=Log::WRITER_SQLITE]; + Log::add_filter(SSH::LOG, filter); + local filter2 = copy(filter); + filter2$name = "sqlite2"; + filter2$config = table(["tablename"] = "sshtwo"); + Log::add_filter(SSH::LOG2, filter2); + + local empty_set: set[string]; + local empty_vector: vector of string; + + local out = [ + $b=T, + $i=-42, + $e=SSH::LOG, + $c=21, + $p=123/tcp, + $sn=10.0.0.1/24, + $a=1.2.3.4, + $d=3.14, + $t=double_to_time(1559847346.10295), + $iv=100secs, + $s="hurz", + $sc=set(1,2,3,4), + $ss=set("AA", "BB", "CC"), + $se=empty_set, + $vc=vector(10, 20, 30), + $ve=empty_vector, + $f=foo + ]; + + Log::write(SSH::LOG, out); + Log::write(SSH::LOG2, out); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/sqlite/types.bro b/testing/btest/scripts/base/frameworks/logging/sqlite/types.bro deleted file mode 100644 index 6c088e9f2f..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/sqlite/types.bro +++ /dev/null @@ -1,79 +0,0 @@ -# -# @TEST-REQUIRES: which sqlite3 -# @TEST-REQUIRES: has-writer Bro::SQLiteWriter -# @TEST-GROUP: sqlite -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: sqlite3 ssh.sqlite 'select * from ssh' > ssh.select -# @TEST-EXEC: btest-diff ssh.select -# -# Testing all possible types. 
- -redef LogSQLite::unset_field = "(unset)"; - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - b: bool; - i: int; - e: Log::ID; - c: count; - p: port; - sn: subnet; - a: addr; - d: double; - t: time; - iv: interval; - s: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of count; - ve: vector of string; - f: function(i: count) : string; - } &log; -} - -function foo(i : count) : string - { - if ( i > 0 ) - return "Foo"; - else - return "Bar"; - } - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - Log::remove_filter(SSH::LOG, "default"); - - local filter: Log::Filter = [$name="sqlite", $path="ssh", $writer=Log::WRITER_SQLITE]; - Log::add_filter(SSH::LOG, filter); - - local empty_set: set[string]; - local empty_vector: vector of string; - - Log::write(SSH::LOG, [ - $b=T, - $i=-42, - $e=SSH::LOG, - $c=21, - $p=123/tcp, - $sn=10.0.0.1/24, - $a=1.2.3.4, - $d=3.14, - $t=network_time(), - $iv=100secs, - $s="hurz", - $sc=set(1,2,3,4), - $ss=set("AA", "BB", "CC"), - $se=empty_set, - $vc=vector(10, 20, 30), - $ve=empty_vector, - $f=foo - ]); -} - diff --git a/testing/btest/scripts/base/frameworks/logging/sqlite/types.zeek b/testing/btest/scripts/base/frameworks/logging/sqlite/types.zeek new file mode 100644 index 0000000000..065fa98a77 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/sqlite/types.zeek @@ -0,0 +1,79 @@ +# +# @TEST-REQUIRES: which sqlite3 +# @TEST-REQUIRES: has-writer Zeek::SQLiteWriter +# @TEST-GROUP: sqlite +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: sqlite3 ssh.sqlite 'select * from ssh' > ssh.select +# @TEST-EXEC: btest-diff ssh.select +# +# Testing all possible types. + +redef LogSQLite::unset_field = "(unset)"; + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + b: bool; + i: int; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of count; + ve: vector of string; + f: function(i: count) : string; + } &log; +} + +function foo(i : count) : string + { + if ( i > 0 ) + return "Foo"; + else + return "Bar"; + } + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + Log::remove_filter(SSH::LOG, "default"); + + local filter: Log::Filter = [$name="sqlite", $path="ssh", $writer=Log::WRITER_SQLITE]; + Log::add_filter(SSH::LOG, filter); + + local empty_set: set[string]; + local empty_vector: vector of string; + + Log::write(SSH::LOG, [ + $b=T, + $i=-42, + $e=SSH::LOG, + $c=21, + $p=123/tcp, + $sn=10.0.0.1/24, + $a=1.2.3.4, + $d=3.14, + $t=double_to_time(1559847346.10295), + $iv=100secs, + $s="hurz", + $sc=set(1,2,3,4), + $ss=set("AA", "BB", "CC"), + $se=empty_set, + $vc=vector(10, 20, 30), + $ve=empty_vector, + $f=foo + ]); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/sqlite/wikipedia.bro b/testing/btest/scripts/base/frameworks/logging/sqlite/wikipedia.bro deleted file mode 100644 index e45c42d7e2..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/sqlite/wikipedia.bro +++ /dev/null @@ -1,10 +0,0 @@ -# -# @TEST-REQUIRES: which sqlite3 -# @TEST-REQUIRES: has-writer Bro::SQLiteWriter -# @TEST-GROUP: sqlite -# -# @TEST-EXEC: bro -r $TRACES/wikipedia.trace Log::default_writer=Log::WRITER_SQLITE -# @TEST-EXEC: sqlite3 conn.sqlite 'select * from conn order by ts' | sort -n > conn.select -# @TEST-EXEC: sqlite3 http.sqlite 'select * from http order by ts' | sort -n > http.select -# 
@TEST-EXEC: btest-diff conn.select -# @TEST-EXEC: btest-diff http.select diff --git a/testing/btest/scripts/base/frameworks/logging/sqlite/wikipedia.zeek b/testing/btest/scripts/base/frameworks/logging/sqlite/wikipedia.zeek new file mode 100644 index 0000000000..cd6eaf7f26 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/sqlite/wikipedia.zeek @@ -0,0 +1,10 @@ +# +# @TEST-REQUIRES: which sqlite3 +# @TEST-REQUIRES: has-writer Zeek::SQLiteWriter +# @TEST-GROUP: sqlite +# +# @TEST-EXEC: zeek -r $TRACES/wikipedia.trace Log::default_writer=Log::WRITER_SQLITE +# @TEST-EXEC: sqlite3 conn.sqlite 'select * from conn order by ts' | sort -n > conn.select +# @TEST-EXEC: sqlite3 http.sqlite 'select * from http order by ts' | sort -n > http.select +# @TEST-EXEC: btest-diff conn.select +# @TEST-EXEC: btest-diff http.select diff --git a/testing/btest/scripts/base/frameworks/logging/stdout.bro b/testing/btest/scripts/base/frameworks/logging/stdout.bro deleted file mode 100644 index f431a5b6c9..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/stdout.bro +++ /dev/null @@ -1,36 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: test '!' -e ssh.log - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. - status: string &optional; - country: string &default="unknown"; - } &log; -} - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - - local filter = Log::get_filter(SSH::LOG, "default"); - filter$path= "/dev/stdout"; - Log::add_filter(SSH::LOG, filter); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); - -} - diff --git a/testing/btest/scripts/base/frameworks/logging/stdout.zeek b/testing/btest/scripts/base/frameworks/logging/stdout.zeek new file mode 100644 index 0000000000..39db1d1e51 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/stdout.zeek @@ -0,0 +1,36 @@ +# +# @TEST-EXEC: zeek -b %INPUT >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: test '!' -e ssh.log + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. 
+ status: string &optional; + country: string &default="unknown"; + } &log; +} + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + + local filter = Log::get_filter(SSH::LOG, "default"); + filter$path= "/dev/stdout"; + Log::add_filter(SSH::LOG, filter); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); + +} + diff --git a/testing/btest/scripts/base/frameworks/logging/test-logging.bro b/testing/btest/scripts/base/frameworks/logging/test-logging.bro deleted file mode 100644 index 9f90d515fb..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/test-logging.bro +++ /dev/null @@ -1,31 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff ssh.log - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - t: time; - id: conn_id; # Will be rolled out into individual columns. - status: string &optional; - country: string &default="unknown"; - } &log; -} - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - - local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; - - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); - Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); - -} - diff --git a/testing/btest/scripts/base/frameworks/logging/test-logging.zeek b/testing/btest/scripts/base/frameworks/logging/test-logging.zeek new file mode 100644 index 0000000000..3e0db68c79 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/test-logging.zeek @@ -0,0 +1,31 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff ssh.log + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. + status: string &optional; + country: string &default="unknown"; + } &log; +} + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + + local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp]; + + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]); + Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]); + +} + diff --git a/testing/btest/scripts/base/frameworks/logging/types.bro b/testing/btest/scripts/base/frameworks/logging/types.bro deleted file mode 100644 index d79c667e50..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/types.bro +++ /dev/null @@ -1,70 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff ssh.log -# -# Testing all possible types. 
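# Illustrative sketch (not part of the patch): the types test recreated below
# exercises every loggable type and redefines LogAscii::empty_field so that
# empty containers render as a distinct placeholder string in the ASCII log:
redef LogAscii::empty_field = "EMPTY";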
- -redef LogAscii::empty_field = "EMPTY"; - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - b: bool; - i: int; - e: Log::ID; - c: count; - p: port; - sn: subnet; - a: addr; - d: double; - t: time; - iv: interval; - s: string; - sc: set[count]; - ss: set[string]; - se: set[string]; - vc: vector of count; - ve: vector of string; - f: function(i: count) : string; - } &log; -} - -function foo(i : count) : string - { - if ( i > 0 ) - return "Foo"; - else - return "Bar"; - } - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - - local empty_set: set[string]; - local empty_vector: vector of string; - - Log::write(SSH::LOG, [ - $b=T, - $i=-42, - $e=SSH::LOG, - $c=21, - $p=123/tcp, - $sn=10.0.0.1/24, - $a=1.2.3.4, - $d=3.14, - $t=network_time(), - $iv=100secs, - $s="hurz", - $sc=set(1,2,3,4), - $ss=set("AA", "BB", "CC"), - $se=empty_set, - $vc=vector(10, 20, 30), - $ve=empty_vector, - $f=foo - ]); -} - diff --git a/testing/btest/scripts/base/frameworks/logging/types.zeek b/testing/btest/scripts/base/frameworks/logging/types.zeek new file mode 100644 index 0000000000..fc10e88bcc --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/types.zeek @@ -0,0 +1,70 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff ssh.log +# +# Testing all possible types. + +redef LogAscii::empty_field = "EMPTY"; + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + b: bool; + i: int; + e: Log::ID; + c: count; + p: port; + sn: subnet; + a: addr; + d: double; + t: time; + iv: interval; + s: string; + sc: set[count]; + ss: set[string]; + se: set[string]; + vc: vector of count; + ve: vector of string; + f: function(i: count) : string; + } &log; +} + +function foo(i : count) : string + { + if ( i > 0 ) + return "Foo"; + else + return "Bar"; + } + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + + local empty_set: set[string]; + local empty_vector: vector of string; + + Log::write(SSH::LOG, [ + $b=T, + $i=-42, + $e=SSH::LOG, + $c=21, + $p=123/tcp, + $sn=10.0.0.1/24, + $a=1.2.3.4, + $d=3.14, + $t=network_time(), + $iv=100secs, + $s="hurz", + $sc=set(1,2,3,4), + $ss=set("AA", "BB", "CC"), + $se=empty_set, + $vc=vector(10, 20, 30), + $ve=empty_vector, + $f=foo + ]); +} + diff --git a/testing/btest/scripts/base/frameworks/logging/unset-record.bro b/testing/btest/scripts/base/frameworks/logging/unset-record.bro deleted file mode 100644 index bb922dc9c8..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/unset-record.bro +++ /dev/null @@ -1,28 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff testing.log - -redef enum Log::ID += { TESTING }; - -type Foo: record { - val1: count; - val2: count; -} &log; - -type Bar: record { - a: Foo &log &optional; - b: count &log; -}; - -event bro_init() -{ - Log::create_stream(TESTING, [$columns=Bar]); - - local x: Bar; - - x = [$b=6]; - Log::write(TESTING, x); - - x = [$a=[$val1=1,$val2=2], $b=3]; - Log::write(TESTING, x); -} diff --git a/testing/btest/scripts/base/frameworks/logging/unset-record.zeek b/testing/btest/scripts/base/frameworks/logging/unset-record.zeek new file mode 100644 index 0000000000..529e474381 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/unset-record.zeek @@ -0,0 +1,28 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff testing.log + +redef enum Log::ID += { TESTING }; + +type Foo: record { + val1: count; + val2: count; +} &log; + +type Bar: record { + a: Foo &log &optional; + b: count 
&log; +}; + +event zeek_init() +{ + Log::create_stream(TESTING, [$columns=Bar]); + + local x: Bar; + + x = [$b=6]; + Log::write(TESTING, x); + + x = [$a=[$val1=1,$val2=2], $b=3]; + Log::write(TESTING, x); +} diff --git a/testing/btest/scripts/base/frameworks/logging/vec.bro b/testing/btest/scripts/base/frameworks/logging/vec.bro deleted file mode 100644 index 00c5ff5117..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/vec.bro +++ /dev/null @@ -1,27 +0,0 @@ -# -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff ssh.log - -module SSH; - -export { - redef enum Log::ID += { LOG }; - - type Log: record { - vec: vector of string &log; - }; -} - -event bro_init() -{ - Log::create_stream(SSH::LOG, [$columns=Log]); - - local v: vector of string; - - v[1] = "2"; - v[4] = "5"; - - Log::write(SSH::LOG, [$vec=v]); -} - - diff --git a/testing/btest/scripts/base/frameworks/logging/vec.zeek b/testing/btest/scripts/base/frameworks/logging/vec.zeek new file mode 100644 index 0000000000..5e73357947 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/vec.zeek @@ -0,0 +1,27 @@ +# +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff ssh.log + +module SSH; + +export { + redef enum Log::ID += { LOG }; + + type Log: record { + vec: vector of string &log; + }; +} + +event zeek_init() +{ + Log::create_stream(SSH::LOG, [$columns=Log]); + + local v: vector of string; + + v[1] = "2"; + v[4] = "5"; + + Log::write(SSH::LOG, [$vec=v]); +} + + diff --git a/testing/btest/scripts/base/frameworks/logging/writer-path-conflict.bro b/testing/btest/scripts/base/frameworks/logging/writer-path-conflict.bro deleted file mode 100644 index 908fb43c72..0000000000 --- a/testing/btest/scripts/base/frameworks/logging/writer-path-conflict.bro +++ /dev/null @@ -1,24 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: btest-diff reporter.log -# @TEST-EXEC: btest-diff http.log -# @TEST-EXEC: btest-diff http-2.log -# @TEST-EXEC: btest-diff http-3.log -# @TEST-EXEC: btest-diff http-2-2.log - -@load base/protocols/http - -event bro_init() - { - # Both the default filter for the http stream and this new one will - # attempt to have the same writer write to path "http", which will - # be reported as a warning and the path auto-corrected to "http-2" - local filter: Log::Filter = [$name="host-only", $include=set("host")]; - # Same deal here, but should be auto-corrected to "http-3". - local filter2: Log::Filter = [$name="uri-only", $include=set("uri")]; - # Conflict between auto-correct paths needs to be corrected, too, this - # time it will be "http-2-2". 
- local filter3: Log::Filter = [$path="http-2", $name="status-only", $include=set("status_code")]; - Log::add_filter(HTTP::LOG, filter); - Log::add_filter(HTTP::LOG, filter2); - Log::add_filter(HTTP::LOG, filter3); - } diff --git a/testing/btest/scripts/base/frameworks/logging/writer-path-conflict.zeek b/testing/btest/scripts/base/frameworks/logging/writer-path-conflict.zeek new file mode 100644 index 0000000000..60984f1fc7 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/logging/writer-path-conflict.zeek @@ -0,0 +1,24 @@ +# @TEST-EXEC: zeek -C -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-diff reporter.log +# @TEST-EXEC: btest-diff http.log +# @TEST-EXEC: btest-diff http-2.log +# @TEST-EXEC: btest-diff http-3.log +# @TEST-EXEC: btest-diff http-2-2.log + +@load base/protocols/http + +event zeek_init() + { + # Both the default filter for the http stream and this new one will + # attempt to have the same writer write to path "http", which will + # be reported as a warning and the path auto-corrected to "http-2" + local filter: Log::Filter = [$name="host-only", $include=set("host")]; + # Same deal here, but should be auto-corrected to "http-3". + local filter2: Log::Filter = [$name="uri-only", $include=set("uri")]; + # Conflict between auto-correct paths needs to be corrected, too, this + # time it will be "http-2-2". + local filter3: Log::Filter = [$path="http-2", $name="status-only", $include=set("status_code")]; + Log::add_filter(HTTP::LOG, filter); + Log::add_filter(HTTP::LOG, filter2); + Log::add_filter(HTTP::LOG, filter3); + } diff --git a/testing/btest/scripts/base/frameworks/netcontrol/acld-hook.bro b/testing/btest/scripts/base/frameworks/netcontrol/acld-hook.bro deleted file mode 100644 index 9e0db8531a..0000000000 --- a/testing/btest/scripts/base/frameworks/netcontrol/acld-hook.bro +++ /dev/null @@ -1,135 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# @TEST-EXEC: btest-bg-run recv "bro -b ../recv.bro >recv.out" -# @TEST-EXEC: btest-bg-run send "bro -b -r $TRACES/tls/ecdhe.pcap --pseudo-realtime ../send.bro >send.out" - -# @TEST-EXEC: btest-bg-wait 20 -# @TEST-EXEC: btest-diff recv/recv.out -# @TEST-EXEC: btest-diff send/send.out - -@TEST-START-FILE send.bro - -@load base/frameworks/netcontrol - -redef exit_only_after_terminate = T; -global have_peer = F; -global did_init = F; - -event bro_init() - { - suspend_processing(); - } - -event NetControl::init() - { - local netcontrol_acld = NetControl::create_acld(NetControl::AcldConfig($acld_host=127.0.0.1, $acld_port=to_port(getenv("BROKER_PORT")), $acld_topic="bro/event/netcontroltest")); - NetControl::activate(netcontrol_acld, 0); - } - -event NetControl::init_done() - { - did_init = T; - - if ( did_init && have_peer ) - continue_processing(); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print "Broker peer added", endpoint$network$address, endpoint$network$bound_port == to_port(getenv("BROKER_PORT")); - have_peer = T; - - if ( did_init && have_peer ) - continue_processing(); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -hook NetControl::acld_rule_policy(p: NetControl::PluginState, r: NetControl::Rule, ar: NetControl::AclRule) - { - # use nullzero instead of drop for address drops - if ( r$ty == NetControl::DROP && r$entity$ty == NetControl::ADDRESS && ar$command == "drop" ) - ar$command = "nullzero"; - } - -event connection_established(c: connection) - { - local id = c$id; - - local flow1 = NetControl::Flow( - 
$src_h=addr_to_subnet(c$id$orig_h), - $dst_h=addr_to_subnet(c$id$resp_h) - ); - local e1: NetControl::Entity = [$ty=NetControl::FLOW, $flow=flow1]; - local r1: NetControl::Rule = [$ty=NetControl::DROP, $target=NetControl::FORWARD, $entity=e1, $expire=10hrs, $location="here"]; - - local flow2 = NetControl::Flow( - $dst_p=c$id$resp_p - ); - local e2: NetControl::Entity = [$ty=NetControl::FLOW, $flow=flow2]; - local r2: NetControl::Rule = [$ty=NetControl::DROP, $target=NetControl::FORWARD, $entity=e2, $expire=10hrs, $location="there"]; - - NetControl::add_rule(r1); - NetControl::add_rule(r2); - NetControl::drop_address(id$orig_h, 10hrs); - } - -event NetControl::rule_added(r: NetControl::Rule, p: NetControl::PluginState, msg: string) - { - print "rule added", r$entity, r$ty; - NetControl::remove_rule(r$id); - } - -event NetControl::rule_removed(r: NetControl::Rule, p: NetControl::PluginState, msg: string) - { - print "rule removed", r$entity, r$ty; - } - -@TEST-END-FILE - -@TEST-START-FILE recv.bro - -@load base/frameworks/netcontrol -@load base/frameworks/broker - -redef exit_only_after_terminate = T; - -event die() - { - terminate(); - } - -event bro_init() - { - Broker::subscribe("bro/event/netcontroltest"); - Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print "Broker peer added"; - } - -event NetControl::acld_add_rule(id: count, r: NetControl::Rule, ar: NetControl::AclRule) - { - print "add_rule", id, r$entity, r$ty, ar; - - Broker::publish("bro/event/netcontroltest", NetControl::acld_rule_added, id, r, ar$command); - } - -event NetControl::acld_remove_rule(id: count, r: NetControl::Rule, ar: NetControl::AclRule) - { - print "remove_rule", id, r$entity, r$ty, ar; - - Broker::publish("bro/event/netcontroltest", NetControl::acld_rule_removed, id, r, ar$command); - - if ( r$cid == 4 ) - { - schedule 2sec { die() }; - } - } - -@TEST-END-FILE - diff --git a/testing/btest/scripts/base/frameworks/netcontrol/acld-hook.zeek b/testing/btest/scripts/base/frameworks/netcontrol/acld-hook.zeek new file mode 100644 index 0000000000..2698a3bfab --- /dev/null +++ b/testing/btest/scripts/base/frameworks/netcontrol/acld-hook.zeek @@ -0,0 +1,135 @@ +# @TEST-PORT: BROKER_PORT +# @TEST-EXEC: btest-bg-run recv "zeek -b ../recv.zeek >recv.out" +# @TEST-EXEC: btest-bg-run send "zeek -b -r $TRACES/tls/ecdhe.pcap --pseudo-realtime ../send.zeek >send.out" + +# @TEST-EXEC: btest-bg-wait 20 +# @TEST-EXEC: btest-diff recv/recv.out +# @TEST-EXEC: btest-diff send/send.out + +@TEST-START-FILE send.zeek + +@load base/frameworks/netcontrol + +redef exit_only_after_terminate = T; +global have_peer = F; +global did_init = F; + +event zeek_init() + { + suspend_processing(); + } + +event NetControl::init() + { + local netcontrol_acld = NetControl::create_acld(NetControl::AcldConfig($acld_host=127.0.0.1, $acld_port=to_port(getenv("BROKER_PORT")), $acld_topic="zeek/event/netcontroltest")); + NetControl::activate(netcontrol_acld, 0); + } + +event NetControl::init_done() + { + did_init = T; + + if ( did_init && have_peer ) + continue_processing(); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print "Broker peer added", endpoint$network$address, endpoint$network$bound_port == to_port(getenv("BROKER_PORT")); + have_peer = T; + + if ( did_init && have_peer ) + continue_processing(); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +hook 
NetControl::acld_rule_policy(p: NetControl::PluginState, r: NetControl::Rule, ar: NetControl::AclRule) + { + # use nullzero instead of drop for address drops + if ( r$ty == NetControl::DROP && r$entity$ty == NetControl::ADDRESS && ar$command == "drop" ) + ar$command = "nullzero"; + } + +event connection_established(c: connection) + { + local id = c$id; + + local flow1 = NetControl::Flow( + $src_h=addr_to_subnet(c$id$orig_h), + $dst_h=addr_to_subnet(c$id$resp_h) + ); + local e1: NetControl::Entity = [$ty=NetControl::FLOW, $flow=flow1]; + local r1: NetControl::Rule = [$ty=NetControl::DROP, $target=NetControl::FORWARD, $entity=e1, $expire=10hrs, $location="here"]; + + local flow2 = NetControl::Flow( + $dst_p=c$id$resp_p + ); + local e2: NetControl::Entity = [$ty=NetControl::FLOW, $flow=flow2]; + local r2: NetControl::Rule = [$ty=NetControl::DROP, $target=NetControl::FORWARD, $entity=e2, $expire=10hrs, $location="there"]; + + NetControl::add_rule(r1); + NetControl::add_rule(r2); + NetControl::drop_address(id$orig_h, 10hrs); + } + +event NetControl::rule_added(r: NetControl::Rule, p: NetControl::PluginState, msg: string) + { + print "rule added", r$entity, r$ty; + NetControl::remove_rule(r$id); + } + +event NetControl::rule_removed(r: NetControl::Rule, p: NetControl::PluginState, msg: string) + { + print "rule removed", r$entity, r$ty; + } + +@TEST-END-FILE + +@TEST-START-FILE recv.zeek + +@load base/frameworks/netcontrol +@load base/frameworks/broker + +redef exit_only_after_terminate = T; + +event die() + { + terminate(); + } + +event zeek_init() + { + Broker::subscribe("zeek/event/netcontroltest"); + Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print "Broker peer added"; + } + +event NetControl::acld_add_rule(id: count, r: NetControl::Rule, ar: NetControl::AclRule) + { + print "add_rule", id, r$entity, r$ty, ar; + + Broker::publish("zeek/event/netcontroltest", NetControl::acld_rule_added, id, r, ar$command); + } + +event NetControl::acld_remove_rule(id: count, r: NetControl::Rule, ar: NetControl::AclRule) + { + print "remove_rule", id, r$entity, r$ty, ar; + + Broker::publish("zeek/event/netcontroltest", NetControl::acld_rule_removed, id, r, ar$command); + + if ( r$cid == 4 ) + { + schedule 2sec { die() }; + } + } + +@TEST-END-FILE + diff --git a/testing/btest/scripts/base/frameworks/netcontrol/acld.bro b/testing/btest/scripts/base/frameworks/netcontrol/acld.bro deleted file mode 100644 index 243e5e9b7c..0000000000 --- a/testing/btest/scripts/base/frameworks/netcontrol/acld.bro +++ /dev/null @@ -1,146 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# @TEST-EXEC: btest-bg-run recv "bro -b ../recv.bro >recv.out" -# @TEST-EXEC: btest-bg-run send "bro -b -r $TRACES/tls/ecdhe.pcap --pseudo-realtime ../send.bro >send.out" - -# @TEST-EXEC: btest-bg-wait 20 -# @TEST-EXEC: btest-diff send/netcontrol.log -# @TEST-EXEC: btest-diff recv/recv.out -# @TEST-EXEC: btest-diff send/send.out - -@TEST-START-FILE send.bro - -@load base/frameworks/netcontrol - -redef exit_only_after_terminate = T; -global have_peer = F; -global did_init = F; - -event bro_init() - { - suspend_processing(); - } - -event NetControl::init() - { - local netcontrol_acld = NetControl::create_acld(NetControl::AcldConfig($acld_host=127.0.0.1, $acld_port=to_port(getenv("BROKER_PORT")), $acld_topic="bro/event/netcontroltest")); - NetControl::activate(netcontrol_acld, 0); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - 
print "Broker peer added", endpoint$network$address, endpoint$network$bound_port == to_port(getenv("BROKER_PORT")); - have_peer = T; - - if ( did_init && have_peer ) - continue_processing(); - } - -event NetControl::init_done() - { - did_init = T; - - if ( did_init && have_peer ) - continue_processing(); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -event connection_established(c: connection) - { - local id = c$id; - - local flow1 = NetControl::Flow( - $src_h=addr_to_subnet(c$id$orig_h), - $dst_h=addr_to_subnet(c$id$resp_h) - ); - local e1: NetControl::Entity = [$ty=NetControl::FLOW, $flow=flow1]; - local r1: NetControl::Rule = [$ty=NetControl::DROP, $target=NetControl::FORWARD, $entity=e1, $expire=10hrs, $location="here"]; - - local flow2 = NetControl::Flow( - $dst_p=c$id$resp_p - ); - local e2: NetControl::Entity = [$ty=NetControl::FLOW, $flow=flow2]; - local r2: NetControl::Rule = [$ty=NetControl::DROP, $target=NetControl::FORWARD, $entity=e2, $expire=10hrs, $location="there"]; - - NetControl::add_rule(r1); - NetControl::add_rule(r2); - NetControl::drop_address(id$orig_h, 10hrs); - } - -event NetControl::rule_added(r: NetControl::Rule, p: NetControl::PluginState, msg: string) - { - print "rule added", r$entity, r$ty; - NetControl::remove_rule(r$id); - } - -event NetControl::rule_exists(r: NetControl::Rule, p: NetControl::PluginState, msg: string) - { - print "rule exists", r$entity, r$ty; - NetControl::remove_rule(r$id); - } - -event NetControl::rule_removed(r: NetControl::Rule, p: NetControl::PluginState, msg: string) - { - print "rule removed", r$entity, r$ty; - } - -event NetControl::rule_error(r: NetControl::Rule, p: NetControl::PluginState, msg: string) - { - print "rule error", r$entity, r$ty; - } - -@TEST-END-FILE - -@TEST-START-FILE recv.bro - -@load base/frameworks/netcontrol -@load base/frameworks/broker - -redef exit_only_after_terminate = T; - -event die() - { - terminate(); - } - -event bro_init() - { - Broker::subscribe("bro/event/netcontroltest"); - Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print "Broker peer added"; - } - -event NetControl::acld_add_rule(id: count, r: NetControl::Rule, ar: NetControl::AclRule) - { - print "add_rule", id, r$entity, r$ty, ar; - - if ( r$cid != 3 ) - Broker::publish("bro/event/netcontroltest", NetControl::acld_rule_added, id, r, ar$command); - else - Broker::publish("bro/event/netcontroltest", NetControl::acld_rule_exists, id, r, ar$command); - } - -event NetControl::acld_remove_rule(id: count, r: NetControl::Rule, ar: NetControl::AclRule) - { - print "remove_rule", id, r$entity, r$ty, ar; - - if ( r$cid != 2 ) - Broker::publish("bro/event/netcontroltest", NetControl::acld_rule_removed, id, r, ar$command); - else - Broker::publish("bro/event/netcontroltest", NetControl::acld_rule_error, id, r, ar$command); - - if ( r$cid == 4 ) - { - schedule 2sec { die() }; - } - } - -@TEST-END-FILE - diff --git a/testing/btest/scripts/base/frameworks/netcontrol/acld.zeek b/testing/btest/scripts/base/frameworks/netcontrol/acld.zeek new file mode 100644 index 0000000000..b710294bf9 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/netcontrol/acld.zeek @@ -0,0 +1,146 @@ +# @TEST-PORT: BROKER_PORT +# @TEST-EXEC: btest-bg-run recv "zeek -b ../recv.zeek >recv.out" +# @TEST-EXEC: btest-bg-run send "zeek -b -r $TRACES/tls/ecdhe.pcap --pseudo-realtime ../send.zeek >send.out" + +# @TEST-EXEC: 
btest-bg-wait 20 +# @TEST-EXEC: btest-diff send/netcontrol.log +# @TEST-EXEC: btest-diff recv/recv.out +# @TEST-EXEC: btest-diff send/send.out + +@TEST-START-FILE send.zeek + +@load base/frameworks/netcontrol + +redef exit_only_after_terminate = T; +global have_peer = F; +global did_init = F; + +event zeek_init() + { + suspend_processing(); + } + +event NetControl::init() + { + local netcontrol_acld = NetControl::create_acld(NetControl::AcldConfig($acld_host=127.0.0.1, $acld_port=to_port(getenv("BROKER_PORT")), $acld_topic="zeek/event/netcontroltest")); + NetControl::activate(netcontrol_acld, 0); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print "Broker peer added", endpoint$network$address, endpoint$network$bound_port == to_port(getenv("BROKER_PORT")); + have_peer = T; + + if ( did_init && have_peer ) + continue_processing(); + } + +event NetControl::init_done() + { + did_init = T; + + if ( did_init && have_peer ) + continue_processing(); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +event connection_established(c: connection) + { + local id = c$id; + + local flow1 = NetControl::Flow( + $src_h=addr_to_subnet(c$id$orig_h), + $dst_h=addr_to_subnet(c$id$resp_h) + ); + local e1: NetControl::Entity = [$ty=NetControl::FLOW, $flow=flow1]; + local r1: NetControl::Rule = [$ty=NetControl::DROP, $target=NetControl::FORWARD, $entity=e1, $expire=10hrs, $location="here"]; + + local flow2 = NetControl::Flow( + $dst_p=c$id$resp_p + ); + local e2: NetControl::Entity = [$ty=NetControl::FLOW, $flow=flow2]; + local r2: NetControl::Rule = [$ty=NetControl::DROP, $target=NetControl::FORWARD, $entity=e2, $expire=10hrs, $location="there"]; + + NetControl::add_rule(r1); + NetControl::add_rule(r2); + NetControl::drop_address(id$orig_h, 10hrs); + } + +event NetControl::rule_added(r: NetControl::Rule, p: NetControl::PluginState, msg: string) + { + print "rule added", r$entity, r$ty; + NetControl::remove_rule(r$id); + } + +event NetControl::rule_exists(r: NetControl::Rule, p: NetControl::PluginState, msg: string) + { + print "rule exists", r$entity, r$ty; + NetControl::remove_rule(r$id); + } + +event NetControl::rule_removed(r: NetControl::Rule, p: NetControl::PluginState, msg: string) + { + print "rule removed", r$entity, r$ty; + } + +event NetControl::rule_error(r: NetControl::Rule, p: NetControl::PluginState, msg: string) + { + print "rule error", r$entity, r$ty; + } + +@TEST-END-FILE + +@TEST-START-FILE recv.zeek + +@load base/frameworks/netcontrol +@load base/frameworks/broker + +redef exit_only_after_terminate = T; + +event die() + { + terminate(); + } + +event zeek_init() + { + Broker::subscribe("zeek/event/netcontroltest"); + Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print "Broker peer added"; + } + +event NetControl::acld_add_rule(id: count, r: NetControl::Rule, ar: NetControl::AclRule) + { + print "add_rule", id, r$entity, r$ty, ar; + + if ( r$cid != 3 ) + Broker::publish("zeek/event/netcontroltest", NetControl::acld_rule_added, id, r, ar$command); + else + Broker::publish("zeek/event/netcontroltest", NetControl::acld_rule_exists, id, r, ar$command); + } + +event NetControl::acld_remove_rule(id: count, r: NetControl::Rule, ar: NetControl::AclRule) + { + print "remove_rule", id, r$entity, r$ty, ar; + + if ( r$cid != 2 ) + Broker::publish("zeek/event/netcontroltest", NetControl::acld_rule_removed, id, r, ar$command); 
+ else + Broker::publish("zeek/event/netcontroltest", NetControl::acld_rule_error, id, r, ar$command); + + if ( r$cid == 4 ) + { + schedule 2sec { die() }; + } + } + +@TEST-END-FILE + diff --git a/testing/btest/scripts/base/frameworks/netcontrol/basic-cluster.bro b/testing/btest/scripts/base/frameworks/netcontrol/basic-cluster.bro deleted file mode 100644 index 50c04433ad..0000000000 --- a/testing/btest/scripts/base/frameworks/netcontrol/basic-cluster.bro +++ /dev/null @@ -1,78 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# -# @TEST-EXEC: btest-bg-run manager-1 "cp ../cluster-layout.bro . && CLUSTER_NODE=manager-1 bro %INPUT" -# @TEST-EXEC: btest-bg-run worker-1 "cp ../cluster-layout.bro . && CLUSTER_NODE=worker-1 bro --pseudo-realtime -C -r $TRACES/tls/ecdhe.pcap %INPUT" - -# @TEST-EXEC: $SCRIPTS/wait-for-pid $(cat worker-1/.pid) 10 || (btest-bg-wait -k 1 && false) - -# @TEST-EXEC: btest-bg-run worker-2 "cp ../cluster-layout.bro . && CLUSTER_NODE=worker-2 bro --pseudo-realtime -C -r $TRACES/tls/ecdhe.pcap %INPUT" -# @TEST-EXEC: btest-bg-wait 20 -# @TEST-EXEC: btest-diff worker-1/.stdout -# @TEST-EXEC: btest-diff worker-2/.stdout - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth0"], -}; -@TEST-END-FILE - -redef Log::default_rotation_interval = 0secs; -#redef exit_only_after_terminate = T; - -@load base/frameworks/netcontrol - -@if ( Cluster::local_node_type() == Cluster::WORKER ) -event bro_init() - { - suspend_processing(); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - continue_processing(); - } -@endif - -event NetControl::init() - { - local netcontrol_debug = NetControl::create_debug(T); - NetControl::activate(netcontrol_debug, 0); - } - -event connection_established(c: connection) - { - local id = c$id; - NetControl::shunt_flow([$src_h=id$orig_h, $src_p=id$orig_p, $dst_h=id$resp_h, $dst_p=id$resp_p], 1sec); - NetControl::drop_address(id$orig_h, 1sec); - } - -event terminate_me() { - terminate(); -} - -global peers_lost = 0; - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - ++peers_lost; - - if ( peers_lost == 2 ) - schedule 2sec { terminate_me() }; - } - -event NetControl::rule_added(r: NetControl::Rule, p: NetControl::PluginState, msg: string &default="") - { - print "Rule added", r$id, r$cid; - if ( r$entity?$ip ) - print |NetControl::find_rules_subnet(r$entity$ip)|; - } - -event NetControl::rule_destroyed(r: NetControl::Rule) - { - if ( r$entity?$ip ) - print "Rule destroyed", r$id, r$cid, |NetControl::find_rules_subnet(r$entity$ip)|; - } diff --git a/testing/btest/scripts/base/frameworks/netcontrol/basic-cluster.zeek b/testing/btest/scripts/base/frameworks/netcontrol/basic-cluster.zeek new file mode 100644 index 0000000000..067193de8c --- /dev/null +++ b/testing/btest/scripts/base/frameworks/netcontrol/basic-cluster.zeek @@ -0,0 +1,78 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# +# @TEST-EXEC: btest-bg-run manager-1 "cp ../cluster-layout.zeek . 
&& CLUSTER_NODE=manager-1 zeek %INPUT" +# @TEST-EXEC: btest-bg-run worker-1 "cp ../cluster-layout.zeek . && CLUSTER_NODE=worker-1 zeek --pseudo-realtime -C -r $TRACES/tls/ecdhe.pcap %INPUT" + +# @TEST-EXEC: $SCRIPTS/wait-for-pid $(cat worker-1/.pid) 10 || (btest-bg-wait -k 1 && false) + +# @TEST-EXEC: btest-bg-run worker-2 "cp ../cluster-layout.zeek . && CLUSTER_NODE=worker-2 zeek --pseudo-realtime -C -r $TRACES/tls/ecdhe.pcap %INPUT" +# @TEST-EXEC: btest-bg-wait 20 +# @TEST-EXEC: btest-diff worker-1/.stdout +# @TEST-EXEC: btest-diff worker-2/.stdout + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth0"], +}; +@TEST-END-FILE + +redef Log::default_rotation_interval = 0secs; +#redef exit_only_after_terminate = T; + +@load base/frameworks/netcontrol + +@if ( Cluster::local_node_type() == Cluster::WORKER ) +event zeek_init() + { + suspend_processing(); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + continue_processing(); + } +@endif + +event NetControl::init() + { + local netcontrol_debug = NetControl::create_debug(T); + NetControl::activate(netcontrol_debug, 0); + } + +event connection_established(c: connection) + { + local id = c$id; + NetControl::shunt_flow([$src_h=id$orig_h, $src_p=id$orig_p, $dst_h=id$resp_h, $dst_p=id$resp_p], 1sec); + NetControl::drop_address(id$orig_h, 1sec); + } + +event terminate_me() { + terminate(); +} + +global peers_lost = 0; + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + ++peers_lost; + + if ( peers_lost == 2 ) + schedule 2sec { terminate_me() }; + } + +event NetControl::rule_added(r: NetControl::Rule, p: NetControl::PluginState, msg: string &default="") + { + print "Rule added", r$id, r$cid; + if ( r$entity?$ip ) + print |NetControl::find_rules_subnet(r$entity$ip)|; + } + +event NetControl::rule_destroyed(r: NetControl::Rule) + { + if ( r$entity?$ip ) + print "Rule destroyed", r$id, r$cid, |NetControl::find_rules_subnet(r$entity$ip)|; + } diff --git a/testing/btest/scripts/base/frameworks/netcontrol/basic.bro b/testing/btest/scripts/base/frameworks/netcontrol/basic.bro deleted file mode 100644 index 1efe420d73..0000000000 --- a/testing/btest/scripts/base/frameworks/netcontrol/basic.bro +++ /dev/null @@ -1,44 +0,0 @@ -# @TEST-EXEC: bro %INPUT -# @TEST-EXEC: btest-diff netcontrol.log -# @TEST-EXEC: btest-diff netcontrol_shunt.log -# @TEST-EXEC: btest-diff netcontrol_drop.log -# @TEST-EXEC: btest-diff .stdout - -@load base/frameworks/netcontrol - -event NetControl::init() - { - local netcontrol_debug = NetControl::create_debug(T); - NetControl::activate(netcontrol_debug, 0); - } - -function test_mac_flow() - { - local flow = NetControl::Flow( - $src_m = "FF:FF:FF:FF:FF:FF" - ); - local e: NetControl::Entity = [$ty=NetControl::FLOW, $flow=flow]; - local r: NetControl::Rule = [$ty=NetControl::DROP, $target=NetControl::FORWARD, $entity=e, $expire=15sec]; - - NetControl::add_rule(r); - } - -function test_mac() - { - local e: NetControl::Entity = [$ty=NetControl::MAC, $mac="FF:FF:FF:FF:FF:FF"]; - local r: NetControl::Rule = [$ty=NetControl::DROP, $target=NetControl::FORWARD, $entity=e, $expire=15sec]; - - 
NetControl::add_rule(r); - } - -event NetControl::init_done() &priority=-5 - { - NetControl::shunt_flow([$src_h=192.168.17.1, $src_p=32/tcp, $dst_h=192.168.17.2, $dst_p=32/tcp], 30sec); - NetControl::drop_address(1.1.2.2, 15sec, "Hi there"); - NetControl::whitelist_address(1.2.3.4, 15sec); - NetControl::redirect_flow([$src_h=192.168.17.1, $src_p=32/tcp, $dst_h=192.168.17.2, $dst_p=32/tcp], 5, 30sec); - NetControl::quarantine_host(127.0.0.2, 8.8.8.8, 127.0.0.3, 15sec); - test_mac(); - test_mac_flow(); - } - diff --git a/testing/btest/scripts/base/frameworks/netcontrol/basic.zeek b/testing/btest/scripts/base/frameworks/netcontrol/basic.zeek new file mode 100644 index 0000000000..b7510e4c2c --- /dev/null +++ b/testing/btest/scripts/base/frameworks/netcontrol/basic.zeek @@ -0,0 +1,44 @@ +# @TEST-EXEC: zeek %INPUT +# @TEST-EXEC: btest-diff netcontrol.log +# @TEST-EXEC: btest-diff netcontrol_shunt.log +# @TEST-EXEC: btest-diff netcontrol_drop.log +# @TEST-EXEC: btest-diff .stdout + +@load base/frameworks/netcontrol + +event NetControl::init() + { + local netcontrol_debug = NetControl::create_debug(T); + NetControl::activate(netcontrol_debug, 0); + } + +function test_mac_flow() + { + local flow = NetControl::Flow( + $src_m = "FF:FF:FF:FF:FF:FF" + ); + local e: NetControl::Entity = [$ty=NetControl::FLOW, $flow=flow]; + local r: NetControl::Rule = [$ty=NetControl::DROP, $target=NetControl::FORWARD, $entity=e, $expire=15sec]; + + NetControl::add_rule(r); + } + +function test_mac() + { + local e: NetControl::Entity = [$ty=NetControl::MAC, $mac="FF:FF:FF:FF:FF:FF"]; + local r: NetControl::Rule = [$ty=NetControl::DROP, $target=NetControl::FORWARD, $entity=e, $expire=15sec]; + + NetControl::add_rule(r); + } + +event NetControl::init_done() &priority=-5 + { + NetControl::shunt_flow([$src_h=192.168.17.1, $src_p=32/tcp, $dst_h=192.168.17.2, $dst_p=32/tcp], 30sec); + NetControl::drop_address(1.1.2.2, 15sec, "Hi there"); + NetControl::whitelist_address(1.2.3.4, 15sec); + NetControl::redirect_flow([$src_h=192.168.17.1, $src_p=32/tcp, $dst_h=192.168.17.2, $dst_p=32/tcp], 5, 30sec); + NetControl::quarantine_host(127.0.0.2, 8.8.8.8, 127.0.0.3, 15sec); + test_mac(); + test_mac_flow(); + } + diff --git a/testing/btest/scripts/base/frameworks/netcontrol/broker.bro b/testing/btest/scripts/base/frameworks/netcontrol/broker.bro deleted file mode 100644 index 4d232c3325..0000000000 --- a/testing/btest/scripts/base/frameworks/netcontrol/broker.bro +++ /dev/null @@ -1,130 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# @TEST-EXEC: btest-bg-run recv "bro -b ../recv.bro >recv.out" -# @TEST-EXEC: btest-bg-run send "bro -b -r $TRACES/smtp.trace --pseudo-realtime ../send.bro >send.out" - -# @TEST-EXEC: btest-bg-wait 20 -# @TEST-EXEC: btest-diff send/netcontrol.log -# @TEST-EXEC: btest-diff recv/recv.out -# @TEST-EXEC: btest-diff send/send.out - -@TEST-START-FILE send.bro - -@load base/frameworks/netcontrol - -redef exit_only_after_terminate = T; -global have_peer = F; -global did_init = F; - -event bro_init() - { - suspend_processing(); - } - -event NetControl::init() - { - local netcontrol_broker = NetControl::create_broker(NetControl::BrokerConfig($host=127.0.0.1, $bport=to_port(getenv("BROKER_PORT")), $topic="bro/event/netcontroltest"), T); - NetControl::activate(netcontrol_broker, 0); - } - -event NetControl::init_done() - { - did_init = T; - - if ( did_init && have_peer ) - continue_processing(); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print "Broker peer added", endpoint$network$address, 
endpoint$network$bound_port == to_port(getenv("BROKER_PORT")); - have_peer = T; - - if ( did_init && have_peer ) - continue_processing(); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -event connection_established(c: connection) - { - local id = c$id; - NetControl::shunt_flow([$src_h=id$orig_h, $src_p=id$orig_p, $dst_h=id$resp_h, $dst_p=id$resp_p], 10hrs); - NetControl::drop_address(id$orig_h, 10hrs); - } - -event NetControl::rule_added(r: NetControl::Rule, p: NetControl::PluginState, msg: string) - { - print "rule added", r$entity, r$ty; - NetControl::remove_rule(r$id, "removing"); - } - -event NetControl::rule_exists(r: NetControl::Rule, p: NetControl::PluginState, msg: string) - { - print "rule exists", r$entity, r$ty; - } - -event NetControl::rule_removed(r: NetControl::Rule, p: NetControl::PluginState, msg: string) - { - print "rule removed", r$entity, r$ty; - } - -event NetControl::rule_timeout(r: NetControl::Rule, i: NetControl::FlowInfo, p: NetControl::PluginState) - { - print "rule timeout", r$entity, r$ty, i; - } - -@TEST-END-FILE - -@TEST-START-FILE recv.bro - -@load base/frameworks/netcontrol -@load base/frameworks/broker - -redef exit_only_after_terminate = T; - -event die() - { - terminate(); - } - -event bro_init() - { - Broker::subscribe("bro/event/netcontroltest"); - Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print "Broker peer added"; - } - -event NetControl::broker_add_rule(id: count, r: NetControl::Rule) - { - print "add_rule", id, r$entity, r$ty; - - if ( r$cid == 3 ) - Broker::publish("bro/event/netcontroltest", NetControl::broker_rule_added, id, r, ""); - if ( r$cid == 2 ) - Broker::publish("bro/event/netcontroltest", NetControl::broker_rule_exists, id, r, ""); - - if ( r$cid == 2 ) - Broker::publish("bro/event/netcontroltest", NetControl::broker_rule_timeout, id, r, NetControl::FlowInfo()); - } - -event NetControl::broker_remove_rule(id: count, r: NetControl::Rule, reason: string) - { - print "remove_rule", id, r$entity, r$ty, reason; - - Broker::publish("bro/event/netcontroltest", NetControl::broker_rule_removed, id, r, ""); - - if ( r$cid == 3 ) - { - schedule 2sec { die() }; - } - } - -@TEST-END-FILE - diff --git a/testing/btest/scripts/base/frameworks/netcontrol/broker.zeek b/testing/btest/scripts/base/frameworks/netcontrol/broker.zeek new file mode 100644 index 0000000000..4773a3fa91 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/netcontrol/broker.zeek @@ -0,0 +1,130 @@ +# @TEST-PORT: BROKER_PORT +# @TEST-EXEC: btest-bg-run recv "zeek -b ../recv.zeek >recv.out" +# @TEST-EXEC: btest-bg-run send "zeek -b -r $TRACES/smtp.trace --pseudo-realtime ../send.zeek >send.out" + +# @TEST-EXEC: btest-bg-wait 20 +# @TEST-EXEC: btest-diff send/netcontrol.log +# @TEST-EXEC: btest-diff recv/recv.out +# @TEST-EXEC: btest-diff send/send.out + +@TEST-START-FILE send.zeek + +@load base/frameworks/netcontrol + +redef exit_only_after_terminate = T; +global have_peer = F; +global did_init = F; + +event zeek_init() + { + suspend_processing(); + } + +event NetControl::init() + { + local netcontrol_broker = NetControl::create_broker(NetControl::BrokerConfig($host=127.0.0.1, $bport=to_port(getenv("BROKER_PORT")), $topic="zeek/event/netcontroltest"), T); + NetControl::activate(netcontrol_broker, 0); + } + +event NetControl::init_done() + { + did_init = T; + + if ( did_init && have_peer ) + continue_processing(); + } + +event 
Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print "Broker peer added", endpoint$network$address, endpoint$network$bound_port == to_port(getenv("BROKER_PORT")); + have_peer = T; + + if ( did_init && have_peer ) + continue_processing(); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +event connection_established(c: connection) + { + local id = c$id; + NetControl::shunt_flow([$src_h=id$orig_h, $src_p=id$orig_p, $dst_h=id$resp_h, $dst_p=id$resp_p], 10hrs); + NetControl::drop_address(id$orig_h, 10hrs); + } + +event NetControl::rule_added(r: NetControl::Rule, p: NetControl::PluginState, msg: string) + { + print "rule added", r$entity, r$ty; + NetControl::remove_rule(r$id, "removing"); + } + +event NetControl::rule_exists(r: NetControl::Rule, p: NetControl::PluginState, msg: string) + { + print "rule exists", r$entity, r$ty; + } + +event NetControl::rule_removed(r: NetControl::Rule, p: NetControl::PluginState, msg: string) + { + print "rule removed", r$entity, r$ty; + } + +event NetControl::rule_timeout(r: NetControl::Rule, i: NetControl::FlowInfo, p: NetControl::PluginState) + { + print "rule timeout", r$entity, r$ty, i; + } + +@TEST-END-FILE + +@TEST-START-FILE recv.zeek + +@load base/frameworks/netcontrol +@load base/frameworks/broker + +redef exit_only_after_terminate = T; + +event die() + { + terminate(); + } + +event zeek_init() + { + Broker::subscribe("zeek/event/netcontroltest"); + Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print "Broker peer added"; + } + +event NetControl::broker_add_rule(id: count, r: NetControl::Rule) + { + print "add_rule", id, r$entity, r$ty; + + if ( r$cid == 3 ) + Broker::publish("zeek/event/netcontroltest", NetControl::broker_rule_added, id, r, ""); + if ( r$cid == 2 ) + Broker::publish("zeek/event/netcontroltest", NetControl::broker_rule_exists, id, r, ""); + + if ( r$cid == 2 ) + Broker::publish("zeek/event/netcontroltest", NetControl::broker_rule_timeout, id, r, NetControl::FlowInfo()); + } + +event NetControl::broker_remove_rule(id: count, r: NetControl::Rule, reason: string) + { + print "remove_rule", id, r$entity, r$ty, reason; + + Broker::publish("zeek/event/netcontroltest", NetControl::broker_rule_removed, id, r, ""); + + if ( r$cid == 3 ) + { + schedule 2sec { die() }; + } + } + +@TEST-END-FILE + diff --git a/testing/btest/scripts/base/frameworks/netcontrol/catch-and-release-forgotten.bro b/testing/btest/scripts/base/frameworks/netcontrol/catch-and-release-forgotten.bro deleted file mode 100644 index dd5e71f1fe..0000000000 --- a/testing/btest/scripts/base/frameworks/netcontrol/catch-and-release-forgotten.bro +++ /dev/null @@ -1,26 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/smtp.trace %INPUT -# @TEST-EXEC: btest-diff netcontrol_catch_release.log -# @TEST-EXEC: btest-diff .stdout - -@load base/frameworks/netcontrol - -redef NetControl::catch_release_intervals = vector(1sec, 2sec, 2sec); - -event NetControl::init() - { - local netcontrol_debug = NetControl::create_debug(T); - NetControl::activate(netcontrol_debug, 0); - } - -global pc: count = 0; - -event new_packet(c: connection, p: pkt_hdr) - { - if ( ++pc == 1 ) - NetControl::drop_address_catch_release(10.0.0.1); - } - -event NetControl::catch_release_forgotten(a: addr, bi: NetControl::BlockInfo) - { - print "Forgotten: ", a, bi; - } diff --git a/testing/btest/scripts/base/frameworks/netcontrol/catch-and-release.bro 
b/testing/btest/scripts/base/frameworks/netcontrol/catch-and-release.bro deleted file mode 100644 index 29c56c2535..0000000000 --- a/testing/btest/scripts/base/frameworks/netcontrol/catch-and-release.bro +++ /dev/null @@ -1,61 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/tls/ecdhe.pcap %INPUT -# @TEST-EXEC: TEST_DIFF_CANONIFIER='grep -v ^# | $SCRIPTS/diff-remove-timestamps' btest-diff netcontrol.log -# @TEST-EXEC: btest-diff netcontrol_catch_release.log - -@load base/frameworks/netcontrol - -event NetControl::init() - { - local netcontrol_debug = NetControl::create_debug(T); - NetControl::activate(netcontrol_debug, 0); - } - -global i: count = 0; - -event connection_established(c: connection) - { - local id = c$id; - NetControl::drop_address_catch_release(id$orig_h); - # second one should be ignored because duplicate - NetControl::drop_address_catch_release(id$orig_h); - } - -event NetControl::rule_added(r: NetControl::Rule, p: NetControl::PluginState, msg: string &default="") - { - if ( ++i == 6 ) - return; - - # delete directly, without notifying anything. - NetControl::delete_rule(r$id, "testing"); - NetControl::catch_release_seen(subnet_to_addr(r$entity$ip)); - } - -@TEST-START-NEXT - -@load base/frameworks/netcontrol - -event NetControl::init() - { - local netcontrol_debug = NetControl::create_debug(T); - NetControl::activate(netcontrol_debug, 0); - } - -global i: count = 0; - -event connection_established(c: connection) - { - local id = c$id; - NetControl::drop_address(id$orig_h, 2min); - NetControl::drop_address_catch_release(id$orig_h, "test drop"); - } - -event NetControl::rule_added(r: NetControl::Rule, p: NetControl::PluginState, msg: string &default="") - { - if ( ++i == 3 ) - return; - - # delete directly, without notifying anything. - NetControl::delete_rule(r$id); - NetControl::catch_release_seen(subnet_to_addr(r$entity$ip)); - } - diff --git a/testing/btest/scripts/base/frameworks/netcontrol/delete-internal-state.bro b/testing/btest/scripts/base/frameworks/netcontrol/delete-internal-state.bro deleted file mode 100644 index 29cb439a64..0000000000 --- a/testing/btest/scripts/base/frameworks/netcontrol/delete-internal-state.bro +++ /dev/null @@ -1,54 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/tls/ecdhe.pcap %INPUT -# @TEST-EXEC: btest-diff .stdout - -# Verify the state of internal tables after rules have been deleted... 
- -@load base/frameworks/netcontrol - -module NetControl; - -export { - global dump_state: function(); -} - -function dump_state() - { - print "Dumping state"; - print rules; - print rule_entities; - print rules_by_subnets; - } - -module GLOBAL; - -global rules: vector of string; - -event NetControl::init() - { - local netcontrol_debug = NetControl::create_debug(T); - NetControl::activate(netcontrol_debug, 10); - } - -event remove_all() - { - for ( i in rules ) - NetControl::remove_rule(rules[i]); - } - -event dump_info() - { - NetControl::dump_state(); - } - -event connection_established(c: connection) - { - local id = c$id; - rules += NetControl::shunt_flow([$src_h=id$orig_h, $src_p=id$orig_p, $dst_h=id$resp_h, $dst_p=id$resp_p], 0secs); - rules += NetControl::drop_address(id$orig_h, 0secs); - rules += NetControl::whitelist_address(id$orig_h, 0secs); - rules += NetControl::redirect_flow([$src_h=id$orig_h, $src_p=id$orig_p, $dst_h=id$resp_h, $dst_p=id$resp_p], 5, 0secs); - - schedule 1sec { remove_all() }; - schedule 2sec { dump_info() }; - } - diff --git a/testing/btest/scripts/base/frameworks/netcontrol/delete-internal-state.zeek b/testing/btest/scripts/base/frameworks/netcontrol/delete-internal-state.zeek new file mode 100644 index 0000000000..935142b33c --- /dev/null +++ b/testing/btest/scripts/base/frameworks/netcontrol/delete-internal-state.zeek @@ -0,0 +1,54 @@ +# @TEST-EXEC: zeek -r $TRACES/tls/ecdhe.pcap %INPUT +# @TEST-EXEC: btest-diff .stdout + +# Verify the state of internal tables after rules have been deleted... + +@load base/frameworks/netcontrol + +module NetControl; + +export { + global dump_state: function(); +} + +function dump_state() + { + print "Dumping state"; + print rules; + print rule_entities; + print rules_by_subnets; + } + +module GLOBAL; + +global rules: vector of string; + +event NetControl::init() + { + local netcontrol_debug = NetControl::create_debug(T); + NetControl::activate(netcontrol_debug, 10); + } + +event remove_all() + { + for ( i in rules ) + NetControl::remove_rule(rules[i]); + } + +event dump_info() + { + NetControl::dump_state(); + } + +event connection_established(c: connection) + { + local id = c$id; + rules += NetControl::shunt_flow([$src_h=id$orig_h, $src_p=id$orig_p, $dst_h=id$resp_h, $dst_p=id$resp_p], 0secs); + rules += NetControl::drop_address(id$orig_h, 0secs); + rules += NetControl::whitelist_address(id$orig_h, 0secs); + rules += NetControl::redirect_flow([$src_h=id$orig_h, $src_p=id$orig_p, $dst_h=id$resp_h, $dst_p=id$resp_p], 5, 0secs); + + schedule 1sec { remove_all() }; + schedule 2sec { dump_info() }; + } + diff --git a/testing/btest/scripts/base/frameworks/netcontrol/duplicate.bro b/testing/btest/scripts/base/frameworks/netcontrol/duplicate.bro deleted file mode 100644 index c64bd9e16b..0000000000 --- a/testing/btest/scripts/base/frameworks/netcontrol/duplicate.bro +++ /dev/null @@ -1,15 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/tls/google-duplicate.trace %INPUT -# @TEST-EXEC: btest-diff netcontrol.log - -@load base/frameworks/netcontrol - -event NetControl::init() - { - local netcontrol_debug = NetControl::create_debug(T); - NetControl::activate(netcontrol_debug, 0); - } - -event connection_established(c: connection) - { - NetControl::drop_address(c$id$orig_h, 0secs); - } diff --git a/testing/btest/scripts/base/frameworks/netcontrol/duplicate.zeek b/testing/btest/scripts/base/frameworks/netcontrol/duplicate.zeek new file mode 100644 index 0000000000..a5e03add55 --- /dev/null +++ 
b/testing/btest/scripts/base/frameworks/netcontrol/duplicate.zeek @@ -0,0 +1,15 @@ +# @TEST-EXEC: zeek -b -r $TRACES/tls/google-duplicate.trace %INPUT +# @TEST-EXEC: btest-diff netcontrol.log + +@load base/frameworks/netcontrol + +event NetControl::init() + { + local netcontrol_debug = NetControl::create_debug(T); + NetControl::activate(netcontrol_debug, 0); + } + +event connection_established(c: connection) + { + NetControl::drop_address(c$id$orig_h, 0secs); + } diff --git a/testing/btest/scripts/base/frameworks/netcontrol/find-rules.bro b/testing/btest/scripts/base/frameworks/netcontrol/find-rules.bro deleted file mode 100644 index e7bb61cc04..0000000000 --- a/testing/btest/scripts/base/frameworks/netcontrol/find-rules.bro +++ /dev/null @@ -1,34 +0,0 @@ -# @TEST-EXEC: bro %INPUT -# @TEST-EXEC: btest-diff out - -@load base/frameworks/netcontrol - -global outfile: file; - -event NetControl::init() - { - local netcontrol_debug = NetControl::create_debug(T); - NetControl::activate(netcontrol_debug, 0); - } - -event NetControl::init_done() &priority=-5 - { - NetControl::shunt_flow([$src_h=192.168.17.1, $src_p=32/tcp, $dst_h=192.168.17.2, $dst_p=32/tcp], 30sec); - NetControl::drop_address(1.1.2.2, 15sec, "Hi there"); - NetControl::whitelist_address(1.2.3.4, 15sec); - NetControl::redirect_flow([$src_h=192.168.17.1, $src_p=32/tcp, $dst_h=192.168.17.2, $dst_p=32/tcp], 5, 30sec); - NetControl::quarantine_host(127.0.0.2, 8.8.8.8, 127.0.0.3, 15sec); - - outfile = open("out"); - local rules = NetControl::find_rules_addr(1.2.3.4); - print outfile, |rules|; - print outfile, rules[0]$entity; - rules = NetControl::find_rules_addr(1.2.3.5); - print outfile, |rules|; - rules = NetControl::find_rules_addr(127.0.0.2); - print outfile, |rules|; - print outfile, rules[0]$entity, rules[0]$ty; - print outfile, rules[3]$entity, rules[3]$ty; - close(outfile); - } - diff --git a/testing/btest/scripts/base/frameworks/netcontrol/find-rules.zeek b/testing/btest/scripts/base/frameworks/netcontrol/find-rules.zeek new file mode 100644 index 0000000000..09694cc1f8 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/netcontrol/find-rules.zeek @@ -0,0 +1,34 @@ +# @TEST-EXEC: zeek %INPUT +# @TEST-EXEC: btest-diff out + +@load base/frameworks/netcontrol + +global outfile: file; + +event NetControl::init() + { + local netcontrol_debug = NetControl::create_debug(T); + NetControl::activate(netcontrol_debug, 0); + } + +event NetControl::init_done() &priority=-5 + { + NetControl::shunt_flow([$src_h=192.168.17.1, $src_p=32/tcp, $dst_h=192.168.17.2, $dst_p=32/tcp], 30sec); + NetControl::drop_address(1.1.2.2, 15sec, "Hi there"); + NetControl::whitelist_address(1.2.3.4, 15sec); + NetControl::redirect_flow([$src_h=192.168.17.1, $src_p=32/tcp, $dst_h=192.168.17.2, $dst_p=32/tcp], 5, 30sec); + NetControl::quarantine_host(127.0.0.2, 8.8.8.8, 127.0.0.3, 15sec); + + outfile = open("out"); + local rules = NetControl::find_rules_addr(1.2.3.4); + print outfile, |rules|; + print outfile, rules[0]$entity; + rules = NetControl::find_rules_addr(1.2.3.5); + print outfile, |rules|; + rules = NetControl::find_rules_addr(127.0.0.2); + print outfile, |rules|; + print outfile, rules[0]$entity, rules[0]$ty; + print outfile, rules[3]$entity, rules[3]$ty; + close(outfile); + } + diff --git a/testing/btest/scripts/base/frameworks/netcontrol/hook.bro b/testing/btest/scripts/base/frameworks/netcontrol/hook.bro deleted file mode 100644 index 02056a1e0a..0000000000 --- a/testing/btest/scripts/base/frameworks/netcontrol/hook.bro +++ /dev/null @@ -1,27 +0,0 
@@ -# @TEST-EXEC: bro -r $TRACES/tls/ecdhe.pcap %INPUT -# @TEST-EXEC: btest-diff netcontrol.log - -@load base/frameworks/netcontrol - -event NetControl::init() - { - local netcontrol_debug = NetControl::create_debug(T); - NetControl::activate(netcontrol_debug, 0); - } - -event connection_established(c: connection) - { - local id = c$id; - NetControl::shunt_flow([$src_h=id$orig_h, $src_p=id$orig_p, $dst_h=id$resp_h, $dst_p=id$resp_p], 30sec); - NetControl::drop_address(id$orig_h, 15sec); - NetControl::whitelist_address(id$orig_h, 15sec); - NetControl::redirect_flow([$src_h=id$orig_h, $src_p=id$orig_p, $dst_h=id$resp_h, $dst_p=id$resp_p], 5, 30sec); - } - -hook NetControl::rule_policy(r: NetControl::Rule) - { - if ( r$expire == 15sec ) - break; - - r$entity$flow$src_h = 0.0.0.0/0; - } diff --git a/testing/btest/scripts/base/frameworks/netcontrol/hook.zeek b/testing/btest/scripts/base/frameworks/netcontrol/hook.zeek new file mode 100644 index 0000000000..e12599db83 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/netcontrol/hook.zeek @@ -0,0 +1,27 @@ +# @TEST-EXEC: zeek -r $TRACES/tls/ecdhe.pcap %INPUT +# @TEST-EXEC: btest-diff netcontrol.log + +@load base/frameworks/netcontrol + +event NetControl::init() + { + local netcontrol_debug = NetControl::create_debug(T); + NetControl::activate(netcontrol_debug, 0); + } + +event connection_established(c: connection) + { + local id = c$id; + NetControl::shunt_flow([$src_h=id$orig_h, $src_p=id$orig_p, $dst_h=id$resp_h, $dst_p=id$resp_p], 30sec); + NetControl::drop_address(id$orig_h, 15sec); + NetControl::whitelist_address(id$orig_h, 15sec); + NetControl::redirect_flow([$src_h=id$orig_h, $src_p=id$orig_p, $dst_h=id$resp_h, $dst_p=id$resp_p], 5, 30sec); + } + +hook NetControl::rule_policy(r: NetControl::Rule) + { + if ( r$expire == 15sec ) + break; + + r$entity$flow$src_h = 0.0.0.0/0; + } diff --git a/testing/btest/scripts/base/frameworks/netcontrol/multiple.bro b/testing/btest/scripts/base/frameworks/netcontrol/multiple.bro deleted file mode 100644 index d56c8e2468..0000000000 --- a/testing/btest/scripts/base/frameworks/netcontrol/multiple.bro +++ /dev/null @@ -1,37 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/tls/ecdhe.pcap %INPUT -# @TEST-EXEC: TEST_DIFF_CANONIFIER='grep -v ^# | $SCRIPTS/diff-sort' btest-diff netcontrol.log -# @TEST-EXEC: btest-diff openflow.log - -@load base/frameworks/netcontrol - -global rules: vector of string; - -event NetControl::init() - { - local netcontrol_debug = NetControl::create_debug(T); - local netcontrol_debug_2 = NetControl::create_debug(T); - local of_controller = OpenFlow::log_new(42); - local netcontrol_of = NetControl::create_openflow(of_controller); - NetControl::activate(netcontrol_debug, 10); - NetControl::activate(netcontrol_of, 10); - NetControl::activate(netcontrol_debug_2, 0); - } - -event remove_all() - { - for ( i in rules ) - NetControl::remove_rule(rules[i]); - } - - -event connection_established(c: connection) - { - local id = c$id; - rules += NetControl::shunt_flow([$src_h=id$orig_h, $src_p=id$orig_p, $dst_h=id$resp_h, $dst_p=id$resp_p], 0secs); - rules += NetControl::drop_address(id$orig_h, 0secs); - rules += NetControl::whitelist_address(id$orig_h, 0secs); - rules += NetControl::redirect_flow([$src_h=id$orig_h, $src_p=id$orig_p, $dst_h=id$resp_h, $dst_p=id$resp_p], 5, 0secs); - - schedule 1sec { remove_all() }; - } - diff --git a/testing/btest/scripts/base/frameworks/netcontrol/multiple.zeek b/testing/btest/scripts/base/frameworks/netcontrol/multiple.zeek new file mode 100644 index 
0000000000..4fc05d4f45 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/netcontrol/multiple.zeek @@ -0,0 +1,37 @@ +# @TEST-EXEC: zeek -r $TRACES/tls/ecdhe.pcap %INPUT +# @TEST-EXEC: TEST_DIFF_CANONIFIER='grep -v ^# | $SCRIPTS/diff-sort' btest-diff netcontrol.log +# @TEST-EXEC: btest-diff openflow.log + +@load base/frameworks/netcontrol + +global rules: vector of string; + +event NetControl::init() + { + local netcontrol_debug = NetControl::create_debug(T); + local netcontrol_debug_2 = NetControl::create_debug(T); + local of_controller = OpenFlow::log_new(42); + local netcontrol_of = NetControl::create_openflow(of_controller); + NetControl::activate(netcontrol_debug, 10); + NetControl::activate(netcontrol_of, 10); + NetControl::activate(netcontrol_debug_2, 0); + } + +event remove_all() + { + for ( i in rules ) + NetControl::remove_rule(rules[i]); + } + + +event connection_established(c: connection) + { + local id = c$id; + rules += NetControl::shunt_flow([$src_h=id$orig_h, $src_p=id$orig_p, $dst_h=id$resp_h, $dst_p=id$resp_p], 0secs); + rules += NetControl::drop_address(id$orig_h, 0secs); + rules += NetControl::whitelist_address(id$orig_h, 0secs); + rules += NetControl::redirect_flow([$src_h=id$orig_h, $src_p=id$orig_p, $dst_h=id$resp_h, $dst_p=id$resp_p], 5, 0secs); + + schedule 1sec { remove_all() }; + } + diff --git a/testing/btest/scripts/base/frameworks/netcontrol/openflow.bro b/testing/btest/scripts/base/frameworks/netcontrol/openflow.bro deleted file mode 100644 index 36c06fcc3d..0000000000 --- a/testing/btest/scripts/base/frameworks/netcontrol/openflow.bro +++ /dev/null @@ -1,21 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/smtp.trace %INPUT -# @TEST-EXEC: btest-diff netcontrol.log -# @TEST-EXEC: btest-diff openflow.log - -@load base/frameworks/netcontrol - -global of_controller: OpenFlow::Controller; - -event NetControl::init() - { - of_controller = OpenFlow::log_new(42); - local netcontrol_of = NetControl::create_openflow(of_controller); - NetControl::activate(netcontrol_of, 0); - } - -event connection_established(c: connection) - { - local id = c$id; - NetControl::shunt_flow([$src_h=id$orig_h, $src_p=id$orig_p, $dst_h=id$resp_h, $dst_p=id$resp_p], 30sec); - NetControl::drop_address(id$resp_h, 15sec); - } diff --git a/testing/btest/scripts/base/frameworks/netcontrol/openflow.zeek b/testing/btest/scripts/base/frameworks/netcontrol/openflow.zeek new file mode 100644 index 0000000000..04cd1302b3 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/netcontrol/openflow.zeek @@ -0,0 +1,21 @@ +# @TEST-EXEC: zeek -r $TRACES/smtp.trace %INPUT +# @TEST-EXEC: btest-diff netcontrol.log +# @TEST-EXEC: btest-diff openflow.log + +@load base/frameworks/netcontrol + +global of_controller: OpenFlow::Controller; + +event NetControl::init() + { + of_controller = OpenFlow::log_new(42); + local netcontrol_of = NetControl::create_openflow(of_controller); + NetControl::activate(netcontrol_of, 0); + } + +event connection_established(c: connection) + { + local id = c$id; + NetControl::shunt_flow([$src_h=id$orig_h, $src_p=id$orig_p, $dst_h=id$resp_h, $dst_p=id$resp_p], 30sec); + NetControl::drop_address(id$resp_h, 15sec); + } diff --git a/testing/btest/scripts/base/frameworks/netcontrol/packetfilter.bro b/testing/btest/scripts/base/frameworks/netcontrol/packetfilter.bro deleted file mode 100644 index 46a1193a21..0000000000 --- a/testing/btest/scripts/base/frameworks/netcontrol/packetfilter.bro +++ /dev/null @@ -1,18 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/smtp.trace %INPUT -# @TEST-EXEC: btest-diff 
conn.log - -@load base/frameworks/netcontrol - -event NetControl::init() - { - local netcontrol_packetfilter = NetControl::create_packetfilter(); - NetControl::activate(netcontrol_packetfilter, 0); - } - -event connection_established(c: connection) - { - local e = NetControl::Entity($ty=NetControl::ADDRESS, $ip=addr_to_subnet(c$id$orig_h)); - local r = NetControl::Rule($ty=NetControl::DROP, $target=NetControl::MONITOR, $entity=e, $expire=10min); - - NetControl::add_rule(r); - } diff --git a/testing/btest/scripts/base/frameworks/netcontrol/packetfilter.zeek b/testing/btest/scripts/base/frameworks/netcontrol/packetfilter.zeek new file mode 100644 index 0000000000..ac8a3f5c0a --- /dev/null +++ b/testing/btest/scripts/base/frameworks/netcontrol/packetfilter.zeek @@ -0,0 +1,18 @@ +# @TEST-EXEC: zeek -r $TRACES/smtp.trace %INPUT +# @TEST-EXEC: btest-diff conn.log + +@load base/frameworks/netcontrol + +event NetControl::init() + { + local netcontrol_packetfilter = NetControl::create_packetfilter(); + NetControl::activate(netcontrol_packetfilter, 0); + } + +event connection_established(c: connection) + { + local e = NetControl::Entity($ty=NetControl::ADDRESS, $ip=addr_to_subnet(c$id$orig_h)); + local r = NetControl::Rule($ty=NetControl::DROP, $target=NetControl::MONITOR, $entity=e, $expire=10min); + + NetControl::add_rule(r); + } diff --git a/testing/btest/scripts/base/frameworks/netcontrol/quarantine-openflow.bro b/testing/btest/scripts/base/frameworks/netcontrol/quarantine-openflow.bro deleted file mode 100644 index 9356253c98..0000000000 --- a/testing/btest/scripts/base/frameworks/netcontrol/quarantine-openflow.bro +++ /dev/null @@ -1,19 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/tls/ecdhe.pcap %INPUT -# @TEST-EXEC: btest-diff netcontrol.log -# @TEST-EXEC: btest-diff openflow.log - -@load base/frameworks/netcontrol - -global of_controller: OpenFlow::Controller; - -event NetControl::init() - { - of_controller = OpenFlow::log_new(42); - local netcontrol_of = NetControl::create_openflow(of_controller); - NetControl::activate(netcontrol_of, 0); - } - -event connection_established(c: connection) - { - NetControl::quarantine_host(c$id$orig_h, 8.8.8.8, 192.169.18.1, 10hrs); - } diff --git a/testing/btest/scripts/base/frameworks/netcontrol/quarantine-openflow.zeek b/testing/btest/scripts/base/frameworks/netcontrol/quarantine-openflow.zeek new file mode 100644 index 0000000000..71ef2b3efe --- /dev/null +++ b/testing/btest/scripts/base/frameworks/netcontrol/quarantine-openflow.zeek @@ -0,0 +1,19 @@ +# @TEST-EXEC: zeek -r $TRACES/tls/ecdhe.pcap %INPUT +# @TEST-EXEC: btest-diff netcontrol.log +# @TEST-EXEC: btest-diff openflow.log + +@load base/frameworks/netcontrol + +global of_controller: OpenFlow::Controller; + +event NetControl::init() + { + of_controller = OpenFlow::log_new(42); + local netcontrol_of = NetControl::create_openflow(of_controller); + NetControl::activate(netcontrol_of, 0); + } + +event connection_established(c: connection) + { + NetControl::quarantine_host(c$id$orig_h, 8.8.8.8, 192.169.18.1, 10hrs); + } diff --git a/testing/btest/scripts/base/frameworks/netcontrol/timeout.bro b/testing/btest/scripts/base/frameworks/netcontrol/timeout.bro deleted file mode 100644 index e308205ffc..0000000000 --- a/testing/btest/scripts/base/frameworks/netcontrol/timeout.bro +++ /dev/null @@ -1,15 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/tls/ecdhe.pcap --pseudo-realtime %INPUT -# @TEST-EXEC: btest-diff netcontrol.log - -@load base/frameworks/netcontrol - -event NetControl::init() - { - local netcontrol_debug = 
NetControl::create_debug(T); - NetControl::activate(netcontrol_debug, 0); - } - -event connection_established(c: connection) - { - NetControl::drop_address(c$id$orig_h, 1secs); - } diff --git a/testing/btest/scripts/base/frameworks/netcontrol/timeout.zeek b/testing/btest/scripts/base/frameworks/netcontrol/timeout.zeek new file mode 100644 index 0000000000..bc7de9dd3a --- /dev/null +++ b/testing/btest/scripts/base/frameworks/netcontrol/timeout.zeek @@ -0,0 +1,15 @@ +# @TEST-EXEC: zeek -b -r $TRACES/tls/ecdhe.pcap --pseudo-realtime %INPUT +# @TEST-EXEC: btest-diff netcontrol.log + +@load base/frameworks/netcontrol + +event NetControl::init() + { + local netcontrol_debug = NetControl::create_debug(T); + NetControl::activate(netcontrol_debug, 0); + } + +event connection_established(c: connection) + { + NetControl::drop_address(c$id$orig_h, 1secs); + } diff --git a/testing/btest/scripts/base/frameworks/notice/cluster.bro b/testing/btest/scripts/base/frameworks/notice/cluster.bro deleted file mode 100644 index 69d1ac8364..0000000000 --- a/testing/btest/scripts/base/frameworks/notice/cluster.bro +++ /dev/null @@ -1,58 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-wait 20 -# @TEST-EXEC: btest-diff manager-1/notice.log - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth0"], -}; -@TEST-END-FILE - -redef Log::default_rotation_interval = 0secs; - -redef enum Notice::Type += { - Test_Notice, -}; - -event Cluster::node_down(name: string, id: string) - { - terminate(); - } - -event delayed_notice() - { - if ( Cluster::node == "worker-1" ) - NOTICE([$note=Test_Notice, $msg="test notice!"]); - } - -event ready() - { - schedule 1secs { delayed_notice() }; - } - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) - -global peer_count = 0; - -event Cluster::node_up(name: string, id: string) - { - peer_count = peer_count + 1; - - if ( peer_count == 2 ) - Broker::publish(Cluster::worker_topic, ready); - } - -event Notice::log_notice(rec: Notice::Info) - { - terminate(); - } - -@endif diff --git a/testing/btest/scripts/base/frameworks/notice/cluster.zeek b/testing/btest/scripts/base/frameworks/notice/cluster.zeek new file mode 100644 index 0000000000..06160f0309 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/notice/cluster.zeek @@ -0,0 +1,58 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run proxy-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=proxy-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-1 ZEEKPATH=$ZEEKPATH:.. 
CLUSTER_NODE=worker-1 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 20 +# @TEST-EXEC: btest-diff manager-1/notice.log + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth0"], +}; +@TEST-END-FILE + +redef Log::default_rotation_interval = 0secs; + +redef enum Notice::Type += { + Test_Notice, +}; + +event Cluster::node_down(name: string, id: string) + { + terminate(); + } + +event delayed_notice() + { + if ( Cluster::node == "worker-1" ) + NOTICE([$note=Test_Notice, $msg="test notice!"]); + } + +event ready() + { + schedule 1secs { delayed_notice() }; + } + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + +global peer_count = 0; + +event Cluster::node_up(name: string, id: string) + { + peer_count = peer_count + 1; + + if ( peer_count == 2 ) + Broker::publish(Cluster::worker_topic, ready); + } + +event Notice::log_notice(rec: Notice::Info) + { + terminate(); + } + +@endif diff --git a/testing/btest/scripts/base/frameworks/notice/default-policy-order.test b/testing/btest/scripts/base/frameworks/notice/default-policy-order.test index d5d3f4c3fa..7daffc2ea0 100644 --- a/testing/btest/scripts/base/frameworks/notice/default-policy-order.test +++ b/testing/btest/scripts/base/frameworks/notice/default-policy-order.test @@ -1,10 +1,10 @@ # This test checks that the default notice policy ordering does not # change from run to run. -# @TEST-EXEC: bro -e '' +# @TEST-EXEC: zeek -e '' # @TEST-EXEC: cat notice_policy.log | $SCRIPTS/diff-remove-timestamps > notice_policy.log.1 -# @TEST-EXEC: bro -e '' +# @TEST-EXEC: zeek -e '' # @TEST-EXEC: cat notice_policy.log | $SCRIPTS/diff-remove-timestamps > notice_policy.log.2 -# @TEST-EXEC: bro -e '' +# @TEST-EXEC: zeek -e '' # @TEST-EXEC: cat notice_policy.log | $SCRIPTS/diff-remove-timestamps > notice_policy.log.3 # @TEST-EXEC: diff notice_policy.log.1 notice_policy.log.2 # @TEST-EXEC: diff notice_policy.log.1 notice_policy.log.3 diff --git a/testing/btest/scripts/base/frameworks/notice/mail-alarms.bro b/testing/btest/scripts/base/frameworks/notice/mail-alarms.bro deleted file mode 100644 index 0970ec0c76..0000000000 --- a/testing/btest/scripts/base/frameworks/notice/mail-alarms.bro +++ /dev/null @@ -1,21 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/web.trace %INPUT -# @TEST-EXEC: btest-diff alarm-mail.txt - -hook Notice::policy(n: Notice::Info) &priority=1 - { - add n$actions[Notice::ACTION_ALARM]; - } - -redef Notice::force_email_summaries = T; - -redef enum Notice::Type += { - Test_Notice, -}; - -event connection_established(c: connection) - { - NOTICE([$note=Test_Notice, $conn=c, $msg="test", $identifier="static"]); - } - - - diff --git a/testing/btest/scripts/base/frameworks/notice/mail-alarms.zeek b/testing/btest/scripts/base/frameworks/notice/mail-alarms.zeek new file mode 100644 index 0000000000..373d773bd2 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/notice/mail-alarms.zeek @@ -0,0 +1,21 @@ +# @TEST-EXEC: zeek -C -r $TRACES/web.trace %INPUT +# @TEST-EXEC: btest-diff alarm-mail.txt + +hook Notice::policy(n: Notice::Info) &priority=1 + { + add n$actions[Notice::ACTION_ALARM]; + } + +redef Notice::force_email_summaries = T; + +redef enum Notice::Type += { + Test_Notice, +}; + +event 
connection_established(c: connection) + { + NOTICE([$note=Test_Notice, $conn=c, $msg="test", $identifier="static"]); + } + + + diff --git a/testing/btest/scripts/base/frameworks/notice/suppression-cluster.bro b/testing/btest/scripts/base/frameworks/notice/suppression-cluster.bro deleted file mode 100644 index e9b31e1756..0000000000 --- a/testing/btest/scripts/base/frameworks/notice/suppression-cluster.bro +++ /dev/null @@ -1,66 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# @TEST-PORT: BROKER_PORT4 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait 20 -# @TEST-EXEC: btest-diff manager-1/notice.log - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager-1"], -}; -@TEST-END-FILE - -redef Log::default_rotation_interval = 0secs; - -redef enum Notice::Type += { - Test_Notice, -}; - -event Cluster::node_down(name: string, id: string) - { - terminate(); - } - -event delayed_notice() - { - NOTICE([$note=Test_Notice, - $msg="test notice!", - $identifier="this identifier is static"]); - } - -event ready() - { - if ( Cluster::node == "worker-1" ) - schedule 4secs { delayed_notice() }; - if ( Cluster::node == "worker-2" ) - schedule 1secs { delayed_notice() }; - } - -event Notice::suppressed(n: Notice::Info) - { - if ( Cluster::node == "worker-1" ) - terminate(); - } - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) - -global peer_count = 0; - -event Cluster::node_up(name: string, id: string) - { - peer_count = peer_count + 1; - - if ( peer_count == 3 ) - Broker::publish(Cluster::worker_topic, ready); - } - -@endif diff --git a/testing/btest/scripts/base/frameworks/notice/suppression-cluster.zeek b/testing/btest/scripts/base/frameworks/notice/suppression-cluster.zeek new file mode 100644 index 0000000000..7c1dbaf5bc --- /dev/null +++ b/testing/btest/scripts/base/frameworks/notice/suppression-cluster.zeek @@ -0,0 +1,66 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# @TEST-PORT: BROKER_PORT4 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run proxy-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=proxy-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-2 ZEEKPATH=$ZEEKPATH:.. 
CLUSTER_NODE=worker-2 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 20 +# @TEST-EXEC: btest-diff manager-1/notice.log + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1"], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT4")), $manager="manager-1"], +}; +@TEST-END-FILE + +redef Log::default_rotation_interval = 0secs; + +redef enum Notice::Type += { + Test_Notice, +}; + +event Cluster::node_down(name: string, id: string) + { + terminate(); + } + +event delayed_notice() + { + NOTICE([$note=Test_Notice, + $msg="test notice!", + $identifier="this identifier is static"]); + } + +event ready() + { + if ( Cluster::node == "worker-1" ) + schedule 4secs { delayed_notice() }; + if ( Cluster::node == "worker-2" ) + schedule 1secs { delayed_notice() }; + } + +event Notice::suppressed(n: Notice::Info) + { + if ( Cluster::node == "worker-1" ) + terminate(); + } + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + +global peer_count = 0; + +event Cluster::node_up(name: string, id: string) + { + peer_count = peer_count + 1; + + if ( peer_count == 3 ) + Broker::publish(Cluster::worker_topic, ready); + } + +@endif diff --git a/testing/btest/scripts/base/frameworks/notice/suppression-disable.bro b/testing/btest/scripts/base/frameworks/notice/suppression-disable.bro deleted file mode 100644 index 96b932caf8..0000000000 --- a/testing/btest/scripts/base/frameworks/notice/suppression-disable.bro +++ /dev/null @@ -1,25 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT -# The "Test_Notice" should be logged twice -# @TEST-EXEC: test `grep Test_Notice notice.log | wc -l` -eq 2 - -@load base/frameworks/notice - -redef enum Notice::Type += { - Test_Notice, -}; - -redef Notice::not_suppressed_types += { Test_Notice }; - -# The second notice needs to be scheduled due to how the notice framework -# uses the event queue. - -event second_notice() - { - NOTICE([$note=Test_Notice, $msg="another test", $identifier="static"]); - } - -event bro_init() - { - NOTICE([$note=Test_Notice, $msg="test", $identifier="static"]); - schedule 1msec { second_notice() }; - } diff --git a/testing/btest/scripts/base/frameworks/notice/suppression-disable.zeek b/testing/btest/scripts/base/frameworks/notice/suppression-disable.zeek new file mode 100644 index 0000000000..a281fd1b7c --- /dev/null +++ b/testing/btest/scripts/base/frameworks/notice/suppression-disable.zeek @@ -0,0 +1,25 @@ +# @TEST-EXEC: zeek -b %INPUT +# The "Test_Notice" should be logged twice +# @TEST-EXEC: test `grep Test_Notice notice.log | wc -l` -eq 2 + +@load base/frameworks/notice + +redef enum Notice::Type += { + Test_Notice, +}; + +redef Notice::not_suppressed_types += { Test_Notice }; + +# The second notice needs to be scheduled due to how the notice framework +# uses the event queue. 
+ +event second_notice() + { + NOTICE([$note=Test_Notice, $msg="another test", $identifier="static"]); + } + +event zeek_init() + { + NOTICE([$note=Test_Notice, $msg="test", $identifier="static"]); + schedule 1msec { second_notice() }; + } diff --git a/testing/btest/scripts/base/frameworks/notice/suppression.bro b/testing/btest/scripts/base/frameworks/notice/suppression.bro deleted file mode 100644 index 87ce3672b6..0000000000 --- a/testing/btest/scripts/base/frameworks/notice/suppression.bro +++ /dev/null @@ -1,23 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff notice.log - -@load base/frameworks/notice - -redef enum Notice::Type += { - Test_Notice, -}; - -# The second notice needs to be scheduled due to how the notice framework -# uses the event queue. - -event second_notice() - { - NOTICE([$note=Test_Notice, $msg="another test", $identifier="static"]); - } - -event bro_init() - { - NOTICE([$note=Test_Notice, $msg="test", $identifier="static"]); - schedule 1msec { second_notice() }; - } - diff --git a/testing/btest/scripts/base/frameworks/notice/suppression.zeek b/testing/btest/scripts/base/frameworks/notice/suppression.zeek new file mode 100644 index 0000000000..f284bb4600 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/notice/suppression.zeek @@ -0,0 +1,23 @@ +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff notice.log + +@load base/frameworks/notice + +redef enum Notice::Type += { + Test_Notice, +}; + +# The second notice needs to be scheduled due to how the notice framework +# uses the event queue. + +event second_notice() + { + NOTICE([$note=Test_Notice, $msg="another test", $identifier="static"]); + } + +event zeek_init() + { + NOTICE([$note=Test_Notice, $msg="test", $identifier="static"]); + schedule 1msec { second_notice() }; + } + diff --git a/testing/btest/scripts/base/frameworks/openflow/broker-basic.bro b/testing/btest/scripts/base/frameworks/openflow/broker-basic.bro deleted file mode 100644 index 9d43089b93..0000000000 --- a/testing/btest/scripts/base/frameworks/openflow/broker-basic.bro +++ /dev/null @@ -1,124 +0,0 @@ -# @TEST-PORT: BROKER_PORT -# @TEST-EXEC: btest-bg-run recv "bro -b ../recv.bro >recv.out" -# @TEST-EXEC: btest-bg-run send "bro -b -r $TRACES/smtp.trace --pseudo-realtime ../send.bro >send.out" - -# @TEST-EXEC: btest-bg-wait 20 -# @TEST-EXEC: btest-diff recv/recv.out -# @TEST-EXEC: btest-diff send/send.out - -@TEST-START-FILE send.bro - -@load base/protocols/conn -@load base/frameworks/openflow - -redef exit_only_after_terminate = T; - -global of_controller: OpenFlow::Controller; - -event bro_init() - { - suspend_processing(); - of_controller = OpenFlow::broker_new("broker1", 127.0.0.1, to_port(getenv("BROKER_PORT")), "bro/openflow", 42); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print "Broker peer added", endpoint$network$address, endpoint$network$bound_port == to_port(getenv("BROKER_PORT")); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -event OpenFlow::controller_activated(name: string, controller: OpenFlow::Controller) - { - continue_processing(); - OpenFlow::flow_clear(of_controller); - OpenFlow::flow_mod(of_controller, [], [$cookie=OpenFlow::generate_cookie(1), $command=OpenFlow::OFPFC_ADD, $actions=[$out_ports=vector(3, 7)]]); - } - -event connection_established(c: connection) - { - print "connection established"; - local match = OpenFlow::match_conn(c$id); - local match_rev = OpenFlow::match_conn(c$id, T); - - local flow_mod: 
OpenFlow::ofp_flow_mod = [ - $cookie=OpenFlow::generate_cookie(42), - $command=OpenFlow::OFPFC_ADD, - $idle_timeout=30, - $priority=5 - ]; - - OpenFlow::flow_mod(of_controller, match, flow_mod); - OpenFlow::flow_mod(of_controller, match_rev, flow_mod); - } - -event OpenFlow::flow_mod_success(name: string, match: OpenFlow::ofp_match, flow_mod: OpenFlow::ofp_flow_mod, msg: string) - { - print "Flow_mod_success"; - } - -event OpenFlow::flow_mod_failure(name: string, match: OpenFlow::ofp_match, flow_mod: OpenFlow::ofp_flow_mod, msg: string) - { - print "Flow_mod_failure"; - } - -@TEST-END-FILE - -@TEST-START-FILE recv.bro - -@load base/frameworks/openflow - -redef exit_only_after_terminate = T; - -global msg_count: count = 0; - -event die() - { - terminate(); - } - -event bro_init() - { - Broker::subscribe("bro/openflow"); - Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - print "Broker peer added"; - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -function got_message() - { - ++msg_count; - - if ( msg_count >= 4 ) - { - schedule 2sec { die() }; - } - } - -event OpenFlow::broker_flow_mod(name: string, dpid: count, match: OpenFlow::ofp_match, flow_mod: OpenFlow::ofp_flow_mod) - { - print "got flow_mod", dpid, match, flow_mod; - Broker::publish("bro/openflow", OpenFlow::flow_mod_success, name, match, flow_mod, ""); - Broker::publish("bro/openflow", OpenFlow::flow_mod_failure, name, match, flow_mod, ""); - got_message(); - } - -event OpenFlow::broker_flow_clear(name: string, dpid: count) - { - print "flow_clear", dpid; - got_message(); - } - -@TEST-END-FILE - diff --git a/testing/btest/scripts/base/frameworks/openflow/broker-basic.zeek b/testing/btest/scripts/base/frameworks/openflow/broker-basic.zeek new file mode 100644 index 0000000000..e1d05db7a5 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/openflow/broker-basic.zeek @@ -0,0 +1,118 @@ +# @TEST-PORT: BROKER_PORT +# @TEST-EXEC: btest-bg-run recv "zeek -b ../recv.zeek >recv.out" +# @TEST-EXEC: btest-bg-run send "zeek -b -r $TRACES/smtp.trace --pseudo-realtime ../send.zeek >send.out" + +# @TEST-EXEC: btest-bg-wait 30 +# @TEST-EXEC: btest-diff recv/recv.out +# @TEST-EXEC: btest-diff send/send.out + +@TEST-START-FILE send.zeek + +@load base/protocols/conn +@load base/frameworks/openflow + +redef exit_only_after_terminate = T; + +global of_controller: OpenFlow::Controller; + +event zeek_init() + { + suspend_processing(); + of_controller = OpenFlow::broker_new("broker1", 127.0.0.1, to_port(getenv("BROKER_PORT")), "zeek/openflow", 42); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print "Broker peer added", endpoint$network$address, endpoint$network$bound_port == to_port(getenv("BROKER_PORT")); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +event OpenFlow::controller_activated(name: string, controller: OpenFlow::Controller) + { + OpenFlow::flow_clear(of_controller); + OpenFlow::flow_mod(of_controller, [], [$cookie=OpenFlow::generate_cookie(1), $command=OpenFlow::OFPFC_ADD, $actions=[$out_ports=vector(3, 7)]]); + } + +event connection_established(c: connection) + { + print "connection established"; + local match = OpenFlow::match_conn(c$id); + local match_rev = OpenFlow::match_conn(c$id, T); + + local flow_mod: OpenFlow::ofp_flow_mod = [ + $cookie=OpenFlow::generate_cookie(42), + 
$command=OpenFlow::OFPFC_ADD, + $idle_timeout=30, + $priority=5 + ]; + + OpenFlow::flow_mod(of_controller, match, flow_mod); + OpenFlow::flow_mod(of_controller, match_rev, flow_mod); + } + +global msg_count: count = 0; + +function got_message() + { + ++msg_count; + + if ( msg_count == 2 ) + continue_processing(); + else if ( msg_count == 6 ) + terminate(); + } + +event OpenFlow::flow_mod_success(name: string, match: OpenFlow::ofp_match, flow_mod: OpenFlow::ofp_flow_mod, msg: string) + { + print "Flow_mod_success"; + got_message(); + } + +event OpenFlow::flow_mod_failure(name: string, match: OpenFlow::ofp_match, flow_mod: OpenFlow::ofp_flow_mod, msg: string) + { + print "Flow_mod_failure"; + got_message(); + } + +@TEST-END-FILE + +@TEST-START-FILE recv.zeek + +@load base/frameworks/openflow + +redef exit_only_after_terminate = T; + +event zeek_init() + { + Broker::subscribe("zeek/openflow"); + Broker::listen("127.0.0.1", to_port(getenv("BROKER_PORT"))); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + print "Broker peer added"; + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +event OpenFlow::broker_flow_mod(name: string, dpid: count, match: OpenFlow::ofp_match, flow_mod: OpenFlow::ofp_flow_mod) + { + print "got flow_mod", dpid, match, flow_mod; + Broker::publish("zeek/openflow", OpenFlow::flow_mod_success, name, match, flow_mod, ""); + Broker::publish("zeek/openflow", OpenFlow::flow_mod_failure, name, match, flow_mod, ""); + } + +event OpenFlow::broker_flow_clear(name: string, dpid: count) + { + print "flow_clear", dpid; + } + +@TEST-END-FILE + diff --git a/testing/btest/scripts/base/frameworks/openflow/log-basic.bro b/testing/btest/scripts/base/frameworks/openflow/log-basic.bro deleted file mode 100644 index d4f08e7822..0000000000 --- a/testing/btest/scripts/base/frameworks/openflow/log-basic.bro +++ /dev/null @@ -1,32 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/smtp.trace %INPUT -# @TEST-EXEC: btest-diff openflow.log - -@load base/protocols/conn -@load base/frameworks/openflow - -global of_controller: OpenFlow::Controller; - -global cookie_id: count = 42; - -event bro_init() - { - of_controller = OpenFlow::log_new(42); - - OpenFlow::flow_mod(of_controller, [], [$cookie=OpenFlow::generate_cookie(1), $command=OpenFlow::OFPFC_ADD, $actions=[$out_ports=vector(3, 7)]]); - } - -event connection_established(c: connection) - { - local match = OpenFlow::match_conn(c$id); - local match_rev = OpenFlow::match_conn(c$id, T); - - local flow_mod: OpenFlow::ofp_flow_mod = [ - $cookie=OpenFlow::generate_cookie(++cookie_id), - $command=OpenFlow::OFPFC_ADD, - $idle_timeout=30, - $priority=5 - ]; - - OpenFlow::flow_mod(of_controller, match, flow_mod); - OpenFlow::flow_mod(of_controller, match_rev, flow_mod); - } diff --git a/testing/btest/scripts/base/frameworks/openflow/log-basic.zeek b/testing/btest/scripts/base/frameworks/openflow/log-basic.zeek new file mode 100644 index 0000000000..3604c95eec --- /dev/null +++ b/testing/btest/scripts/base/frameworks/openflow/log-basic.zeek @@ -0,0 +1,32 @@ +# @TEST-EXEC: zeek -r $TRACES/smtp.trace %INPUT +# @TEST-EXEC: btest-diff openflow.log + +@load base/protocols/conn +@load base/frameworks/openflow + +global of_controller: OpenFlow::Controller; + +global cookie_id: count = 42; + +event zeek_init() + { + of_controller = OpenFlow::log_new(42); + + OpenFlow::flow_mod(of_controller, [], [$cookie=OpenFlow::generate_cookie(1), $command=OpenFlow::OFPFC_ADD, $actions=[$out_ports=vector(3, 
7)]]); + } + +event connection_established(c: connection) + { + local match = OpenFlow::match_conn(c$id); + local match_rev = OpenFlow::match_conn(c$id, T); + + local flow_mod: OpenFlow::ofp_flow_mod = [ + $cookie=OpenFlow::generate_cookie(++cookie_id), + $command=OpenFlow::OFPFC_ADD, + $idle_timeout=30, + $priority=5 + ]; + + OpenFlow::flow_mod(of_controller, match, flow_mod); + OpenFlow::flow_mod(of_controller, match_rev, flow_mod); + } diff --git a/testing/btest/scripts/base/frameworks/openflow/log-cluster.bro b/testing/btest/scripts/base/frameworks/openflow/log-cluster.bro deleted file mode 100644 index 33f20f8ce5..0000000000 --- a/testing/btest/scripts/base/frameworks/openflow/log-cluster.bro +++ /dev/null @@ -1,77 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# -# @TEST-EXEC: btest-bg-run manager-1 "cp ../cluster-layout.bro . && CLUSTER_NODE=manager-1 bro %INPUT" -# @TEST-EXEC: btest-bg-run worker-1 "cp ../cluster-layout.bro . && CLUSTER_NODE=worker-1 bro --pseudo-realtime -C -r $TRACES/smtp.trace %INPUT" -# @TEST-EXEC: btest-bg-wait 20 -# @TEST-EXEC: btest-diff manager-1/openflow.log - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], -}; -@TEST-END-FILE - -redef Log::default_rotation_interval = 0secs; -#redef exit_only_after_terminate = T; - -@load base/protocols/conn -@load base/frameworks/openflow - -global of_controller: OpenFlow::Controller; - -@if ( Cluster::local_node_type() == Cluster::WORKER ) -event bro_init() - { - suspend_processing(); - } - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - continue_processing(); - } -@endif - -event bro_init() - { - of_controller = OpenFlow::log_new(42); - } - -event terminate_me() - { - terminate(); - } - -global done = F; - -event connection_established(c: connection) - { - if ( done ) - return; - - done = T; - - print "conn established"; - - local match = OpenFlow::match_conn(c$id); - local match_rev = OpenFlow::match_conn(c$id, T); - - local flow_mod: OpenFlow::ofp_flow_mod = [ - $cookie=OpenFlow::generate_cookie(42), - $command=OpenFlow::OFPFC_ADD, - $idle_timeout=30, - $priority=5 - ]; - - OpenFlow::flow_mod(of_controller, match, flow_mod); - OpenFlow::flow_mod(of_controller, match_rev, flow_mod); - - schedule 2sec { terminate_me() }; - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - schedule 2sec { terminate_me() }; - } - diff --git a/testing/btest/scripts/base/frameworks/openflow/log-cluster.zeek b/testing/btest/scripts/base/frameworks/openflow/log-cluster.zeek new file mode 100644 index 0000000000..5aa40ed181 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/openflow/log-cluster.zeek @@ -0,0 +1,77 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# +# @TEST-EXEC: btest-bg-run manager-1 "cp ../cluster-layout.zeek . && CLUSTER_NODE=manager-1 zeek %INPUT" +# @TEST-EXEC: btest-bg-run worker-1 "cp ../cluster-layout.zeek . 
&& CLUSTER_NODE=worker-1 zeek --pseudo-realtime -C -r $TRACES/smtp.trace %INPUT" +# @TEST-EXEC: btest-bg-wait 20 +# @TEST-EXEC: btest-diff manager-1/openflow.log + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], +}; +@TEST-END-FILE + +redef Log::default_rotation_interval = 0secs; +#redef exit_only_after_terminate = T; + +@load base/protocols/conn +@load base/frameworks/openflow + +global of_controller: OpenFlow::Controller; + +@if ( Cluster::local_node_type() == Cluster::WORKER ) +event zeek_init() + { + suspend_processing(); + } + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + continue_processing(); + } +@endif + +event zeek_init() + { + of_controller = OpenFlow::log_new(42); + } + +event terminate_me() + { + terminate(); + } + +global done = F; + +event connection_established(c: connection) + { + if ( done ) + return; + + done = T; + + print "conn established"; + + local match = OpenFlow::match_conn(c$id); + local match_rev = OpenFlow::match_conn(c$id, T); + + local flow_mod: OpenFlow::ofp_flow_mod = [ + $cookie=OpenFlow::generate_cookie(42), + $command=OpenFlow::OFPFC_ADD, + $idle_timeout=30, + $priority=5 + ]; + + OpenFlow::flow_mod(of_controller, match, flow_mod); + OpenFlow::flow_mod(of_controller, match_rev, flow_mod); + + schedule 2sec { terminate_me() }; + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + schedule 2sec { terminate_me() }; + } + diff --git a/testing/btest/scripts/base/frameworks/openflow/ryu-basic.bro b/testing/btest/scripts/base/frameworks/openflow/ryu-basic.bro deleted file mode 100644 index 3bfaa4c076..0000000000 --- a/testing/btest/scripts/base/frameworks/openflow/ryu-basic.bro +++ /dev/null @@ -1,37 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/smtp.trace %INPUT -# @TEST-EXEC: btest-diff .stdout - -@load base/protocols/conn -@load base/frameworks/openflow - -global of_controller: OpenFlow::Controller; - -event bro_init() - { - of_controller = OpenFlow::ryu_new(127.0.0.1, 8080, 42); - of_controller$state$ryu_debug=T; - - OpenFlow::flow_clear(of_controller); - OpenFlow::flow_mod(of_controller, [], [$cookie=OpenFlow::generate_cookie(1), $command=OpenFlow::OFPFC_ADD, $actions=[$out_ports=vector(3, 7)]]); - } - -event connection_established(c: connection) - { - local match = OpenFlow::match_conn(c$id); - local match_rev = OpenFlow::match_conn(c$id, T); - - local flow_mod: OpenFlow::ofp_flow_mod = [ - $cookie=OpenFlow::generate_cookie(42), - $command=OpenFlow::OFPFC_ADD, - $idle_timeout=30, - $priority=5 - ]; - - OpenFlow::flow_mod(of_controller, match, flow_mod); - OpenFlow::flow_mod(of_controller, match_rev, flow_mod); - } - -event OpenFlow::flow_mod_success(name: string, match: OpenFlow::ofp_match, flow_mod: OpenFlow::ofp_flow_mod, msg: string) - { - print "Flow_mod_success"; - } diff --git a/testing/btest/scripts/base/frameworks/openflow/ryu-basic.zeek b/testing/btest/scripts/base/frameworks/openflow/ryu-basic.zeek new file mode 100644 index 0000000000..8f1dc35fce --- /dev/null +++ b/testing/btest/scripts/base/frameworks/openflow/ryu-basic.zeek @@ -0,0 +1,37 @@ +# @TEST-EXEC: zeek -r $TRACES/smtp.trace %INPUT +# @TEST-EXEC: btest-diff .stdout + +@load base/protocols/conn +@load base/frameworks/openflow + +global of_controller: OpenFlow::Controller; + +event 
zeek_init() + { + of_controller = OpenFlow::ryu_new(127.0.0.1, 8080, 42); + of_controller$state$ryu_debug=T; + + OpenFlow::flow_clear(of_controller); + OpenFlow::flow_mod(of_controller, [], [$cookie=OpenFlow::generate_cookie(1), $command=OpenFlow::OFPFC_ADD, $actions=[$out_ports=vector(3, 7)]]); + } + +event connection_established(c: connection) + { + local match = OpenFlow::match_conn(c$id); + local match_rev = OpenFlow::match_conn(c$id, T); + + local flow_mod: OpenFlow::ofp_flow_mod = [ + $cookie=OpenFlow::generate_cookie(42), + $command=OpenFlow::OFPFC_ADD, + $idle_timeout=30, + $priority=5 + ]; + + OpenFlow::flow_mod(of_controller, match, flow_mod); + OpenFlow::flow_mod(of_controller, match_rev, flow_mod); + } + +event OpenFlow::flow_mod_success(name: string, match: OpenFlow::ofp_match, flow_mod: OpenFlow::ofp_flow_mod, msg: string) + { + print "Flow_mod_success"; + } diff --git a/testing/btest/scripts/base/frameworks/packet-filter/bad-filter.test b/testing/btest/scripts/base/frameworks/packet-filter/bad-filter.test index a3e2a54c57..537b210128 100644 --- a/testing/btest/scripts/base/frameworks/packet-filter/bad-filter.test +++ b/testing/btest/scripts/base/frameworks/packet-filter/bad-filter.test @@ -1,2 +1,2 @@ -# @TEST-EXEC-FAIL: bro -r $TRACES/web.trace -f "bad filter" +# @TEST-EXEC-FAIL: zeek -r $TRACES/web.trace -f "bad filter" # @TEST-EXEC: test -s .stderr diff --git a/testing/btest/scripts/base/frameworks/reporter/disable-stderr.bro b/testing/btest/scripts/base/frameworks/reporter/disable-stderr.bro deleted file mode 100644 index b1afb99b5c..0000000000 --- a/testing/btest/scripts/base/frameworks/reporter/disable-stderr.bro +++ /dev/null @@ -1,13 +0,0 @@ -# @TEST-EXEC: bro %INPUT -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr -# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath | $SCRIPTS/diff-remove-timestamps" btest-diff reporter.log - -redef Reporter::warnings_to_stderr = F; -redef Reporter::errors_to_stderr = F; - -global test: table[count] of string = {}; - -event bro_init() - { - print test[3]; - } diff --git a/testing/btest/scripts/base/frameworks/reporter/disable-stderr.zeek b/testing/btest/scripts/base/frameworks/reporter/disable-stderr.zeek new file mode 100644 index 0000000000..1395f20807 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/reporter/disable-stderr.zeek @@ -0,0 +1,20 @@ +# @TEST-EXEC: zeek %INPUT +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath | $SCRIPTS/diff-remove-timestamps" btest-diff reporter.log + +redef Reporter::warnings_to_stderr = F; +redef Reporter::errors_to_stderr = F; + +global test: table[count] of string = {}; + +event my_event() + { + print test[3]; + } + +event zeek_init() + { + # Errors within zeek_init are always printed to stderr, so check whether + # an error that happens later is suppressed. 
+ schedule 0.2sec { my_event() }; + } diff --git a/testing/btest/scripts/base/frameworks/reporter/stderr.bro b/testing/btest/scripts/base/frameworks/reporter/stderr.bro deleted file mode 100644 index ef01c9fdf9..0000000000 --- a/testing/btest/scripts/base/frameworks/reporter/stderr.bro +++ /dev/null @@ -1,10 +0,0 @@ -# @TEST-EXEC: bro %INPUT -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr -# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath | $SCRIPTS/diff-remove-timestamps" btest-diff reporter.log - -global test: table[count] of string = {}; - -event bro_init() - { - print test[3]; - } diff --git a/testing/btest/scripts/base/frameworks/reporter/stderr.zeek b/testing/btest/scripts/base/frameworks/reporter/stderr.zeek new file mode 100644 index 0000000000..5c3793b435 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/reporter/stderr.zeek @@ -0,0 +1,10 @@ +# @TEST-EXEC: zeek %INPUT +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath | $SCRIPTS/diff-remove-timestamps" btest-diff reporter.log + +global test: table[count] of string = {}; + +event zeek_init() + { + print test[3]; + } diff --git a/testing/btest/scripts/base/frameworks/software/version-parsing.bro b/testing/btest/scripts/base/frameworks/software/version-parsing.bro deleted file mode 100644 index 806a058a03..0000000000 --- a/testing/btest/scripts/base/frameworks/software/version-parsing.bro +++ /dev/null @@ -1,156 +0,0 @@ -# @TEST-EXEC: bro %INPUT > output -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff output - -module Software; - -global matched_software: table[string] of Software::Description = { - ["OpenSSH_4.4"] = - [$name="OpenSSH", $version=[$major=4,$minor=4], $unparsed_version=""], - ["OpenSSH_5.2"] = - [$name="OpenSSH", $version=[$major=5,$minor=2], $unparsed_version=""], - ["Apache/2.0.63 (Unix) mod_auth_kerb/5.3 mod_ssl/2.0.63 OpenSSL/0.9.7a mod_fastcgi/2.4.2"] = - [$name="Apache", $version=[$major=2,$minor=0,$minor2=63,$addl="Unix"], $unparsed_version=""], - ["Apache/1.3.19 (Unix)"] = - [$name="Apache", $version=[$major=1,$minor=3,$minor2=19,$addl="Unix"], $unparsed_version=""], - ["ProFTPD 1.2.5rc1 Server (Debian)"] = - [$name="ProFTPD", $version=[$major=1,$minor=2,$minor2=5,$addl="rc1"], $unparsed_version=""], - ["wu-2.4.2-academ[BETA-18-VR14](1)"] = - [$name="wu", $version=[$major=2,$minor=4,$minor2=2,$addl="academ"], $unparsed_version=""], - ["wu-2.6.2(1)"] = - [$name="wu", $version=[$major=2,$minor=6,$minor2=2,$addl="1"], $unparsed_version=""], - ["Java1.2.2-JDeveloper"] = - [$name="Java", $version=[$major=1,$minor=2,$minor2=2,$addl="JDeveloper"], $unparsed_version=""], - ["Java/1.6.0_13"] = - [$name="Java", $version=[$major=1,$minor=6,$minor2=0,$minor3=13], $unparsed_version=""], - ["Python-urllib/3.1"] = - [$name="Python-urllib", $version=[$major=3,$minor=1], $unparsed_version=""], - ["libwww-perl/5.820"] = - [$name="libwww-perl", $version=[$major=5,$minor=820], $unparsed_version=""], - ["Wget/1.9+cvs-stable (Red Hat modified)"] = - [$name="Wget", $version=[$major=1,$minor=9,$addl="+cvs"], $unparsed_version=""], - ["Wget/1.11.4 (Red Hat modified)"] = - [$name="Wget", $version=[$major=1,$minor=11,$minor2=4,$addl="Red Hat modified"], $unparsed_version=""], - ["curl/7.15.1 (i486-pc-linux-gnu) libcurl/7.15.1 OpenSSL/0.9.8a zlib/1.2.3 libidn/0.5.18"] = - [$name="curl", $version=[$major=7,$minor=15,$minor2=1,$addl="i486-pc-linux-gnu"], 
$unparsed_version=""], - ["Apache"] = - [$name="Apache", $unparsed_version=""], - ["Zope/(Zope 2.7.8-final, python 2.3.5, darwin) ZServer/1.1 Plone/Unknown"] = - [$name="Zope/(Zope", $version=[$major=2,$minor=7,$minor2=8,$addl="final"], $unparsed_version=""], - ["The Bat! (v2.00.9) Personal"] = - [$name="The Bat!", $version=[$major=2,$minor=0,$minor2=9,$addl="Personal"], $unparsed_version=""], - ["Flash/10,2,153,1"] = - [$name="Flash", $version=[$major=10,$minor=2,$minor2=153,$minor3=1], $unparsed_version=""], - # The addl on the following entry isn't so great, but it'll do. - ["Flash%20Player/26.0.0.137 CFNetwork/811.5.4 Darwin/16.6.0 (x86_64)"] = - [$name="Flash", $version=[$major=26,$minor=0,$minor2=0,$minor3=137,$addl="CFNetwork/811"], $unparsed_version=""], - ["mt2/1.2.3.967 Oct 13 2010-13:40:24 ord-pixel-x2 pid 0x35a3 13731"] = - [$name="mt2", $version=[$major=1,$minor=2,$minor2=3,$minor3=967,$addl="Oct"], $unparsed_version=""], - ["CacheFlyServe v26b"] = - [$name="CacheFlyServe", $version=[$major=26,$addl="b"], $unparsed_version=""], - ["Apache/2.0.46 (Win32) mod_ssl/2.0.46 OpenSSL/0.9.7b mod_jk2/2.0.4"] = - [$name="Apache", $version=[$major=2,$minor=0,$minor2=46,$addl="Win32"], $unparsed_version=""], - # I have no clue how I'd support this without a special case. - #["Apache mod_fcgid/2.3.6 mod_auth_passthrough/2.1 mod_bwlimited/1.4 FrontPage/5.0.2.2635"] = - # [$name="Apache", $version=[], $unparsed_version=""], - ["Apple iPhone v4.3.1 Weather v1.0.0.8G4"] = - [$name="Apple iPhone", $version=[$major=4,$minor=3,$minor2=1,$addl="Weather"], $unparsed_version=""], - ["Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_2 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8H7 Safari/6533.18.5"] = - [$name="Safari", $version=[$major=5,$minor=0,$minor2=2,$addl="Mobile"], $unparsed_version=""], - ["Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16"] = - [$name="Chrome", $version=[$major=10,$minor=0,$minor2=648,$minor3=205], $unparsed_version=""], - ["Opera/9.80 (Windows NT 6.1; U; sv) Presto/2.7.62 Version/11.01"] = - [$name="Opera", $version=[$major=11,$minor=1], $unparsed_version=""], - ["Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.11) Gecko/20101013 Lightning/1.0b2 Thunderbird/3.1.5"] = - [$name="Thunderbird", $version=[$major=3,$minor=1,$minor2=5], $unparsed_version=""], - ["iTunes/9.0 (Macintosh; Intel Mac OS X 10.5.8) AppleWebKit/531.9"] = - [$name="iTunes", $version=[$major=9,$minor=0,$addl="Macintosh"], $unparsed_version=""], - ["Java1.3.1_04"] = - [$name="Java", $version=[$major=1,$minor=3,$minor2=1,$minor3=4], $unparsed_version=""], - ["Mozilla/5.0 (Linux; U; Android 2.3.3; zh-tw; HTC Pyramid Build/GRI40) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"] = - [$name="Safari", $version=[$major=4,$minor=0,$addl="Mobile"], $unparsed_version=""], - ["Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; en-us) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27"] = - [$name="Safari", $version=[$major=5,$minor=0,$minor2=4], $unparsed_version=""], - ["Mozilla/5.0 (iPod; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7"] = - [$name="Safari", $version=[$major=4,$minor=0,$minor2=5,$addl="Mobile"], $unparsed_version=""], - ["Opera/9.80 (J2ME/MIDP; Opera Mini/9.80 (S60; SymbOS; Opera Mobi/23.348; U; en) Presto/2.5.25 Version/10.54"] = - [$name="Opera Mini", 
$version=[$major=10,$minor=54], $unparsed_version=""], - ["Opera/9.80 (J2ME/MIDP; Opera Mini/5.0.18741/18.794; U; en) Presto/2.4.15"] = - [$name="Opera Mini", $version=[$major=5,$minor=0,$minor2=18741], $unparsed_version=""], - ["Opera/9.80 (Windows NT 5.1; Opera Mobi/49; U; en) Presto/2.4.18 Version/10.00"] = - [$name="Opera Mobi", $version=[$major=10,$minor=0], $unparsed_version=""], - ["Mozilla/4.0 (compatible; MSIE 8.0; Android 2.2.2; Linux; Opera Mobi/ADR-1103311355; en) Opera 11.00"] = - [$name="Opera", $version=[$major=11,$minor=0], $unparsed_version=""], - ["Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.2) Gecko/20040804 Netscape/7.2 (ax)"] = - [$name="Netscape", $version=[$major=7,$minor=2], $unparsed_version=""], - ["Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; GTB5; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506; InfoPath.2)"] = - [$name="MSIE", $version=[$major=7,$minor=0], $unparsed_version=""], - ["Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.1; Media Center PC 3.0; .NET CLR 1.0.3705; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.1)"] = - [$name="MSIE", $version=[$major=7,$minor=0,$addl="b"], $unparsed_version=""], - ["Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Tablet PC 2.0; InfoPath.2; InfoPath.3)"] = - [$name="MSIE", $version=[$major=8,$minor=0], $unparsed_version=""], - ["Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)"] = - [$name="MSIE", $version=[$major=9,$minor=0], $unparsed_version=""], - ["Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; InfoPath.3; Creative AutoUpdate v1.40.02)"] = - [$name="MSIE", $version=[$major=9,$minor=0], $unparsed_version=""], - ["Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)"] = - [$name="MSIE", $version=[$major=10,$minor=0], $unparsed_version=""], - # IE 11 normal mode. - ["Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko"] = - [$name="MSIE", $version=[$major=11,$minor=0], $unparsed_version=""], - # IE 11 compatibility mode - ["Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.3; Trident/7.0; .NET4.0E; .NET4.0C)"] = - [$name="MSIE", $version=[$major=11,$minor=0], $unparsed_version=""], - ["The Bat! 
(3.0.1 RC3) Professional"] = - [$name="The Bat!", $version=[$major=3,$minor=0,$minor2=1,$addl="RC3"], $unparsed_version=""], - # This is an FTP client (found with CLNT command) - ["Total Commander"] = - [$name="Total Commander", $version=[], $unparsed_version=""], - ["(vsFTPd 2.0.5)"] = - [$name="vsFTPd", $version=[$major=2,$minor=0,$minor2=5], $unparsed_version=""], - ["Apple Mail (2.1084)"] = - [$name="Apple Mail", $version=[$major=2,$minor=1084], $unparsed_version=""], - ["Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/420+ (KHTML, like Gecko) AdobeAIR/1.0"] = - [$name="AdobeAIR", $version=[$major=1,$minor=0], $unparsed_version=""], - ["Mozilla/5.0 (Windows; U; en) AppleWebKit/420+ (KHTML, like Gecko) AdobeAIR/1.0"] = - [$name="AdobeAIR", $version=[$major=1,$minor=0], $unparsed_version=""], - ["\\xe6\\xbc\\xab\\xe7\\x94\\xbb\\xe4\\xba\\xba 2.6.2 rv:1.2 (iPhone; iOS 10.3.2; en_US)"] = - [$name="\xe6\xbc\xab\xe7\x94\xbb\xe4\xba\xba", $version=[$major=2,$minor=6,$minor2=2,$addl="rv:1"], $unparsed_version=""], - ["%E6%9C%89%E9%81%93%E8%AF%8D%E5%85%B8/128 CFNetwork/760.2.6 Darwin/15.3.0 (x86_64)"] = - [$name="\xe6\x9c\x89\xe9\x81\x93\xe8\xaf\x8d\xe5\x85\xb8", $version=[$major=128,$addl="CFNetwork/760"], $unparsed_version=""], - ["QQ%E9%82%AE%E7%AE%B1/5.3.2.8 CFNetwork/811.5.4 Darwin/16.6.0"] = - [$name="QQ\xe9\x82\xae\xe7\xae\xb1", $version=[$major=5,$minor=3,$minor2=2,$minor3=8,$addl="CFNetwork/811"], $unparsed_version=""], - ["Mozilla/5.0 (Windows Phone 10.0; Android 6.0.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Mobile Safari/537.36 Edge/15.15063"] = - [$name="Edge", $version=[$major=15,$minor=15063], $unparsed_version=""], - ["A/8.0.0/Google/Pixel#XL/marlin/unknown/QCX3/l8100358318783302904/-/1456904160/-/google/662107/662098/-"] = - [$name="Android (Google Pixel)", $version=[$major=8,$minor=0,$minor2=0], $unparsed_version=""], - ["A/8.1.0/Google/Pixel#2/walleye/unknown/QCX3/l10660929675510745862/-/104360422/-/google/3606/3607/-"] = - [$name="Android (Google Pixel)", $version=[$major=8,$minor=1,$minor2=0], $unparsed_version=""], - ["A/9/Google/Pixel#2/walleye/unknown/QCX3/l17463753539612639959/-/2406658516/-/google/724998/724992/-"] = - [$name="Android (Google Pixel)", $version=[$major=9], $unparsed_version=""], - ["A/9/Google/Pixel#2#XL/taimen/unknown/QCX3/l2640039522761750592/-/1061307257/-/google/1199700/1199701/-"] = - [$name="Android (Google Pixel)", $version=[$major=9], $unparsed_version=""], - ["A/9/Google/Pixel#2/walleye/unknown/QCX3/l9335055540778241916/-/1576068601/-/google/63672/63666/00:BOOT.XF.1.2.2.c1-00036-M8998LZB-2+01:TZ.BF.4.0.6-00152+03:RPM.BF.1.7-00128+11:MPSS.AT.2.0.c4.5-00253-8998_GEN_PACK-1.172723.1.178350.2+12:ADSP.HT.3.0-00372-CB8998-1+14:VIDEO.VE.4.4-00033+15:SLPI.HB.2.0.c3-00016-M8998AZL-1"] = - [$name="Android (Google Pixel)", $version=[$major=9], $unparsed_version=""], -}; - -event bro_init() - { - for ( sw in matched_software ) - { - local output = Software::parse(sw); - local baseline = matched_software[sw]; - - if ( baseline$name == output$name && - sw == output$unparsed_version && - Software::cmp_versions(baseline$version,output$version) == 0 ) - print fmt("success on: %s", sw); - else - { - print fmt("failure on: %s", sw); - print fmt(" test name: %s", output$name); - print fmt(" test version: %s", output$version); - print fmt(" baseline name: %s", baseline$name); - print fmt(" baseline version: %s", baseline$version); - } - } - } diff --git a/testing/btest/scripts/base/frameworks/software/version-parsing.zeek 
b/testing/btest/scripts/base/frameworks/software/version-parsing.zeek new file mode 100644 index 0000000000..ecf36ca8dc --- /dev/null +++ b/testing/btest/scripts/base/frameworks/software/version-parsing.zeek @@ -0,0 +1,156 @@ +# @TEST-EXEC: zeek %INPUT > output +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff output + +module Software; + +global matched_software: table[string] of Software::Description = { + ["OpenSSH_4.4"] = + [$name="OpenSSH", $version=[$major=4,$minor=4], $unparsed_version=""], + ["OpenSSH_5.2"] = + [$name="OpenSSH", $version=[$major=5,$minor=2], $unparsed_version=""], + ["Apache/2.0.63 (Unix) mod_auth_kerb/5.3 mod_ssl/2.0.63 OpenSSL/0.9.7a mod_fastcgi/2.4.2"] = + [$name="Apache", $version=[$major=2,$minor=0,$minor2=63,$addl="Unix"], $unparsed_version=""], + ["Apache/1.3.19 (Unix)"] = + [$name="Apache", $version=[$major=1,$minor=3,$minor2=19,$addl="Unix"], $unparsed_version=""], + ["ProFTPD 1.2.5rc1 Server (Debian)"] = + [$name="ProFTPD", $version=[$major=1,$minor=2,$minor2=5,$addl="rc1"], $unparsed_version=""], + ["wu-2.4.2-academ[BETA-18-VR14](1)"] = + [$name="wu", $version=[$major=2,$minor=4,$minor2=2,$addl="academ"], $unparsed_version=""], + ["wu-2.6.2(1)"] = + [$name="wu", $version=[$major=2,$minor=6,$minor2=2,$addl="1"], $unparsed_version=""], + ["Java1.2.2-JDeveloper"] = + [$name="Java", $version=[$major=1,$minor=2,$minor2=2,$addl="JDeveloper"], $unparsed_version=""], + ["Java/1.6.0_13"] = + [$name="Java", $version=[$major=1,$minor=6,$minor2=0,$minor3=13], $unparsed_version=""], + ["Python-urllib/3.1"] = + [$name="Python-urllib", $version=[$major=3,$minor=1], $unparsed_version=""], + ["libwww-perl/5.820"] = + [$name="libwww-perl", $version=[$major=5,$minor=820], $unparsed_version=""], + ["Wget/1.9+cvs-stable (Red Hat modified)"] = + [$name="Wget", $version=[$major=1,$minor=9,$addl="+cvs"], $unparsed_version=""], + ["Wget/1.11.4 (Red Hat modified)"] = + [$name="Wget", $version=[$major=1,$minor=11,$minor2=4,$addl="Red Hat modified"], $unparsed_version=""], + ["curl/7.15.1 (i486-pc-linux-gnu) libcurl/7.15.1 OpenSSL/0.9.8a zlib/1.2.3 libidn/0.5.18"] = + [$name="curl", $version=[$major=7,$minor=15,$minor2=1,$addl="i486-pc-linux-gnu"], $unparsed_version=""], + ["Apache"] = + [$name="Apache", $unparsed_version=""], + ["Zope/(Zope 2.7.8-final, python 2.3.5, darwin) ZServer/1.1 Plone/Unknown"] = + [$name="Zope/(Zope", $version=[$major=2,$minor=7,$minor2=8,$addl="final"], $unparsed_version=""], + ["The Bat! (v2.00.9) Personal"] = + [$name="The Bat!", $version=[$major=2,$minor=0,$minor2=9,$addl="Personal"], $unparsed_version=""], + ["Flash/10,2,153,1"] = + [$name="Flash", $version=[$major=10,$minor=2,$minor2=153,$minor3=1], $unparsed_version=""], + # The addl on the following entry isn't so great, but it'll do. + ["Flash%20Player/26.0.0.137 CFNetwork/811.5.4 Darwin/16.6.0 (x86_64)"] = + [$name="Flash", $version=[$major=26,$minor=0,$minor2=0,$minor3=137,$addl="CFNetwork/811"], $unparsed_version=""], + ["mt2/1.2.3.967 Oct 13 2010-13:40:24 ord-pixel-x2 pid 0x35a3 13731"] = + [$name="mt2", $version=[$major=1,$minor=2,$minor2=3,$minor3=967,$addl="Oct"], $unparsed_version=""], + ["CacheFlyServe v26b"] = + [$name="CacheFlyServe", $version=[$major=26,$addl="b"], $unparsed_version=""], + ["Apache/2.0.46 (Win32) mod_ssl/2.0.46 OpenSSL/0.9.7b mod_jk2/2.0.4"] = + [$name="Apache", $version=[$major=2,$minor=0,$minor2=46,$addl="Win32"], $unparsed_version=""], + # I have no clue how I'd support this without a special case. 
+ #["Apache mod_fcgid/2.3.6 mod_auth_passthrough/2.1 mod_bwlimited/1.4 FrontPage/5.0.2.2635"] = + # [$name="Apache", $version=[], $unparsed_version=""], + ["Apple iPhone v4.3.1 Weather v1.0.0.8G4"] = + [$name="Apple iPhone", $version=[$major=4,$minor=3,$minor2=1,$addl="Weather"], $unparsed_version=""], + ["Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_2 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8H7 Safari/6533.18.5"] = + [$name="Safari", $version=[$major=5,$minor=0,$minor2=2,$addl="Mobile"], $unparsed_version=""], + ["Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16"] = + [$name="Chrome", $version=[$major=10,$minor=0,$minor2=648,$minor3=205], $unparsed_version=""], + ["Opera/9.80 (Windows NT 6.1; U; sv) Presto/2.7.62 Version/11.01"] = + [$name="Opera", $version=[$major=11,$minor=1], $unparsed_version=""], + ["Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.11) Gecko/20101013 Lightning/1.0b2 Thunderbird/3.1.5"] = + [$name="Thunderbird", $version=[$major=3,$minor=1,$minor2=5], $unparsed_version=""], + ["iTunes/9.0 (Macintosh; Intel Mac OS X 10.5.8) AppleWebKit/531.9"] = + [$name="iTunes", $version=[$major=9,$minor=0,$addl="Macintosh"], $unparsed_version=""], + ["Java1.3.1_04"] = + [$name="Java", $version=[$major=1,$minor=3,$minor2=1,$minor3=4], $unparsed_version=""], + ["Mozilla/5.0 (Linux; U; Android 2.3.3; zh-tw; HTC Pyramid Build/GRI40) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"] = + [$name="Safari", $version=[$major=4,$minor=0,$addl="Mobile"], $unparsed_version=""], + ["Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; en-us) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27"] = + [$name="Safari", $version=[$major=5,$minor=0,$minor2=4], $unparsed_version=""], + ["Mozilla/5.0 (iPod; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7"] = + [$name="Safari", $version=[$major=4,$minor=0,$minor2=5,$addl="Mobile"], $unparsed_version=""], + ["Opera/9.80 (J2ME/MIDP; Opera Mini/9.80 (S60; SymbOS; Opera Mobi/23.348; U; en) Presto/2.5.25 Version/10.54"] = + [$name="Opera Mini", $version=[$major=10,$minor=54], $unparsed_version=""], + ["Opera/9.80 (J2ME/MIDP; Opera Mini/5.0.18741/18.794; U; en) Presto/2.4.15"] = + [$name="Opera Mini", $version=[$major=5,$minor=0,$minor2=18741], $unparsed_version=""], + ["Opera/9.80 (Windows NT 5.1; Opera Mobi/49; U; en) Presto/2.4.18 Version/10.00"] = + [$name="Opera Mobi", $version=[$major=10,$minor=0], $unparsed_version=""], + ["Mozilla/4.0 (compatible; MSIE 8.0; Android 2.2.2; Linux; Opera Mobi/ADR-1103311355; en) Opera 11.00"] = + [$name="Opera", $version=[$major=11,$minor=0], $unparsed_version=""], + ["Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.2) Gecko/20040804 Netscape/7.2 (ax)"] = + [$name="Netscape", $version=[$major=7,$minor=2], $unparsed_version=""], + ["Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; GTB5; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506; InfoPath.2)"] = + [$name="MSIE", $version=[$major=7,$minor=0], $unparsed_version=""], + ["Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.1; Media Center PC 3.0; .NET CLR 1.0.3705; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.1)"] = + [$name="MSIE", $version=[$major=7,$minor=0,$addl="b"], $unparsed_version=""], + ["Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 
3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Tablet PC 2.0; InfoPath.2; InfoPath.3)"] = + [$name="MSIE", $version=[$major=8,$minor=0], $unparsed_version=""], + ["Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)"] = + [$name="MSIE", $version=[$major=9,$minor=0], $unparsed_version=""], + ["Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; InfoPath.3; Creative AutoUpdate v1.40.02)"] = + [$name="MSIE", $version=[$major=9,$minor=0], $unparsed_version=""], + ["Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)"] = + [$name="MSIE", $version=[$major=10,$minor=0], $unparsed_version=""], + # IE 11 normal mode. + ["Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko"] = + [$name="MSIE", $version=[$major=11,$minor=0], $unparsed_version=""], + # IE 11 compatibility mode + ["Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.3; Trident/7.0; .NET4.0E; .NET4.0C)"] = + [$name="MSIE", $version=[$major=11,$minor=0], $unparsed_version=""], + ["The Bat! (3.0.1 RC3) Professional"] = + [$name="The Bat!", $version=[$major=3,$minor=0,$minor2=1,$addl="RC3"], $unparsed_version=""], + # This is an FTP client (found with CLNT command) + ["Total Commander"] = + [$name="Total Commander", $version=[], $unparsed_version=""], + ["(vsFTPd 2.0.5)"] = + [$name="vsFTPd", $version=[$major=2,$minor=0,$minor2=5], $unparsed_version=""], + ["Apple Mail (2.1084)"] = + [$name="Apple Mail", $version=[$major=2,$minor=1084], $unparsed_version=""], + ["Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/420+ (KHTML, like Gecko) AdobeAIR/1.0"] = + [$name="AdobeAIR", $version=[$major=1,$minor=0], $unparsed_version=""], + ["Mozilla/5.0 (Windows; U; en) AppleWebKit/420+ (KHTML, like Gecko) AdobeAIR/1.0"] = + [$name="AdobeAIR", $version=[$major=1,$minor=0], $unparsed_version=""], + ["\\xe6\\xbc\\xab\\xe7\\x94\\xbb\\xe4\\xba\\xba 2.6.2 rv:1.2 (iPhone; iOS 10.3.2; en_US)"] = + [$name="\xe6\xbc\xab\xe7\x94\xbb\xe4\xba\xba", $version=[$major=2,$minor=6,$minor2=2,$addl="rv:1"], $unparsed_version=""], + ["%E6%9C%89%E9%81%93%E8%AF%8D%E5%85%B8/128 CFNetwork/760.2.6 Darwin/15.3.0 (x86_64)"] = + [$name="\xe6\x9c\x89\xe9\x81\x93\xe8\xaf\x8d\xe5\x85\xb8", $version=[$major=128,$addl="CFNetwork/760"], $unparsed_version=""], + ["QQ%E9%82%AE%E7%AE%B1/5.3.2.8 CFNetwork/811.5.4 Darwin/16.6.0"] = + [$name="QQ\xe9\x82\xae\xe7\xae\xb1", $version=[$major=5,$minor=3,$minor2=2,$minor3=8,$addl="CFNetwork/811"], $unparsed_version=""], + ["Mozilla/5.0 (Windows Phone 10.0; Android 6.0.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Mobile Safari/537.36 Edge/15.15063"] = + [$name="Edge", $version=[$major=15,$minor=15063], $unparsed_version=""], + ["A/8.0.0/Google/Pixel#XL/marlin/unknown/QCX3/l8100358318783302904/-/1456904160/-/google/662107/662098/-"] = + [$name="Android (Google Pixel)", $version=[$major=8,$minor=0,$minor2=0], $unparsed_version=""], + ["A/8.1.0/Google/Pixel#2/walleye/unknown/QCX3/l10660929675510745862/-/104360422/-/google/3606/3607/-"] = + [$name="Android (Google Pixel)", $version=[$major=8,$minor=1,$minor2=0], $unparsed_version=""], + ["A/9/Google/Pixel#2/walleye/unknown/QCX3/l17463753539612639959/-/2406658516/-/google/724998/724992/-"] = + [$name="Android (Google Pixel)", $version=[$major=9], $unparsed_version=""], + ["A/9/Google/Pixel#2#XL/taimen/unknown/QCX3/l2640039522761750592/-/1061307257/-/google/1199700/1199701/-"] = + [$name="Android (Google 
Pixel)", $version=[$major=9], $unparsed_version=""], + ["A/9/Google/Pixel#2/walleye/unknown/QCX3/l9335055540778241916/-/1576068601/-/google/63672/63666/00:BOOT.XF.1.2.2.c1-00036-M8998LZB-2+01:TZ.BF.4.0.6-00152+03:RPM.BF.1.7-00128+11:MPSS.AT.2.0.c4.5-00253-8998_GEN_PACK-1.172723.1.178350.2+12:ADSP.HT.3.0-00372-CB8998-1+14:VIDEO.VE.4.4-00033+15:SLPI.HB.2.0.c3-00016-M8998AZL-1"] = + [$name="Android (Google Pixel)", $version=[$major=9], $unparsed_version=""], +}; + +event zeek_init() + { + for ( sw in matched_software ) + { + local output = Software::parse(sw); + local baseline = matched_software[sw]; + + if ( baseline$name == output$name && + sw == output$unparsed_version && + Software::cmp_versions(baseline$version,output$version) == 0 ) + print fmt("success on: %s", sw); + else + { + print fmt("failure on: %s", sw); + print fmt(" test name: %s", output$name); + print fmt(" test version: %s", output$version); + print fmt(" baseline name: %s", baseline$name); + print fmt(" baseline version: %s", baseline$version); + } + } + } diff --git a/testing/btest/scripts/base/frameworks/sumstats/basic-cluster.bro b/testing/btest/scripts/base/frameworks/sumstats/basic-cluster.bro deleted file mode 100644 index 8f4bd26ef1..0000000000 --- a/testing/btest/scripts/base/frameworks/sumstats/basic-cluster.bro +++ /dev/null @@ -1,89 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait 15 - -# @TEST-EXEC: btest-diff manager-1/.stdout - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], -}; -@TEST-END-FILE - -redef Log::default_rotation_interval = 0secs; - -global n = 0; - -event bro_init() &priority=5 - { - local r1: SumStats::Reducer = [$stream="test", $apply=set(SumStats::SUM, SumStats::MIN, SumStats::MAX, SumStats::AVERAGE, SumStats::STD_DEV, SumStats::VARIANCE, SumStats::UNIQUE, SumStats::HLL_UNIQUE)]; - SumStats::create([$name="test", - $epoch=5secs, - $reducers=set(r1), - $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) = - { - local r = result["test"]; - print fmt("Host: %s - num:%d - sum:%.1f - avg:%.1f - max:%.1f - min:%.1f - var:%.1f - std_dev:%.1f - unique:%d - hllunique:%d", key$host, r$num, r$sum, r$average, r$max, r$min, r$variance, r$std_dev, r$unique, r$hll_unique); - }, - $epoch_finished(ts: time) = - { - terminate(); - }]); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -global ready_for_data: event(); - -event ready_for_data() - { - if ( Cluster::node == "worker-1" ) - { - SumStats::observe("test", [$host=1.2.3.4], [$num=34]); - SumStats::observe("test", [$host=1.2.3.4], [$num=30]); - SumStats::observe("test", [$host=6.5.4.3], [$num=1]); - SumStats::observe("test", [$host=7.2.1.5], [$num=54]); - } - if ( Cluster::node == "worker-2" ) - { - SumStats::observe("test", [$host=1.2.3.4], [$num=75]); - 
SumStats::observe("test", [$host=1.2.3.4], [$num=30]); - SumStats::observe("test", [$host=1.2.3.4], [$num=3]); - SumStats::observe("test", [$host=1.2.3.4], [$num=57]); - SumStats::observe("test", [$host=1.2.3.4], [$num=52]); - SumStats::observe("test", [$host=1.2.3.4], [$num=61]); - SumStats::observe("test", [$host=1.2.3.4], [$num=95]); - SumStats::observe("test", [$host=6.5.4.3], [$num=5]); - SumStats::observe("test", [$host=7.2.1.5], [$num=91]); - SumStats::observe("test", [$host=10.10.10.10], [$num=5]); - } - } - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) - -event bro_init() &priority=100 - { - Broker::auto_publish(Cluster::worker_topic, ready_for_data); - } - -global peer_count = 0; - -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - ++peer_count; - - if ( peer_count == 2 ) - event ready_for_data(); - } - -@endif diff --git a/testing/btest/scripts/base/frameworks/sumstats/basic-cluster.zeek b/testing/btest/scripts/base/frameworks/sumstats/basic-cluster.zeek new file mode 100644 index 0000000000..28a5809eb5 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/sumstats/basic-cluster.zeek @@ -0,0 +1,89 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-2 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-2 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 15 + +# @TEST-EXEC: btest-diff manager-1/.stdout + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], +}; +@TEST-END-FILE + +redef Log::default_rotation_interval = 0secs; + +global n = 0; + +event zeek_init() &priority=5 + { + local r1: SumStats::Reducer = [$stream="test", $apply=set(SumStats::SUM, SumStats::MIN, SumStats::MAX, SumStats::AVERAGE, SumStats::STD_DEV, SumStats::VARIANCE, SumStats::UNIQUE, SumStats::HLL_UNIQUE)]; + SumStats::create([$name="test", + $epoch=5secs, + $reducers=set(r1), + $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) = + { + local r = result["test"]; + print fmt("Host: %s - num:%d - sum:%.1f - avg:%.1f - max:%.1f - min:%.1f - var:%.1f - std_dev:%.1f - unique:%d - hllunique:%d", key$host, r$num, r$sum, r$average, r$max, r$min, r$variance, r$std_dev, r$unique, r$hll_unique); + }, + $epoch_finished(ts: time) = + { + terminate(); + }]); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +global ready_for_data: event(); + +event ready_for_data() + { + if ( Cluster::node == "worker-1" ) + { + SumStats::observe("test", [$host=1.2.3.4], [$num=34]); + SumStats::observe("test", [$host=1.2.3.4], [$num=30]); + SumStats::observe("test", [$host=6.5.4.3], [$num=1]); + SumStats::observe("test", [$host=7.2.1.5], [$num=54]); + } + if ( Cluster::node == "worker-2" ) + { + SumStats::observe("test", [$host=1.2.3.4], [$num=75]); + SumStats::observe("test", [$host=1.2.3.4], [$num=30]); + SumStats::observe("test", [$host=1.2.3.4], [$num=3]); + SumStats::observe("test", [$host=1.2.3.4], 
[$num=57]); + SumStats::observe("test", [$host=1.2.3.4], [$num=52]); + SumStats::observe("test", [$host=1.2.3.4], [$num=61]); + SumStats::observe("test", [$host=1.2.3.4], [$num=95]); + SumStats::observe("test", [$host=6.5.4.3], [$num=5]); + SumStats::observe("test", [$host=7.2.1.5], [$num=91]); + SumStats::observe("test", [$host=10.10.10.10], [$num=5]); + } + } + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + +event zeek_init() &priority=100 + { + Broker::auto_publish(Cluster::worker_topic, ready_for_data); + } + +global peer_count = 0; + +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + ++peer_count; + + if ( peer_count == 2 ) + event ready_for_data(); + } + +@endif diff --git a/testing/btest/scripts/base/frameworks/sumstats/basic.bro b/testing/btest/scripts/base/frameworks/sumstats/basic.bro deleted file mode 100644 index 40f269ab1a..0000000000 --- a/testing/btest/scripts/base/frameworks/sumstats/basic.bro +++ /dev/null @@ -1,36 +0,0 @@ -# @TEST-EXEC: btest-bg-run standalone bro %INPUT -# @TEST-EXEC: btest-bg-wait 10 -# @TEST-EXEC: btest-diff standalone/.stdout - -redef exit_only_after_terminate=T; - -event bro_init() &priority=5 - { - local r1: SumStats::Reducer = [$stream="test.metric", - $apply=set(SumStats::SUM, - SumStats::VARIANCE, - SumStats::AVERAGE, - SumStats::MAX, - SumStats::MIN, - SumStats::STD_DEV, - SumStats::UNIQUE, - SumStats::HLL_UNIQUE)]; - SumStats::create([$name="test", - $epoch=3secs, - $reducers=set(r1), - $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) = - { - local r = result["test.metric"]; - print fmt("Host: %s - num:%d - sum:%.1f - var:%.1f - avg:%.1f - max:%.1f - min:%.1f - std_dev:%.1f - unique:%d - hllunique:%d", key$host, r$num, r$sum, r$variance, r$average, r$max, r$min, r$std_dev, r$unique, r$hll_unique); - terminate(); - }]); - - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=5]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=22]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=94]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=50]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=50]); - - SumStats::observe("test.metric", [$host=6.5.4.3], [$num=2]); - SumStats::observe("test.metric", [$host=7.2.1.5], [$num=1]); - } diff --git a/testing/btest/scripts/base/frameworks/sumstats/basic.zeek b/testing/btest/scripts/base/frameworks/sumstats/basic.zeek new file mode 100644 index 0000000000..3b454ebaa4 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/sumstats/basic.zeek @@ -0,0 +1,36 @@ +# @TEST-EXEC: btest-bg-run standalone zeek %INPUT +# @TEST-EXEC: btest-bg-wait 10 +# @TEST-EXEC: btest-diff standalone/.stdout + +redef exit_only_after_terminate=T; + +event zeek_init() &priority=5 + { + local r1: SumStats::Reducer = [$stream="test.metric", + $apply=set(SumStats::SUM, + SumStats::VARIANCE, + SumStats::AVERAGE, + SumStats::MAX, + SumStats::MIN, + SumStats::STD_DEV, + SumStats::UNIQUE, + SumStats::HLL_UNIQUE)]; + SumStats::create([$name="test", + $epoch=3secs, + $reducers=set(r1), + $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) = + { + local r = result["test.metric"]; + print fmt("Host: %s - num:%d - sum:%.1f - var:%.1f - avg:%.1f - max:%.1f - min:%.1f - std_dev:%.1f - unique:%d - hllunique:%d", key$host, r$num, r$sum, r$variance, r$average, r$max, r$min, r$std_dev, r$unique, r$hll_unique); + terminate(); + }]); + + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=5]); + SumStats::observe("test.metric", 
[$host=1.2.3.4], [$num=22]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=94]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=50]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=50]); + + SumStats::observe("test.metric", [$host=6.5.4.3], [$num=2]); + SumStats::observe("test.metric", [$host=7.2.1.5], [$num=1]); + } diff --git a/testing/btest/scripts/base/frameworks/sumstats/cluster-intermediate-update.bro b/testing/btest/scripts/base/frameworks/sumstats/cluster-intermediate-update.bro deleted file mode 100644 index 949fcb3644..0000000000 --- a/testing/btest/scripts/base/frameworks/sumstats/cluster-intermediate-update.bro +++ /dev/null @@ -1,72 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait 20 -# @TEST-EXEC: btest-diff manager-1/.stdout - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], -}; -@TEST-END-FILE - -redef Log::default_rotation_interval = 0secs; - -event bro_init() &priority=5 - { - local r1: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)]; - SumStats::create([$name="test", - $epoch=10secs, - $reducers=set(r1), - $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) = - { - print result["test.metric"]$sum; - }, - $epoch_finished(ts: time) = - { - print "End of epoch handler was called"; - terminate(); - }, - $threshold_val(key: SumStats::Key, result: SumStats::Result) = - { - return result["test.metric"]$sum; - }, - $threshold=100.0, - $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = - { - print fmt("A test metric threshold was crossed with a value of: %.1f", result["test.metric"]$sum); - }]); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -event do_stats(i: count) - { - # Worker-1 will trigger an intermediate update and then if everything - # works correctly, the data from worker-2 will hit the threshold and - # should trigger the notice. - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=i]); - } - -event Cluster::node_up(name: string, id: string) - { - if ( name == "manager-1" ) - { - if ( Cluster::node == "worker-1" ) - { - schedule 0.1sec { do_stats(1) }; - schedule 5secs { do_stats(60) }; - } - if ( Cluster::node == "worker-2" ) - schedule 0.5sec { do_stats(40) }; - } - } diff --git a/testing/btest/scripts/base/frameworks/sumstats/cluster-intermediate-update.zeek b/testing/btest/scripts/base/frameworks/sumstats/cluster-intermediate-update.zeek new file mode 100644 index 0000000000..a1f88a3367 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/sumstats/cluster-intermediate-update.zeek @@ -0,0 +1,72 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. 
CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-2 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-2 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 20 +# @TEST-EXEC: btest-diff manager-1/.stdout + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], +}; +@TEST-END-FILE + +redef Log::default_rotation_interval = 0secs; + +event zeek_init() &priority=5 + { + local r1: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)]; + SumStats::create([$name="test", + $epoch=10secs, + $reducers=set(r1), + $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) = + { + print result["test.metric"]$sum; + }, + $epoch_finished(ts: time) = + { + print "End of epoch handler was called"; + terminate(); + }, + $threshold_val(key: SumStats::Key, result: SumStats::Result) = + { + return result["test.metric"]$sum; + }, + $threshold=100.0, + $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = + { + print fmt("A test metric threshold was crossed with a value of: %.1f", result["test.metric"]$sum); + }]); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +event do_stats(i: count) + { + # Worker-1 will trigger an intermediate update and then if everything + # works correctly, the data from worker-2 will hit the threshold and + # should trigger the notice. + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=i]); + } + +event Cluster::node_up(name: string, id: string) + { + if ( name == "manager-1" ) + { + if ( Cluster::node == "worker-1" ) + { + schedule 0.1sec { do_stats(1) }; + schedule 5secs { do_stats(60) }; + } + if ( Cluster::node == "worker-2" ) + schedule 0.5sec { do_stats(40) }; + } + } diff --git a/testing/btest/scripts/base/frameworks/sumstats/last-cluster.bro b/testing/btest/scripts/base/frameworks/sumstats/last-cluster.bro deleted file mode 100644 index da8f8fb80f..0000000000 --- a/testing/btest/scripts/base/frameworks/sumstats/last-cluster.bro +++ /dev/null @@ -1,69 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. 
CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-wait 25 - -# @TEST-EXEC: btest-diff manager-1/.stdout -# -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], -}; -@TEST-END-FILE - -global c = 0; - -event do_observe() - { - print "do observe", c; - SumStats::observe("test", - [$str=cat(c)], - [$num=c] - ); - ++c; - schedule 0.1secs { do_observe() }; - } - -event bro_init() - { - local r1 = SumStats::Reducer($stream="test", - $apply=set(SumStats::LAST), - $num_last_elements=1 - ); - - SumStats::create([$name="test", - $epoch=10secs, - $reducers=set(r1), - $threshold_val(key: SumStats::Key, result: SumStats::Result): double = { return 2.0; }, - $threshold = 1.0, - $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = - { - local l = SumStats::get_last(result["test"]); - print "test thresh crossed", l; - - if ( l[0]$num == 7 ) - terminate(); - } - ]); - } - -event Cluster::node_up(name: string, id: string) - { - print "node up", name; - - if ( Cluster::node == "worker-1" && name == "manager-1" ) - schedule 0.1secs { do_observe() }; - } - -event Cluster::node_down(name: string, id: string) - { - print "node down", name; - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, id: string) - { - terminate(); - } diff --git a/testing/btest/scripts/base/frameworks/sumstats/last-cluster.zeek b/testing/btest/scripts/base/frameworks/sumstats/last-cluster.zeek new file mode 100644 index 0000000000..7d23ae9e80 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/sumstats/last-cluster.zeek @@ -0,0 +1,69 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-1 ZEEKPATH=$ZEEKPATH:.. 
CLUSTER_NODE=worker-1 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 25 + +# @TEST-EXEC: btest-diff manager-1/.stdout +# +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], +}; +@TEST-END-FILE + +global c = 0; + +event do_observe() + { + print "do observe", c; + SumStats::observe("test", + [$str=cat(c)], + [$num=c] + ); + ++c; + schedule 0.1secs { do_observe() }; + } + +event zeek_init() + { + local r1 = SumStats::Reducer($stream="test", + $apply=set(SumStats::LAST), + $num_last_elements=1 + ); + + SumStats::create([$name="test", + $epoch=10secs, + $reducers=set(r1), + $threshold_val(key: SumStats::Key, result: SumStats::Result): double = { return 2.0; }, + $threshold = 1.0, + $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = + { + local l = SumStats::get_last(result["test"]); + print "test thresh crossed", l; + + if ( l[0]$num == 7 ) + terminate(); + } + ]); + } + +event Cluster::node_up(name: string, id: string) + { + print "node up", name; + + if ( Cluster::node == "worker-1" && name == "manager-1" ) + schedule 0.1secs { do_observe() }; + } + +event Cluster::node_down(name: string, id: string) + { + print "node down", name; + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, id: string) + { + terminate(); + } diff --git a/testing/btest/scripts/base/frameworks/sumstats/on-demand-cluster.bro b/testing/btest/scripts/base/frameworks/sumstats/on-demand-cluster.bro deleted file mode 100644 index bb429a52cb..0000000000 --- a/testing/btest/scripts/base/frameworks/sumstats/on-demand-cluster.bro +++ /dev/null @@ -1,91 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. 
CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait 15 - -# @TEST-EXEC: btest-diff manager-1/.stdout -# - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], -}; -@TEST-END-FILE - -redef Log::default_rotation_interval = 0secs; - -global n = 0; - -event bro_init() &priority=5 - { - local r1 = SumStats::Reducer($stream="test", $apply=set(SumStats::SUM, SumStats::MIN, SumStats::MAX, SumStats::AVERAGE, SumStats::STD_DEV, SumStats::VARIANCE, SumStats::UNIQUE)); - SumStats::create([$name="test sumstat", - $epoch=1hr, - $reducers=set(r1)]); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -global ready_for_data: event(); - -event bro_init() - { - Broker::auto_publish(Cluster::worker_topic, ready_for_data); - } - -event on_demand() - { - local host = 7.2.1.5; - when ( local result = SumStats::request_key("test sumstat", [$host=host]) ) - { - print "SumStat key request"; - if ( "test" in result ) - print fmt(" Host: %s -> %.0f", host, result["test"]$sum); - - if ( Cluster::node == "manager-1" ) - terminate(); - } - } - -event ready_for_data() - { - if ( Cluster::node == "worker-1" ) - { - SumStats::observe("test", [$host=1.2.3.4], [$num=34]); - SumStats::observe("test", [$host=1.2.3.4], [$num=30]); - SumStats::observe("test", [$host=6.5.4.3], [$num=1]); - SumStats::observe("test", [$host=7.2.1.5], [$num=54]); - } - if ( Cluster::node == "worker-2" ) - { - SumStats::observe("test", [$host=1.2.3.4], [$num=75]); - SumStats::observe("test", [$host=1.2.3.4], [$num=30]); - SumStats::observe("test", [$host=7.2.1.5], [$num=91]); - SumStats::observe("test", [$host=10.10.10.10], [$num=5]); - } - - schedule 1sec { on_demand() }; - } - -global peer_count = 0; -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - if ( Cluster::node != "manager-1" ) - return; - - ++peer_count; - if ( peer_count == 2 ) - { - event ready_for_data(); - } - } - diff --git a/testing/btest/scripts/base/frameworks/sumstats/on-demand-cluster.zeek b/testing/btest/scripts/base/frameworks/sumstats/on-demand-cluster.zeek new file mode 100644 index 0000000000..bd0cdc2d1a --- /dev/null +++ b/testing/btest/scripts/base/frameworks/sumstats/on-demand-cluster.zeek @@ -0,0 +1,91 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-2 ZEEKPATH=$ZEEKPATH:.. 
CLUSTER_NODE=worker-2 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 15 + +# @TEST-EXEC: btest-diff manager-1/.stdout +# + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], +}; +@TEST-END-FILE + +redef Log::default_rotation_interval = 0secs; + +global n = 0; + +event zeek_init() &priority=5 + { + local r1 = SumStats::Reducer($stream="test", $apply=set(SumStats::SUM, SumStats::MIN, SumStats::MAX, SumStats::AVERAGE, SumStats::STD_DEV, SumStats::VARIANCE, SumStats::UNIQUE)); + SumStats::create([$name="test sumstat", + $epoch=1hr, + $reducers=set(r1)]); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +global ready_for_data: event(); + +event zeek_init() + { + Broker::auto_publish(Cluster::worker_topic, ready_for_data); + } + +event on_demand() + { + local host = 7.2.1.5; + when ( local result = SumStats::request_key("test sumstat", [$host=host]) ) + { + print "SumStat key request"; + if ( "test" in result ) + print fmt(" Host: %s -> %.0f", host, result["test"]$sum); + + if ( Cluster::node == "manager-1" ) + terminate(); + } + } + +event ready_for_data() + { + if ( Cluster::node == "worker-1" ) + { + SumStats::observe("test", [$host=1.2.3.4], [$num=34]); + SumStats::observe("test", [$host=1.2.3.4], [$num=30]); + SumStats::observe("test", [$host=6.5.4.3], [$num=1]); + SumStats::observe("test", [$host=7.2.1.5], [$num=54]); + } + if ( Cluster::node == "worker-2" ) + { + SumStats::observe("test", [$host=1.2.3.4], [$num=75]); + SumStats::observe("test", [$host=1.2.3.4], [$num=30]); + SumStats::observe("test", [$host=7.2.1.5], [$num=91]); + SumStats::observe("test", [$host=10.10.10.10], [$num=5]); + } + + schedule 1sec { on_demand() }; + } + +global peer_count = 0; +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + if ( Cluster::node != "manager-1" ) + return; + + ++peer_count; + if ( peer_count == 2 ) + { + event ready_for_data(); + } + } + diff --git a/testing/btest/scripts/base/frameworks/sumstats/on-demand.bro b/testing/btest/scripts/base/frameworks/sumstats/on-demand.bro deleted file mode 100644 index 78aba726ca..0000000000 --- a/testing/btest/scripts/base/frameworks/sumstats/on-demand.bro +++ /dev/null @@ -1,46 +0,0 @@ -# @TEST-EXEC: bro %INPUT -# @TEST-EXEC: btest-diff .stdout - -redef exit_only_after_terminate=T; - - -## Requesting a full sumstats resulttable is not supported yet. 
-#event on_demand() -# { -# when ( local results = SumStats::request("test") ) -# { -# print "Complete SumStat request"; -# for ( key in results ) -# { -# print fmt(" Host: %s -> %.0f", key$host, results[key]["test.reducer"]$sum); -# } -# } -# } - -event on_demand_key() - { - local host = 1.2.3.4; - when ( local result = SumStats::request_key("test", [$host=host]) ) - { - print fmt("Key request for %s", host); - print fmt(" Host: %s -> %.0f", host, result["test.reducer"]$sum); - terminate(); - } - } - -event bro_init() &priority=5 - { - local r1: SumStats::Reducer = [$stream="test.reducer", - $apply=set(SumStats::SUM)]; - SumStats::create([$name="test", - $epoch=1hr, - $reducers=set(r1)]); - - # Seed some data but notice there are no callbacks defined in the sumstat! - SumStats::observe("test.reducer", [$host=1.2.3.4], [$num=42]); - SumStats::observe("test.reducer", [$host=4.3.2.1], [$num=7]); - - #schedule 0.1 secs { on_demand() }; - schedule 1 secs { on_demand_key() }; - } - diff --git a/testing/btest/scripts/base/frameworks/sumstats/on-demand.zeek b/testing/btest/scripts/base/frameworks/sumstats/on-demand.zeek new file mode 100644 index 0000000000..4faedd9bac --- /dev/null +++ b/testing/btest/scripts/base/frameworks/sumstats/on-demand.zeek @@ -0,0 +1,46 @@ +# @TEST-EXEC: zeek %INPUT +# @TEST-EXEC: btest-diff .stdout + +redef exit_only_after_terminate=T; + + +## Requesting a full sumstats resulttable is not supported yet. +#event on_demand() +# { +# when ( local results = SumStats::request("test") ) +# { +# print "Complete SumStat request"; +# for ( key in results ) +# { +# print fmt(" Host: %s -> %.0f", key$host, results[key]["test.reducer"]$sum); +# } +# } +# } + +event on_demand_key() + { + local host = 1.2.3.4; + when ( local result = SumStats::request_key("test", [$host=host]) ) + { + print fmt("Key request for %s", host); + print fmt(" Host: %s -> %.0f", host, result["test.reducer"]$sum); + terminate(); + } + } + +event zeek_init() &priority=5 + { + local r1: SumStats::Reducer = [$stream="test.reducer", + $apply=set(SumStats::SUM)]; + SumStats::create([$name="test", + $epoch=1hr, + $reducers=set(r1)]); + + # Seed some data but notice there are no callbacks defined in the sumstat! + SumStats::observe("test.reducer", [$host=1.2.3.4], [$num=42]); + SumStats::observe("test.reducer", [$host=4.3.2.1], [$num=7]); + + #schedule 0.1 secs { on_demand() }; + schedule 1 secs { on_demand_key() }; + } + diff --git a/testing/btest/scripts/base/frameworks/sumstats/sample-cluster.bro b/testing/btest/scripts/base/frameworks/sumstats/sample-cluster.bro deleted file mode 100644 index 227313635a..0000000000 --- a/testing/btest/scripts/base/frameworks/sumstats/sample-cluster.bro +++ /dev/null @@ -1,117 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. 
CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait 15 -# @TEST-EXEC: btest-diff manager-1/.stdout - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], -}; -@TEST-END-FILE - -redef Log::default_rotation_interval = 0secs; - -event bro_init() &priority=5 - { - local r1: SumStats::Reducer = [$stream="test", $apply=set(SumStats::SAMPLE), $num_samples=5]; - SumStats::create([$name="test", - $epoch=5secs, - $reducers=set(r1), - $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) = - { - local r = result["test"]; - print fmt("Host: %s Sampled observations: %d", key$host, r$sample_elements); - local sample_nums: vector of count = vector(); - for ( sample in r$samples ) - sample_nums += r$samples[sample]$num; - - print fmt(" %s", sort(sample_nums)); - }, - $epoch_finished(ts: time) = - { - terminate(); - }]); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -global ready_for_data: event(); - -event bro_init() - { - Broker::auto_publish(Cluster::worker_topic, ready_for_data); - - } - -event ready_for_data() - { - if ( Cluster::node == "worker-1" ) - { - SumStats::observe("test", [$host=1.2.3.4], [$num=5]); - SumStats::observe("test", [$host=1.2.3.4], [$num=22]); - SumStats::observe("test", [$host=1.2.3.4], [$num=94]); - SumStats::observe("test", [$host=1.2.3.4], [$num=50]); - # I checked the random numbers. seems legit. 
- SumStats::observe("test", [$host=1.2.3.4], [$num=51]); - SumStats::observe("test", [$host=1.2.3.4], [$num=61]); - SumStats::observe("test", [$host=1.2.3.4], [$num=61]); - SumStats::observe("test", [$host=1.2.3.4], [$num=71]); - SumStats::observe("test", [$host=1.2.3.4], [$num=81]); - SumStats::observe("test", [$host=1.2.3.4], [$num=91]); - SumStats::observe("test", [$host=1.2.3.4], [$num=101]); - SumStats::observe("test", [$host=1.2.3.4], [$num=111]); - SumStats::observe("test", [$host=1.2.3.4], [$num=121]); - SumStats::observe("test", [$host=1.2.3.4], [$num=131]); - SumStats::observe("test", [$host=1.2.3.4], [$num=141]); - SumStats::observe("test", [$host=1.2.3.4], [$num=151]); - SumStats::observe("test", [$host=1.2.3.4], [$num=161]); - SumStats::observe("test", [$host=1.2.3.4], [$num=171]); - SumStats::observe("test", [$host=1.2.3.4], [$num=181]); - SumStats::observe("test", [$host=1.2.3.4], [$num=191]); - - SumStats::observe("test", [$host=6.5.4.3], [$num=2]); - SumStats::observe("test", [$host=7.2.1.5], [$num=1]); - } - if ( Cluster::node == "worker-2" ) - { - SumStats::observe("test", [$host=1.2.3.4], [$num=75]); - SumStats::observe("test", [$host=1.2.3.4], [$num=30]); - SumStats::observe("test", [$host=1.2.3.4], [$num=3]); - SumStats::observe("test", [$host=1.2.3.4], [$num=57]); - SumStats::observe("test", [$host=1.2.3.4], [$num=52]); - SumStats::observe("test", [$host=1.2.3.4], [$num=61]); - SumStats::observe("test", [$host=1.2.3.4], [$num=95]); - SumStats::observe("test", [$host=1.2.3.4], [$num=95]); - SumStats::observe("test", [$host=1.2.3.4], [$num=95]); - SumStats::observe("test", [$host=1.2.3.4], [$num=95]); - SumStats::observe("test", [$host=1.2.3.4], [$num=95]); - SumStats::observe("test", [$host=1.2.3.4], [$num=95]); - SumStats::observe("test", [$host=1.2.3.4], [$num=95]); - SumStats::observe("test", [$host=1.2.3.4], [$num=95]); - SumStats::observe("test", [$host=6.5.4.3], [$num=5]); - SumStats::observe("test", [$host=7.2.1.5], [$num=91]); - SumStats::observe("test", [$host=10.10.10.10], [$num=5]); - } - } - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) - -global peer_count = 0; -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - ++peer_count; - if ( peer_count == 2 ) - event ready_for_data(); - } - -@endif diff --git a/testing/btest/scripts/base/frameworks/sumstats/sample-cluster.zeek b/testing/btest/scripts/base/frameworks/sumstats/sample-cluster.zeek new file mode 100644 index 0000000000..a1223b4395 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/sumstats/sample-cluster.zeek @@ -0,0 +1,117 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-2 ZEEKPATH=$ZEEKPATH:.. 
CLUSTER_NODE=worker-2 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 15 +# @TEST-EXEC: btest-diff manager-1/.stdout + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], +}; +@TEST-END-FILE + +redef Log::default_rotation_interval = 0secs; + +event zeek_init() &priority=5 + { + local r1: SumStats::Reducer = [$stream="test", $apply=set(SumStats::SAMPLE), $num_samples=5]; + SumStats::create([$name="test", + $epoch=5secs, + $reducers=set(r1), + $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) = + { + local r = result["test"]; + print fmt("Host: %s Sampled observations: %d", key$host, r$sample_elements); + local sample_nums: vector of count = vector(); + for ( sample in r$samples ) + sample_nums += r$samples[sample]$num; + + print fmt(" %s", sort(sample_nums)); + }, + $epoch_finished(ts: time) = + { + terminate(); + }]); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +global ready_for_data: event(); + +event zeek_init() + { + Broker::auto_publish(Cluster::worker_topic, ready_for_data); + + } + +event ready_for_data() + { + if ( Cluster::node == "worker-1" ) + { + SumStats::observe("test", [$host=1.2.3.4], [$num=5]); + SumStats::observe("test", [$host=1.2.3.4], [$num=22]); + SumStats::observe("test", [$host=1.2.3.4], [$num=94]); + SumStats::observe("test", [$host=1.2.3.4], [$num=50]); + # I checked the random numbers. seems legit. 
+ SumStats::observe("test", [$host=1.2.3.4], [$num=51]); + SumStats::observe("test", [$host=1.2.3.4], [$num=61]); + SumStats::observe("test", [$host=1.2.3.4], [$num=61]); + SumStats::observe("test", [$host=1.2.3.4], [$num=71]); + SumStats::observe("test", [$host=1.2.3.4], [$num=81]); + SumStats::observe("test", [$host=1.2.3.4], [$num=91]); + SumStats::observe("test", [$host=1.2.3.4], [$num=101]); + SumStats::observe("test", [$host=1.2.3.4], [$num=111]); + SumStats::observe("test", [$host=1.2.3.4], [$num=121]); + SumStats::observe("test", [$host=1.2.3.4], [$num=131]); + SumStats::observe("test", [$host=1.2.3.4], [$num=141]); + SumStats::observe("test", [$host=1.2.3.4], [$num=151]); + SumStats::observe("test", [$host=1.2.3.4], [$num=161]); + SumStats::observe("test", [$host=1.2.3.4], [$num=171]); + SumStats::observe("test", [$host=1.2.3.4], [$num=181]); + SumStats::observe("test", [$host=1.2.3.4], [$num=191]); + + SumStats::observe("test", [$host=6.5.4.3], [$num=2]); + SumStats::observe("test", [$host=7.2.1.5], [$num=1]); + } + if ( Cluster::node == "worker-2" ) + { + SumStats::observe("test", [$host=1.2.3.4], [$num=75]); + SumStats::observe("test", [$host=1.2.3.4], [$num=30]); + SumStats::observe("test", [$host=1.2.3.4], [$num=3]); + SumStats::observe("test", [$host=1.2.3.4], [$num=57]); + SumStats::observe("test", [$host=1.2.3.4], [$num=52]); + SumStats::observe("test", [$host=1.2.3.4], [$num=61]); + SumStats::observe("test", [$host=1.2.3.4], [$num=95]); + SumStats::observe("test", [$host=1.2.3.4], [$num=95]); + SumStats::observe("test", [$host=1.2.3.4], [$num=95]); + SumStats::observe("test", [$host=1.2.3.4], [$num=95]); + SumStats::observe("test", [$host=1.2.3.4], [$num=95]); + SumStats::observe("test", [$host=1.2.3.4], [$num=95]); + SumStats::observe("test", [$host=1.2.3.4], [$num=95]); + SumStats::observe("test", [$host=1.2.3.4], [$num=95]); + SumStats::observe("test", [$host=6.5.4.3], [$num=5]); + SumStats::observe("test", [$host=7.2.1.5], [$num=91]); + SumStats::observe("test", [$host=10.10.10.10], [$num=5]); + } + } + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + +global peer_count = 0; +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + ++peer_count; + if ( peer_count == 2 ) + event ready_for_data(); + } + +@endif diff --git a/testing/btest/scripts/base/frameworks/sumstats/sample.bro b/testing/btest/scripts/base/frameworks/sumstats/sample.bro deleted file mode 100644 index 4ba395b463..0000000000 --- a/testing/btest/scripts/base/frameworks/sumstats/sample.bro +++ /dev/null @@ -1,44 +0,0 @@ -# @TEST-EXEC: bro %INPUT -# @TEST-EXEC: btest-diff .stdout - -event bro_init() &priority=5 - { - local r1: SumStats::Reducer = [$stream="test.metric", - $apply=set(SumStats::SAMPLE), $num_samples=2]; - SumStats::create([$name="test", - $epoch=3secs, - $reducers=set(r1), - $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) = - { - print key$host; - local r = result["test.metric"]; - print r$samples; - print r$sample_elements; - }]); - - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=5]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=22]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=94]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=50]); - # I checked the random numbers. seems legit. 
- SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); - - SumStats::observe("test.metric", [$host=6.5.4.3], [$num=2]); - SumStats::observe("test.metric", [$host=7.2.1.5], [$num=1]); - } - diff --git a/testing/btest/scripts/base/frameworks/sumstats/sample.zeek b/testing/btest/scripts/base/frameworks/sumstats/sample.zeek new file mode 100644 index 0000000000..7d63c2e946 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/sumstats/sample.zeek @@ -0,0 +1,44 @@ +# @TEST-EXEC: zeek %INPUT +# @TEST-EXEC: btest-diff .stdout + +event zeek_init() &priority=5 + { + local r1: SumStats::Reducer = [$stream="test.metric", + $apply=set(SumStats::SAMPLE), $num_samples=2]; + SumStats::create([$name="test", + $epoch=3secs, + $reducers=set(r1), + $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) = + { + print key$host; + local r = result["test.metric"]; + print r$samples; + print r$sample_elements; + }]); + + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=5]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=22]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=94]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=50]); + # I checked the random numbers. seems legit. 
+ SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=51]); + + SumStats::observe("test.metric", [$host=6.5.4.3], [$num=2]); + SumStats::observe("test.metric", [$host=7.2.1.5], [$num=1]); + } + diff --git a/testing/btest/scripts/base/frameworks/sumstats/thresholding.bro b/testing/btest/scripts/base/frameworks/sumstats/thresholding.bro deleted file mode 100644 index b7bb826446..0000000000 --- a/testing/btest/scripts/base/frameworks/sumstats/thresholding.bro +++ /dev/null @@ -1,76 +0,0 @@ -# @TEST-EXEC: bro %INPUT | sort >output -# @TEST-EXEC: btest-diff output - -redef enum Notice::Type += { - Test_Notice, -}; - -event bro_init() &priority=5 - { - local r1: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)]; - SumStats::create([$name="test1", - $epoch=3secs, - $reducers=set(r1), - #$threshold_val = SumStats::sum_threshold("test.metric"), - $threshold_val(key: SumStats::Key, result: SumStats::Result) = - { - return result["test.metric"]$sum; - }, - $threshold=5.0, - $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = - { - local r = result["test.metric"]; - print fmt("THRESHOLD: hit a threshold value at %.0f for %s", r$sum, SumStats::key2str(key)); - } - ]); - - local r2: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)]; - SumStats::create([$name="test2", - $epoch=3secs, - $reducers=set(r2), - #$threshold_val = SumStats::sum_threshold("test.metric"), - $threshold_val(key: SumStats::Key, result: SumStats::Result) = - { - return result["test.metric"]$sum; - }, - $threshold_series=vector(3.0,6.0,800.0), - $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = - { - local r = result["test.metric"]; - print fmt("THRESHOLD_SERIES: hit a threshold series value at %.0f for %s", r$sum, SumStats::key2str(key)); - } - ]); - - local r3: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)]; - local r4: SumStats::Reducer = [$stream="test.metric2", $apply=set(SumStats::SUM)]; - SumStats::create([$name="test3", - $epoch=3secs, - $reducers=set(r3, r4), - $threshold_val(key: SumStats::Key, result: SumStats::Result) = - { - # Calculate a ratio between sums of two reducers. 
- if ( "test.metric2" in result && "test.metric" in result && - result["test.metric"]$sum > 0 ) - return result["test.metric2"]$sum / result["test.metric"]$sum; - else - return 0.0; - }, - # Looking for metric2 sum to be 5 times the sum of metric - $threshold=5.0, - $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = - { - local thold = result["test.metric2"]$sum / result["test.metric"]$sum; - print fmt("THRESHOLD WITH RATIO BETWEEN REDUCERS: hit a threshold value at %.0fx for %s", thold, SumStats::key2str(key)); - } - ]); - - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=3]); - SumStats::observe("test.metric", [$host=6.5.4.3], [$num=2]); - SumStats::observe("test.metric", [$host=7.2.1.5], [$num=1]); - SumStats::observe("test.metric", [$host=1.2.3.4], [$num=3]); - SumStats::observe("test.metric", [$host=7.2.1.5], [$num=1000]); - SumStats::observe("test.metric2", [$host=7.2.1.5], [$num=10]); - SumStats::observe("test.metric2", [$host=7.2.1.5], [$num=1000]); - SumStats::observe("test.metric2", [$host=7.2.1.5], [$num=54321]); - - } diff --git a/testing/btest/scripts/base/frameworks/sumstats/thresholding.zeek b/testing/btest/scripts/base/frameworks/sumstats/thresholding.zeek new file mode 100644 index 0000000000..93ae99e0ef --- /dev/null +++ b/testing/btest/scripts/base/frameworks/sumstats/thresholding.zeek @@ -0,0 +1,76 @@ +# @TEST-EXEC: zeek %INPUT | sort >output +# @TEST-EXEC: btest-diff output + +redef enum Notice::Type += { + Test_Notice, +}; + +event zeek_init() &priority=5 + { + local r1: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)]; + SumStats::create([$name="test1", + $epoch=3secs, + $reducers=set(r1), + #$threshold_val = SumStats::sum_threshold("test.metric"), + $threshold_val(key: SumStats::Key, result: SumStats::Result) = + { + return result["test.metric"]$sum; + }, + $threshold=5.0, + $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = + { + local r = result["test.metric"]; + print fmt("THRESHOLD: hit a threshold value at %.0f for %s", r$sum, SumStats::key2str(key)); + } + ]); + + local r2: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)]; + SumStats::create([$name="test2", + $epoch=3secs, + $reducers=set(r2), + #$threshold_val = SumStats::sum_threshold("test.metric"), + $threshold_val(key: SumStats::Key, result: SumStats::Result) = + { + return result["test.metric"]$sum; + }, + $threshold_series=vector(3.0,6.0,800.0), + $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = + { + local r = result["test.metric"]; + print fmt("THRESHOLD_SERIES: hit a threshold series value at %.0f for %s", r$sum, SumStats::key2str(key)); + } + ]); + + local r3: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)]; + local r4: SumStats::Reducer = [$stream="test.metric2", $apply=set(SumStats::SUM)]; + SumStats::create([$name="test3", + $epoch=3secs, + $reducers=set(r3, r4), + $threshold_val(key: SumStats::Key, result: SumStats::Result) = + { + # Calculate a ratio between sums of two reducers. 
+ if ( "test.metric2" in result && "test.metric" in result && + result["test.metric"]$sum > 0 ) + return result["test.metric2"]$sum / result["test.metric"]$sum; + else + return 0.0; + }, + # Looking for metric2 sum to be 5 times the sum of metric + $threshold=5.0, + $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = + { + local thold = result["test.metric2"]$sum / result["test.metric"]$sum; + print fmt("THRESHOLD WITH RATIO BETWEEN REDUCERS: hit a threshold value at %.0fx for %s", thold, SumStats::key2str(key)); + } + ]); + + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=3]); + SumStats::observe("test.metric", [$host=6.5.4.3], [$num=2]); + SumStats::observe("test.metric", [$host=7.2.1.5], [$num=1]); + SumStats::observe("test.metric", [$host=1.2.3.4], [$num=3]); + SumStats::observe("test.metric", [$host=7.2.1.5], [$num=1000]); + SumStats::observe("test.metric2", [$host=7.2.1.5], [$num=10]); + SumStats::observe("test.metric2", [$host=7.2.1.5], [$num=1000]); + SumStats::observe("test.metric2", [$host=7.2.1.5], [$num=54321]); + + } diff --git a/testing/btest/scripts/base/frameworks/sumstats/topk-cluster.bro b/testing/btest/scripts/base/frameworks/sumstats/topk-cluster.bro deleted file mode 100644 index 8a3a9bcf1b..0000000000 --- a/testing/btest/scripts/base/frameworks/sumstats/topk-cluster.bro +++ /dev/null @@ -1,112 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait 15 - -# @TEST-EXEC: btest-diff manager-1/.stdout -# -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], -}; -@TEST-END-FILE - -redef Log::default_rotation_interval = 0secs; - - -event bro_init() &priority=5 - { - local r1: SumStats::Reducer = [$stream="test.metric", - $apply=set(SumStats::TOPK)]; - SumStats::create([$name="topk-test", - $epoch=5secs, - $reducers=set(r1), - $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) = - { - local r = result["test.metric"]; - local s: vector of SumStats::Observation; - s = topk_get_top(r$topk, 5); - print fmt("Top entries for key %s", key$str); - for ( element in s ) - { - print fmt("Num: %d, count: %d, epsilon: %d", s[element]$num, topk_count(r$topk, s[element]), topk_epsilon(r$topk, s[element])); - } - }, - $epoch_finished(ts: time) = - { - terminate(); - }]); - - - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -global ready_for_data: event(); - -event bro_init() - { - Broker::auto_publish(Cluster::worker_topic, ready_for_data); - } - -event ready_for_data() - { - const loop_v: vector of count = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 
74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100}; - - - if ( Cluster::node == "worker-1" ) - { - - local a: count; - a = 0; - - for ( i in loop_v ) - { - a = a + 1; - for ( j in loop_v ) - { - if ( i < j ) - SumStats::observe("test.metric", [$str="counter"], [$num=a]); - } - } - - - SumStats::observe("test.metric", [$str="two"], [$num=1]); - SumStats::observe("test.metric", [$str="two"], [$num=1]); - } - if ( Cluster::node == "worker-2" ) - { - SumStats::observe("test.metric", [$str="two"], [$num=2]); - SumStats::observe("test.metric", [$str="two"], [$num=2]); - SumStats::observe("test.metric", [$str="two"], [$num=2]); - SumStats::observe("test.metric", [$str="two"], [$num=2]); - SumStats::observe("test.metric", [$str="two"], [$num=1]); - - for ( i in loop_v ) - { - SumStats::observe("test.metric", [$str="counter"], [$num=995]); - } - } - } - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) - -global peer_count = 0; -event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) - { - ++peer_count; - if ( peer_count == 2 ) - event ready_for_data(); - } - -@endif - diff --git a/testing/btest/scripts/base/frameworks/sumstats/topk-cluster.zeek b/testing/btest/scripts/base/frameworks/sumstats/topk-cluster.zeek new file mode 100644 index 0000000000..9ccb24f980 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/sumstats/topk-cluster.zeek @@ -0,0 +1,112 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-2 ZEEKPATH=$ZEEKPATH:.. 
CLUSTER_NODE=worker-2 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 15 + +# @TEST-EXEC: btest-diff manager-1/.stdout +# +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], +}; +@TEST-END-FILE + +redef Log::default_rotation_interval = 0secs; + + +event zeek_init() &priority=5 + { + local r1: SumStats::Reducer = [$stream="test.metric", + $apply=set(SumStats::TOPK)]; + SumStats::create([$name="topk-test", + $epoch=5secs, + $reducers=set(r1), + $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) = + { + local r = result["test.metric"]; + local s: vector of SumStats::Observation; + s = topk_get_top(r$topk, 5); + print fmt("Top entries for key %s", key$str); + for ( element in s ) + { + print fmt("Num: %d, count: %d, epsilon: %d", s[element]$num, topk_count(r$topk, s[element]), topk_epsilon(r$topk, s[element])); + } + }, + $epoch_finished(ts: time) = + { + terminate(); + }]); + + + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +global ready_for_data: event(); + +event zeek_init() + { + Broker::auto_publish(Cluster::worker_topic, ready_for_data); + } + +event ready_for_data() + { + const loop_v: vector of count = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100}; + + + if ( Cluster::node == "worker-1" ) + { + + local a: count; + a = 0; + + for ( i in loop_v ) + { + a = a + 1; + for ( j in loop_v ) + { + if ( i < j ) + SumStats::observe("test.metric", [$str="counter"], [$num=a]); + } + } + + + SumStats::observe("test.metric", [$str="two"], [$num=1]); + SumStats::observe("test.metric", [$str="two"], [$num=1]); + } + if ( Cluster::node == "worker-2" ) + { + SumStats::observe("test.metric", [$str="two"], [$num=2]); + SumStats::observe("test.metric", [$str="two"], [$num=2]); + SumStats::observe("test.metric", [$str="two"], [$num=2]); + SumStats::observe("test.metric", [$str="two"], [$num=2]); + SumStats::observe("test.metric", [$str="two"], [$num=1]); + + for ( i in loop_v ) + { + SumStats::observe("test.metric", [$str="counter"], [$num=995]); + } + } + } + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + +global peer_count = 0; +event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) + { + ++peer_count; + if ( peer_count == 2 ) + event ready_for_data(); + } + +@endif + diff --git a/testing/btest/scripts/base/frameworks/sumstats/topk.bro b/testing/btest/scripts/base/frameworks/sumstats/topk.bro deleted file mode 100644 index 99c301c669..0000000000 --- a/testing/btest/scripts/base/frameworks/sumstats/topk.bro +++ /dev/null @@ -1,43 +0,0 @@ -# @TEST-EXEC: bro %INPUT -# @TEST-EXEC: btest-diff .stdout - -event bro_init() &priority=5 - { - local r1: SumStats::Reducer = [$stream="test.metric", - $apply=set(SumStats::TOPK)]; - SumStats::create([$name="topk-test", - $epoch=3secs, - $reducers=set(r1), - 
$epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) = - { - local r = result["test.metric"]; - local s: vector of SumStats::Observation; - s = topk_get_top(r$topk, 5); - - print fmt("Top entries for key %s", key$str); - for ( element in s ) - { - print fmt("Num: %d, count: %d, epsilon: %d", s[element]$num, topk_count(r$topk, s[element]), topk_epsilon(r$topk, s[element])); - } - }]); - - - const loop_v: vector of count = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100}; - - local a: count; - a = 0; - - for ( i in loop_v ) - { - a = a + 1; - for ( j in loop_v ) - { - if ( i < j ) - SumStats::observe("test.metric", [$str="counter"], [$num=a]); - } - } - - - SumStats::observe("test.metric", [$str="two"], [$num=1]); - SumStats::observe("test.metric", [$str="two"], [$num=1]); - } diff --git a/testing/btest/scripts/base/frameworks/sumstats/topk.zeek b/testing/btest/scripts/base/frameworks/sumstats/topk.zeek new file mode 100644 index 0000000000..2375cddd10 --- /dev/null +++ b/testing/btest/scripts/base/frameworks/sumstats/topk.zeek @@ -0,0 +1,48 @@ +# @TEST-EXEC: zeek %INPUT +# @TEST-EXEC: btest-diff .stdout + +event zeek_init() &priority=5 + { + local r1: SumStats::Reducer = [$stream="test.metric", + $apply=set(SumStats::TOPK)]; + # Merge two empty sets + local topk1: opaque of topk = topk_init(4); + local topk2: opaque of topk = topk_init(4); + topk_merge(topk1, topk2); + + SumStats::create([$name="topk-test", + $epoch=3secs, + $reducers=set(r1), + $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) = + { + local r = result["test.metric"]; + local s: vector of SumStats::Observation; + s = topk_get_top(r$topk, 5); + + print fmt("Top entries for key %s", key$str); + for ( element in s ) + { + print fmt("Num: %d, count: %d, epsilon: %d", s[element]$num, topk_count(r$topk, s[element]), topk_epsilon(r$topk, s[element])); + } + }]); + + + const loop_v: vector of count = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100}; + + local a: count; + a = 0; + + for ( i in loop_v ) + { + a = a + 1; + for ( j in loop_v ) + { + if ( i < j ) + SumStats::observe("test.metric", [$str="counter"], [$num=a]); + } + } + + + SumStats::observe("test.metric", [$str="two"], [$num=1]); + SumStats::observe("test.metric", [$str="two"], [$num=1]); + } diff --git a/testing/btest/scripts/base/misc/find-filtered-trace.test b/testing/btest/scripts/base/misc/find-filtered-trace.test index e6c61c2bd2..a63e0c7a2b 100644 --- a/testing/btest/scripts/base/misc/find-filtered-trace.test +++ b/testing/btest/scripts/base/misc/find-filtered-trace.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/http/bro.org-filtered.pcap >out1 2>&1 -# @TEST-EXEC: bro -r $TRACES/http/bro.org-filtered.pcap "FilteredTraceDetection::enable=F" >out2 2>&1 +# @TEST-EXEC: zeek -r $TRACES/http/bro.org-filtered.pcap >out1 2>&1 +# @TEST-EXEC: zeek -r 
$TRACES/http/bro.org-filtered.pcap "FilteredTraceDetection::enable=F" >out2 2>&1 # @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out1 # @TEST-EXEC: btest-diff out2 diff --git a/testing/btest/scripts/base/misc/version.bro b/testing/btest/scripts/base/misc/version.bro deleted file mode 100644 index bceade0abb..0000000000 --- a/testing/btest/scripts/base/misc/version.bro +++ /dev/null @@ -1,43 +0,0 @@ -# @TEST-EXEC: bro %INPUT -# @TEST-EXEC: btest-diff .stdout -# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath" btest-diff .stderr - -# good versions -print Version::parse("1.5"); -print Version::parse("2.0"); -print Version::parse("2.6"); -print Version::parse("2.5-beta"); -print Version::parse("2.5.1-debug"); -print Version::parse("2.5-beta-12"); -print Version::parse("2.5-12-debug"); -print Version::parse("2.5.2-beta-12-debug"); -print Version::parse("2.5.2-beta5-12-debug"); -print Version::parse("1.12.20-beta-2562-debug"); -print Version::parse("2.6-936"); - -# bad versions -print Version::parse("1"); -print Version::parse("12.5"); -print Version::parse("1.12-beta-drunk"); -print Version::parse("JustARandomString"); - -# check that current running version of Bro parses without error -Version::parse(bro_version()); - -@TEST-START-NEXT - -@if ( Version::number >= 20500 ) -print "yup"; -@endif - -@if ( Version::parse("1.5")$version_number < 20500 ) -print "yup"; -@endif - -@if ( Version::at_least("2.5") ) -print "yup"; -@endif - -@if ( Version::at_least("2.9") ) -print "no"; -@endif diff --git a/testing/btest/scripts/base/misc/version.zeek b/testing/btest/scripts/base/misc/version.zeek new file mode 100644 index 0000000000..c6723f4e54 --- /dev/null +++ b/testing/btest/scripts/base/misc/version.zeek @@ -0,0 +1,43 @@ +# @TEST-EXEC: zeek %INPUT +# @TEST-EXEC: btest-diff .stdout +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-abspath" btest-diff .stderr + +# good versions +print Version::parse("1.5"); +print Version::parse("2.0"); +print Version::parse("2.6"); +print Version::parse("2.5-beta"); +print Version::parse("2.5.1-debug"); +print Version::parse("2.5-beta-12"); +print Version::parse("2.5-12-debug"); +print Version::parse("2.5.2-beta-12-debug"); +print Version::parse("2.5.2-beta5-12-debug"); +print Version::parse("1.12.20-beta-2562-debug"); +print Version::parse("2.6-936"); + +# bad versions +print Version::parse("1"); +print Version::parse("12.5"); +print Version::parse("1.12-beta-drunk"); +print Version::parse("JustARandomString"); + +# check that current running version of Zeek parses without error +Version::parse(zeek_version()); + +@TEST-START-NEXT + +@if ( Version::number >= 20500 ) +print "yup"; +@endif + +@if ( Version::parse("1.5")$version_number < 20500 ) +print "yup"; +@endif + +@if ( Version::at_least("2.5") ) +print "yup"; +@endif + +@if ( Version::at_least("2.9") ) +print "no"; +@endif diff --git a/testing/btest/scripts/base/protocols/arp/bad.test b/testing/btest/scripts/base/protocols/arp/bad.test index efe9b1d15a..fb3444f105 100644 --- a/testing/btest/scripts/base/protocols/arp/bad.test +++ b/testing/btest/scripts/base/protocols/arp/bad.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/arp-leak.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/arp-leak.pcap %INPUT # @TEST-EXEC: btest-diff .stdout event arp_request(mac_src: string, mac_dst: string, SPA: addr, SHA: string, TPA: addr, THA: string) diff --git a/testing/btest/scripts/base/protocols/arp/basic.test b/testing/btest/scripts/base/protocols/arp/basic.test index 
9ef1404567..c8dbc58cff 100644 --- a/testing/btest/scripts/base/protocols/arp/basic.test +++ b/testing/btest/scripts/base/protocols/arp/basic.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/arp-who-has.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/arp-who-has.pcap %INPUT # @TEST-EXEC: btest-diff .stdout event arp_request(mac_src: string, mac_dst: string, SPA: addr, SHA: string, TPA: addr, THA: string) diff --git a/testing/btest/scripts/base/protocols/arp/radiotap.test b/testing/btest/scripts/base/protocols/arp/radiotap.test index 95ce471532..59f69aca13 100644 --- a/testing/btest/scripts/base/protocols/arp/radiotap.test +++ b/testing/btest/scripts/base/protocols/arp/radiotap.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/arp-who-has-radiotap.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/arp-who-has-radiotap.pcap %INPUT # @TEST-EXEC: btest-diff .stdout event arp_request(mac_src: string, mac_dst: string, SPA: addr, SHA: string, TPA: addr, THA: string) diff --git a/testing/btest/scripts/base/protocols/arp/wlanmon.test b/testing/btest/scripts/base/protocols/arp/wlanmon.test index 7f909eac4f..6516d424e9 100644 --- a/testing/btest/scripts/base/protocols/arp/wlanmon.test +++ b/testing/btest/scripts/base/protocols/arp/wlanmon.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/arp-who-has-wlanmon.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/arp-who-has-wlanmon.pcap %INPUT # @TEST-EXEC: btest-diff .stdout event arp_request(mac_src: string, mac_dst: string, SPA: addr, SHA: string, TPA: addr, THA: string) diff --git a/testing/btest/scripts/base/protocols/conn/contents-default-extract.test b/testing/btest/scripts/base/protocols/conn/contents-default-extract.test index b53081826c..5bd0044dbc 100644 --- a/testing/btest/scripts/base/protocols/conn/contents-default-extract.test +++ b/testing/btest/scripts/base/protocols/conn/contents-default-extract.test @@ -1,3 +1,3 @@ -# @TEST-EXEC: bro -f "tcp port 21" -r $TRACES/ftp/ipv6.trace "Conn::default_extract=T" +# @TEST-EXEC: zeek -f "tcp port 21" -r $TRACES/ftp/ipv6.trace "Conn::default_extract=T" # @TEST-EXEC: btest-diff contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_orig.dat # @TEST-EXEC: btest-diff contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_resp.dat diff --git a/testing/btest/scripts/base/protocols/conn/new_connection_contents.bro b/testing/btest/scripts/base/protocols/conn/new_connection_contents.bro deleted file mode 100644 index 42919f6f13..0000000000 --- a/testing/btest/scripts/base/protocols/conn/new_connection_contents.bro +++ /dev/null @@ -1,7 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/irc-dcc-send.trace %INPUT -# @TEST-EXEC: btest-diff .stdout - -event new_connection_contents(c: connection) - { - print fmt("new_connection_contents for %s", cat(c$id)); - } diff --git a/testing/btest/scripts/base/protocols/conn/new_connection_contents.zeek b/testing/btest/scripts/base/protocols/conn/new_connection_contents.zeek new file mode 100644 index 0000000000..6278078d49 --- /dev/null +++ b/testing/btest/scripts/base/protocols/conn/new_connection_contents.zeek @@ -0,0 +1,7 @@ +# @TEST-EXEC: zeek -r $TRACES/irc-dcc-send.trace %INPUT +# @TEST-EXEC: btest-diff .stdout + +event new_connection_contents(c: connection) + { + print fmt("new_connection_contents for %s", cat(c$id)); + } diff --git a/testing/btest/scripts/base/protocols/conn/polling.test b/testing/btest/scripts/base/protocols/conn/polling.test index f855326e77..4b009bacaa 100644 --- a/testing/btest/scripts/base/protocols/conn/polling.test +++ 
b/testing/btest/scripts/base/protocols/conn/polling.test @@ -1,6 +1,6 @@ -# @TEST-EXEC: bro -b -r $TRACES/http/100-continue.trace %INPUT >out1 +# @TEST-EXEC: zeek -b -r $TRACES/http/100-continue.trace %INPUT >out1 # @TEST-EXEC: btest-diff out1 -# @TEST-EXEC: bro -b -r $TRACES/http/100-continue.trace %INPUT stop_cnt=2 >out2 +# @TEST-EXEC: zeek -b -r $TRACES/http/100-continue.trace %INPUT stop_cnt=2 >out2 # @TEST-EXEC: btest-diff out2 @load base/protocols/conn diff --git a/testing/btest/scripts/base/protocols/conn/threshold.bro b/testing/btest/scripts/base/protocols/conn/threshold.bro deleted file mode 100644 index 13daa8fff0..0000000000 --- a/testing/btest/scripts/base/protocols/conn/threshold.bro +++ /dev/null @@ -1,30 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/irc-dcc-send.trace %INPUT -# @TEST-EXEC: btest-diff .stdout - -event connection_established(c: connection) - { - print fmt("Threshold set for %s", cat(c$id)); - ConnThreshold::set_bytes_threshold(c, 1, T); - ConnThreshold::set_bytes_threshold(c, 2500, T); - ConnThreshold::set_bytes_threshold(c, 2700, T); - ConnThreshold::set_bytes_threshold(c, 3000, T); - ConnThreshold::delete_bytes_threshold(c, 3000, T); - ConnThreshold::set_bytes_threshold(c, 2000, F); - - ConnThreshold::set_packets_threshold(c, 50, F); - ConnThreshold::set_packets_threshold(c, 51, F); - ConnThreshold::set_packets_threshold(c, 52, F); - ConnThreshold::delete_packets_threshold(c, 51, F); - ConnThreshold::set_packets_threshold(c, 63, T); - ConnThreshold::delete_packets_threshold(c, 63, T); - } - -event ConnThreshold::bytes_threshold_crossed(c: connection, threshold: count, is_orig: bool) - { - print "triggered bytes", c$id, threshold, is_orig; - } - -event ConnThreshold::packets_threshold_crossed(c: connection, threshold: count, is_orig: bool) - { - print "triggered packets", c$id, threshold, is_orig; - } diff --git a/testing/btest/scripts/base/protocols/conn/threshold.zeek b/testing/btest/scripts/base/protocols/conn/threshold.zeek new file mode 100644 index 0000000000..4ab01b4dbf --- /dev/null +++ b/testing/btest/scripts/base/protocols/conn/threshold.zeek @@ -0,0 +1,30 @@ +# @TEST-EXEC: zeek -r $TRACES/irc-dcc-send.trace %INPUT +# @TEST-EXEC: btest-diff .stdout + +event connection_established(c: connection) + { + print fmt("Threshold set for %s", cat(c$id)); + ConnThreshold::set_bytes_threshold(c, 1, T); + ConnThreshold::set_bytes_threshold(c, 2500, T); + ConnThreshold::set_bytes_threshold(c, 2700, T); + ConnThreshold::set_bytes_threshold(c, 3000, T); + ConnThreshold::delete_bytes_threshold(c, 3000, T); + ConnThreshold::set_bytes_threshold(c, 2000, F); + + ConnThreshold::set_packets_threshold(c, 50, F); + ConnThreshold::set_packets_threshold(c, 51, F); + ConnThreshold::set_packets_threshold(c, 52, F); + ConnThreshold::delete_packets_threshold(c, 51, F); + ConnThreshold::set_packets_threshold(c, 63, T); + ConnThreshold::delete_packets_threshold(c, 63, T); + } + +event ConnThreshold::bytes_threshold_crossed(c: connection, threshold: count, is_orig: bool) + { + print "triggered bytes", c$id, threshold, is_orig; + } + +event ConnThreshold::packets_threshold_crossed(c: connection, threshold: count, is_orig: bool) + { + print "triggered packets", c$id, threshold, is_orig; + } diff --git a/testing/btest/scripts/base/protocols/dce-rpc/context.bro b/testing/btest/scripts/base/protocols/dce-rpc/context.bro deleted file mode 100644 index cb0d93383b..0000000000 --- a/testing/btest/scripts/base/protocols/dce-rpc/context.bro +++ /dev/null @@ -1,31 +0,0 @@ -# @TEST-EXEC: bro -b -C -r 
$TRACES/dce-rpc/cs_window7-join_stream092.pcap %INPUT >out -# @TEST-EXEC: btest-diff out -# @TEST-EXEC: btest-diff dce_rpc.log - -@load base/protocols/dce-rpc - -event dce_rpc_bind(c: connection, fid: count, ctx_id: count, uuid: string, ver_major: count, ver_minor: count) &priority=5 - { - print fmt("dce_rpc_bind :: fid == %s", fid); - print fmt("dce_rpc_bind :: ctx_id == %s", ctx_id); - print fmt("dce_rpc_bind :: uuid == %s", uuid_to_string(uuid)); - } - -event dce_rpc_alter_context(c: connection, fid: count, ctx_id: count, uuid: string, ver_major: count, ver_minor: count) &priority=5 - { - print fmt("dce_rpc_alter_context :: fid == %s", fid); - print fmt("dce_rpc_alter_context :: ctx_id == %s", ctx_id); - print fmt("dce_rpc_alter_context :: uuid == %s", uuid_to_string(uuid)); - } - - -event dce_rpc_bind_ack(c: connection, fid: count, sec_addr: string) &priority=5 - { - print fmt("dce_rpc_bind_ack :: fid == %s", fid); - print fmt("dce_rpc_bind_ack :: sec_addr == %s", sec_addr); - } - -event dce_rpc_alter_context_resp(c: connection, fid: count) &priority=5 - { - print fmt("dce_rpc_alter_context_resp :: fid == %s", fid); - } diff --git a/testing/btest/scripts/base/protocols/dce-rpc/context.zeek b/testing/btest/scripts/base/protocols/dce-rpc/context.zeek new file mode 100644 index 0000000000..f49649848b --- /dev/null +++ b/testing/btest/scripts/base/protocols/dce-rpc/context.zeek @@ -0,0 +1,31 @@ +# @TEST-EXEC: zeek -b -C -r $TRACES/dce-rpc/cs_window7-join_stream092.pcap %INPUT >out +# @TEST-EXEC: btest-diff out +# @TEST-EXEC: btest-diff dce_rpc.log + +@load base/protocols/dce-rpc + +event dce_rpc_bind(c: connection, fid: count, ctx_id: count, uuid: string, ver_major: count, ver_minor: count) &priority=5 + { + print fmt("dce_rpc_bind :: fid == %s", fid); + print fmt("dce_rpc_bind :: ctx_id == %s", ctx_id); + print fmt("dce_rpc_bind :: uuid == %s", uuid_to_string(uuid)); + } + +event dce_rpc_alter_context(c: connection, fid: count, ctx_id: count, uuid: string, ver_major: count, ver_minor: count) &priority=5 + { + print fmt("dce_rpc_alter_context :: fid == %s", fid); + print fmt("dce_rpc_alter_context :: ctx_id == %s", ctx_id); + print fmt("dce_rpc_alter_context :: uuid == %s", uuid_to_string(uuid)); + } + + +event dce_rpc_bind_ack(c: connection, fid: count, sec_addr: string) &priority=5 + { + print fmt("dce_rpc_bind_ack :: fid == %s", fid); + print fmt("dce_rpc_bind_ack :: sec_addr == %s", sec_addr); + } + +event dce_rpc_alter_context_resp(c: connection, fid: count) &priority=5 + { + print fmt("dce_rpc_alter_context_resp :: fid == %s", fid); + } diff --git a/testing/btest/scripts/base/protocols/dce-rpc/mapi.test b/testing/btest/scripts/base/protocols/dce-rpc/mapi.test index 97431bb005..ba29d31540 100644 --- a/testing/btest/scripts/base/protocols/dce-rpc/mapi.test +++ b/testing/btest/scripts/base/protocols/dce-rpc/mapi.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b -r $TRACES/dce-rpc/mapi.pcap %INPUT +# @TEST-EXEC: zeek -b -r $TRACES/dce-rpc/mapi.pcap %INPUT # @TEST-EXEC: btest-diff dce_rpc.log # @TEST-EXEC: btest-diff ntlm.log diff --git a/testing/btest/scripts/base/protocols/dhcp/dhcp-ack-msg-types.btest b/testing/btest/scripts/base/protocols/dhcp/dhcp-ack-msg-types.btest index 8f192b7aa4..8f32736572 100644 --- a/testing/btest/scripts/base/protocols/dhcp/dhcp-ack-msg-types.btest +++ b/testing/btest/scripts/base/protocols/dhcp/dhcp-ack-msg-types.btest @@ -2,5 +2,5 @@ # The trace has a message of each DHCP message type, # but only one lease should show up in the logs. 
-# @TEST-EXEC: bro -r $TRACES/dhcp/dhcp_ack_subscriber_id_and_agent_remote_id.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/dhcp/dhcp_ack_subscriber_id_and_agent_remote_id.trace %INPUT # @TEST-EXEC: btest-diff dhcp.log diff --git a/testing/btest/scripts/base/protocols/dhcp/dhcp-all-msg-types.btest b/testing/btest/scripts/base/protocols/dhcp/dhcp-all-msg-types.btest index 752ab91780..0c902911a2 100644 --- a/testing/btest/scripts/base/protocols/dhcp/dhcp-all-msg-types.btest +++ b/testing/btest/scripts/base/protocols/dhcp/dhcp-all-msg-types.btest @@ -2,5 +2,5 @@ # The trace has a message of each DHCP message type, # but only one lease should show up in the logs. -# @TEST-EXEC: bro -r $TRACES/dhcp/dhcp.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/dhcp/dhcp.trace %INPUT # @TEST-EXEC: btest-diff dhcp.log diff --git a/testing/btest/scripts/base/protocols/dhcp/dhcp-discover-msg-types.btest b/testing/btest/scripts/base/protocols/dhcp/dhcp-discover-msg-types.btest index 1952682e61..1833bd70ab 100644 --- a/testing/btest/scripts/base/protocols/dhcp/dhcp-discover-msg-types.btest +++ b/testing/btest/scripts/base/protocols/dhcp/dhcp-discover-msg-types.btest @@ -2,5 +2,5 @@ # The trace has a message of each DHCP message type, # but only one lease should show up in the logs. -# @TEST-EXEC: bro -r $TRACES/dhcp/dhcp_discover_param_req_and_client_id.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/dhcp/dhcp_discover_param_req_and_client_id.trace %INPUT # @TEST-EXEC: btest-diff dhcp.log diff --git a/testing/btest/scripts/base/protocols/dhcp/dhcp-sub-opts.btest b/testing/btest/scripts/base/protocols/dhcp/dhcp-sub-opts.btest index 3bd37a996b..f5fc6be660 100644 --- a/testing/btest/scripts/base/protocols/dhcp/dhcp-sub-opts.btest +++ b/testing/btest/scripts/base/protocols/dhcp/dhcp-sub-opts.btest @@ -1,2 +1,2 @@ -# @TEST-EXEC: bro -r $TRACES/dhcp/dhcp_ack_subscriber_id_and_agent_remote_id.trace %INPUT protocols/dhcp/sub-opts +# @TEST-EXEC: zeek -r $TRACES/dhcp/dhcp_ack_subscriber_id_and_agent_remote_id.trace %INPUT protocols/dhcp/sub-opts # @TEST-EXEC: btest-diff dhcp.log diff --git a/testing/btest/scripts/base/protocols/dhcp/dhcp-time-nameserver-events.zeek b/testing/btest/scripts/base/protocols/dhcp/dhcp-time-nameserver-events.zeek new file mode 100644 index 0000000000..bba8a0412f --- /dev/null +++ b/testing/btest/scripts/base/protocols/dhcp/dhcp-time-nameserver-events.zeek @@ -0,0 +1,12 @@ +# @TEST-EXEC: zeek -b -r $TRACES/dhcp/dhcp_time_and_nameserver.trace %INPUT +# @TEST-EXEC: btest-diff .stdout + +@load base/protocols/dhcp + +event DHCP::aggregate_msgs(ts: time, id: conn_id, uid: string, is_orig: bool, msg: DHCP::Msg, options: DHCP::Options) &priority=5 + { + print "time_offset", options$time_offset; + print "timeserver_list", options$time_servers; + print "nameserver_list", options$name_servers; + print "ntpserver_list", options$ntp_servers; + } diff --git a/testing/btest/scripts/base/protocols/dhcp/inform.test b/testing/btest/scripts/base/protocols/dhcp/inform.test index 652fd1ae45..7a6fa78eaa 100644 --- a/testing/btest/scripts/base/protocols/dhcp/inform.test +++ b/testing/btest/scripts/base/protocols/dhcp/inform.test @@ -1,5 +1,5 @@ # DHCPINFORM leases are special-cased in the code. # This tests that those leases are correctly logged. 
-# @TEST-EXEC: bro -r $TRACES/dhcp/dhcp_inform.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/dhcp/dhcp_inform.trace %INPUT # @TEST-EXEC: btest-diff dhcp.log diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_del_measure.bro b/testing/btest/scripts/base/protocols/dnp3/dnp3_del_measure.bro deleted file mode 100644 index 533bfd8e0b..0000000000 --- a/testing/btest/scripts/base/protocols/dnp3/dnp3_del_measure.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -r $TRACES/dnp3/dnp3_del_measure.pcap %DIR/events.bro >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered -# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total -# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage -# @TEST-EXEC: btest-diff coverage -# @TEST-EXEC: btest-diff dnp3.log -# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_del_measure.zeek b/testing/btest/scripts/base/protocols/dnp3/dnp3_del_measure.zeek new file mode 100644 index 0000000000..dd2fe42007 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dnp3/dnp3_del_measure.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -r $TRACES/dnp3/dnp3_del_measure.pcap %DIR/events.zeek >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered +# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total +# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage +# @TEST-EXEC: btest-diff coverage +# @TEST-EXEC: btest-diff dnp3.log +# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_en_spon.bro b/testing/btest/scripts/base/protocols/dnp3/dnp3_en_spon.bro deleted file mode 100644 index 3e8c4f56d4..0000000000 --- a/testing/btest/scripts/base/protocols/dnp3/dnp3_en_spon.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -r $TRACES/dnp3/dnp3_en_spon.pcap %DIR/events.bro >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered -# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total -# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage -# @TEST-EXEC: btest-diff coverage -# @TEST-EXEC: btest-diff dnp3.log -# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_en_spon.zeek b/testing/btest/scripts/base/protocols/dnp3/dnp3_en_spon.zeek new file mode 100644 index 0000000000..3fd98f90a9 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dnp3/dnp3_en_spon.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -r $TRACES/dnp3/dnp3_en_spon.pcap %DIR/events.zeek >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered +# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total +# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage +# @TEST-EXEC: btest-diff coverage +# @TEST-EXEC: btest-diff dnp3.log +# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_file_del.bro b/testing/btest/scripts/base/protocols/dnp3/dnp3_file_del.bro deleted file mode 100644 index e95637b67d..0000000000 --- a/testing/btest/scripts/base/protocols/dnp3/dnp3_file_del.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -r $TRACES/dnp3/dnp3_file_del.pcap %DIR/events.bro >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: cat output | awk '{print 
$1}' | sort | uniq | wc -l >covered -# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total -# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage -# @TEST-EXEC: btest-diff coverage -# @TEST-EXEC: btest-diff dnp3.log -# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_file_del.zeek b/testing/btest/scripts/base/protocols/dnp3/dnp3_file_del.zeek new file mode 100644 index 0000000000..9fa7cff416 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dnp3/dnp3_file_del.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -r $TRACES/dnp3/dnp3_file_del.pcap %DIR/events.zeek >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered +# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total +# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage +# @TEST-EXEC: btest-diff coverage +# @TEST-EXEC: btest-diff dnp3.log +# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_file_read.bro b/testing/btest/scripts/base/protocols/dnp3/dnp3_file_read.bro deleted file mode 100644 index 8da9f078a4..0000000000 --- a/testing/btest/scripts/base/protocols/dnp3/dnp3_file_read.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -r $TRACES/dnp3/dnp3_file_read.pcap %DIR/events.bro >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered -# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total -# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage -# @TEST-EXEC: btest-diff coverage -# @TEST-EXEC: btest-diff dnp3.log -# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_file_read.zeek b/testing/btest/scripts/base/protocols/dnp3/dnp3_file_read.zeek new file mode 100644 index 0000000000..279ce73fc5 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dnp3/dnp3_file_read.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -r $TRACES/dnp3/dnp3_file_read.pcap %DIR/events.zeek >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered +# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total +# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage +# @TEST-EXEC: btest-diff coverage +# @TEST-EXEC: btest-diff dnp3.log +# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_file_write.bro b/testing/btest/scripts/base/protocols/dnp3/dnp3_file_write.bro deleted file mode 100644 index 60761360ed..0000000000 --- a/testing/btest/scripts/base/protocols/dnp3/dnp3_file_write.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -r $TRACES/dnp3/dnp3_file_write.pcap %DIR/events.bro >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered -# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total -# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage -# @TEST-EXEC: btest-diff coverage -# @TEST-EXEC: btest-diff dnp3.log -# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_file_write.zeek b/testing/btest/scripts/base/protocols/dnp3/dnp3_file_write.zeek new file mode 100644 index 0000000000..a7bf5a6c51 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dnp3/dnp3_file_write.zeek @@ -0,0 +1,9 @@ +# +# 
@TEST-EXEC: zeek -r $TRACES/dnp3/dnp3_file_write.pcap %DIR/events.zeek >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered +# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total +# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage +# @TEST-EXEC: btest-diff coverage +# @TEST-EXEC: btest-diff dnp3.log +# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_link_only.bro b/testing/btest/scripts/base/protocols/dnp3/dnp3_link_only.bro deleted file mode 100644 index 867382148b..0000000000 --- a/testing/btest/scripts/base/protocols/dnp3/dnp3_link_only.bro +++ /dev/null @@ -1,8 +0,0 @@ -# -# @TEST-EXEC: bro -C -r $TRACES/dnp3/dnp3_link_only.pcap %DIR/events.bro >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered -# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total -# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage -# @TEST-EXEC: btest-diff coverage -# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_link_only.zeek b/testing/btest/scripts/base/protocols/dnp3/dnp3_link_only.zeek new file mode 100644 index 0000000000..c55ad9eaf5 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dnp3/dnp3_link_only.zeek @@ -0,0 +1,8 @@ +# +# @TEST-EXEC: zeek -C -r $TRACES/dnp3/dnp3_link_only.pcap %DIR/events.zeek >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered +# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total +# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage +# @TEST-EXEC: btest-diff coverage +# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_read.bro b/testing/btest/scripts/base/protocols/dnp3/dnp3_read.bro deleted file mode 100644 index ffb0e03653..0000000000 --- a/testing/btest/scripts/base/protocols/dnp3/dnp3_read.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -r $TRACES/dnp3/dnp3_read.pcap %DIR/events.bro >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered -# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total -# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage -# @TEST-EXEC: btest-diff coverage -# @TEST-EXEC: btest-diff dnp3.log -# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_read.zeek b/testing/btest/scripts/base/protocols/dnp3/dnp3_read.zeek new file mode 100644 index 0000000000..c474cc5594 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dnp3/dnp3_read.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -r $TRACES/dnp3/dnp3_read.pcap %DIR/events.zeek >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered +# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total +# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage +# @TEST-EXEC: btest-diff coverage +# @TEST-EXEC: btest-diff dnp3.log +# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_rec_time.bro b/testing/btest/scripts/base/protocols/dnp3/dnp3_rec_time.bro deleted file mode 100644 index d97d37d0ce..0000000000 --- 
a/testing/btest/scripts/base/protocols/dnp3/dnp3_rec_time.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -r $TRACES/dnp3/dnp3_rec_time.pcap %DIR/events.bro >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered -# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total -# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage -# @TEST-EXEC: btest-diff coverage -# @TEST-EXEC: btest-diff dnp3.log -# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_rec_time.zeek b/testing/btest/scripts/base/protocols/dnp3/dnp3_rec_time.zeek new file mode 100644 index 0000000000..7f0e2437af --- /dev/null +++ b/testing/btest/scripts/base/protocols/dnp3/dnp3_rec_time.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -r $TRACES/dnp3/dnp3_rec_time.pcap %DIR/events.zeek >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered +# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total +# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage +# @TEST-EXEC: btest-diff coverage +# @TEST-EXEC: btest-diff dnp3.log +# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_select_operate.bro b/testing/btest/scripts/base/protocols/dnp3/dnp3_select_operate.bro deleted file mode 100644 index a8acf4755c..0000000000 --- a/testing/btest/scripts/base/protocols/dnp3/dnp3_select_operate.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -r $TRACES/dnp3/dnp3_select_operate.pcap %DIR/events.bro >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered -# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total -# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage -# @TEST-EXEC: btest-diff coverage -# @TEST-EXEC: btest-diff dnp3.log -# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_select_operate.zeek b/testing/btest/scripts/base/protocols/dnp3/dnp3_select_operate.zeek new file mode 100644 index 0000000000..44fcd570c1 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dnp3/dnp3_select_operate.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -r $TRACES/dnp3/dnp3_select_operate.pcap %DIR/events.zeek >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered +# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total +# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage +# @TEST-EXEC: btest-diff coverage +# @TEST-EXEC: btest-diff dnp3.log +# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_en_spon.bro b/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_en_spon.bro deleted file mode 100644 index a5f1f895cc..0000000000 --- a/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_en_spon.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -r $TRACES/dnp3/dnp3_udp_en_spon.pcap %DIR/events.bro >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered -# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total -# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage -# @TEST-EXEC: btest-diff coverage -# @TEST-EXEC: btest-diff dnp3.log -# diff --git 
a/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_en_spon.zeek b/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_en_spon.zeek new file mode 100644 index 0000000000..2efaa4f5d7 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_en_spon.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -r $TRACES/dnp3/dnp3_udp_en_spon.pcap %DIR/events.zeek >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered +# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total +# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage +# @TEST-EXEC: btest-diff coverage +# @TEST-EXEC: btest-diff dnp3.log +# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_read.bro b/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_read.bro deleted file mode 100644 index 073e758df4..0000000000 --- a/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_read.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -r $TRACES/dnp3/dnp3_udp_read.pcap %DIR/events.bro >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered -# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total -# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage -# @TEST-EXEC: btest-diff coverage -# @TEST-EXEC: btest-diff dnp3.log -# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_read.zeek b/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_read.zeek new file mode 100644 index 0000000000..9f817b5bc1 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_read.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -r $TRACES/dnp3/dnp3_udp_read.pcap %DIR/events.zeek >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered +# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total +# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage +# @TEST-EXEC: btest-diff coverage +# @TEST-EXEC: btest-diff dnp3.log +# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_select_operate.bro b/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_select_operate.bro deleted file mode 100644 index c8708b10cd..0000000000 --- a/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_select_operate.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -r $TRACES/dnp3/dnp3_udp_select_operate.pcap %DIR/events.bro >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered -# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total -# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage -# @TEST-EXEC: btest-diff coverage -# @TEST-EXEC: btest-diff dnp3.log -# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_select_operate.zeek b/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_select_operate.zeek new file mode 100644 index 0000000000..8c1aa79dba --- /dev/null +++ b/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_select_operate.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -r $TRACES/dnp3/dnp3_udp_select_operate.pcap %DIR/events.zeek >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered +# @TEST-EXEC: cat 
${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total +# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage +# @TEST-EXEC: btest-diff coverage +# @TEST-EXEC: btest-diff dnp3.log +# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_write.bro b/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_write.bro deleted file mode 100644 index d832d937a7..0000000000 --- a/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_write.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -r $TRACES/dnp3/dnp3_udp_write.pcap %DIR/events.bro >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered -# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total -# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage -# @TEST-EXEC: btest-diff coverage -# @TEST-EXEC: btest-diff dnp3.log -# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_write.zeek b/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_write.zeek new file mode 100644 index 0000000000..60eeb30480 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dnp3/dnp3_udp_write.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -r $TRACES/dnp3/dnp3_udp_write.pcap %DIR/events.zeek >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered +# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total +# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage +# @TEST-EXEC: btest-diff coverage +# @TEST-EXEC: btest-diff dnp3.log +# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_write.bro b/testing/btest/scripts/base/protocols/dnp3/dnp3_write.bro deleted file mode 100644 index 8669d701b2..0000000000 --- a/testing/btest/scripts/base/protocols/dnp3/dnp3_write.bro +++ /dev/null @@ -1,9 +0,0 @@ -# -# @TEST-EXEC: bro -r $TRACES/dnp3/dnp3_write.pcap %DIR/events.bro >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered -# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total -# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage -# @TEST-EXEC: btest-diff coverage -# @TEST-EXEC: btest-diff dnp3.log -# diff --git a/testing/btest/scripts/base/protocols/dnp3/dnp3_write.zeek b/testing/btest/scripts/base/protocols/dnp3/dnp3_write.zeek new file mode 100644 index 0000000000..cb0e0560d3 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dnp3/dnp3_write.zeek @@ -0,0 +1,9 @@ +# +# @TEST-EXEC: zeek -r $TRACES/dnp3/dnp3_write.pcap %DIR/events.zeek >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered +# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total +# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage +# @TEST-EXEC: btest-diff coverage +# @TEST-EXEC: btest-diff dnp3.log +# diff --git a/testing/btest/scripts/base/protocols/dnp3/events.bro b/testing/btest/scripts/base/protocols/dnp3/events.bro deleted file mode 100644 index c5a853be61..0000000000 --- a/testing/btest/scripts/base/protocols/dnp3/events.bro +++ /dev/null @@ -1,266 +0,0 @@ -# -# @TEST-EXEC: bro -r $TRACES/dnp3/dnp3.trace %INPUT >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: cat 
output | awk '{print $1}' | sort | uniq | wc -l >covered -# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total -# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage -# @TEST-EXEC: btest-diff coverage -# @TEST-EXEC: btest-diff dnp3.log -# -event dnp3_application_request_header(c: connection, is_orig: bool, application_control: count, fc: count) - { - print "dnp3_application_request_header", is_orig, application_control, fc; - } - -event dnp3_application_response_header(c: connection, is_orig: bool, application_control: count, fc: count, iin: count) - { - print "dnp3_application_response_header", is_orig, application_control, fc, iin; - } - -event dnp3_object_header(c: connection, is_orig: bool, obj_type: count, qua_field: count, number: count, rf_low: count, rf_high: count) - { - print "dnp3_object_header", is_orig, obj_type, qua_field, number, rf_low, rf_high; - } - -event dnp3_object_prefix(c: connection, is_orig: bool, prefix_value: count) - { - print "dnp3_object_prefix", is_orig, prefix_value; - } - -event dnp3_header_block(c: connection, is_orig: bool, len: count, ctrl: count, dest_addr: count, src_addr: count) - { - print "dnp3_header_block", is_orig, len, ctrl, dest_addr, src_addr; - } - -event dnp3_response_data_object(c: connection, is_orig: bool, data_value: count) - { - print "dnp3_response_data_object", is_orig, data_value; - } - -event dnp3_attribute_common(c: connection, is_orig: bool, data_type_code: count, leng: count, attribute_obj: string) - { - print "dnp3_attribute_common", is_orig, data_type_code, leng, attribute_obj; - } - -event dnp3_crob(c: connection, is_orig: bool, control_code: count, count8: count, on_time: count, off_time: count, status_code: count) - { - print "dnp3_crob", is_orig, control_code, count8, on_time, off_time, status_code; - } - -event dnp3_pcb(c: connection, is_orig: bool, control_code: count, count8: count, on_time: count, off_time: count, status_code: count) - { - print "dnp3_pcb", is_orig, control_code, count8, on_time, off_time, status_code; - } - -event dnp3_counter_32wFlag(c: connection, is_orig: bool, flag: count, count_value: count) - { - print "dnp3_counter_32wFlag", is_orig, flag, count_value; - } - -event dnp3_counter_16wFlag(c: connection, is_orig: bool, flag: count, count_value: count) - { - print "dnp3_counter_16wFlag", is_orig, flag, count_value; - } - -event dnp3_counter_32woFlag(c: connection, is_orig: bool, count_value: count) - { - print "dnp3_counter_32woFlag", is_orig, count_value; - } - -event dnp3_counter_16woFlag(c: connection, is_orig: bool, count_value: count) - { - print "dnp3_counter_16woFlag", is_orig, count_value; - } - -event dnp3_frozen_counter_32wFlag(c: connection, is_orig: bool, flag:count, count_value: count) - { - print "dnp3_frozen_counter_32wFlag", is_orig, flag; - } - -event dnp3_frozen_counter_16wFlag(c: connection, is_orig: bool, flag:count, count_value: count) - { - print "dnp3_frozen_counter_16wFlag", is_orig, flag; - } - -event dnp3_frozen_counter_32wFlagTime(c: connection, is_orig: bool, flag:count, count_value: count, time48: count) - { - print "dnp3_frozen_counter_32wFlagTime", is_orig, flag; - } - -event dnp3_frozen_counter_16wFlagTime(c: connection, is_orig: bool, flag:count, count_value: count, time48: count) - { - print "dnp3_frozen_counter_16wFlagTime", is_orig, flag; - } - -event dnp3_frozen_counter_32woFlag(c: connection, is_orig: bool, count_value: count) - { - print "dnp3_frozen_counter_32woFlag", is_orig, 
count_value; - } - -event dnp3_frozen_counter_16woFlag(c: connection, is_orig: bool, count_value: count) - { - print "dnp3_frozen_counter_16woFlag", is_orig, count_value; - } - -event dnp3_analog_input_32wFlag(c: connection, is_orig: bool, flag: count, value: count) - { - print "dnp3_analog_input_32wFlag", is_orig, flag, value; - } - -event dnp3_analog_input_16wFlag(c: connection, is_orig: bool, flag: count, value: count) - { - print "dnp3_analog_input_16wFlag", is_orig, flag, value; - } - -event dnp3_analog_input_32woFlag(c: connection, is_orig: bool, value: count) - { - print "dnp3_analog_input_32woFlag", is_orig, value; - } - -event dnp3_analog_input_16woFlag(c: connection, is_orig: bool, value: count) - { - print "dnp3_analog_input_16woFlag", is_orig, value; - } - -event dnp3_analog_input_SPwFlag(c: connection, is_orig: bool, flag: count, value: count) - { - print "dnp3_analog_input_SPwFlag", is_orig, flag, value; - } - -event dnp3_analog_input_DPwFlag(c: connection, is_orig: bool, flag: count, value_low: count, value_high: count) - { - print "dnp3_analog_input_DPwFlag", is_orig, flag, value_low, value_high; - } - -event dnp3_frozen_analog_input_32wFlag(c: connection, is_orig: bool, flag: count, frozen_value: count) - { - print "dnp3_frozen_analog_input_32wFlag", is_orig, flag, frozen_value; - } - -event dnp3_frozen_analog_input_16wFlag(c: connection, is_orig: bool, flag: count, frozen_value: count) - { - print "dnp3_frozen_analog_input_16wFlag", is_orig, flag, frozen_value; - } - -event dnp3_frozen_analog_input_32wTime(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: count) - { - print "dnp3_frozen_analog_input_32wTime", is_orig, flag, frozen_value, time48; - } - -event dnp3_frozen_analog_input_16wTime(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: count) - { - print "dnp3_frozen_analog_input_16wTime", is_orig, flag, frozen_value, time48; - } - -event dnp3_frozen_analog_input_32woFlag(c: connection, is_orig: bool, frozen_value: count) - { - print "dnp3_frozen_analog_input_32woFlag", is_orig, frozen_value; - } - -event dnp3_frozen_analog_input_16woFlag(c: connection, is_orig: bool, frozen_value: count) - { - print "dnp3_frozen_analog_input_16woFlag", is_orig, frozen_value; - } - -event dnp3_frozen_analog_input_SPwFlag(c: connection, is_orig: bool, flag: count, frozen_value: count) - { - print "dnp3_frozen_analog_input_SPwFlag", is_orig, flag, frozen_value; - } - -event dnp3_frozen_analog_input_DPwFlag(c: connection, is_orig: bool, flag: count, frozen_value_low: count, frozen_value_high: count) - { - print "dnp3_frozen_analog_input_DPwFlag", is_orig, flag, frozen_value_low, frozen_value_high; - } - -event dnp3_analog_input_event_32woTime(c: connection, is_orig: bool, flag: count, value: count) - { - print "dnp3_analog_input_event_32woTime", is_orig, flag, value; - } - -event dnp3_analog_input_event_16woTime(c: connection, is_orig: bool, flag: count, value: count) - { - print "dnp3_analog_input_event_16woTime", is_orig, flag, value; - } - -event dnp3_analog_input_event_32wTime(c: connection, is_orig: bool, flag: count, value: count, time48: count) - { - print "dnp3_analog_input_event_32wTime", is_orig, flag, value, time48; - } - -event dnp3_analog_input_16wTime(c: connection, is_orig: bool, flag: count, value: count, time48: count) - { - print "dnp3_analog_input_event_16wTime", is_orig, flag, value, time48; - } - -event dnp3_analog_inputSP_woTime(c: connection, is_orig: bool, flag: count, value: count) - { - print 
"dnp3_analog_input_event_SPwoTime", is_orig, flag, value; - } - -event dnp3_analog_inputDP_woTime(c: connection, is_orig: bool, flag: count, value_low: count, value_high: count) - { - print "dnp3_analog_input_event_DPwoTime", is_orig, flag, value_low, value_high; - } - -event dnp3_analog_inputSP_wTime(c: connection, is_orig: bool, flag: count, value: count, time48: count) - { - print "dnp3_analog_input_event_SPwTime", is_orig, flag, value, time48; - } - -event dnp3_analog_inputDP_wTime(c: connection, is_orig: bool, flag: count, value_low: count, value_high: count, time48: count) - { - print "dnp3_analog_input_event_DPwTime", is_orig, flag, value_low, value_high, time48; - } - -event dnp3_frozen_analog_input_event_32woTime(c: connection, is_orig: bool, flag: count, frozen_value: count) - { - print "dnp3_frozen_analog_input_event_32woTime", is_orig, flag, frozen_value; - } - -event dnp3_frozen_analog_input_event_16woTime(c: connection, is_orig: bool, flag: count, frozen_value: count) - { - print "dnp3_frozen_analog_input_event_16woTime", is_orig, flag, frozen_value; - } - -event dnp3_frozen_analog_input_event_32wTime(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: count) - { - print "dnp3_frozen_analog_input_event_32wTime", is_orig, flag, frozen_value, time48; - } - -event dnp3_frozen_analog_input_event_16wTime(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: count) - { - print "dnp3_frozen_analog_input_event_16wTime", is_orig, flag, frozen_value, time48; - } - -event dnp3_frozen_analog_input_event_SPwoTime(c: connection, is_orig: bool, flag: count, frozen_value: count) - { - print "dnp3_frozen_analog_input_event_SPwoTime", is_orig, flag, frozen_value; - } - -event dnp3_frozen_analog_input_event_DPwoTime(c: connection, is_orig: bool, flag: count, frozen_value_low: count, frozen_value_high: count) - { - print "dnp3_frozen_analog_input_event_DPwoTime", is_orig, flag, frozen_value_low, frozen_value_high; - } - -event dnp3_frozen_analog_input_event_SPwTime(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: count) - { - print "dnp3_frozen_analog_inputeventSP_wTime", is_orig, flag, frozen_value, time48; - } - -event dnp3_frozen_analog_input_event_DPwTime(c: connection, is_orig: bool, flag: count, frozen_value_low: count, frozen_value_high: count, time48: count) - { - print "dnp3_frozen_analog_inputeventDP_wTime", is_orig, flag, frozen_value_low, frozen_value_high, time48; - } - -event dnp3_file_transport(c: connection, is_orig: bool, file_handle: count, block_num: count, file_data: string) - { - print "dnp3_file_transport", is_orig, file_handle, block_num; - print hexdump(file_data); - } - -event dnp3_debug_byte(c: connection, is_orig: bool, debug: string) -{ - print "dnp3_debug_byte", is_orig, debug; -} - - diff --git a/testing/btest/scripts/base/protocols/dnp3/events.zeek b/testing/btest/scripts/base/protocols/dnp3/events.zeek new file mode 100644 index 0000000000..ec871b0932 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dnp3/events.zeek @@ -0,0 +1,266 @@ +# +# @TEST-EXEC: zeek -r $TRACES/dnp3/dnp3.trace %INPUT >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: cat output | awk '{print $1}' | sort | uniq | wc -l >covered +# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/dnp3/events.bif | grep "^event dnp3_" | wc -l >total +# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage +# @TEST-EXEC: btest-diff coverage +# @TEST-EXEC: btest-diff dnp3.log +# +event 
dnp3_application_request_header(c: connection, is_orig: bool, application_control: count, fc: count) + { + print "dnp3_application_request_header", is_orig, application_control, fc; + } + +event dnp3_application_response_header(c: connection, is_orig: bool, application_control: count, fc: count, iin: count) + { + print "dnp3_application_response_header", is_orig, application_control, fc, iin; + } + +event dnp3_object_header(c: connection, is_orig: bool, obj_type: count, qua_field: count, number: count, rf_low: count, rf_high: count) + { + print "dnp3_object_header", is_orig, obj_type, qua_field, number, rf_low, rf_high; + } + +event dnp3_object_prefix(c: connection, is_orig: bool, prefix_value: count) + { + print "dnp3_object_prefix", is_orig, prefix_value; + } + +event dnp3_header_block(c: connection, is_orig: bool, len: count, ctrl: count, dest_addr: count, src_addr: count) + { + print "dnp3_header_block", is_orig, len, ctrl, dest_addr, src_addr; + } + +event dnp3_response_data_object(c: connection, is_orig: bool, data_value: count) + { + print "dnp3_response_data_object", is_orig, data_value; + } + +event dnp3_attribute_common(c: connection, is_orig: bool, data_type_code: count, leng: count, attribute_obj: string) + { + print "dnp3_attribute_common", is_orig, data_type_code, leng, attribute_obj; + } + +event dnp3_crob(c: connection, is_orig: bool, control_code: count, count8: count, on_time: count, off_time: count, status_code: count) + { + print "dnp3_crob", is_orig, control_code, count8, on_time, off_time, status_code; + } + +event dnp3_pcb(c: connection, is_orig: bool, control_code: count, count8: count, on_time: count, off_time: count, status_code: count) + { + print "dnp3_pcb", is_orig, control_code, count8, on_time, off_time, status_code; + } + +event dnp3_counter_32wFlag(c: connection, is_orig: bool, flag: count, count_value: count) + { + print "dnp3_counter_32wFlag", is_orig, flag, count_value; + } + +event dnp3_counter_16wFlag(c: connection, is_orig: bool, flag: count, count_value: count) + { + print "dnp3_counter_16wFlag", is_orig, flag, count_value; + } + +event dnp3_counter_32woFlag(c: connection, is_orig: bool, count_value: count) + { + print "dnp3_counter_32woFlag", is_orig, count_value; + } + +event dnp3_counter_16woFlag(c: connection, is_orig: bool, count_value: count) + { + print "dnp3_counter_16woFlag", is_orig, count_value; + } + +event dnp3_frozen_counter_32wFlag(c: connection, is_orig: bool, flag:count, count_value: count) + { + print "dnp3_frozen_counter_32wFlag", is_orig, flag; + } + +event dnp3_frozen_counter_16wFlag(c: connection, is_orig: bool, flag:count, count_value: count) + { + print "dnp3_frozen_counter_16wFlag", is_orig, flag; + } + +event dnp3_frozen_counter_32wFlagTime(c: connection, is_orig: bool, flag:count, count_value: count, time48: count) + { + print "dnp3_frozen_counter_32wFlagTime", is_orig, flag; + } + +event dnp3_frozen_counter_16wFlagTime(c: connection, is_orig: bool, flag:count, count_value: count, time48: count) + { + print "dnp3_frozen_counter_16wFlagTime", is_orig, flag; + } + +event dnp3_frozen_counter_32woFlag(c: connection, is_orig: bool, count_value: count) + { + print "dnp3_frozen_counter_32woFlag", is_orig, count_value; + } + +event dnp3_frozen_counter_16woFlag(c: connection, is_orig: bool, count_value: count) + { + print "dnp3_frozen_counter_16woFlag", is_orig, count_value; + } + +event dnp3_analog_input_32wFlag(c: connection, is_orig: bool, flag: count, value: count) + { + print "dnp3_analog_input_32wFlag", is_orig, flag, value; + 
} + +event dnp3_analog_input_16wFlag(c: connection, is_orig: bool, flag: count, value: count) + { + print "dnp3_analog_input_16wFlag", is_orig, flag, value; + } + +event dnp3_analog_input_32woFlag(c: connection, is_orig: bool, value: count) + { + print "dnp3_analog_input_32woFlag", is_orig, value; + } + +event dnp3_analog_input_16woFlag(c: connection, is_orig: bool, value: count) + { + print "dnp3_analog_input_16woFlag", is_orig, value; + } + +event dnp3_analog_input_SPwFlag(c: connection, is_orig: bool, flag: count, value: count) + { + print "dnp3_analog_input_SPwFlag", is_orig, flag, value; + } + +event dnp3_analog_input_DPwFlag(c: connection, is_orig: bool, flag: count, value_low: count, value_high: count) + { + print "dnp3_analog_input_DPwFlag", is_orig, flag, value_low, value_high; + } + +event dnp3_frozen_analog_input_32wFlag(c: connection, is_orig: bool, flag: count, frozen_value: count) + { + print "dnp3_frozen_analog_input_32wFlag", is_orig, flag, frozen_value; + } + +event dnp3_frozen_analog_input_16wFlag(c: connection, is_orig: bool, flag: count, frozen_value: count) + { + print "dnp3_frozen_analog_input_16wFlag", is_orig, flag, frozen_value; + } + +event dnp3_frozen_analog_input_32wTime(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: count) + { + print "dnp3_frozen_analog_input_32wTime", is_orig, flag, frozen_value, time48; + } + +event dnp3_frozen_analog_input_16wTime(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: count) + { + print "dnp3_frozen_analog_input_16wTime", is_orig, flag, frozen_value, time48; + } + +event dnp3_frozen_analog_input_32woFlag(c: connection, is_orig: bool, frozen_value: count) + { + print "dnp3_frozen_analog_input_32woFlag", is_orig, frozen_value; + } + +event dnp3_frozen_analog_input_16woFlag(c: connection, is_orig: bool, frozen_value: count) + { + print "dnp3_frozen_analog_input_16woFlag", is_orig, frozen_value; + } + +event dnp3_frozen_analog_input_SPwFlag(c: connection, is_orig: bool, flag: count, frozen_value: count) + { + print "dnp3_frozen_analog_input_SPwFlag", is_orig, flag, frozen_value; + } + +event dnp3_frozen_analog_input_DPwFlag(c: connection, is_orig: bool, flag: count, frozen_value_low: count, frozen_value_high: count) + { + print "dnp3_frozen_analog_input_DPwFlag", is_orig, flag, frozen_value_low, frozen_value_high; + } + +event dnp3_analog_input_event_32woTime(c: connection, is_orig: bool, flag: count, value: count) + { + print "dnp3_analog_input_event_32woTime", is_orig, flag, value; + } + +event dnp3_analog_input_event_16woTime(c: connection, is_orig: bool, flag: count, value: count) + { + print "dnp3_analog_input_event_16woTime", is_orig, flag, value; + } + +event dnp3_analog_input_event_32wTime(c: connection, is_orig: bool, flag: count, value: count, time48: count) + { + print "dnp3_analog_input_event_32wTime", is_orig, flag, value, time48; + } + +event dnp3_analog_input_16wTime(c: connection, is_orig: bool, flag: count, value: count, time48: count) + { + print "dnp3_analog_input_event_16wTime", is_orig, flag, value, time48; + } + +event dnp3_analog_inputSP_woTime(c: connection, is_orig: bool, flag: count, value: count) + { + print "dnp3_analog_input_event_SPwoTime", is_orig, flag, value; + } + +event dnp3_analog_inputDP_woTime(c: connection, is_orig: bool, flag: count, value_low: count, value_high: count) + { + print "dnp3_analog_input_event_DPwoTime", is_orig, flag, value_low, value_high; + } + +event dnp3_analog_inputSP_wTime(c: connection, is_orig: bool, flag: count, value: 
count, time48: count) + { + print "dnp3_analog_input_event_SPwTime", is_orig, flag, value, time48; + } + +event dnp3_analog_inputDP_wTime(c: connection, is_orig: bool, flag: count, value_low: count, value_high: count, time48: count) + { + print "dnp3_analog_input_event_DPwTime", is_orig, flag, value_low, value_high, time48; + } + +event dnp3_frozen_analog_input_event_32woTime(c: connection, is_orig: bool, flag: count, frozen_value: count) + { + print "dnp3_frozen_analog_input_event_32woTime", is_orig, flag, frozen_value; + } + +event dnp3_frozen_analog_input_event_16woTime(c: connection, is_orig: bool, flag: count, frozen_value: count) + { + print "dnp3_frozen_analog_input_event_16woTime", is_orig, flag, frozen_value; + } + +event dnp3_frozen_analog_input_event_32wTime(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: count) + { + print "dnp3_frozen_analog_input_event_32wTime", is_orig, flag, frozen_value, time48; + } + +event dnp3_frozen_analog_input_event_16wTime(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: count) + { + print "dnp3_frozen_analog_input_event_16wTime", is_orig, flag, frozen_value, time48; + } + +event dnp3_frozen_analog_input_event_SPwoTime(c: connection, is_orig: bool, flag: count, frozen_value: count) + { + print "dnp3_frozen_analog_input_event_SPwoTime", is_orig, flag, frozen_value; + } + +event dnp3_frozen_analog_input_event_DPwoTime(c: connection, is_orig: bool, flag: count, frozen_value_low: count, frozen_value_high: count) + { + print "dnp3_frozen_analog_input_event_DPwoTime", is_orig, flag, frozen_value_low, frozen_value_high; + } + +event dnp3_frozen_analog_input_event_SPwTime(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: count) + { + print "dnp3_frozen_analog_inputeventSP_wTime", is_orig, flag, frozen_value, time48; + } + +event dnp3_frozen_analog_input_event_DPwTime(c: connection, is_orig: bool, flag: count, frozen_value_low: count, frozen_value_high: count, time48: count) + { + print "dnp3_frozen_analog_inputeventDP_wTime", is_orig, flag, frozen_value_low, frozen_value_high, time48; + } + +event dnp3_file_transport(c: connection, is_orig: bool, file_handle: count, block_num: count, file_data: string) + { + print "dnp3_file_transport", is_orig, file_handle, block_num; + print hexdump(file_data); + } + +event dnp3_debug_byte(c: connection, is_orig: bool, debug: string) +{ + print "dnp3_debug_byte", is_orig, debug; +} + + diff --git a/testing/btest/scripts/base/protocols/dns/caa.bro b/testing/btest/scripts/base/protocols/dns/caa.bro deleted file mode 100644 index 9a0f4701de..0000000000 --- a/testing/btest/scripts/base/protocols/dns/caa.bro +++ /dev/null @@ -1,7 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/dns-caa.pcap %INPUT -# @TEST-EXEC: btest-diff .stdout - -event dns_CAA_reply(c: connection, msg: dns_msg, ans: dns_answer, flags: count, tag: string, value: string) - { - print flags,tag,value; - } diff --git a/testing/btest/scripts/base/protocols/dns/caa.zeek b/testing/btest/scripts/base/protocols/dns/caa.zeek new file mode 100644 index 0000000000..4c3b5af22d --- /dev/null +++ b/testing/btest/scripts/base/protocols/dns/caa.zeek @@ -0,0 +1,7 @@ +# @TEST-EXEC: zeek -r $TRACES/dns-caa.pcap %INPUT +# @TEST-EXEC: btest-diff .stdout + +event dns_CAA_reply(c: connection, msg: dns_msg, ans: dns_answer, flags: count, tag: string, value: string) + { + print flags,tag,value; + } diff --git a/testing/btest/scripts/base/protocols/dns/dns-key.bro b/testing/btest/scripts/base/protocols/dns/dns-key.bro deleted 
file mode 100644 index 4880ad3530..0000000000 --- a/testing/btest/scripts/base/protocols/dns/dns-key.bro +++ /dev/null @@ -1,4 +0,0 @@ -# Making sure DNSKEY gets logged as such. -# -# @TEST-EXEC: bro -r $TRACES/dnssec/dnskey2.pcap -# @TEST-EXEC: btest-diff dns.log diff --git a/testing/btest/scripts/base/protocols/dns/dns-key.zeek b/testing/btest/scripts/base/protocols/dns/dns-key.zeek new file mode 100644 index 0000000000..7ab37cb015 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dns/dns-key.zeek @@ -0,0 +1,4 @@ +# Making sure DNSKEY gets logged as such. +# +# @TEST-EXEC: zeek -r $TRACES/dnssec/dnskey2.pcap +# @TEST-EXEC: btest-diff dns.log diff --git a/testing/btest/scripts/base/protocols/dns/dnskey.bro b/testing/btest/scripts/base/protocols/dns/dnskey.bro deleted file mode 100644 index 9297dc696a..0000000000 --- a/testing/btest/scripts/base/protocols/dns/dnskey.bro +++ /dev/null @@ -1,35 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/dnssec/dnskey.pcap %INPUT > output -# @TEST-EXEC: btest-diff dns.log -# @TEST-EXEC: btest-diff output - -#@load policy/protocols/dns/auth-addl - -event dns_RRSIG(c: connection, msg: dns_msg, ans: dns_answer, rrsig: dns_rrsig_rr) - { - print "RRSIG", rrsig, bytestring_to_hexstr(rrsig$signature); - } - -event dns_DNSKEY(c: connection, msg: dns_msg, ans: dns_answer, dnskey: dns_dnskey_rr) - { - print "DNSKEY", dnskey, bytestring_to_hexstr(dnskey$public_key); - } - -event dns_NSEC(c: connection, msg: dns_msg, ans: dns_answer, next_name: string, bitmaps: string_vec) - { - print "NSEC", next_name, bitmaps; - - for ( i in bitmaps ) - print bytestring_to_hexstr(bitmaps[i]); - } - -event dns_NSEC3(c: connection, msg: dns_msg, ans: dns_answer, nsec3: dns_nsec3_rr) - { - print "NSEC3", nsec3, - bytestring_to_hexstr(nsec3$nsec_salt), - bytestring_to_hexstr(nsec3$nsec_hash); - } - -event dns_DS(c: connection, msg: dns_msg, ans: dns_answer, ds: dns_ds_rr) - { - print "DS", ds, bytestring_to_hexstr(ds$digest_val); - } diff --git a/testing/btest/scripts/base/protocols/dns/dnskey.zeek b/testing/btest/scripts/base/protocols/dns/dnskey.zeek new file mode 100644 index 0000000000..b790b832cf --- /dev/null +++ b/testing/btest/scripts/base/protocols/dns/dnskey.zeek @@ -0,0 +1,35 @@ +# @TEST-EXEC: zeek -C -r $TRACES/dnssec/dnskey.pcap %INPUT > output +# @TEST-EXEC: btest-diff dns.log +# @TEST-EXEC: btest-diff output + +#@load policy/protocols/dns/auth-addl + +event dns_RRSIG(c: connection, msg: dns_msg, ans: dns_answer, rrsig: dns_rrsig_rr) + { + print "RRSIG", rrsig, bytestring_to_hexstr(rrsig$signature); + } + +event dns_DNSKEY(c: connection, msg: dns_msg, ans: dns_answer, dnskey: dns_dnskey_rr) + { + print "DNSKEY", dnskey, bytestring_to_hexstr(dnskey$public_key); + } + +event dns_NSEC(c: connection, msg: dns_msg, ans: dns_answer, next_name: string, bitmaps: string_vec) + { + print "NSEC", next_name, bitmaps; + + for ( i in bitmaps ) + print bytestring_to_hexstr(bitmaps[i]); + } + +event dns_NSEC3(c: connection, msg: dns_msg, ans: dns_answer, nsec3: dns_nsec3_rr) + { + print "NSEC3", nsec3, + bytestring_to_hexstr(nsec3$nsec_salt), + bytestring_to_hexstr(nsec3$nsec_hash); + } + +event dns_DS(c: connection, msg: dns_msg, ans: dns_answer, ds: dns_ds_rr) + { + print "DS", ds, bytestring_to_hexstr(ds$digest_val); + } diff --git a/testing/btest/scripts/base/protocols/dns/ds.bro b/testing/btest/scripts/base/protocols/dns/ds.bro deleted file mode 100644 index ecb90514cd..0000000000 --- a/testing/btest/scripts/base/protocols/dns/ds.bro +++ /dev/null @@ -1,35 +0,0 @@ -# @TEST-EXEC: 
bro -C -r $TRACES/dnssec/ds.pcap %INPUT > output -# @TEST-EXEC: btest-diff dns.log -# @TEST-EXEC: btest-diff output - -#@load policy/protocols/dns/auth-addl - -event dns_RRSIG(c: connection, msg: dns_msg, ans: dns_answer, rrsig: dns_rrsig_rr) - { - print "RRSIG", rrsig, bytestring_to_hexstr(rrsig$signature); - } - -event dns_DNSKEY(c: connection, msg: dns_msg, ans: dns_answer, dnskey: dns_dnskey_rr) - { - print "DNSKEY", dnskey, bytestring_to_hexstr(dnskey$public_key); - } - -event dns_NSEC(c: connection, msg: dns_msg, ans: dns_answer, next_name: string, bitmaps: string_vec) - { - print "NSEC", next_name, bitmaps; - - for ( i in bitmaps ) - print bytestring_to_hexstr(bitmaps[i]); - } - -event dns_NSEC3(c: connection, msg: dns_msg, ans: dns_answer, nsec3: dns_nsec3_rr) - { - print "NSEC3", nsec3, - bytestring_to_hexstr(nsec3$nsec_salt), - bytestring_to_hexstr(nsec3$nsec_hash); - } - -event dns_DS(c: connection, msg: dns_msg, ans: dns_answer, ds: dns_ds_rr) - { - print "DS", ds, bytestring_to_hexstr(ds$digest_val); - } diff --git a/testing/btest/scripts/base/protocols/dns/ds.zeek b/testing/btest/scripts/base/protocols/dns/ds.zeek new file mode 100644 index 0000000000..4c1a75562f --- /dev/null +++ b/testing/btest/scripts/base/protocols/dns/ds.zeek @@ -0,0 +1,35 @@ +# @TEST-EXEC: zeek -C -r $TRACES/dnssec/ds.pcap %INPUT > output +# @TEST-EXEC: btest-diff dns.log +# @TEST-EXEC: btest-diff output + +#@load policy/protocols/dns/auth-addl + +event dns_RRSIG(c: connection, msg: dns_msg, ans: dns_answer, rrsig: dns_rrsig_rr) + { + print "RRSIG", rrsig, bytestring_to_hexstr(rrsig$signature); + } + +event dns_DNSKEY(c: connection, msg: dns_msg, ans: dns_answer, dnskey: dns_dnskey_rr) + { + print "DNSKEY", dnskey, bytestring_to_hexstr(dnskey$public_key); + } + +event dns_NSEC(c: connection, msg: dns_msg, ans: dns_answer, next_name: string, bitmaps: string_vec) + { + print "NSEC", next_name, bitmaps; + + for ( i in bitmaps ) + print bytestring_to_hexstr(bitmaps[i]); + } + +event dns_NSEC3(c: connection, msg: dns_msg, ans: dns_answer, nsec3: dns_nsec3_rr) + { + print "NSEC3", nsec3, + bytestring_to_hexstr(nsec3$nsec_salt), + bytestring_to_hexstr(nsec3$nsec_hash); + } + +event dns_DS(c: connection, msg: dns_msg, ans: dns_answer, ds: dns_ds_rr) + { + print "DS", ds, bytestring_to_hexstr(ds$digest_val); + } diff --git a/testing/btest/scripts/base/protocols/dns/duplicate-reponses.bro b/testing/btest/scripts/base/protocols/dns/duplicate-reponses.bro deleted file mode 100644 index e13b3b4807..0000000000 --- a/testing/btest/scripts/base/protocols/dns/duplicate-reponses.bro +++ /dev/null @@ -1,4 +0,0 @@ -# This tests the case where the DNS server responded with zero RRs. -# -# @TEST-EXEC: bro -r $TRACES/dns-two-responses.trace -# @TEST-EXEC: btest-diff dns.log diff --git a/testing/btest/scripts/base/protocols/dns/duplicate-reponses.zeek b/testing/btest/scripts/base/protocols/dns/duplicate-reponses.zeek new file mode 100644 index 0000000000..91f37fa723 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dns/duplicate-reponses.zeek @@ -0,0 +1,4 @@ +# This tests the case where the DNS server responded with zero RRs. 
+# +# @TEST-EXEC: zeek -r $TRACES/dns-two-responses.trace +# @TEST-EXEC: btest-diff dns.log diff --git a/testing/btest/scripts/base/protocols/dns/flip.bro b/testing/btest/scripts/base/protocols/dns/flip.bro deleted file mode 100644 index 66987ee27d..0000000000 --- a/testing/btest/scripts/base/protocols/dns/flip.bro +++ /dev/null @@ -1,3 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/dns53.pcap -# @TEST-EXEC: btest-diff dns.log -# If the DNS reply is seen first, should be able to correctly set orig/resp. diff --git a/testing/btest/scripts/base/protocols/dns/flip.zeek b/testing/btest/scripts/base/protocols/dns/flip.zeek new file mode 100644 index 0000000000..92058c6c49 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dns/flip.zeek @@ -0,0 +1,3 @@ +# @TEST-EXEC: zeek -r $TRACES/dns53.pcap +# @TEST-EXEC: btest-diff dns.log +# If the DNS reply is seen first, should be able to correctly set orig/resp. diff --git a/testing/btest/scripts/base/protocols/dns/huge-ttl.bro b/testing/btest/scripts/base/protocols/dns/huge-ttl.bro deleted file mode 100644 index ee6a76e978..0000000000 --- a/testing/btest/scripts/base/protocols/dns/huge-ttl.bro +++ /dev/null @@ -1,7 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/dns-huge-ttl.pcap %INPUT -# @TEST-EXEC: btest-diff .stdout - -event dns_A_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) - { - print ans; - } diff --git a/testing/btest/scripts/base/protocols/dns/huge-ttl.zeek b/testing/btest/scripts/base/protocols/dns/huge-ttl.zeek new file mode 100644 index 0000000000..90ed2275b0 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dns/huge-ttl.zeek @@ -0,0 +1,7 @@ +# @TEST-EXEC: zeek -r $TRACES/dns-huge-ttl.pcap %INPUT +# @TEST-EXEC: btest-diff .stdout + +event dns_A_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) + { + print ans; + } diff --git a/testing/btest/scripts/base/protocols/dns/multiple-txt-strings.bro b/testing/btest/scripts/base/protocols/dns/multiple-txt-strings.bro deleted file mode 100644 index 4a15792702..0000000000 --- a/testing/btest/scripts/base/protocols/dns/multiple-txt-strings.bro +++ /dev/null @@ -1,4 +0,0 @@ -# This tests the case where the DNS server responded with zero RRs. -# -# @TEST-EXEC: bro -r $TRACES/dns-txt-multiple.trace -# @TEST-EXEC: btest-diff dns.log diff --git a/testing/btest/scripts/base/protocols/dns/multiple-txt-strings.zeek b/testing/btest/scripts/base/protocols/dns/multiple-txt-strings.zeek new file mode 100644 index 0000000000..55ea225106 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dns/multiple-txt-strings.zeek @@ -0,0 +1,4 @@ +# This tests the case where the DNS server responded with zero RRs. 
+# +# @TEST-EXEC: zeek -r $TRACES/dns-txt-multiple.trace +# @TEST-EXEC: btest-diff dns.log diff --git a/testing/btest/scripts/base/protocols/dns/nsec.bro b/testing/btest/scripts/base/protocols/dns/nsec.bro deleted file mode 100644 index 8d9b1c91a7..0000000000 --- a/testing/btest/scripts/base/protocols/dns/nsec.bro +++ /dev/null @@ -1,35 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/dnssec/nsec.pcap %INPUT > output -# @TEST-EXEC: btest-diff dns.log -# @TEST-EXEC: btest-diff output - -@load policy/protocols/dns/auth-addl - -event dns_RRSIG(c: connection, msg: dns_msg, ans: dns_answer, rrsig: dns_rrsig_rr) - { - print "RRSIG", rrsig, bytestring_to_hexstr(rrsig$signature); - } - -event dns_DNSKEY(c: connection, msg: dns_msg, ans: dns_answer, dnskey: dns_dnskey_rr) - { - print "DNSKEY", dnskey, bytestring_to_hexstr(dnskey$public_key); - } - -event dns_NSEC(c: connection, msg: dns_msg, ans: dns_answer, next_name: string, bitmaps: string_vec) - { - print "NSEC", next_name, bitmaps; - - for ( i in bitmaps ) - print bytestring_to_hexstr(bitmaps[i]); - } - -event dns_NSEC3(c: connection, msg: dns_msg, ans: dns_answer, nsec3: dns_nsec3_rr) - { - print "NSEC3", nsec3, - bytestring_to_hexstr(nsec3$nsec_salt), - bytestring_to_hexstr(nsec3$nsec_hash); - } - -event dns_DS(c: connection, msg: dns_msg, ans: dns_answer, ds: dns_ds_rr) - { - print "DS", ds, bytestring_to_hexstr(ds$digest_val); - } diff --git a/testing/btest/scripts/base/protocols/dns/nsec.zeek b/testing/btest/scripts/base/protocols/dns/nsec.zeek new file mode 100644 index 0000000000..006e24057b --- /dev/null +++ b/testing/btest/scripts/base/protocols/dns/nsec.zeek @@ -0,0 +1,35 @@ +# @TEST-EXEC: zeek -C -r $TRACES/dnssec/nsec.pcap %INPUT > output +# @TEST-EXEC: btest-diff dns.log +# @TEST-EXEC: btest-diff output + +@load policy/protocols/dns/auth-addl + +event dns_RRSIG(c: connection, msg: dns_msg, ans: dns_answer, rrsig: dns_rrsig_rr) + { + print "RRSIG", rrsig, bytestring_to_hexstr(rrsig$signature); + } + +event dns_DNSKEY(c: connection, msg: dns_msg, ans: dns_answer, dnskey: dns_dnskey_rr) + { + print "DNSKEY", dnskey, bytestring_to_hexstr(dnskey$public_key); + } + +event dns_NSEC(c: connection, msg: dns_msg, ans: dns_answer, next_name: string, bitmaps: string_vec) + { + print "NSEC", next_name, bitmaps; + + for ( i in bitmaps ) + print bytestring_to_hexstr(bitmaps[i]); + } + +event dns_NSEC3(c: connection, msg: dns_msg, ans: dns_answer, nsec3: dns_nsec3_rr) + { + print "NSEC3", nsec3, + bytestring_to_hexstr(nsec3$nsec_salt), + bytestring_to_hexstr(nsec3$nsec_hash); + } + +event dns_DS(c: connection, msg: dns_msg, ans: dns_answer, ds: dns_ds_rr) + { + print "DS", ds, bytestring_to_hexstr(ds$digest_val); + } diff --git a/testing/btest/scripts/base/protocols/dns/nsec3.bro b/testing/btest/scripts/base/protocols/dns/nsec3.bro deleted file mode 100644 index 0710be8fea..0000000000 --- a/testing/btest/scripts/base/protocols/dns/nsec3.bro +++ /dev/null @@ -1,35 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/dnssec/nsec3.pcap %INPUT > output -# @TEST-EXEC: btest-diff dns.log -# @TEST-EXEC: btest-diff output - -@load policy/protocols/dns/auth-addl - -event dns_RRSIG(c: connection, msg: dns_msg, ans: dns_answer, rrsig: dns_rrsig_rr) - { - print "RRSIG", rrsig, bytestring_to_hexstr(rrsig$signature); - } - -event dns_DNSKEY(c: connection, msg: dns_msg, ans: dns_answer, dnskey: dns_dnskey_rr) - { - print "DNSKEY", dnskey, bytestring_to_hexstr(dnskey$public_key); - } - -event dns_NSEC(c: connection, msg: dns_msg, ans: dns_answer, next_name: string, bitmaps: 
string_vec) - { - print "NSEC", next_name, bitmaps; - - for ( i in bitmaps ) - print bytestring_to_hexstr(bitmaps[i]); - } - -event dns_NSEC3(c: connection, msg: dns_msg, ans: dns_answer, nsec3: dns_nsec3_rr) - { - print "NSEC3", nsec3, - bytestring_to_hexstr(nsec3$nsec_salt), - bytestring_to_hexstr(nsec3$nsec_hash); - } - -event dns_DS(c: connection, msg: dns_msg, ans: dns_answer, ds: dns_ds_rr) - { - print "DS", ds, bytestring_to_hexstr(ds$digest_val); - } diff --git a/testing/btest/scripts/base/protocols/dns/nsec3.zeek b/testing/btest/scripts/base/protocols/dns/nsec3.zeek new file mode 100644 index 0000000000..ce77ae857d --- /dev/null +++ b/testing/btest/scripts/base/protocols/dns/nsec3.zeek @@ -0,0 +1,35 @@ +# @TEST-EXEC: zeek -C -r $TRACES/dnssec/nsec3.pcap %INPUT > output +# @TEST-EXEC: btest-diff dns.log +# @TEST-EXEC: btest-diff output + +@load policy/protocols/dns/auth-addl + +event dns_RRSIG(c: connection, msg: dns_msg, ans: dns_answer, rrsig: dns_rrsig_rr) + { + print "RRSIG", rrsig, bytestring_to_hexstr(rrsig$signature); + } + +event dns_DNSKEY(c: connection, msg: dns_msg, ans: dns_answer, dnskey: dns_dnskey_rr) + { + print "DNSKEY", dnskey, bytestring_to_hexstr(dnskey$public_key); + } + +event dns_NSEC(c: connection, msg: dns_msg, ans: dns_answer, next_name: string, bitmaps: string_vec) + { + print "NSEC", next_name, bitmaps; + + for ( i in bitmaps ) + print bytestring_to_hexstr(bitmaps[i]); + } + +event dns_NSEC3(c: connection, msg: dns_msg, ans: dns_answer, nsec3: dns_nsec3_rr) + { + print "NSEC3", nsec3, + bytestring_to_hexstr(nsec3$nsec_salt), + bytestring_to_hexstr(nsec3$nsec_hash); + } + +event dns_DS(c: connection, msg: dns_msg, ans: dns_answer, ds: dns_ds_rr) + { + print "DS", ds, bytestring_to_hexstr(ds$digest_val); + } diff --git a/testing/btest/scripts/base/protocols/dns/rrsig.bro b/testing/btest/scripts/base/protocols/dns/rrsig.bro deleted file mode 100644 index 32b958a789..0000000000 --- a/testing/btest/scripts/base/protocols/dns/rrsig.bro +++ /dev/null @@ -1,35 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/dnssec/rrsig.pcap %INPUT > output -# @TEST-EXEC: btest-diff dns.log -# @TEST-EXEC: btest-diff output - -#@load policy/protocols/dns/auth-addl - -event dns_RRSIG(c: connection, msg: dns_msg, ans: dns_answer, rrsig: dns_rrsig_rr) - { - print "RRSIG", rrsig, bytestring_to_hexstr(rrsig$signature); - } - -event dns_DNSKEY(c: connection, msg: dns_msg, ans: dns_answer, dnskey: dns_dnskey_rr) - { - print "DNSKEY", dnskey, bytestring_to_hexstr(dnskey$public_key); - } - -event dns_NSEC(c: connection, msg: dns_msg, ans: dns_answer, next_name: string, bitmaps: string_vec) - { - print "NSEC", next_name, bitmaps; - - for ( i in bitmaps ) - print bytestring_to_hexstr(bitmaps[i]); - } - -event dns_NSEC3(c: connection, msg: dns_msg, ans: dns_answer, nsec3: dns_nsec3_rr) - { - print "NSEC3", nsec3, - bytestring_to_hexstr(nsec3$nsec_salt), - bytestring_to_hexstr(nsec3$nsec_hash); - } - -event dns_DS(c: connection, msg: dns_msg, ans: dns_answer, ds: dns_ds_rr) - { - print "DS", ds, bytestring_to_hexstr(ds$digest_val); - } diff --git a/testing/btest/scripts/base/protocols/dns/rrsig.zeek b/testing/btest/scripts/base/protocols/dns/rrsig.zeek new file mode 100644 index 0000000000..68f6a46e0a --- /dev/null +++ b/testing/btest/scripts/base/protocols/dns/rrsig.zeek @@ -0,0 +1,35 @@ +# @TEST-EXEC: zeek -C -r $TRACES/dnssec/rrsig.pcap %INPUT > output +# @TEST-EXEC: btest-diff dns.log +# @TEST-EXEC: btest-diff output + +#@load policy/protocols/dns/auth-addl + +event dns_RRSIG(c: connection, msg: 
dns_msg, ans: dns_answer, rrsig: dns_rrsig_rr) + { + print "RRSIG", rrsig, bytestring_to_hexstr(rrsig$signature); + } + +event dns_DNSKEY(c: connection, msg: dns_msg, ans: dns_answer, dnskey: dns_dnskey_rr) + { + print "DNSKEY", dnskey, bytestring_to_hexstr(dnskey$public_key); + } + +event dns_NSEC(c: connection, msg: dns_msg, ans: dns_answer, next_name: string, bitmaps: string_vec) + { + print "NSEC", next_name, bitmaps; + + for ( i in bitmaps ) + print bytestring_to_hexstr(bitmaps[i]); + } + +event dns_NSEC3(c: connection, msg: dns_msg, ans: dns_answer, nsec3: dns_nsec3_rr) + { + print "NSEC3", nsec3, + bytestring_to_hexstr(nsec3$nsec_salt), + bytestring_to_hexstr(nsec3$nsec_hash); + } + +event dns_DS(c: connection, msg: dns_msg, ans: dns_answer, ds: dns_ds_rr) + { + print "DS", ds, bytestring_to_hexstr(ds$digest_val); + } diff --git a/testing/btest/scripts/base/protocols/dns/spf.zeek b/testing/btest/scripts/base/protocols/dns/spf.zeek new file mode 100644 index 0000000000..14d743cb1d --- /dev/null +++ b/testing/btest/scripts/base/protocols/dns/spf.zeek @@ -0,0 +1,4 @@ +# @TEST-EXEC: zeek -b -r $TRACES/dns-spf.pcap %INPUT +# @TEST-EXEC: btest-diff dns.log + +@load base/protocols/dns \ No newline at end of file diff --git a/testing/btest/scripts/base/protocols/dns/tsig.bro b/testing/btest/scripts/base/protocols/dns/tsig.bro deleted file mode 100644 index 79de4cf9f1..0000000000 --- a/testing/btest/scripts/base/protocols/dns/tsig.bro +++ /dev/null @@ -1,10 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/dns-tsig.trace %INPUT >out -# @TEST-EXEC: btest-diff out - -redef dns_skip_all_addl = F; - -event dns_TSIG_addl(c: connection, msg: dns_msg, ans: dns_tsig_additional) - { - print ans; - print |ans$sig|; - } diff --git a/testing/btest/scripts/base/protocols/dns/tsig.zeek b/testing/btest/scripts/base/protocols/dns/tsig.zeek new file mode 100644 index 0000000000..7df31eb9c4 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dns/tsig.zeek @@ -0,0 +1,10 @@ +# @TEST-EXEC: zeek -r $TRACES/dns-tsig.trace %INPUT >out +# @TEST-EXEC: btest-diff out + +redef dns_skip_all_addl = F; + +event dns_TSIG_addl(c: connection, msg: dns_msg, ans: dns_tsig_additional) + { + print ans; + print |ans$sig|; + } diff --git a/testing/btest/scripts/base/protocols/dns/zero-responses.bro b/testing/btest/scripts/base/protocols/dns/zero-responses.bro deleted file mode 100644 index 54f7d7b7d3..0000000000 --- a/testing/btest/scripts/base/protocols/dns/zero-responses.bro +++ /dev/null @@ -1,4 +0,0 @@ -# This tests the case where the DNS server responded with zero RRs. -# -# @TEST-EXEC: bro -r $TRACES/dns-zero-RRs.trace -# @TEST-EXEC: btest-diff dns.log \ No newline at end of file diff --git a/testing/btest/scripts/base/protocols/dns/zero-responses.zeek b/testing/btest/scripts/base/protocols/dns/zero-responses.zeek new file mode 100644 index 0000000000..aff38b4402 --- /dev/null +++ b/testing/btest/scripts/base/protocols/dns/zero-responses.zeek @@ -0,0 +1,4 @@ +# This tests the case where the DNS server responded with zero RRs. 
+# +# @TEST-EXEC: zeek -r $TRACES/dns-zero-RRs.trace +# @TEST-EXEC: btest-diff dns.log \ No newline at end of file diff --git a/testing/btest/scripts/base/protocols/ftp/cwd-navigation.bro b/testing/btest/scripts/base/protocols/ftp/cwd-navigation.bro deleted file mode 100644 index c3c5de778a..0000000000 --- a/testing/btest/scripts/base/protocols/ftp/cwd-navigation.bro +++ /dev/null @@ -1,12 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/ftp/cwd-navigation.pcap >output.log %INPUT -# @TEST-EXEC: btest-diff conn.log -# @TEST-EXEC: btest-diff ftp.log -# @TEST-EXEC: btest-diff output.log - -# Make sure we're tracking the CWD correctly. -event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool) &priority=10 - { - print "CWD", c$ftp$cwd; - } - - diff --git a/testing/btest/scripts/base/protocols/ftp/cwd-navigation.zeek b/testing/btest/scripts/base/protocols/ftp/cwd-navigation.zeek new file mode 100644 index 0000000000..79b41fa28d --- /dev/null +++ b/testing/btest/scripts/base/protocols/ftp/cwd-navigation.zeek @@ -0,0 +1,12 @@ +# @TEST-EXEC: zeek -r $TRACES/ftp/cwd-navigation.pcap >output.log %INPUT +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff ftp.log +# @TEST-EXEC: btest-diff output.log + +# Make sure we're tracking the CWD correctly. +event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool) &priority=10 + { + print "CWD", c$ftp$cwd; + } + + diff --git a/testing/btest/scripts/base/protocols/ftp/ftp-get-file-size.bro b/testing/btest/scripts/base/protocols/ftp/ftp-get-file-size.bro deleted file mode 100644 index 4791d31460..0000000000 --- a/testing/btest/scripts/base/protocols/ftp/ftp-get-file-size.bro +++ /dev/null @@ -1,5 +0,0 @@ -# This tests extracting the server reported file size -# from FTP sessions. -# -# @TEST-EXEC: bro -r $TRACES/ftp/ftp-with-numbers-in-filename.pcap -# @TEST-EXEC: btest-diff ftp.log diff --git a/testing/btest/scripts/base/protocols/ftp/ftp-get-file-size.zeek b/testing/btest/scripts/base/protocols/ftp/ftp-get-file-size.zeek new file mode 100644 index 0000000000..42e90301b4 --- /dev/null +++ b/testing/btest/scripts/base/protocols/ftp/ftp-get-file-size.zeek @@ -0,0 +1,5 @@ +# This tests extracting the server reported file size +# from FTP sessions. +# +# @TEST-EXEC: zeek -r $TRACES/ftp/ftp-with-numbers-in-filename.pcap +# @TEST-EXEC: btest-diff ftp.log diff --git a/testing/btest/scripts/base/protocols/ftp/ftp-ipv4.bro b/testing/btest/scripts/base/protocols/ftp/ftp-ipv4.bro deleted file mode 100644 index cb58d4af8a..0000000000 --- a/testing/btest/scripts/base/protocols/ftp/ftp-ipv4.bro +++ /dev/null @@ -1,6 +0,0 @@ -# This tests both active and passive FTP over IPv4. -# -# @TEST-EXEC: bro -r $TRACES/ftp/ipv4.trace -# @TEST-EXEC: btest-diff conn.log -# @TEST-EXEC: btest-diff ftp.log - diff --git a/testing/btest/scripts/base/protocols/ftp/ftp-ipv4.zeek b/testing/btest/scripts/base/protocols/ftp/ftp-ipv4.zeek new file mode 100644 index 0000000000..f12ef0d109 --- /dev/null +++ b/testing/btest/scripts/base/protocols/ftp/ftp-ipv4.zeek @@ -0,0 +1,6 @@ +# This tests both active and passive FTP over IPv4. +# +# @TEST-EXEC: zeek -r $TRACES/ftp/ipv4.trace +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff ftp.log + diff --git a/testing/btest/scripts/base/protocols/ftp/ftp-ipv6.bro b/testing/btest/scripts/base/protocols/ftp/ftp-ipv6.bro deleted file mode 100644 index 87dfa7e052..0000000000 --- a/testing/btest/scripts/base/protocols/ftp/ftp-ipv6.bro +++ /dev/null @@ -1,6 +0,0 @@ -# This tests both active and passive FTP over IPv6. 
-# -# @TEST-EXEC: bro -r $TRACES/ftp/ipv6.trace -# @TEST-EXEC: btest-diff conn.log -# @TEST-EXEC: btest-diff ftp.log - diff --git a/testing/btest/scripts/base/protocols/ftp/ftp-ipv6.zeek b/testing/btest/scripts/base/protocols/ftp/ftp-ipv6.zeek new file mode 100644 index 0000000000..bb8bf9ca1b --- /dev/null +++ b/testing/btest/scripts/base/protocols/ftp/ftp-ipv6.zeek @@ -0,0 +1,6 @@ +# This tests both active and passive FTP over IPv6. +# +# @TEST-EXEC: zeek -r $TRACES/ftp/ipv6.trace +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff ftp.log + diff --git a/testing/btest/scripts/base/protocols/ftp/gridftp.test b/testing/btest/scripts/base/protocols/ftp/gridftp.test index 18b3bd956b..3981adc5ae 100644 --- a/testing/btest/scripts/base/protocols/ftp/gridftp.test +++ b/testing/btest/scripts/base/protocols/ftp/gridftp.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/globus-url-copy.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/globus-url-copy.trace %INPUT # @TEST-EXEC: btest-diff notice.log # @TEST-EXEC: btest-diff conn.log # @TEST-EXEC: btest-diff ssl.log diff --git a/testing/btest/scripts/base/protocols/http/100-continue.bro b/testing/btest/scripts/base/protocols/http/100-continue.bro deleted file mode 100644 index ed9e4970fe..0000000000 --- a/testing/btest/scripts/base/protocols/http/100-continue.bro +++ /dev/null @@ -1,12 +0,0 @@ -# This tests that the HTTP analyzer does not generate an unmatched_HTTP_reply -# weird as a result of seeing both a 1xx response and the real response to -# a given request. The http scripts should also be able log such replies -# in a way that correlates the final response with the request. -# -# @TEST-EXEC: bro -r $TRACES/http/100-continue.trace %INPUT -# @TEST-EXEC: test ! -f weird.log -# @TEST-EXEC: btest-diff http.log - -# The base analysis scripts are loaded by default. -#@load base/protocols/http - diff --git a/testing/btest/scripts/base/protocols/http/100-continue.zeek b/testing/btest/scripts/base/protocols/http/100-continue.zeek new file mode 100644 index 0000000000..110c6c2f4c --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/100-continue.zeek @@ -0,0 +1,12 @@ +# This tests that the HTTP analyzer does not generate an unmatched_HTTP_reply +# weird as a result of seeing both a 1xx response and the real response to +# a given request. The http scripts should also be able log such replies +# in a way that correlates the final response with the request. +# +# @TEST-EXEC: zeek -r $TRACES/http/100-continue.trace %INPUT +# @TEST-EXEC: test ! -f weird.log +# @TEST-EXEC: btest-diff http.log + +# The base analysis scripts are loaded by default. +#@load base/protocols/http + diff --git a/testing/btest/scripts/base/protocols/http/101-switching-protocols.bro b/testing/btest/scripts/base/protocols/http/101-switching-protocols.bro deleted file mode 100644 index b6aabb0de5..0000000000 --- a/testing/btest/scripts/base/protocols/http/101-switching-protocols.bro +++ /dev/null @@ -1,13 +0,0 @@ -# This tests that the HTTP analyzer does not generate a dpd error as a -# result of seeing an upgraded connection. -# -# @TEST-EXEC: bro -r $TRACES/http/websocket.pcap %INPUT -# @TEST-EXEC: test ! -f dpd.log -# @TEST-EXEC: test ! 
-f weird.log -# @TEST-EXEC: btest-diff http.log -# @TEST-EXEC: btest-diff .stdout - -event http_connection_upgrade(c: connection, protocol: string) - { - print fmt("Connection upgraded to %s", protocol); - } diff --git a/testing/btest/scripts/base/protocols/http/101-switching-protocols.zeek b/testing/btest/scripts/base/protocols/http/101-switching-protocols.zeek new file mode 100644 index 0000000000..e8ec4ff491 --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/101-switching-protocols.zeek @@ -0,0 +1,13 @@ +# This tests that the HTTP analyzer does not generate a dpd error as a +# result of seeing an upgraded connection. +# +# @TEST-EXEC: zeek -r $TRACES/http/websocket.pcap %INPUT +# @TEST-EXEC: test ! -f dpd.log +# @TEST-EXEC: test ! -f weird.log +# @TEST-EXEC: btest-diff http.log +# @TEST-EXEC: btest-diff .stdout + +event http_connection_upgrade(c: connection, protocol: string) + { + print fmt("Connection upgraded to %s", protocol); + } diff --git a/testing/btest/scripts/base/protocols/http/content-range-gap-skip.bro b/testing/btest/scripts/base/protocols/http/content-range-gap-skip.bro deleted file mode 100644 index b96b8f02a6..0000000000 --- a/testing/btest/scripts/base/protocols/http/content-range-gap-skip.bro +++ /dev/null @@ -1,26 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/content-range-gap-skip.trace %INPUT - -# In this trace, we should be able to determine that a gap lies -# entirely within the body of an entity that specifies Content-Range, -# and so further deliveries after the gap can still be made. - -global got_gap = F; -global got_data_after_gap = F; - -event http_entity_data(c: connection, is_orig: bool, length: count, - data: string) - { - if ( got_gap ) - got_data_after_gap = T; - } - -event content_gap(c: connection, is_orig: bool, seq: count, length: count) - { - got_gap = T; - } - -event bro_done() - { - if ( ! got_data_after_gap ) - exit(1); - } diff --git a/testing/btest/scripts/base/protocols/http/content-range-gap-skip.zeek b/testing/btest/scripts/base/protocols/http/content-range-gap-skip.zeek new file mode 100644 index 0000000000..f499543327 --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/content-range-gap-skip.zeek @@ -0,0 +1,26 @@ +# @TEST-EXEC: zeek -r $TRACES/http/content-range-gap-skip.trace %INPUT + +# In this trace, we should be able to determine that a gap lies +# entirely within the body of an entity that specifies Content-Range, +# and so further deliveries after the gap can still be made. + +global got_gap = F; +global got_data_after_gap = F; + +event http_entity_data(c: connection, is_orig: bool, length: count, + data: string) + { + if ( got_gap ) + got_data_after_gap = T; + } + +event content_gap(c: connection, is_orig: bool, seq: count, length: count) + { + got_gap = T; + } + +event zeek_done() + { + if ( ! 
got_data_after_gap ) + exit(1); + } diff --git a/testing/btest/scripts/base/protocols/http/content-range-gap.bro b/testing/btest/scripts/base/protocols/http/content-range-gap.bro deleted file mode 100644 index a62e8aa362..0000000000 --- a/testing/btest/scripts/base/protocols/http/content-range-gap.bro +++ /dev/null @@ -1,8 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/content-range-gap.trace %INPUT -# @TEST-EXEC: btest-diff extract_files/thefile - -event file_new(f: fa_file) - { - Files::add_analyzer(f, Files::ANALYZER_EXTRACT, - [$extract_filename="thefile"]); - } diff --git a/testing/btest/scripts/base/protocols/http/content-range-gap.zeek b/testing/btest/scripts/base/protocols/http/content-range-gap.zeek new file mode 100644 index 0000000000..d992ef4d38 --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/content-range-gap.zeek @@ -0,0 +1,8 @@ +# @TEST-EXEC: zeek -r $TRACES/http/content-range-gap.trace %INPUT +# @TEST-EXEC: btest-diff extract_files/thefile + +event file_new(f: fa_file) + { + Files::add_analyzer(f, Files::ANALYZER_EXTRACT, + [$extract_filename="thefile"]); + } diff --git a/testing/btest/scripts/base/protocols/http/content-range-less-than-len.bro b/testing/btest/scripts/base/protocols/http/content-range-less-than-len.bro deleted file mode 100644 index c95816b29f..0000000000 --- a/testing/btest/scripts/base/protocols/http/content-range-less-than-len.bro +++ /dev/null @@ -1,3 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/content-range-less-than-len.pcap -# @TEST-EXEC: btest-diff http.log -# @TEST-EXEC: btest-diff weird.log diff --git a/testing/btest/scripts/base/protocols/http/content-range-less-than-len.zeek b/testing/btest/scripts/base/protocols/http/content-range-less-than-len.zeek new file mode 100644 index 0000000000..e10e504635 --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/content-range-less-than-len.zeek @@ -0,0 +1,3 @@ +# @TEST-EXEC: zeek -r $TRACES/http/content-range-less-than-len.pcap +# @TEST-EXEC: btest-diff http.log +# @TEST-EXEC: btest-diff weird.log diff --git a/testing/btest/scripts/base/protocols/http/entity-gap.bro b/testing/btest/scripts/base/protocols/http/entity-gap.bro deleted file mode 100644 index 95d3e52759..0000000000 --- a/testing/btest/scripts/base/protocols/http/entity-gap.bro +++ /dev/null @@ -1,24 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/entity_gap.trace %INPUT -# @TEST-EXEC: btest-diff entity_data -# @TEST-EXEC: btest-diff extract_files/file0 - -global f = open("entity_data"); -global fn = 0; - -event http_entity_data(c: connection, is_orig: bool, length: count, - data: string) - { - print f, data; - } - -event content_gap(c: connection, is_orig: bool, seq: count, length: count) - { - print f, fmt("<%d byte gap>", length); - } - -event file_new(f: fa_file) - { - Files::add_analyzer(f, Files::ANALYZER_EXTRACT, - [$extract_filename=fmt("file%d", fn)]); - ++fn; - } diff --git a/testing/btest/scripts/base/protocols/http/entity-gap.zeek b/testing/btest/scripts/base/protocols/http/entity-gap.zeek new file mode 100644 index 0000000000..6f82801d2d --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/entity-gap.zeek @@ -0,0 +1,24 @@ +# @TEST-EXEC: zeek -r $TRACES/http/entity_gap.trace %INPUT +# @TEST-EXEC: btest-diff entity_data +# @TEST-EXEC: btest-diff extract_files/file0 + +global f = open("entity_data"); +global fn = 0; + +event http_entity_data(c: connection, is_orig: bool, length: count, + data: string) + { + print f, data; + } + +event content_gap(c: connection, is_orig: bool, seq: count, length: count) + { + print 
f, fmt("<%d byte gap>", length); + } + +event file_new(f: fa_file) + { + Files::add_analyzer(f, Files::ANALYZER_EXTRACT, + [$extract_filename=fmt("file%d", fn)]); + ++fn; + } diff --git a/testing/btest/scripts/base/protocols/http/entity-gap2.bro b/testing/btest/scripts/base/protocols/http/entity-gap2.bro deleted file mode 100644 index c9ade93b72..0000000000 --- a/testing/btest/scripts/base/protocols/http/entity-gap2.bro +++ /dev/null @@ -1,24 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/entity_gap2.trace %INPUT -# @TEST-EXEC: btest-diff entity_data -# @TEST-EXEC: btest-diff extract_files/file0 - -global f = open("entity_data"); -global fn = 0; - -event http_entity_data(c: connection, is_orig: bool, length: count, - data: string) - { - print f, data; - } - -event content_gap(c: connection, is_orig: bool, seq: count, length: count) - { - print f, fmt("<%d byte gap>", length); - } - -event file_new(f: fa_file) - { - Files::add_analyzer(f, Files::ANALYZER_EXTRACT, - [$extract_filename=fmt("file%d", fn)]); - ++fn; - } diff --git a/testing/btest/scripts/base/protocols/http/entity-gap2.zeek b/testing/btest/scripts/base/protocols/http/entity-gap2.zeek new file mode 100644 index 0000000000..e8703efc85 --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/entity-gap2.zeek @@ -0,0 +1,24 @@ +# @TEST-EXEC: zeek -r $TRACES/http/entity_gap2.trace %INPUT +# @TEST-EXEC: btest-diff entity_data +# @TEST-EXEC: btest-diff extract_files/file0 + +global f = open("entity_data"); +global fn = 0; + +event http_entity_data(c: connection, is_orig: bool, length: count, + data: string) + { + print f, data; + } + +event content_gap(c: connection, is_orig: bool, seq: count, length: count) + { + print f, fmt("<%d byte gap>", length); + } + +event file_new(f: fa_file) + { + Files::add_analyzer(f, Files::ANALYZER_EXTRACT, + [$extract_filename=fmt("file%d", fn)]); + ++fn; + } diff --git a/testing/btest/scripts/base/protocols/http/fake-content-length.bro b/testing/btest/scripts/base/protocols/http/fake-content-length.bro deleted file mode 100644 index 5993b18ed1..0000000000 --- a/testing/btest/scripts/base/protocols/http/fake-content-length.bro +++ /dev/null @@ -1,2 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/fake-content-length.pcap -# @TEST-EXEC: btest-diff http.log diff --git a/testing/btest/scripts/base/protocols/http/fake-content-length.zeek b/testing/btest/scripts/base/protocols/http/fake-content-length.zeek new file mode 100644 index 0000000000..30bb628958 --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/fake-content-length.zeek @@ -0,0 +1,2 @@ +# @TEST-EXEC: zeek -r $TRACES/http/fake-content-length.pcap +# @TEST-EXEC: btest-diff http.log diff --git a/testing/btest/scripts/base/protocols/http/http-bad-request-with-version.bro b/testing/btest/scripts/base/protocols/http/http-bad-request-with-version.bro deleted file mode 100644 index f95196e8bd..0000000000 --- a/testing/btest/scripts/base/protocols/http/http-bad-request-with-version.bro +++ /dev/null @@ -1,4 +0,0 @@ -# @TEST-EXEC: bro -Cr $TRACES/http/http-bad-request-with-version.trace %INPUT -# @TEST-EXEC: btest-diff http.log -# @TEST-EXEC: btest-diff weird.log - diff --git a/testing/btest/scripts/base/protocols/http/http-bad-request-with-version.zeek b/testing/btest/scripts/base/protocols/http/http-bad-request-with-version.zeek new file mode 100644 index 0000000000..dbd4747598 --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/http-bad-request-with-version.zeek @@ -0,0 +1,4 @@ +# @TEST-EXEC: zeek -Cr 
$TRACES/http/http-bad-request-with-version.trace %INPUT +# @TEST-EXEC: btest-diff http.log +# @TEST-EXEC: btest-diff weird.log + diff --git a/testing/btest/scripts/base/protocols/http/http-connect-with-header.bro b/testing/btest/scripts/base/protocols/http/http-connect-with-header.bro deleted file mode 100644 index 84172878f6..0000000000 --- a/testing/btest/scripts/base/protocols/http/http-connect-with-header.bro +++ /dev/null @@ -1,12 +0,0 @@ -# This tests that the HTTP analyzer handles HTTP CONNECT proxying correctly -# when the server includes a header line in its response. -# -# @TEST-EXEC: bro -C -r $TRACES/http/connect-with-header.trace %INPUT -# @TEST-EXEC: btest-diff conn.log -# @TEST-EXEC: btest-diff http.log -# @TEST-EXEC: btest-diff tunnel.log - -@load base/protocols/conn -@load base/protocols/http -@load base/protocols/tunnels -@load base/frameworks/dpd diff --git a/testing/btest/scripts/base/protocols/http/http-connect-with-header.zeek b/testing/btest/scripts/base/protocols/http/http-connect-with-header.zeek new file mode 100644 index 0000000000..6c2cbcc815 --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/http-connect-with-header.zeek @@ -0,0 +1,12 @@ +# This tests that the HTTP analyzer handles HTTP CONNECT proxying correctly +# when the server includes a header line in its response. +# +# @TEST-EXEC: zeek -C -r $TRACES/http/connect-with-header.trace %INPUT +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff http.log +# @TEST-EXEC: btest-diff tunnel.log + +@load base/protocols/conn +@load base/protocols/http +@load base/protocols/tunnels +@load base/frameworks/dpd diff --git a/testing/btest/scripts/base/protocols/http/http-connect.bro b/testing/btest/scripts/base/protocols/http/http-connect.bro deleted file mode 100644 index df6f3268b4..0000000000 --- a/testing/btest/scripts/base/protocols/http/http-connect.bro +++ /dev/null @@ -1,13 +0,0 @@ -# This tests that the HTTP analyzer handles HTTP CONNECT proxying correctly. -# -# @TEST-EXEC: bro -r $TRACES/http/connect-with-smtp.trace %INPUT -# @TEST-EXEC: btest-diff conn.log -# @TEST-EXEC: btest-diff http.log -# @TEST-EXEC: btest-diff smtp.log -# @TEST-EXEC: btest-diff tunnel.log - -@load base/protocols/conn -@load base/protocols/http -@load base/protocols/smtp -@load base/protocols/tunnels -@load base/frameworks/dpd diff --git a/testing/btest/scripts/base/protocols/http/http-connect.zeek b/testing/btest/scripts/base/protocols/http/http-connect.zeek new file mode 100644 index 0000000000..39cf3f3271 --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/http-connect.zeek @@ -0,0 +1,13 @@ +# This tests that the HTTP analyzer handles HTTP CONNECT proxying correctly. +# +# @TEST-EXEC: zeek -r $TRACES/http/connect-with-smtp.trace %INPUT +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff http.log +# @TEST-EXEC: btest-diff smtp.log +# @TEST-EXEC: btest-diff tunnel.log + +@load base/protocols/conn +@load base/protocols/http +@load base/protocols/smtp +@load base/protocols/tunnels +@load base/frameworks/dpd diff --git a/testing/btest/scripts/base/protocols/http/http-filename.bro b/testing/btest/scripts/base/protocols/http/http-filename.bro deleted file mode 100644 index b20bbddafe..0000000000 --- a/testing/btest/scripts/base/protocols/http/http-filename.bro +++ /dev/null @@ -1,8 +0,0 @@ -# This tests that the HTTP analyzer handles filenames over HTTP correctly. 
-# -# @TEST-EXEC: bro -r $TRACES/http/http-filename.pcap %INPUT -# @TEST-EXEC: btest-diff http.log - -# The base analysis scripts are loaded by default. -#@load base/protocols/http - diff --git a/testing/btest/scripts/base/protocols/http/http-filename.zeek b/testing/btest/scripts/base/protocols/http/http-filename.zeek new file mode 100644 index 0000000000..b3528191c0 --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/http-filename.zeek @@ -0,0 +1,8 @@ +# This tests that the HTTP analyzer handles filenames over HTTP correctly. +# +# @TEST-EXEC: zeek -r $TRACES/http/http-filename.pcap %INPUT +# @TEST-EXEC: btest-diff http.log + +# The base analysis scripts are loaded by default. +#@load base/protocols/http + diff --git a/testing/btest/scripts/base/protocols/http/http-header-crlf.bro b/testing/btest/scripts/base/protocols/http/http-header-crlf.bro deleted file mode 100644 index c9ba7afba3..0000000000 --- a/testing/btest/scripts/base/protocols/http/http-header-crlf.bro +++ /dev/null @@ -1,10 +0,0 @@ -# This tests for what looks like a problem in the HTTP parser: -# it gets confused whether it's in a header or not; it shouldn't report -# the http_no_crlf_in_header_list weird. -# -# @TEST-EXEC: bro -r $TRACES/http/byteranges.trace %INPUT -# @TEST-EXEC: test ! -f weird.log - -# The base analysis scripts are loaded by default. -#@load base/protocols/http - diff --git a/testing/btest/scripts/base/protocols/http/http-header-crlf.zeek b/testing/btest/scripts/base/protocols/http/http-header-crlf.zeek new file mode 100644 index 0000000000..60d5095d97 --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/http-header-crlf.zeek @@ -0,0 +1,10 @@ +# This tests for what looks like a problem in the HTTP parser: +# it gets confused whether it's in a header or not; it shouldn't report +# the http_no_crlf_in_header_list weird. +# +# @TEST-EXEC: zeek -r $TRACES/http/byteranges.trace %INPUT +# @TEST-EXEC: test ! -f weird.log + -# The base analysis scripts are loaded by default. +#@load base/protocols/http + diff --git a/testing/btest/scripts/base/protocols/http/http-methods.bro b/testing/btest/scripts/base/protocols/http/http-methods.bro deleted file mode 100644 index 5ab89bbe4d..0000000000 --- a/testing/btest/scripts/base/protocols/http/http-methods.bro +++ /dev/null @@ -1,9 +0,0 @@ -# This tests that the HTTP analyzer handles strange HTTP methods properly. -# -# @TEST-EXEC: bro -r $TRACES/http/methods.trace %INPUT -# @TEST-EXEC: btest-diff weird.log -# @TEST-EXEC: btest-diff http.log - -# The base analysis scripts are loaded by default. -#@load base/protocols/http - diff --git a/testing/btest/scripts/base/protocols/http/http-methods.zeek b/testing/btest/scripts/base/protocols/http/http-methods.zeek new file mode 100644 index 0000000000..810868184f --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/http-methods.zeek @@ -0,0 +1,9 @@ +# This tests that the HTTP analyzer handles strange HTTP methods properly. +# +# @TEST-EXEC: zeek -r $TRACES/http/methods.trace %INPUT +# @TEST-EXEC: btest-diff weird.log +# @TEST-EXEC: btest-diff http.log + +# The base analysis scripts are loaded by default. 
+#@load base/protocols/http + diff --git a/testing/btest/scripts/base/protocols/http/http-pipelining.bro b/testing/btest/scripts/base/protocols/http/http-pipelining.bro deleted file mode 100644 index bb392b1c4b..0000000000 --- a/testing/btest/scripts/base/protocols/http/http-pipelining.bro +++ /dev/null @@ -1,9 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/pipelined-requests.trace %INPUT > output -# @TEST-EXEC: btest-diff http.log - -# mime type is irrelevant to this test, so filter it out -event bro_init() - { - Log::remove_default_filter(HTTP::LOG); - Log::add_filter(HTTP::LOG, [$name="less-mime-types", $exclude=set("mime_type")]); - } diff --git a/testing/btest/scripts/base/protocols/http/http-pipelining.zeek b/testing/btest/scripts/base/protocols/http/http-pipelining.zeek new file mode 100644 index 0000000000..d1451276fe --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/http-pipelining.zeek @@ -0,0 +1,9 @@ +# @TEST-EXEC: zeek -r $TRACES/http/pipelined-requests.trace %INPUT > output +# @TEST-EXEC: btest-diff http.log + +# mime type is irrelevant to this test, so filter it out +event zeek_init() + { + Log::remove_default_filter(HTTP::LOG); + Log::add_filter(HTTP::LOG, [$name="less-mime-types", $exclude=set("mime_type")]); + } diff --git a/testing/btest/scripts/base/protocols/http/missing-zlib-header.bro b/testing/btest/scripts/base/protocols/http/missing-zlib-header.bro deleted file mode 100644 index 25923f70da..0000000000 --- a/testing/btest/scripts/base/protocols/http/missing-zlib-header.bro +++ /dev/null @@ -1,6 +0,0 @@ -# This tests an issue where some web servers don't -# include an appropriate ZLIB header on deflated -# content. -# -# @TEST-EXEC: bro -r $TRACES/http/missing-zlib-header.pcap %INPUT -# @TEST-EXEC: btest-diff http.log diff --git a/testing/btest/scripts/base/protocols/http/missing-zlib-header.zeek b/testing/btest/scripts/base/protocols/http/missing-zlib-header.zeek new file mode 100644 index 0000000000..9c993c7e7f --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/missing-zlib-header.zeek @@ -0,0 +1,6 @@ +# This tests an issue where some web servers don't +# include an appropriate ZLIB header on deflated +# content. 
+# +# @TEST-EXEC: zeek -r $TRACES/http/missing-zlib-header.pcap %INPUT +# @TEST-EXEC: btest-diff http.log diff --git a/testing/btest/scripts/base/protocols/http/multipart-extract.bro b/testing/btest/scripts/base/protocols/http/multipart-extract.bro deleted file mode 100644 index a919a844b2..0000000000 --- a/testing/btest/scripts/base/protocols/http/multipart-extract.bro +++ /dev/null @@ -1,9 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/http/multipart.trace %INPUT -# @TEST-EXEC: btest-diff http.log -# @TEST-EXEC: cat extract_files/http-item-* | sort > extractions - -event file_new(f: fa_file) - { - local fname = fmt("http-item-%s", f$id); - Files::add_analyzer(f, Files::ANALYZER_EXTRACT, [$extract_filename=fname]); - } diff --git a/testing/btest/scripts/base/protocols/http/multipart-extract.zeek b/testing/btest/scripts/base/protocols/http/multipart-extract.zeek new file mode 100644 index 0000000000..93f12e13d7 --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/multipart-extract.zeek @@ -0,0 +1,9 @@ +# @TEST-EXEC: zeek -C -r $TRACES/http/multipart.trace %INPUT +# @TEST-EXEC: btest-diff http.log +# @TEST-EXEC: cat extract_files/http-item-* | sort > extractions + +event file_new(f: fa_file) + { + local fname = fmt("http-item-%s", f$id); + Files::add_analyzer(f, Files::ANALYZER_EXTRACT, [$extract_filename=fname]); + } diff --git a/testing/btest/scripts/base/protocols/http/multipart-file-limit.bro b/testing/btest/scripts/base/protocols/http/multipart-file-limit.bro deleted file mode 100644 index 7c0690babd..0000000000 --- a/testing/btest/scripts/base/protocols/http/multipart-file-limit.bro +++ /dev/null @@ -1,23 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/http/multipart.trace -# @TEST-EXEC: btest-diff http.log -# @TEST-EXEC: bro -C -r $TRACES/http/multipart.trace %INPUT >out-limited -# @TEST-EXEC: mv http.log http-limited.log -# @TEST-EXEC: btest-diff http-limited.log -# @TEST-EXEC: btest-diff out-limited -# @TEST-EXEC: bro -C -r $TRACES/http/multipart.trace %INPUT ignore_http_file_limit=T >out-limit-ignored -# @TEST-EXEC: mv http.log http-limit-ignored.log -# @TEST-EXEC: btest-diff http-limit-ignored.log -# @TEST-EXEC: btest-diff out-limit-ignored - -option ignore_http_file_limit = F; - -redef HTTP::max_files_orig = 1; -redef HTTP::max_files_resp = 1; - -hook HTTP::max_files_policy(f: fa_file, is_orig: bool) - { - print "max_files reached"; - - if ( ignore_http_file_limit ) - break; - } diff --git a/testing/btest/scripts/base/protocols/http/multipart-file-limit.zeek b/testing/btest/scripts/base/protocols/http/multipart-file-limit.zeek new file mode 100644 index 0000000000..21980ae7e0 --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/multipart-file-limit.zeek @@ -0,0 +1,23 @@ +# @TEST-EXEC: zeek -C -r $TRACES/http/multipart.trace +# @TEST-EXEC: btest-diff http.log +# @TEST-EXEC: zeek -C -r $TRACES/http/multipart.trace %INPUT >out-limited +# @TEST-EXEC: mv http.log http-limited.log +# @TEST-EXEC: btest-diff http-limited.log +# @TEST-EXEC: btest-diff out-limited +# @TEST-EXEC: zeek -C -r $TRACES/http/multipart.trace %INPUT ignore_http_file_limit=T >out-limit-ignored +# @TEST-EXEC: mv http.log http-limit-ignored.log +# @TEST-EXEC: btest-diff http-limit-ignored.log +# @TEST-EXEC: btest-diff out-limit-ignored + +option ignore_http_file_limit = F; + +redef HTTP::max_files_orig = 1; +redef HTTP::max_files_resp = 1; + +hook HTTP::max_files_policy(f: fa_file, is_orig: bool) + { + print "max_files reached"; + + if ( ignore_http_file_limit ) + break; + } diff --git 
a/testing/btest/scripts/base/protocols/http/no-uri.bro b/testing/btest/scripts/base/protocols/http/no-uri.bro deleted file mode 100644 index 9793b93c58..0000000000 --- a/testing/btest/scripts/base/protocols/http/no-uri.bro +++ /dev/null @@ -1,4 +0,0 @@ -# @TEST-EXEC: bro -Cr $TRACES/http/no-uri.pcap %INPUT -# @TEST-EXEC: btest-diff http.log -# @TEST-EXEC: btest-diff weird.log - diff --git a/testing/btest/scripts/base/protocols/http/no-uri.zeek b/testing/btest/scripts/base/protocols/http/no-uri.zeek new file mode 100644 index 0000000000..dc0a3f313d --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/no-uri.zeek @@ -0,0 +1,4 @@ +# @TEST-EXEC: zeek -Cr $TRACES/http/no-uri.pcap %INPUT +# @TEST-EXEC: btest-diff http.log +# @TEST-EXEC: btest-diff weird.log + diff --git a/testing/btest/scripts/base/protocols/http/no-version.bro b/testing/btest/scripts/base/protocols/http/no-version.bro deleted file mode 100644 index 3e861534bd..0000000000 --- a/testing/btest/scripts/base/protocols/http/no-version.bro +++ /dev/null @@ -1,3 +0,0 @@ -# @TEST-EXEC: bro -Cr $TRACES/http/no-version.pcap %INPUT -# @TEST-EXEC: btest-diff http.log - diff --git a/testing/btest/scripts/base/protocols/http/no-version.zeek b/testing/btest/scripts/base/protocols/http/no-version.zeek new file mode 100644 index 0000000000..d926cb565e --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/no-version.zeek @@ -0,0 +1,3 @@ +# @TEST-EXEC: zeek -Cr $TRACES/http/no-version.pcap %INPUT +# @TEST-EXEC: btest-diff http.log + diff --git a/testing/btest/scripts/base/protocols/http/percent-end-of-line.bro b/testing/btest/scripts/base/protocols/http/percent-end-of-line.bro deleted file mode 100644 index a41dbab294..0000000000 --- a/testing/btest/scripts/base/protocols/http/percent-end-of-line.bro +++ /dev/null @@ -1,4 +0,0 @@ -# @TEST-EXEC: bro -Cr $TRACES/http/percent-end-of-line.pcap %INPUT -# @TEST-EXEC: btest-diff http.log -# @TEST-EXEC: btest-diff weird.log - diff --git a/testing/btest/scripts/base/protocols/http/percent-end-of-line.zeek b/testing/btest/scripts/base/protocols/http/percent-end-of-line.zeek new file mode 100644 index 0000000000..9bfd21d46f --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/percent-end-of-line.zeek @@ -0,0 +1,4 @@ +# @TEST-EXEC: zeek -Cr $TRACES/http/percent-end-of-line.pcap %INPUT +# @TEST-EXEC: btest-diff http.log +# @TEST-EXEC: btest-diff weird.log + diff --git a/testing/btest/scripts/base/protocols/http/x-gzip.bro b/testing/btest/scripts/base/protocols/http/x-gzip.bro deleted file mode 100644 index a73fc5f71f..0000000000 --- a/testing/btest/scripts/base/protocols/http/x-gzip.bro +++ /dev/null @@ -1,2 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/x-gzip.pcap -# @TEST-EXEC: btest-diff http.log diff --git a/testing/btest/scripts/base/protocols/http/x-gzip.zeek b/testing/btest/scripts/base/protocols/http/x-gzip.zeek new file mode 100644 index 0000000000..75cd505490 --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/x-gzip.zeek @@ -0,0 +1,2 @@ +# @TEST-EXEC: zeek -r $TRACES/http/x-gzip.pcap +# @TEST-EXEC: btest-diff http.log diff --git a/testing/btest/scripts/base/protocols/http/zero-length-bodies-with-drops.bro b/testing/btest/scripts/base/protocols/http/zero-length-bodies-with-drops.bro deleted file mode 100644 index ccf397617e..0000000000 --- a/testing/btest/scripts/base/protocols/http/zero-length-bodies-with-drops.bro +++ /dev/null @@ -1,10 +0,0 @@ -# This tests an issue with interaction between zero length -# http bodies and the file analysis code. 
It is creating -# files when there isn't actually any body there and shouldn't -# create a file. -# -# @TEST-EXEC: bro -r $TRACES/http/zero-length-bodies-with-drops.pcap %INPUT - -# There shouldn't be a files log (no files!) -# @TEST-EXEC: test ! -f files.log - diff --git a/testing/btest/scripts/base/protocols/http/zero-length-bodies-with-drops.zeek b/testing/btest/scripts/base/protocols/http/zero-length-bodies-with-drops.zeek new file mode 100644 index 0000000000..1e7ba1f5eb --- /dev/null +++ b/testing/btest/scripts/base/protocols/http/zero-length-bodies-with-drops.zeek @@ -0,0 +1,10 @@ +# This tests an issue with interaction between zero length +# http bodies and the file analysis code. It is creating +# files when there isn't actually any body there and shouldn't +# create a file. +# +# @TEST-EXEC: zeek -r $TRACES/http/zero-length-bodies-with-drops.pcap %INPUT + +# There shouldn't be a files log (no files!) +# @TEST-EXEC: test ! -f files.log + diff --git a/testing/btest/scripts/base/protocols/imap/capabilities.test b/testing/btest/scripts/base/protocols/imap/capabilities.test index 06bdb56b7d..81fb802275 100644 --- a/testing/btest/scripts/base/protocols/imap/capabilities.test +++ b/testing/btest/scripts/base/protocols/imap/capabilities.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b -C -r $TRACES/tls/imap-starttls.pcap %INPUT +# @TEST-EXEC: zeek -b -C -r $TRACES/tls/imap-starttls.pcap %INPUT # @TEST-EXEC: btest-diff .stdout @load base/protocols/ssl diff --git a/testing/btest/scripts/base/protocols/imap/starttls.test b/testing/btest/scripts/base/protocols/imap/starttls.test index 444c27688a..2d20622b15 100644 --- a/testing/btest/scripts/base/protocols/imap/starttls.test +++ b/testing/btest/scripts/base/protocols/imap/starttls.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b -C -r $TRACES/tls/imap-starttls.pcap %INPUT +# @TEST-EXEC: zeek -b -C -r $TRACES/tls/imap-starttls.pcap %INPUT # @TEST-EXEC: btest-diff conn.log # @TEST-EXEC: btest-diff ssl.log # @TEST-EXEC: btest-diff x509.log diff --git a/testing/btest/scripts/base/protocols/irc/basic.test b/testing/btest/scripts/base/protocols/irc/basic.test index 618f4d9079..bf3141896b 100644 --- a/testing/btest/scripts/base/protocols/irc/basic.test +++ b/testing/btest/scripts/base/protocols/irc/basic.test @@ -1,12 +1,12 @@ # This tests that basic IRC commands (NICK, USER, JOIN, DCC SEND) # are logged for a client. 
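The HTTP and IMAP test changes above, like the IRC and KRB ones that follow, all apply the same mechanical substitutions: the test file extension moves from .bro to .zeek, the @TEST-EXEC line invokes zeek instead of bro, and any bro_init handler becomes zeek_init, while the test logic itself is left untouched. A minimal sketch of a renamed btest is shown below; the file name and trace are hypothetical and only illustrate the pattern, they are not part of this diff.

# @TEST-EXEC: zeek -C -r $TRACES/http/example.pcap %INPUT
# @TEST-EXEC: btest-diff http.log

# zeek_init replaces the former bro_init entry point; everything else stays as it was.
event zeek_init()
	{
	print "per-test setup goes here";
	}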
-# @TEST-EXEC: bro -r $TRACES/irc-dcc-send.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/irc-dcc-send.trace %INPUT # @TEST-EXEC: btest-diff irc.log # @TEST-EXEC: btest-diff conn.log # dcc mime types are irrelevant to this test, so filter it out -event bro_init() +event zeek_init() { Log::remove_default_filter(IRC::LOG); Log::add_filter(IRC::LOG, [$name="remove-mime", $exclude=set("dcc_mime_type")]); diff --git a/testing/btest/scripts/base/protocols/irc/events.test b/testing/btest/scripts/base/protocols/irc/events.test index c5220b247b..3e187d9da9 100644 --- a/testing/btest/scripts/base/protocols/irc/events.test +++ b/testing/btest/scripts/base/protocols/irc/events.test @@ -1,8 +1,8 @@ # Test IRC events -# @TEST-EXEC: bro -r $TRACES/irc-dcc-send.trace %INPUT -# @TEST-EXEC: bro -r $TRACES/irc-basic.trace %INPUT -# @TEST-EXEC: bro -r $TRACES/irc-whitespace.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/irc-dcc-send.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/irc-basic.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/irc-whitespace.trace %INPUT # @TEST-EXEC: btest-diff .stdout event irc_privmsg_message(c: connection, is_orig: bool, source: string, target: string, message: string) diff --git a/testing/btest/scripts/base/protocols/irc/longline.test b/testing/btest/scripts/base/protocols/irc/longline.test index 0573494844..fec493d086 100644 --- a/testing/btest/scripts/base/protocols/irc/longline.test +++ b/testing/btest/scripts/base/protocols/irc/longline.test @@ -1,6 +1,6 @@ # This tests that an excessively long line is truncated by the contentline # analyzer -# @TEST-EXEC: bro -C -r $TRACES/contentline-irc-5k-line.pcap %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/contentline-irc-5k-line.pcap %INPUT # @TEST-EXEC: btest-diff weird.log diff --git a/testing/btest/scripts/base/protocols/irc/names-weird.bro b/testing/btest/scripts/base/protocols/irc/names-weird.bro deleted file mode 100644 index 33124416f6..0000000000 --- a/testing/btest/scripts/base/protocols/irc/names-weird.bro +++ /dev/null @@ -1,7 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/irc-353.pcap %INPUT -# @TEST-EXEC: btest-diff weird.log - -event irc_names_info(c: connection, is_orig: bool, c_type: string, channel: string, users: string_set) - { - print channel, users; - } diff --git a/testing/btest/scripts/base/protocols/irc/names-weird.zeek b/testing/btest/scripts/base/protocols/irc/names-weird.zeek new file mode 100644 index 0000000000..2d0ff001b2 --- /dev/null +++ b/testing/btest/scripts/base/protocols/irc/names-weird.zeek @@ -0,0 +1,7 @@ +# @TEST-EXEC: zeek -C -r $TRACES/irc-353.pcap %INPUT +# @TEST-EXEC: btest-diff weird.log + +event irc_names_info(c: connection, is_orig: bool, c_type: string, channel: string, users: string_set) + { + print channel, users; + } diff --git a/testing/btest/scripts/base/protocols/irc/starttls.test b/testing/btest/scripts/base/protocols/irc/starttls.test index c110a77c39..9a0ec689ad 100644 --- a/testing/btest/scripts/base/protocols/irc/starttls.test +++ b/testing/btest/scripts/base/protocols/irc/starttls.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b -C -r $TRACES/tls/irc-starttls.pcap %INPUT +# @TEST-EXEC: zeek -b -C -r $TRACES/tls/irc-starttls.pcap %INPUT # @TEST-EXEC: btest-diff conn.log # @TEST-EXEC: btest-diff ssl.log # @TEST-EXEC: btest-diff x509.log diff --git a/testing/btest/scripts/base/protocols/krb/kinit.test b/testing/btest/scripts/base/protocols/krb/kinit.test index d9e4097361..16c8773a5b 100644 --- a/testing/btest/scripts/base/protocols/krb/kinit.test +++ b/testing/btest/scripts/base/protocols/krb/kinit.test @@ 
-1,6 +1,6 @@ # This test exercises many of the Linux kinit options against a KDC -# @TEST-EXEC: bro -b -r $TRACES/krb/kinit.trace %INPUT > output +# @TEST-EXEC: zeek -b -r $TRACES/krb/kinit.trace %INPUT > output # @TEST-EXEC: btest-diff kerberos.log # @TEST-EXEC: btest-diff output diff --git a/testing/btest/scripts/base/protocols/krb/smb2_krb.test b/testing/btest/scripts/base/protocols/krb/smb2_krb.test index 08c05d83f1..a5ffd20ebc 100644 --- a/testing/btest/scripts/base/protocols/krb/smb2_krb.test +++ b/testing/btest/scripts/base/protocols/krb/smb2_krb.test @@ -2,16 +2,16 @@ # Kerberos analyzer can open the AD ticket in the Negociate # Protocol Request and find the user. # -# @TEST-REQUIRES: grep -q "#define USE_KRB5" $BUILD/bro-config.h +# @TEST-REQUIRES: grep -q "#define USE_KRB5" $BUILD/zeek-config.h # # @TEST-COPY-FILE: ${TRACES}/krb/smb2_krb.keytab -# @TEST-EXEC: bro -b -C -r $TRACES/krb/smb2_krb.pcap %INPUT +# @TEST-EXEC: zeek -b -C -r $TRACES/krb/smb2_krb.pcap %INPUT # @TEST-EXEC: btest-diff .stdout redef KRB::keytab = "smb2_krb.keytab"; global monitor_ports: set[port] = { 445/tcp, 139/tcp } &redef; -event bro_init() &priority=5{ +event zeek_init() &priority=5{ Analyzer::register_for_ports(Analyzer::ANALYZER_SMB, monitor_ports); } diff --git a/testing/btest/scripts/base/protocols/krb/smb2_krb_nokeytab.test b/testing/btest/scripts/base/protocols/krb/smb2_krb_nokeytab.test index 0d2c68d142..557b0128b5 100644 --- a/testing/btest/scripts/base/protocols/krb/smb2_krb_nokeytab.test +++ b/testing/btest/scripts/base/protocols/krb/smb2_krb_nokeytab.test @@ -1,16 +1,16 @@ # This test verifies that without a keytab file no entries are # created and no errors happen. # -# @TEST-REQUIRES: grep -q "#define USE_KRB5" $BUILD/bro-config.h +# @TEST-REQUIRES: grep -q "#define USE_KRB5" $BUILD/zeek-config.h # # @TEST-COPY-FILE: ${TRACES}/krb/smb2_krb.keytab -# @TEST-EXEC: bro -C -r $TRACES/krb/smb2_krb.pcap %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/krb/smb2_krb.pcap %INPUT # @TEST-EXEC: btest-diff .stdout # @TEST-EXEC: btest-diff .stderr global monitor_ports: set[port] = { 445/tcp, 139/tcp } &redef; -event bro_init() &priority=5{ +event zeek_init() &priority=5{ Analyzer::register_for_ports(Analyzer::ANALYZER_SMB, monitor_ports); } diff --git a/testing/btest/scripts/base/protocols/krb/smb_gssapi.test b/testing/btest/scripts/base/protocols/krb/smb_gssapi.test index 95e5660812..b8ad67945c 100644 --- a/testing/btest/scripts/base/protocols/krb/smb_gssapi.test +++ b/testing/btest/scripts/base/protocols/krb/smb_gssapi.test @@ -3,7 +3,7 @@ # SMB authentication event and therfore relies on the SMB # analyzer as well. 
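The Kerberos/SMB tests above attach the SMB analyzer to explicit ports from zeek_init rather than relying on dynamic protocol detection alone. A self-contained sketch of that registration pattern, using the same port set as the tests, looks like this:

global monitor_ports: set[port] = { 445/tcp, 139/tcp } &redef;

event zeek_init() &priority=5
	{
	# Force the SMB analyzer onto the listed ports for the replayed trace.
	Analyzer::register_for_ports(Analyzer::ANALYZER_SMB, monitor_ports);
	}

The &priority=5 simply ensures the registration runs before default-priority zeek_init handlers that might depend on it.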
-# @TEST-EXEC: bro -b -C -r $TRACES/krb/smb_gssapi.trace %INPUT +# @TEST-EXEC: zeek -b -C -r $TRACES/krb/smb_gssapi.trace %INPUT # @TEST-EXEC: btest-diff kerberos.log # @TEST-EXEC: btest-diff-rst scripts.base.protocols.krb diff --git a/testing/btest/scripts/base/protocols/krb/tgs.test b/testing/btest/scripts/base/protocols/krb/tgs.test index bbf99762f6..8041a12804 100644 --- a/testing/btest/scripts/base/protocols/krb/tgs.test +++ b/testing/btest/scripts/base/protocols/krb/tgs.test @@ -1,6 +1,6 @@ # This test exercises a Kerberos authentication to a Kerberized SSH server -# @TEST-EXEC: bro -b -r $TRACES/krb/auth.trace %INPUT +# @TEST-EXEC: zeek -b -r $TRACES/krb/auth.trace %INPUT # @TEST-EXEC: btest-diff kerberos.log @load base/protocols/krb diff --git a/testing/btest/scripts/base/protocols/modbus/coil_parsing_big.bro b/testing/btest/scripts/base/protocols/modbus/coil_parsing_big.bro deleted file mode 100644 index acbf9aef8c..0000000000 --- a/testing/btest/scripts/base/protocols/modbus/coil_parsing_big.bro +++ /dev/null @@ -1,47 +0,0 @@ -# -# @TEST-EXEC: bro -C -r $TRACES/modbus/modbusBig.pcap %INPUT | sort | uniq -c | sed 's/^ *//g' >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: cat output | awk '{print $2}' | grep "^modbus_" | sort | uniq | wc -l >covered -# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/modbus/events.bif | grep "^event modbus_" | wc -l >total -# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage -# @TEST-EXEC: btest-diff coverage - -event modbus_message(c: connection, headers: ModbusHeaders, is_orig: bool) -{ - print "modbus_message", c$id, headers, is_orig; -} - -event modbus_exception(c: connection, headers: ModbusHeaders, code: count) -{ - print "modbus_exception", c$id, headers, code; -} - -event modbus_read_coils_request(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) -{ - print "modbus_read_coils_request", c$id, headers, start_address, quantity; -} - -event modbus_read_coils_response(c: connection, headers: ModbusHeaders, coils: ModbusCoils) -{ - print "modbus_read_coils_response", c$id, headers, coils; -} -event modbus_write_single_coil_request(c: connection, headers: ModbusHeaders, address: count, value: bool) -{ - print "modbus_write_single_coil_request", c$id, headers, address, value; -} - -event modbus_write_single_coil_response(c: connection, headers: ModbusHeaders, address: count, value: bool) -{ - print "modbus_write_single_coil_response", c$id, headers, address, value; -} - -event modbus_write_multiple_coils_request(c: connection, headers: ModbusHeaders, start_address: count, coils: ModbusCoils) -{ - print "modbus_write_multiple_coils_request", c$id, headers, start_address, coils; -} - -event modbus_write_multiple_coils_response(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) -{ - print "modbus_write_multiple_coils_response", c$id, headers, start_address, quantity; -} - diff --git a/testing/btest/scripts/base/protocols/modbus/coil_parsing_big.zeek b/testing/btest/scripts/base/protocols/modbus/coil_parsing_big.zeek new file mode 100644 index 0000000000..1cecf4c541 --- /dev/null +++ b/testing/btest/scripts/base/protocols/modbus/coil_parsing_big.zeek @@ -0,0 +1,47 @@ +# +# @TEST-EXEC: zeek -C -r $TRACES/modbus/modbusBig.pcap %INPUT | sort | uniq -c | sed 's/^ *//g' >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: cat output | awk '{print $2}' | grep "^modbus_" | sort | uniq | wc -l >covered +# @TEST-EXEC: cat 
${DIST}/src/analyzer/protocol/modbus/events.bif | grep "^event modbus_" | wc -l >total +# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage +# @TEST-EXEC: btest-diff coverage + +event modbus_message(c: connection, headers: ModbusHeaders, is_orig: bool) +{ + print "modbus_message", c$id, headers, is_orig; +} + +event modbus_exception(c: connection, headers: ModbusHeaders, code: count) +{ + print "modbus_exception", c$id, headers, code; +} + +event modbus_read_coils_request(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) +{ + print "modbus_read_coils_request", c$id, headers, start_address, quantity; +} + +event modbus_read_coils_response(c: connection, headers: ModbusHeaders, coils: ModbusCoils) +{ + print "modbus_read_coils_response", c$id, headers, coils; +} +event modbus_write_single_coil_request(c: connection, headers: ModbusHeaders, address: count, value: bool) +{ + print "modbus_write_single_coil_request", c$id, headers, address, value; +} + +event modbus_write_single_coil_response(c: connection, headers: ModbusHeaders, address: count, value: bool) +{ + print "modbus_write_single_coil_response", c$id, headers, address, value; +} + +event modbus_write_multiple_coils_request(c: connection, headers: ModbusHeaders, start_address: count, coils: ModbusCoils) +{ + print "modbus_write_multiple_coils_request", c$id, headers, start_address, coils; +} + +event modbus_write_multiple_coils_response(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) +{ + print "modbus_write_multiple_coils_response", c$id, headers, start_address, quantity; +} + diff --git a/testing/btest/scripts/base/protocols/modbus/coil_parsing_small.bro b/testing/btest/scripts/base/protocols/modbus/coil_parsing_small.bro deleted file mode 100644 index 84ee314907..0000000000 --- a/testing/btest/scripts/base/protocols/modbus/coil_parsing_small.bro +++ /dev/null @@ -1,47 +0,0 @@ -# -# @TEST-EXEC: bro -C -r $TRACES/modbus/modbusSmall.pcap %INPUT | sort | uniq -c | sed 's/^ *//g' >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: cat output | awk '{print $2}' | grep "^modbus_" | sort | uniq | wc -l >covered -# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/modbus/events.bif | grep "^event modbus_" | wc -l >total -# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage -# @TEST-EXEC: btest-diff coverage - -event modbus_message(c: connection, headers: ModbusHeaders, is_orig: bool) -{ - print "modbus_message", c$id, headers, is_orig; -} - -event modbus_exception(c: connection, headers: ModbusHeaders, code: count) -{ - print "modbus_exception", c$id, headers, code; -} - -event modbus_read_coils_request(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) -{ - print "modbus_read_coils_request", c$id, headers, start_address, quantity; -} - -event modbus_read_coils_response(c: connection, headers: ModbusHeaders, coils: ModbusCoils) -{ - print "modbus_read_coils_response", c$id, headers, coils; -} -event modbus_write_single_coil_request(c: connection, headers: ModbusHeaders, address: count, value: bool) -{ - print "modbus_write_single_coil_request", c$id, headers, address, value; -} - -event modbus_write_single_coil_response(c: connection, headers: ModbusHeaders, address: count, value: bool) -{ - print "modbus_write_single_coil_response", c$id, headers, address, value; -} - -event modbus_write_multiple_coils_request(c: connection, headers: ModbusHeaders, start_address: count, coils: ModbusCoils) -{ - 
print "modbus_write_multiple_coils_request", c$id, headers, start_address, coils; -} - -event modbus_write_multiple_coils_response(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) -{ - print "modbus_write_multiple_coils_response", c$id, headers, start_address, quantity; -} - diff --git a/testing/btest/scripts/base/protocols/modbus/coil_parsing_small.zeek b/testing/btest/scripts/base/protocols/modbus/coil_parsing_small.zeek new file mode 100644 index 0000000000..0e21021d6e --- /dev/null +++ b/testing/btest/scripts/base/protocols/modbus/coil_parsing_small.zeek @@ -0,0 +1,47 @@ +# +# @TEST-EXEC: zeek -C -r $TRACES/modbus/modbusSmall.pcap %INPUT | sort | uniq -c | sed 's/^ *//g' >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: cat output | awk '{print $2}' | grep "^modbus_" | sort | uniq | wc -l >covered +# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/modbus/events.bif | grep "^event modbus_" | wc -l >total +# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage +# @TEST-EXEC: btest-diff coverage + +event modbus_message(c: connection, headers: ModbusHeaders, is_orig: bool) +{ + print "modbus_message", c$id, headers, is_orig; +} + +event modbus_exception(c: connection, headers: ModbusHeaders, code: count) +{ + print "modbus_exception", c$id, headers, code; +} + +event modbus_read_coils_request(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) +{ + print "modbus_read_coils_request", c$id, headers, start_address, quantity; +} + +event modbus_read_coils_response(c: connection, headers: ModbusHeaders, coils: ModbusCoils) +{ + print "modbus_read_coils_response", c$id, headers, coils; +} +event modbus_write_single_coil_request(c: connection, headers: ModbusHeaders, address: count, value: bool) +{ + print "modbus_write_single_coil_request", c$id, headers, address, value; +} + +event modbus_write_single_coil_response(c: connection, headers: ModbusHeaders, address: count, value: bool) +{ + print "modbus_write_single_coil_response", c$id, headers, address, value; +} + +event modbus_write_multiple_coils_request(c: connection, headers: ModbusHeaders, start_address: count, coils: ModbusCoils) +{ + print "modbus_write_multiple_coils_request", c$id, headers, start_address, coils; +} + +event modbus_write_multiple_coils_response(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) +{ + print "modbus_write_multiple_coils_response", c$id, headers, start_address, quantity; +} + diff --git a/testing/btest/scripts/base/protocols/modbus/events.bro b/testing/btest/scripts/base/protocols/modbus/events.bro deleted file mode 100644 index 55a3f3cb04..0000000000 --- a/testing/btest/scripts/base/protocols/modbus/events.bro +++ /dev/null @@ -1,151 +0,0 @@ -# -# @TEST-EXEC: bro -r $TRACES/modbus/modbus.trace %INPUT | sort | uniq -c | sed 's/^ *//g' >output -# @TEST-EXEC: btest-diff output -# @TEST-EXEC: cat output | awk '{print $2}' | grep "^modbus_" | sort | uniq | wc -l >covered -# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/modbus/events.bif | grep "^event modbus_" | wc -l >total -# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage -# @TEST-EXEC: btest-diff coverage -# @TEST-EXEC: btest-diff conn.log - -redef DPD::ignore_violations_after = 1; - -event modbus_message(c: connection, headers: ModbusHeaders, is_orig: bool) -{ - print "modbus_message", c$id, headers, is_orig; -} - -event modbus_exception(c: connection, headers: ModbusHeaders, code: count) -{ - print 
"modbus_exception", c$id, headers, code; -} - -event modbus_read_coils_request(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) -{ - print "modbus_read_coils_request", c$id, headers, start_address, quantity; -} - -event modbus_read_coils_response(c: connection, headers: ModbusHeaders, coils: ModbusCoils) -{ - print "modbus_read_coils_response", c$id, headers, coils; -} - -event modbus_read_discrete_inputs_request(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) -{ - print "modbus_read_discrete_inputs_request", c$id, headers, start_address, quantity; -} - -event modbus_read_discrete_inputs_response(c: connection, headers: ModbusHeaders, coils: ModbusCoils) -{ - print "modbus_read_discrete_inputs_response", c$id, headers, coils; -} - -event modbus_read_holding_registers_request(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) -{ - print "modbus_read_holding_registers_request", c$id, headers, start_address, quantity; -} - -event modbus_read_holding_registers_response(c: connection, headers: ModbusHeaders, registers: ModbusRegisters) -{ - print "modbus_read_holding_registers_response", c$id, headers, registers; -} - -event modbus_read_input_registers_request(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) -{ - print "modbus_read_input_registers_request", c$id, headers, start_address, quantity; -} - -event modbus_read_input_registers_response(c: connection, headers: ModbusHeaders, registers: ModbusRegisters) -{ - print "modbus_read_input_registers_response", c$id, headers, registers; -} - -event modbus_write_single_coil_request(c: connection, headers: ModbusHeaders, address: count, value: bool) -{ - print "modbus_write_single_coil_request", c$id, headers, address, value; -} - -event modbus_write_single_coil_response(c: connection, headers: ModbusHeaders, address: count, value: bool) -{ - print "modbus_write_single_coil_response", c$id, headers, address, value; -} - -event modbus_write_single_register_request(c: connection, headers: ModbusHeaders, address: count, value: count) -{ - print "modbus_write_single_register_request", c$id, headers, address, value; -} - -event modbus_write_single_register_response(c: connection, headers: ModbusHeaders, address: count, value: count) -{ - print "modbus_write_single_register_response", c$id, headers, address, value; -} - -event modbus_write_multiple_coils_request(c: connection, headers: ModbusHeaders, start_address: count, coils: ModbusCoils) -{ - print "modbus_write_multiple_coils_request", c$id, headers, start_address, coils; -} - -event modbus_write_multiple_coils_response(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) -{ - print "modbus_write_multiple_coils_response", c$id, headers, start_address, quantity; -} - -event modbus_write_multiple_registers_request(c: connection, headers: ModbusHeaders, start_address: count, registers: ModbusRegisters) -{ - print "modbus_write_multiple_registers_request", c$id, headers, start_address, registers; -} - -event modbus_write_multiple_registers_response(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) -{ - print "modbus_write_multiple_registers_response", c$id, headers, start_address, quantity; -} - -event modbus_read_file_record_request(c: connection, headers: ModbusHeaders) -{ - print "modbus_read_file_record_request", c$id, headers; -} - -event modbus_read_file_record_response(c: connection, headers: ModbusHeaders) -{ - print 
"modbus_read_file_record_response", c$id, headers; -} - -event modbus_write_file_record_request(c: connection, headers: ModbusHeaders) -{ - print "modbus_write_file_record_request", c$id, headers; -} - -event modbus_write_file_record_response(c: connection, headers: ModbusHeaders) -{ - print "modbus_write_file_record_response", c$id, headers; -} - -event modbus_mask_write_register_request(c: connection, headers: ModbusHeaders, address: count, and_mask: count, or_mask: count) -{ - print "modbus_mask_write_register_request", c$id, headers, address, and_mask, or_mask; -} - -event modbus_mask_write_register_response(c: connection, headers: ModbusHeaders, address: count, and_mask: count, or_mask: count) -{ - print "modbus_mask_write_register_response", c$id, headers, address, and_mask, or_mask; -} - -event modbus_read_write_multiple_registers_request(c: connection, headers: ModbusHeaders, read_start_address: count, read_quantity: count, write_start_address: count, write_registers: ModbusRegisters) -{ - print "modbus_read_write_multiple_registers_request", c$id, headers, read_start_address, read_quantity, write_start_address, write_registers; -} - -event modbus_read_write_multiple_registers_response(c: connection, headers: ModbusHeaders, written_registers: ModbusRegisters) -{ - print "modbus_read_write_multiple_registers_response", c$id, headers, written_registers; -} - -event modbus_read_fifo_queue_request(c: connection, headers: ModbusHeaders, start_address: count) -{ - print "modbus_read_fifo_queue_request", c$id, headers, start_address; -} - -event modbus_read_fifo_queue_response(c: connection, headers: ModbusHeaders, fifos: ModbusRegisters) -{ - print "modbus_read_fifo_queue_response", c$id, headers, fifos; -} - diff --git a/testing/btest/scripts/base/protocols/modbus/events.zeek b/testing/btest/scripts/base/protocols/modbus/events.zeek new file mode 100644 index 0000000000..4b55828565 --- /dev/null +++ b/testing/btest/scripts/base/protocols/modbus/events.zeek @@ -0,0 +1,151 @@ +# +# @TEST-EXEC: zeek -r $TRACES/modbus/modbus.trace %INPUT | sort | uniq -c | sed 's/^ *//g' >output +# @TEST-EXEC: btest-diff output +# @TEST-EXEC: cat output | awk '{print $2}' | grep "^modbus_" | sort | uniq | wc -l >covered +# @TEST-EXEC: cat ${DIST}/src/analyzer/protocol/modbus/events.bif | grep "^event modbus_" | wc -l >total +# @TEST-EXEC: echo `cat covered` of `cat total` events triggered by trace >coverage +# @TEST-EXEC: btest-diff coverage +# @TEST-EXEC: btest-diff conn.log + +redef DPD::ignore_violations_after = 1; + +event modbus_message(c: connection, headers: ModbusHeaders, is_orig: bool) +{ + print "modbus_message", c$id, headers, is_orig; +} + +event modbus_exception(c: connection, headers: ModbusHeaders, code: count) +{ + print "modbus_exception", c$id, headers, code; +} + +event modbus_read_coils_request(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) +{ + print "modbus_read_coils_request", c$id, headers, start_address, quantity; +} + +event modbus_read_coils_response(c: connection, headers: ModbusHeaders, coils: ModbusCoils) +{ + print "modbus_read_coils_response", c$id, headers, coils; +} + +event modbus_read_discrete_inputs_request(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) +{ + print "modbus_read_discrete_inputs_request", c$id, headers, start_address, quantity; +} + +event modbus_read_discrete_inputs_response(c: connection, headers: ModbusHeaders, coils: ModbusCoils) +{ + print "modbus_read_discrete_inputs_response", c$id, 
headers, coils; +} + +event modbus_read_holding_registers_request(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) +{ + print "modbus_read_holding_registers_request", c$id, headers, start_address, quantity; +} + +event modbus_read_holding_registers_response(c: connection, headers: ModbusHeaders, registers: ModbusRegisters) +{ + print "modbus_read_holding_registers_response", c$id, headers, registers; +} + +event modbus_read_input_registers_request(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) +{ + print "modbus_read_input_registers_request", c$id, headers, start_address, quantity; +} + +event modbus_read_input_registers_response(c: connection, headers: ModbusHeaders, registers: ModbusRegisters) +{ + print "modbus_read_input_registers_response", c$id, headers, registers; +} + +event modbus_write_single_coil_request(c: connection, headers: ModbusHeaders, address: count, value: bool) +{ + print "modbus_write_single_coil_request", c$id, headers, address, value; +} + +event modbus_write_single_coil_response(c: connection, headers: ModbusHeaders, address: count, value: bool) +{ + print "modbus_write_single_coil_response", c$id, headers, address, value; +} + +event modbus_write_single_register_request(c: connection, headers: ModbusHeaders, address: count, value: count) +{ + print "modbus_write_single_register_request", c$id, headers, address, value; +} + +event modbus_write_single_register_response(c: connection, headers: ModbusHeaders, address: count, value: count) +{ + print "modbus_write_single_register_response", c$id, headers, address, value; +} + +event modbus_write_multiple_coils_request(c: connection, headers: ModbusHeaders, start_address: count, coils: ModbusCoils) +{ + print "modbus_write_multiple_coils_request", c$id, headers, start_address, coils; +} + +event modbus_write_multiple_coils_response(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) +{ + print "modbus_write_multiple_coils_response", c$id, headers, start_address, quantity; +} + +event modbus_write_multiple_registers_request(c: connection, headers: ModbusHeaders, start_address: count, registers: ModbusRegisters) +{ + print "modbus_write_multiple_registers_request", c$id, headers, start_address, registers; +} + +event modbus_write_multiple_registers_response(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) +{ + print "modbus_write_multiple_registers_response", c$id, headers, start_address, quantity; +} + +event modbus_read_file_record_request(c: connection, headers: ModbusHeaders) +{ + print "modbus_read_file_record_request", c$id, headers; +} + +event modbus_read_file_record_response(c: connection, headers: ModbusHeaders) +{ + print "modbus_read_file_record_response", c$id, headers; +} + +event modbus_write_file_record_request(c: connection, headers: ModbusHeaders) +{ + print "modbus_write_file_record_request", c$id, headers; +} + +event modbus_write_file_record_response(c: connection, headers: ModbusHeaders) +{ + print "modbus_write_file_record_response", c$id, headers; +} + +event modbus_mask_write_register_request(c: connection, headers: ModbusHeaders, address: count, and_mask: count, or_mask: count) +{ + print "modbus_mask_write_register_request", c$id, headers, address, and_mask, or_mask; +} + +event modbus_mask_write_register_response(c: connection, headers: ModbusHeaders, address: count, and_mask: count, or_mask: count) +{ + print "modbus_mask_write_register_response", c$id, headers, address, and_mask, 
or_mask; +} + +event modbus_read_write_multiple_registers_request(c: connection, headers: ModbusHeaders, read_start_address: count, read_quantity: count, write_start_address: count, write_registers: ModbusRegisters) +{ + print "modbus_read_write_multiple_registers_request", c$id, headers, read_start_address, read_quantity, write_start_address, write_registers; +} + +event modbus_read_write_multiple_registers_response(c: connection, headers: ModbusHeaders, written_registers: ModbusRegisters) +{ + print "modbus_read_write_multiple_registers_response", c$id, headers, written_registers; +} + +event modbus_read_fifo_queue_request(c: connection, headers: ModbusHeaders, start_address: count) +{ + print "modbus_read_fifo_queue_request", c$id, headers, start_address; +} + +event modbus_read_fifo_queue_response(c: connection, headers: ModbusHeaders, fifos: ModbusRegisters) +{ + print "modbus_read_fifo_queue_response", c$id, headers, fifos; +} + diff --git a/testing/btest/scripts/base/protocols/modbus/exception_handling.test b/testing/btest/scripts/base/protocols/modbus/exception_handling.test index 8a4fadcbeb..b249fd33b0 100644 --- a/testing/btest/scripts/base/protocols/modbus/exception_handling.test +++ b/testing/btest/scripts/base/protocols/modbus/exception_handling.test @@ -1,8 +1,8 @@ -# @TEST-EXEC: bro -r $TRACES/modbus/fuzz-72.trace +# @TEST-EXEC: zeek -r $TRACES/modbus/fuzz-72.trace # @TEST-EXEC: btest-diff modbus.log # The pcap has a flow with some fuzzed modbus traffic in it that should cause # the binpac-generated analyzer code to throw a binpac::ExceptionOutOfBound. # This should be correctly caught as a type of binpac::Exception and the # binpac::ModbusTCP::Exception type that's defined as part of the analyzer -# shouldn't interfere with that handling and definitely shouldn't crash bro. +# shouldn't interfere with that handling and definitely shouldn't crash Zeek. diff --git a/testing/btest/scripts/base/protocols/modbus/length_mismatch.bro b/testing/btest/scripts/base/protocols/modbus/length_mismatch.bro deleted file mode 100644 index 17371f3788..0000000000 --- a/testing/btest/scripts/base/protocols/modbus/length_mismatch.bro +++ /dev/null @@ -1,14 +0,0 @@ -# The parser generated by BinPAC needs to handle this pcap without crashing -# or asserting. Specifically, pasing Function Code 23, -# ReadWriteMultipleRegistersRequest, has a field: -# -# uint16[write_quantity] &length=write_byte_count; -# -# And the pcap has mismatching values for those quantities. -# The use of &length on arrays previously caused array elements to -# be treated as already having a bounds check in the parsing-loop, which -# is problematic in the case where (write_quantity * 2) > write_byte_count -# as that can cause reading from a location that exceeds the end of the -# data buffer. - -# @TEST-EXEC: bro -r $TRACES/modbus/4SICS-GeekLounge-151022-min.pcap diff --git a/testing/btest/scripts/base/protocols/modbus/length_mismatch.zeek b/testing/btest/scripts/base/protocols/modbus/length_mismatch.zeek new file mode 100644 index 0000000000..0659614bd8 --- /dev/null +++ b/testing/btest/scripts/base/protocols/modbus/length_mismatch.zeek @@ -0,0 +1,14 @@ +# The parser generated by BinPAC needs to handle this pcap without crashing +# or asserting. Specifically, pasing Function Code 23, +# ReadWriteMultipleRegistersRequest, has a field: +# +# uint16[write_quantity] &length=write_byte_count; +# +# And the pcap has mismatching values for those quantities. 
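To make the mismatch concrete, with hypothetical numbers chosen only for illustration:

    write_quantity   = 10  -> 10 registers * 2 bytes = 20 bytes of element data
    write_byte_count = 4   -> &length bounds the field to 4 bytes
    overrun          = 20 - 4 = 16 bytes read beyond the declared field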
+# The use of &length on arrays previously caused array elements to +# be treated as already having a bounds check in the parsing-loop, which +# is problematic in the case where (write_quantity * 2) > write_byte_count +# as that can cause reading from a location that exceeds the end of the +# data buffer. + +# @TEST-EXEC: zeek -r $TRACES/modbus/4SICS-GeekLounge-151022-min.pcap diff --git a/testing/btest/scripts/base/protocols/modbus/policy.bro b/testing/btest/scripts/base/protocols/modbus/policy.bro deleted file mode 100644 index 8f7e41c274..0000000000 --- a/testing/btest/scripts/base/protocols/modbus/policy.bro +++ /dev/null @@ -1,11 +0,0 @@ -# -# @TEST-EXEC: bro -r $TRACES/modbus/modbus.trace %INPUT -# @TEST-EXEC: btest-diff modbus.log -# @TEST-EXEC: btest-diff modbus_register_change.log -# @TEST-EXEC: btest-diff known_modbus.log -# - -@load protocols/modbus/known-masters-slaves.bro -@load protocols/modbus/track-memmap.bro - -redef DPD::ignore_violations_after = 1; diff --git a/testing/btest/scripts/base/protocols/modbus/policy.zeek b/testing/btest/scripts/base/protocols/modbus/policy.zeek new file mode 100644 index 0000000000..ae4923ee77 --- /dev/null +++ b/testing/btest/scripts/base/protocols/modbus/policy.zeek @@ -0,0 +1,11 @@ +# +# @TEST-EXEC: zeek -r $TRACES/modbus/modbus.trace %INPUT +# @TEST-EXEC: btest-diff modbus.log +# @TEST-EXEC: btest-diff modbus_register_change.log +# @TEST-EXEC: btest-diff known_modbus.log +# + +@load protocols/modbus/known-masters-slaves +@load protocols/modbus/track-memmap + +redef DPD::ignore_violations_after = 1; diff --git a/testing/btest/scripts/base/protocols/modbus/register_parsing.bro b/testing/btest/scripts/base/protocols/modbus/register_parsing.bro deleted file mode 100644 index 1641860228..0000000000 --- a/testing/btest/scripts/base/protocols/modbus/register_parsing.bro +++ /dev/null @@ -1,21 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/modbus/fuzz-1011.trace %INPUT >output -# @TEST-EXEC: btest-diff modbus.log -# @TEST-EXEC: btest-diff output - -# modbus registers are 2-byte values. Many messages send a variable amount -# of register values, with the quantity being derived from a byte count value -# that is also sent. If the byte count value is invalid (e.g. an odd value -# might not be valid since registers must be 2-byte values), then the parser -# should not trigger any asserts, but generate a protocol_violation (in this -# case TCP_ApplicationAnalyzer::ProtocolViolation asserts its behavior for -# incomplete connections). - -event modbus_read_input_registers_request(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) - { - print "modbus_read_input_registers_request", c$id, headers, start_address, quantity; - } - -event modbus_read_input_registers_response(c: connection, headers: ModbusHeaders, registers: ModbusRegisters) - { - print "modbus_read_input_registers_response", c$id, headers, registers, |registers|; - } diff --git a/testing/btest/scripts/base/protocols/modbus/register_parsing.zeek b/testing/btest/scripts/base/protocols/modbus/register_parsing.zeek new file mode 100644 index 0000000000..1fc482ee95 --- /dev/null +++ b/testing/btest/scripts/base/protocols/modbus/register_parsing.zeek @@ -0,0 +1,21 @@ +# @TEST-EXEC: zeek -r $TRACES/modbus/fuzz-1011.trace %INPUT >output +# @TEST-EXEC: btest-diff modbus.log +# @TEST-EXEC: btest-diff output + +# modbus registers are 2-byte values. Many messages send a variable amount +# of register values, with the quantity being derived from a byte count value +# that is also sent. 
If the byte count value is invalid (e.g. an odd value +# might not be valid since registers must be 2-byte values), then the parser +# should not trigger any asserts, but generate a protocol_violation (in this +# case TCP_ApplicationAnalyzer::ProtocolViolation asserts its behavior for +# incomplete connections). + +event modbus_read_input_registers_request(c: connection, headers: ModbusHeaders, start_address: count, quantity: count) + { + print "modbus_read_input_registers_request", c$id, headers, start_address, quantity; + } + +event modbus_read_input_registers_response(c: connection, headers: ModbusHeaders, registers: ModbusRegisters) + { + print "modbus_read_input_registers_response", c$id, headers, registers, |registers|; + } diff --git a/testing/btest/scripts/base/protocols/mount/basic.test b/testing/btest/scripts/base/protocols/mount/basic.test index 8576874ce3..65a1adffd4 100644 --- a/testing/btest/scripts/base/protocols/mount/basic.test +++ b/testing/btest/scripts/base/protocols/mount/basic.test @@ -1,10 +1,10 @@ -# @TEST-EXEC: bro -b -r $TRACES/mount/mount_base.pcap %INPUT +# @TEST-EXEC: zeek -b -r $TRACES/mount/mount_base.pcap %INPUT # @TEST-EXEC: btest-diff .stdout global mount_ports: set[port] = { 635/tcp, 635/udp, 20048/tcp, 20048/udp } &redef; redef ignore_checksums = T; -event bro_init() +event zeek_init() { Analyzer::register_for_ports(Analyzer::ANALYZER_MOUNT, mount_ports); Analyzer::enable_analyzer(Analyzer::ANALYZER_MOUNT); diff --git a/testing/btest/scripts/base/protocols/mysql/auth.test b/testing/btest/scripts/base/protocols/mysql/auth.test index 6c764e496f..78c1ca0f19 100644 --- a/testing/btest/scripts/base/protocols/mysql/auth.test +++ b/testing/btest/scripts/base/protocols/mysql/auth.test @@ -1,6 +1,6 @@ # This tests that successful/unsuccesful auth attempts get logged correctly -# @TEST-EXEC: bro -b -r $TRACES/mysql/auth.trace %INPUT +# @TEST-EXEC: zeek -b -r $TRACES/mysql/auth.trace %INPUT # @TEST-EXEC: btest-diff mysql.log @load base/protocols/mysql \ No newline at end of file diff --git a/testing/btest/scripts/base/protocols/mysql/encrypted.test b/testing/btest/scripts/base/protocols/mysql/encrypted.test index e41c93186f..d6bfb0a271 100644 --- a/testing/btest/scripts/base/protocols/mysql/encrypted.test +++ b/testing/btest/scripts/base/protocols/mysql/encrypted.test @@ -1,8 +1,9 @@ -# This tests how Bro deals with encrypted connections. Right now, it doesn't log them as it -# can't parse much of value. We're testing for an empty mysql.log file. +# This tests how Zeek deals with encrypted connections. Right now, it +# doesn't log them as it can't parse much of value. We're testing for an +# empty mysql.log file. # @TEST-EXEC: touch mysql.log -# @TEST-EXEC: bro -b -r $TRACES/mysql/encrypted.trace %INPUT +# @TEST-EXEC: zeek -b -r $TRACES/mysql/encrypted.trace %INPUT # @TEST-EXEC: btest-diff mysql.log -@load base/protocols/mysql \ No newline at end of file +@load base/protocols/mysql diff --git a/testing/btest/scripts/base/protocols/mysql/wireshark.test b/testing/btest/scripts/base/protocols/mysql/wireshark.test index 55fe5be16c..64c8eb7ffa 100644 --- a/testing/btest/scripts/base/protocols/mysql/wireshark.test +++ b/testing/btest/scripts/base/protocols/mysql/wireshark.test @@ -1,6 +1,6 @@ # This tests a PCAP with a few MySQL commands from the Wireshark samples. 
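The register_parsing test above also illustrates a useful scripting idiom: because each Modbus register is two bytes, a script can sanity-check what the parser actually delivered by inspecting the element count of the returned vector. A hedged sketch follows; the 125-register bound is the nominal Modbus per-response maximum and is used here purely for illustration.

event modbus_read_input_registers_response(c: connection, headers: ModbusHeaders, registers: ModbusRegisters)
	{
	# |registers| is the number of 2-byte registers the parser extracted;
	# a malformed byte count surfaces as a protocol violation rather than a crash.
	if ( |registers| > 125 )
		print "unexpectedly large register response", c$id, |registers|;
	}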
-# @TEST-EXEC: bro -b -r $TRACES/mysql/mysql.trace %INPUT >out +# @TEST-EXEC: zeek -b -r $TRACES/mysql/mysql.trace %INPUT >out # @TEST-EXEC: btest-diff out # @TEST-EXEC: btest-diff mysql.log diff --git a/testing/btest/scripts/base/protocols/ncp/event.bro b/testing/btest/scripts/base/protocols/ncp/event.bro deleted file mode 100644 index acb4bf0a0c..0000000000 --- a/testing/btest/scripts/base/protocols/ncp/event.bro +++ /dev/null @@ -1,20 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/ncp.pcap %INPUT >out -# @TEST-EXEC: btest-diff out - -redef likely_server_ports += { 524/tcp }; - -event bro_init() - { - const ports = { 524/tcp }; - Analyzer::register_for_ports(Analyzer::ANALYZER_NCP, ports); - } - -event ncp_request(c: connection, frame_type: count, length: count, func: count) - { - print "ncp request", frame_type, length, func; - } - -event ncp_reply(c: connection, frame_type: count, length: count, req_frame: count, req_func: count, completion_code: count) - { - print "ncp reply", frame_type, length, req_frame, req_func, completion_code; - } diff --git a/testing/btest/scripts/base/protocols/ncp/event.zeek b/testing/btest/scripts/base/protocols/ncp/event.zeek new file mode 100644 index 0000000000..58ac47c8e8 --- /dev/null +++ b/testing/btest/scripts/base/protocols/ncp/event.zeek @@ -0,0 +1,20 @@ +# @TEST-EXEC: zeek -C -r $TRACES/ncp.pcap %INPUT >out +# @TEST-EXEC: btest-diff out + +redef likely_server_ports += { 524/tcp }; + +event zeek_init() + { + const ports = { 524/tcp }; + Analyzer::register_for_ports(Analyzer::ANALYZER_NCP, ports); + } + +event ncp_request(c: connection, frame_type: count, length: count, func: count) + { + print "ncp request", frame_type, length, func; + } + +event ncp_reply(c: connection, frame_type: count, length: count, req_frame: count, req_func: count, completion_code: count) + { + print "ncp reply", frame_type, length, req_frame, req_func, completion_code; + } diff --git a/testing/btest/scripts/base/protocols/ncp/frame_size_tuning.bro b/testing/btest/scripts/base/protocols/ncp/frame_size_tuning.bro deleted file mode 100644 index 46ad87e752..0000000000 --- a/testing/btest/scripts/base/protocols/ncp/frame_size_tuning.bro +++ /dev/null @@ -1,20 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/ncp.pcap %INPUT NCP::max_frame_size=150 >out -# @TEST-EXEC: btest-diff out - -redef likely_server_ports += { 524/tcp }; - -event bro_init() - { - const ports = { 524/tcp }; - Analyzer::register_for_ports(Analyzer::ANALYZER_NCP, ports); - } - -event ncp_request(c: connection, frame_type: count, length: count, func: count) - { - print "ncp request", frame_type, length, func; - } - -event ncp_reply(c: connection, frame_type: count, length: count, req_frame: count, req_func: count, completion_code: count) - { - print "ncp reply", frame_type, length, req_frame, req_func, completion_code; - } diff --git a/testing/btest/scripts/base/protocols/ncp/frame_size_tuning.zeek b/testing/btest/scripts/base/protocols/ncp/frame_size_tuning.zeek new file mode 100644 index 0000000000..c18f322892 --- /dev/null +++ b/testing/btest/scripts/base/protocols/ncp/frame_size_tuning.zeek @@ -0,0 +1,20 @@ +# @TEST-EXEC: zeek -C -r $TRACES/ncp.pcap %INPUT NCP::max_frame_size=150 >out +# @TEST-EXEC: btest-diff out + +redef likely_server_ports += { 524/tcp }; + +event zeek_init() + { + const ports = { 524/tcp }; + Analyzer::register_for_ports(Analyzer::ANALYZER_NCP, ports); + } + +event ncp_request(c: connection, frame_type: count, length: count, func: count) + { + print "ncp request", frame_type, length, func; + } + 
+event ncp_reply(c: connection, frame_type: count, length: count, req_frame: count, req_func: count, completion_code: count) + { + print "ncp reply", frame_type, length, req_frame, req_func, completion_code; + } diff --git a/testing/btest/scripts/base/protocols/nfs/basic.test b/testing/btest/scripts/base/protocols/nfs/basic.test index f2d2b1862a..e4dab09ed6 100755 --- a/testing/btest/scripts/base/protocols/nfs/basic.test +++ b/testing/btest/scripts/base/protocols/nfs/basic.test @@ -1,10 +1,10 @@ -# @TEST-EXEC: bro -b -r $TRACES/nfs/nfs_base.pcap %INPUT +# @TEST-EXEC: zeek -b -r $TRACES/nfs/nfs_base.pcap %INPUT # @TEST-EXEC: btest-diff .stdout global nfs_ports: set[port] = { 2049/tcp, 2049/udp } &redef; redef ignore_checksums = T; -event bro_init() +event zeek_init() { Analyzer::register_for_ports(Analyzer::ANALYZER_NFS, nfs_ports); Analyzer::enable_analyzer(Analyzer::ANALYZER_NFS); diff --git a/testing/btest/scripts/base/protocols/ntp/ntp-digest.test b/testing/btest/scripts/base/protocols/ntp/ntp-digest.test new file mode 100644 index 0000000000..92ddbf6edd --- /dev/null +++ b/testing/btest/scripts/base/protocols/ntp/ntp-digest.test @@ -0,0 +1,11 @@ +# @TEST-EXEC: zeek -C -r $TRACES/ntp/NTP-digest.pcap %INPUT +# @TEST-EXEC: btest-diff ntp.log +# @TEST-EXEC: btest-diff .stdout + +@load base/protocols/ntp + +event ntp_message(c: connection, is_orig: bool, msg: NTP::Message) + { + print fmt("ntp_message %s -> %s:%d %s", c$id$orig_h, c$id$resp_h, c$id$resp_p, msg); + } + diff --git a/testing/btest/scripts/base/protocols/ntp/ntp.test b/testing/btest/scripts/base/protocols/ntp/ntp.test new file mode 100644 index 0000000000..451f88b5cf --- /dev/null +++ b/testing/btest/scripts/base/protocols/ntp/ntp.test @@ -0,0 +1,11 @@ +# @TEST-EXEC: zeek -C -r $TRACES/ntp/ntp.pcap %INPUT +# @TEST-EXEC: btest-diff ntp.log +# @TEST-EXEC: btest-diff .stdout + +@load base/protocols/ntp + +event ntp_message(c: connection, is_orig: bool, msg: NTP::Message) + { + print fmt("ntp_message %s -> %s:%d %s", c$id$orig_h, c$id$resp_h, c$id$resp_p, msg); + } + diff --git a/testing/btest/scripts/base/protocols/ntp/ntp2.test b/testing/btest/scripts/base/protocols/ntp/ntp2.test new file mode 100644 index 0000000000..540416e1ba --- /dev/null +++ b/testing/btest/scripts/base/protocols/ntp/ntp2.test @@ -0,0 +1,11 @@ +# @TEST-EXEC: zeek -C -r $TRACES/ntp/ntp2.pcap %INPUT +# @TEST-EXEC: btest-diff ntp.log +# @TEST-EXEC: btest-diff .stdout + +@load base/protocols/ntp + +event ntp_message(c: connection, is_orig: bool, msg: NTP::Message) + { + print fmt("ntp_message %s -> %s:%d %s", c$id$orig_h, c$id$resp_h, c$id$resp_p, msg); + } + diff --git a/testing/btest/scripts/base/protocols/ntp/ntp3.test b/testing/btest/scripts/base/protocols/ntp/ntp3.test new file mode 100644 index 0000000000..fa132ff9cb --- /dev/null +++ b/testing/btest/scripts/base/protocols/ntp/ntp3.test @@ -0,0 +1,11 @@ +# @TEST-EXEC: zeek -C -r $TRACES/ntp/NTP_sync.pcap %INPUT +# @TEST-EXEC: btest-diff ntp.log +# @TEST-EXEC: btest-diff .stdout + +@load base/protocols/ntp + +event ntp_message(c: connection, is_orig: bool, msg: NTP::Message) + { + print fmt("ntp_message %s -> %s:%d %s", c$id$orig_h, c$id$resp_h, c$id$resp_p, msg); + } + diff --git a/testing/btest/scripts/base/protocols/ntp/ntpmode67.test b/testing/btest/scripts/base/protocols/ntp/ntpmode67.test new file mode 100644 index 0000000000..efacbc14c5 --- /dev/null +++ b/testing/btest/scripts/base/protocols/ntp/ntpmode67.test @@ -0,0 +1,10 @@ +# @TEST-EXEC: zeek -C -r $TRACES/ntp/ntpmode67.pcap %INPUT +# 
@TEST-EXEC: btest-diff .stdout + +@load base/protocols/ntp + +event ntp_message(c: connection, is_orig: bool, msg: NTP::Message) + { + print fmt("ntp_message %s -> %s:%d %s", c$id$orig_h, c$id$resp_h, c$id$resp_p, msg); + } + diff --git a/testing/btest/scripts/base/protocols/pop3/starttls.bro b/testing/btest/scripts/base/protocols/pop3/starttls.bro deleted file mode 100644 index 8e0d1ab5ef..0000000000 --- a/testing/btest/scripts/base/protocols/pop3/starttls.bro +++ /dev/null @@ -1,20 +0,0 @@ -# @TEST-EXEC: bro -C -b -r $TRACES/tls/pop3-starttls.pcap %INPUT -# @TEST-EXEC: btest-diff conn.log -# @TEST-EXEC: btest-diff ssl.log -# @TEST-EXEC: btest-diff x509.log - -@load base/protocols/conn -@load base/frameworks/dpd -@load base/protocols/ssl - -module POP3; - -const ports = { - 110/tcp -}; -redef likely_server_ports += { ports }; - -event bro_init() &priority=5 - { - Analyzer::register_for_ports(Analyzer::ANALYZER_POP3, ports); - } diff --git a/testing/btest/scripts/base/protocols/pop3/starttls.zeek b/testing/btest/scripts/base/protocols/pop3/starttls.zeek new file mode 100644 index 0000000000..cf5371d284 --- /dev/null +++ b/testing/btest/scripts/base/protocols/pop3/starttls.zeek @@ -0,0 +1,20 @@ +# @TEST-EXEC: zeek -C -b -r $TRACES/tls/pop3-starttls.pcap %INPUT +# @TEST-EXEC: btest-diff conn.log +# @TEST-EXEC: btest-diff ssl.log +# @TEST-EXEC: btest-diff x509.log + +@load base/protocols/conn +@load base/frameworks/dpd +@load base/protocols/ssl + +module POP3; + +const ports = { + 110/tcp +}; +redef likely_server_ports += { ports }; + +event zeek_init() &priority=5 + { + Analyzer::register_for_ports(Analyzer::ANALYZER_POP3, ports); + } diff --git a/testing/btest/scripts/base/protocols/radius/auth.test b/testing/btest/scripts/base/protocols/radius/auth.test index 9ec63dec0a..bcddeffd57 100644 --- a/testing/btest/scripts/base/protocols/radius/auth.test +++ b/testing/btest/scripts/base/protocols/radius/auth.test @@ -1,6 +1,6 @@ # This tests that a RADIUS authentication gets logged correctly -# @TEST-EXEC: bro -b -r $TRACES/radius/radius.trace %INPUT +# @TEST-EXEC: zeek -b -r $TRACES/radius/radius.trace %INPUT # @TEST-EXEC: btest-diff radius.log @load base/protocols/radius \ No newline at end of file diff --git a/testing/btest/scripts/base/protocols/radius/radius-multiple-attempts.test b/testing/btest/scripts/base/protocols/radius/radius-multiple-attempts.test index 473e492355..6456e58fe2 100644 --- a/testing/btest/scripts/base/protocols/radius/radius-multiple-attempts.test +++ b/testing/btest/scripts/base/protocols/radius/radius-multiple-attempts.test @@ -1,6 +1,6 @@ # Test a more complicated radius session with multiple attempts -# @TEST-EXEC: bro -b -C -r $TRACES/radius/radius_localhost.pcapng %INPUT +# @TEST-EXEC: zeek -b -C -r $TRACES/radius/radius_localhost.pcapng %INPUT # @TEST-EXEC: btest-diff radius.log @load base/protocols/radius diff --git a/testing/btest/scripts/base/protocols/rdp/rdp-client-cluster-data.zeek b/testing/btest/scripts/base/protocols/rdp/rdp-client-cluster-data.zeek new file mode 100644 index 0000000000..7bea9c16e1 --- /dev/null +++ b/testing/btest/scripts/base/protocols/rdp/rdp-client-cluster-data.zeek @@ -0,0 +1,22 @@ +# @TEST-EXEC: zeek -r $TRACES/rdp/rdp-proprietary-encryption.pcap %INPUT >out +# @TEST-EXEC: btest-diff out + +@load base/protocols/rdp + +event rdp_client_cluster_data(c: connection, data: RDP::ClientClusterData) + { + print "RDP Client Cluster Data"; + print fmt("Flags: %08x",data$flags); + print fmt("RedirSessionId: %08x",data$redir_session_id); + 
print fmt("Redirection Supported: %08x",data$redir_supported); + print fmt("ServerSessionRedirectionVersionMask: %08x",data$svr_session_redir_version_mask); + print fmt("RedirectionSessionIDFieldValid: %08x",data$redir_sessionid_field_valid); + print fmt("RedirectedSmartCard: %08x",data$redir_smartcard); + } + +event rdp_client_network_data(c: connection, channels: RDP::ClientChannelList) + { + print "RDP Client Channel List Options"; + for ( i in channels ) + print fmt("%08x", channels[i]$options); + } diff --git a/testing/btest/scripts/base/protocols/rdp/rdp-client-security-data.zeek b/testing/btest/scripts/base/protocols/rdp/rdp-client-security-data.zeek new file mode 100644 index 0000000000..97390c1248 --- /dev/null +++ b/testing/btest/scripts/base/protocols/rdp/rdp-client-security-data.zeek @@ -0,0 +1,13 @@ +# @TEST-EXEC: zeek -r $TRACES/rdp/rdp-proprietary-encryption.pcap %INPUT >out +# @TEST-EXEC: btest-diff out + +@load base/protocols/rdp + +event rdp_client_security_data(c: connection, data: RDP::ClientSecurityData) + { + print "rdp_client_security_data", data; + print " 40-bit flag", data$encryption_methods & 0x00000001 != 0; + print " 128-bit flag", data$encryption_methods & 0x00000002 != 0; + print " 56-bit flag", data$encryption_methods & 0x00000008 != 0; + print " fips flag", data$encryption_methods & 0x00000010 != 0; + } diff --git a/testing/btest/scripts/base/protocols/rdp/rdp-native-encrypted-data.zeek b/testing/btest/scripts/base/protocols/rdp/rdp-native-encrypted-data.zeek new file mode 100644 index 0000000000..2c2b84735a --- /dev/null +++ b/testing/btest/scripts/base/protocols/rdp/rdp-native-encrypted-data.zeek @@ -0,0 +1,14 @@ +# @TEST-EXEC: zeek -r $TRACES/rdp/rdp-proprietary-encryption.pcap %INPUT >out +# @TEST-EXEC: btest-diff out + +@load base/protocols/rdp + +event rdp_native_encrypted_data(c: connection, orig: bool, len: count) + { + print "rdp native encrypted data", orig, len; + + if ( ! orig ) + # That's fine to stop here, we don't need to check the entire + # encrypted conversation for the purpose of the unit test. 
+ terminate(); + } diff --git a/testing/btest/scripts/base/protocols/rdp/rdp-proprietary-encryption.bro b/testing/btest/scripts/base/protocols/rdp/rdp-proprietary-encryption.bro deleted file mode 100644 index 99305087ba..0000000000 --- a/testing/btest/scripts/base/protocols/rdp/rdp-proprietary-encryption.bro +++ /dev/null @@ -1,4 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/rdp/rdp-proprietary-encryption.pcap %INPUT -# @TEST-EXEC: btest-diff rdp.log - -@load base/protocols/rdp diff --git a/testing/btest/scripts/base/protocols/rdp/rdp-proprietary-encryption.zeek b/testing/btest/scripts/base/protocols/rdp/rdp-proprietary-encryption.zeek new file mode 100644 index 0000000000..7558506c8f --- /dev/null +++ b/testing/btest/scripts/base/protocols/rdp/rdp-proprietary-encryption.zeek @@ -0,0 +1,4 @@ +# @TEST-EXEC: zeek -r $TRACES/rdp/rdp-proprietary-encryption.pcap %INPUT +# @TEST-EXEC: btest-diff rdp.log + +@load base/protocols/rdp diff --git a/testing/btest/scripts/base/protocols/rdp/rdp-to-ssl.bro b/testing/btest/scripts/base/protocols/rdp/rdp-to-ssl.bro deleted file mode 100644 index 1be2bd7e8e..0000000000 --- a/testing/btest/scripts/base/protocols/rdp/rdp-to-ssl.bro +++ /dev/null @@ -1,5 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/rdp/rdp-to-ssl.pcap %INPUT -# @TEST-EXEC: btest-diff rdp.log -# @TEST-EXEC: btest-diff ssl.log - -@load base/protocols/rdp diff --git a/testing/btest/scripts/base/protocols/rdp/rdp-to-ssl.zeek b/testing/btest/scripts/base/protocols/rdp/rdp-to-ssl.zeek new file mode 100644 index 0000000000..47f154eef3 --- /dev/null +++ b/testing/btest/scripts/base/protocols/rdp/rdp-to-ssl.zeek @@ -0,0 +1,5 @@ +# @TEST-EXEC: zeek -r $TRACES/rdp/rdp-to-ssl.pcap %INPUT +# @TEST-EXEC: btest-diff rdp.log +# @TEST-EXEC: btest-diff ssl.log + +@load base/protocols/rdp diff --git a/testing/btest/scripts/base/protocols/rdp/rdp-x509.bro b/testing/btest/scripts/base/protocols/rdp/rdp-x509.bro deleted file mode 100644 index 2fed0d7d19..0000000000 --- a/testing/btest/scripts/base/protocols/rdp/rdp-x509.bro +++ /dev/null @@ -1,5 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/rdp/rdp-x509.pcap %INPUT -# @TEST-EXEC: btest-diff rdp.log -# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-timestamps | $SCRIPTS/diff-remove-x509-key-info" btest-diff x509.log - -@load base/protocols/rdp diff --git a/testing/btest/scripts/base/protocols/rdp/rdp-x509.zeek b/testing/btest/scripts/base/protocols/rdp/rdp-x509.zeek new file mode 100644 index 0000000000..56747a915b --- /dev/null +++ b/testing/btest/scripts/base/protocols/rdp/rdp-x509.zeek @@ -0,0 +1,5 @@ +# @TEST-EXEC: zeek -r $TRACES/rdp/rdp-x509.pcap %INPUT +# @TEST-EXEC: btest-diff rdp.log +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-timestamps | $SCRIPTS/diff-remove-x509-key-info" btest-diff x509.log + +@load base/protocols/rdp diff --git a/testing/btest/scripts/base/protocols/rfb/rfb-apple-remote-desktop.test b/testing/btest/scripts/base/protocols/rfb/rfb-apple-remote-desktop.test index e4510f35fb..2fc8129c67 100644 --- a/testing/btest/scripts/base/protocols/rfb/rfb-apple-remote-desktop.test +++ b/testing/btest/scripts/base/protocols/rfb/rfb-apple-remote-desktop.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -r $TRACES/rfb/vncmac.pcap +# @TEST-EXEC: zeek -C -r $TRACES/rfb/vncmac.pcap # @TEST-EXEC: btest-diff rfb.log @load base/protocols/rfb diff --git a/testing/btest/scripts/base/protocols/rfb/vnc-mac-to-linux.test b/testing/btest/scripts/base/protocols/rfb/vnc-mac-to-linux.test index c9dd37f1c1..027a70e955 100644 --- 
a/testing/btest/scripts/base/protocols/rfb/vnc-mac-to-linux.test +++ b/testing/btest/scripts/base/protocols/rfb/vnc-mac-to-linux.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -r $TRACES/rfb/vnc-mac-to-linux.pcap +# @TEST-EXEC: zeek -C -r $TRACES/rfb/vnc-mac-to-linux.pcap # @TEST-EXEC: btest-diff rfb.log @load base/protocols/rfb diff --git a/testing/btest/scripts/base/protocols/sip/wireshark.test b/testing/btest/scripts/base/protocols/sip/wireshark.test index 8c4611c880..12ebe6b664 100644 --- a/testing/btest/scripts/base/protocols/sip/wireshark.test +++ b/testing/btest/scripts/base/protocols/sip/wireshark.test @@ -1,6 +1,6 @@ # This tests a PCAP with a few SIP commands from the Wireshark samples. -# @TEST-EXEC: bro -b -r $TRACES/sip/wireshark.trace %INPUT +# @TEST-EXEC: zeek -b -r $TRACES/sip/wireshark.trace %INPUT # @TEST-EXEC: btest-diff sip.log @load base/protocols/sip \ No newline at end of file diff --git a/testing/btest/scripts/base/protocols/smb/disabled-dce-rpc.test b/testing/btest/scripts/base/protocols/smb/disabled-dce-rpc.test index 627e396517..330e95eace 100644 --- a/testing/btest/scripts/base/protocols/smb/disabled-dce-rpc.test +++ b/testing/btest/scripts/base/protocols/smb/disabled-dce-rpc.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -r $TRACES/smb/dssetup_DsRoleGetPrimaryDomainInformation_standalone_workstation.cap %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/smb/dssetup_DsRoleGetPrimaryDomainInformation_standalone_workstation.cap %INPUT # @TEST-EXEC: [ ! -f dce_rpc.log ] @load base/protocols/smb @@ -6,7 +6,7 @@ # The DCE_RPC analyzer is a little weird since it's instantiated # by the SMB analyzer directly in some cases. Care needs to be # taken to handle a disabled analyzer correctly. -event bro_init() +event zeek_init() { Analyzer::disable_analyzer(Analyzer::ANALYZER_DCE_RPC); } diff --git a/testing/btest/scripts/base/protocols/smb/raw-ntlm.test b/testing/btest/scripts/base/protocols/smb/raw-ntlm.test index 9cf9aa35c4..4518368972 100644 --- a/testing/btest/scripts/base/protocols/smb/raw-ntlm.test +++ b/testing/btest/scripts/base/protocols/smb/raw-ntlm.test @@ -1,4 +1,4 @@ -#@TEST-EXEC: bro -b -C -r $TRACES/smb/raw_ntlm_in_smb.pcap %INPUT +#@TEST-EXEC: zeek -b -C -r $TRACES/smb/raw_ntlm_in_smb.pcap %INPUT #@TEST-EXEC: btest-diff .stdout @load base/protocols/ntlm diff --git a/testing/btest/scripts/base/protocols/smb/smb1-transaction-dcerpc.test b/testing/btest/scripts/base/protocols/smb/smb1-transaction-dcerpc.test index 52f05c57b4..8a6a775005 100644 --- a/testing/btest/scripts/base/protocols/smb/smb1-transaction-dcerpc.test +++ b/testing/btest/scripts/base/protocols/smb/smb1-transaction-dcerpc.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b -C -r $TRACES/smb/dssetup_DsRoleGetPrimaryDomainInformation_standalone_workstation.cap %INPUT +# @TEST-EXEC: zeek -b -C -r $TRACES/smb/dssetup_DsRoleGetPrimaryDomainInformation_standalone_workstation.cap %INPUT # @TEST-EXEC: btest-diff dce_rpc.log @load base/protocols/dce-rpc diff --git a/testing/btest/scripts/base/protocols/smb/smb1-transaction-request.test b/testing/btest/scripts/base/protocols/smb/smb1-transaction-request.test index 1573eb93b8..d6b5d0766d 100644 --- a/testing/btest/scripts/base/protocols/smb/smb1-transaction-request.test +++ b/testing/btest/scripts/base/protocols/smb/smb1-transaction-request.test @@ -1,4 +1,4 @@ -#@TEST-EXEC: bro -b -C -r $TRACES/smb/smb1_transaction_request.pcap %INPUT +#@TEST-EXEC: zeek -b -C -r $TRACES/smb/smb1_transaction_request.pcap %INPUT #@TEST-EXEC: btest-diff .stdout @load base/protocols/smb diff --git 
a/testing/btest/scripts/base/protocols/smb/smb1-transaction-response.test b/testing/btest/scripts/base/protocols/smb/smb1-transaction-response.test index 6e826445e9..5016c828b5 100644 --- a/testing/btest/scripts/base/protocols/smb/smb1-transaction-response.test +++ b/testing/btest/scripts/base/protocols/smb/smb1-transaction-response.test @@ -1,4 +1,4 @@ -#@TEST-EXEC: bro -b -C -r $TRACES/smb/smb1_transaction_response.pcap %INPUT +#@TEST-EXEC: zeek -b -C -r $TRACES/smb/smb1_transaction_response.pcap %INPUT #@TEST-EXEC: btest-diff .stdout @load base/protocols/smb diff --git a/testing/btest/scripts/base/protocols/smb/smb1-transaction-secondary-request.test b/testing/btest/scripts/base/protocols/smb/smb1-transaction-secondary-request.test index e186ee7b22..797fe01b6d 100644 --- a/testing/btest/scripts/base/protocols/smb/smb1-transaction-secondary-request.test +++ b/testing/btest/scripts/base/protocols/smb/smb1-transaction-secondary-request.test @@ -1,4 +1,4 @@ -#@TEST-EXEC: bro -b -C -r $TRACES/smb/smb1_transaction_secondary_request.pcap %INPUT +#@TEST-EXEC: zeek -b -C -r $TRACES/smb/smb1_transaction_secondary_request.pcap %INPUT #@TEST-EXEC: btest-diff .stdout @load base/protocols/smb diff --git a/testing/btest/scripts/base/protocols/smb/smb1-transaction2-request.test b/testing/btest/scripts/base/protocols/smb/smb1-transaction2-request.test index d216d41c32..40fe08a2a4 100644 --- a/testing/btest/scripts/base/protocols/smb/smb1-transaction2-request.test +++ b/testing/btest/scripts/base/protocols/smb/smb1-transaction2-request.test @@ -1,4 +1,4 @@ -#@TEST-EXEC: bro -b -C -r $TRACES/smb/smb1_transaction2_request.pcap %INPUT +#@TEST-EXEC: zeek -b -C -r $TRACES/smb/smb1_transaction2_request.pcap %INPUT #@TEST-EXEC: btest-diff .stdout @load base/protocols/smb diff --git a/testing/btest/scripts/base/protocols/smb/smb1-transaction2-secondary-request.test b/testing/btest/scripts/base/protocols/smb/smb1-transaction2-secondary-request.test index e8c462dd0d..1e7ba8665f 100644 --- a/testing/btest/scripts/base/protocols/smb/smb1-transaction2-secondary-request.test +++ b/testing/btest/scripts/base/protocols/smb/smb1-transaction2-secondary-request.test @@ -1,4 +1,4 @@ -#@TEST-EXEC: bro -b -C -r $TRACES/smb/smb1_transaction2_secondary_request.pcap %INPUT +#@TEST-EXEC: zeek -b -C -r $TRACES/smb/smb1_transaction2_secondary_request.pcap %INPUT #@TEST-EXEC: btest-diff .stdout @load base/protocols/smb diff --git a/testing/btest/scripts/base/protocols/smb/smb1.test b/testing/btest/scripts/base/protocols/smb/smb1.test index 61727754dc..89ac10eecb 100644 --- a/testing/btest/scripts/base/protocols/smb/smb1.test +++ b/testing/btest/scripts/base/protocols/smb/smb1.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b -r $TRACES/smb/smb1.pcap %INPUT +# @TEST-EXEC: zeek -b -r $TRACES/smb/smb1.pcap %INPUT # @TEST-EXEC: btest-diff smb_files.log @load base/protocols/smb diff --git a/testing/btest/scripts/base/protocols/smb/smb2-read-write.bro b/testing/btest/scripts/base/protocols/smb/smb2-read-write.bro deleted file mode 100644 index 0d59e7a495..0000000000 --- a/testing/btest/scripts/base/protocols/smb/smb2-read-write.bro +++ /dev/null @@ -1,9 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/smb/smb2readwrite.pcap %INPUT -# @TEST-EXEC: btest-diff smb_files.log -# @TEST-EXEC: btest-diff files.log -# @TEST-EXEC: test ! 
-f dpd.log - -@load base/protocols/smb - -redef SMB::logged_file_actions += { SMB::FILE_READ, SMB::FILE_WRITE }; - diff --git a/testing/btest/scripts/base/protocols/smb/smb2-read-write.zeek b/testing/btest/scripts/base/protocols/smb/smb2-read-write.zeek new file mode 100644 index 0000000000..ed18bb0715 --- /dev/null +++ b/testing/btest/scripts/base/protocols/smb/smb2-read-write.zeek @@ -0,0 +1,9 @@ +# @TEST-EXEC: zeek -C -r $TRACES/smb/smb2readwrite.pcap %INPUT +# @TEST-EXEC: btest-diff smb_files.log +# @TEST-EXEC: btest-diff files.log +# @TEST-EXEC: test ! -f dpd.log + +@load base/protocols/smb + +redef SMB::logged_file_actions += { SMB::FILE_READ, SMB::FILE_WRITE }; + diff --git a/testing/btest/scripts/base/protocols/smb/smb2-write-response.test b/testing/btest/scripts/base/protocols/smb/smb2-write-response.test new file mode 100644 index 0000000000..c737b43991 --- /dev/null +++ b/testing/btest/scripts/base/protocols/smb/smb2-write-response.test @@ -0,0 +1,13 @@ +# @TEST-EXEC: zeek -C -r $TRACES/smb/smb2readwrite.pcap %INPUT +# @TEST-EXEC: btest-diff .stdout + +@load base/protocols/smb + +# A test for write response. +event smb2_write_response(c: connection, hdr: SMB2::Header, length: count) + { + print fmt("smb2_write_response %s -> %s:%d, length: %d", c$id$orig_h, c$id$resp_h, c$id$resp_p, length); + print (hdr); + } + + diff --git a/testing/btest/scripts/base/protocols/smb/smb2.test b/testing/btest/scripts/base/protocols/smb/smb2.test index c4c6e78224..f69972f8ba 100644 --- a/testing/btest/scripts/base/protocols/smb/smb2.test +++ b/testing/btest/scripts/base/protocols/smb/smb2.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/smb/smb2.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/smb/smb2.pcap %INPUT # @TEST-EXEC: btest-diff smb_files.log # @TEST-EXEC: btest-diff smb_mapping.log # @TEST-EXEC: btest-diff files.log diff --git a/testing/btest/scripts/base/protocols/smb/smb3.test b/testing/btest/scripts/base/protocols/smb/smb3.test index f762ea10f3..aeab67d27c 100644 --- a/testing/btest/scripts/base/protocols/smb/smb3.test +++ b/testing/btest/scripts/base/protocols/smb/smb3.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/smb/smb3.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/smb/smb3.pcap %INPUT # @TEST-EXEC: btest-diff smb_mapping.log # @TEST-EXEC: test ! -f dpd.log # @TEST-EXEC: test ! -f weird.log diff --git a/testing/btest/scripts/base/protocols/smb/smb311.test b/testing/btest/scripts/base/protocols/smb/smb311.test index 22f232c14a..c988355742 100644 --- a/testing/btest/scripts/base/protocols/smb/smb311.test +++ b/testing/btest/scripts/base/protocols/smb/smb311.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b -C -r $TRACES/smb/smb311.pcap %INPUT +# @TEST-EXEC: zeek -b -C -r $TRACES/smb/smb311.pcap %INPUT # @TEST-EXEC: test ! -f dpd.log # @TEST-EXEC: test ! 
-f weird.log # @TEST-EXEC: btest-diff .stdout diff --git a/testing/btest/scripts/base/protocols/smtp/attachment.test b/testing/btest/scripts/base/protocols/smtp/attachment.test index 49602f00c1..ddbdae0d64 100644 --- a/testing/btest/scripts/base/protocols/smtp/attachment.test +++ b/testing/btest/scripts/base/protocols/smtp/attachment.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b -r $TRACES/smtp.trace %INPUT +# @TEST-EXEC: zeek -b -r $TRACES/smtp.trace %INPUT # @TEST-EXEC: btest-diff smtp.log # @TEST-EXEC: btest-diff files.log diff --git a/testing/btest/scripts/base/protocols/smtp/basic.test b/testing/btest/scripts/base/protocols/smtp/basic.test index 6be512a255..41a9290f13 100644 --- a/testing/btest/scripts/base/protocols/smtp/basic.test +++ b/testing/btest/scripts/base/protocols/smtp/basic.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/smtp.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/smtp.trace %INPUT # @TEST-EXEC: btest-diff smtp.log @load base/protocols/smtp diff --git a/testing/btest/scripts/base/protocols/smtp/one-side.test b/testing/btest/scripts/base/protocols/smtp/one-side.test index cffbe1d173..9c9e036a8c 100644 --- a/testing/btest/scripts/base/protocols/smtp/one-side.test +++ b/testing/btest/scripts/base/protocols/smtp/one-side.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -r $TRACES/smtp-one-side-only.trace %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/smtp-one-side-only.trace %INPUT # @TEST-EXEC: btest-diff smtp.log @load base/protocols/smtp diff --git a/testing/btest/scripts/base/protocols/smtp/starttls.test b/testing/btest/scripts/base/protocols/smtp/starttls.test index e3a114f572..865497f022 100644 --- a/testing/btest/scripts/base/protocols/smtp/starttls.test +++ b/testing/btest/scripts/base/protocols/smtp/starttls.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -r $TRACES/tls/smtp-starttls.pcap %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tls/smtp-starttls.pcap %INPUT # @TEST-EXEC: btest-diff smtp.log # @TEST-EXEC: btest-diff ssl.log # @TEST-EXEC: btest-diff x509.log diff --git a/testing/btest/scripts/base/protocols/snmp/snmp-addr.bro b/testing/btest/scripts/base/protocols/snmp/snmp-addr.bro deleted file mode 100644 index 5c21cf7be3..0000000000 --- a/testing/btest/scripts/base/protocols/snmp/snmp-addr.bro +++ /dev/null @@ -1,15 +0,0 @@ -# @TEST-EXEC: bro -C -b -r $TRACES/snmp/snmpwalk-short.pcap %INPUT -# @TEST-EXEC: btest-diff .stdout - -@load base/protocols/snmp - -event snmp_response(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) { - - for (i in pdu$bindings) { - local binding = pdu$bindings[i]; - - if (binding$value?$address) - print binding$value$address; - } - -} diff --git a/testing/btest/scripts/base/protocols/snmp/snmp-addr.zeek b/testing/btest/scripts/base/protocols/snmp/snmp-addr.zeek new file mode 100644 index 0000000000..16203c597e --- /dev/null +++ b/testing/btest/scripts/base/protocols/snmp/snmp-addr.zeek @@ -0,0 +1,15 @@ +# @TEST-EXEC: zeek -C -b -r $TRACES/snmp/snmpwalk-short.pcap %INPUT +# @TEST-EXEC: btest-diff .stdout + +@load base/protocols/snmp + +event snmp_response(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) { + + for (i in pdu$bindings) { + local binding = pdu$bindings[i]; + + if (binding$value?$address) + print binding$value$address; + } + +} diff --git a/testing/btest/scripts/base/protocols/snmp/v1.bro b/testing/btest/scripts/base/protocols/snmp/v1.bro deleted file mode 100644 index 7dd5bd4a68..0000000000 --- a/testing/btest/scripts/base/protocols/snmp/v1.bro +++ /dev/null @@ -1,11 +0,0 @@ -# @TEST-EXEC: bro -b -r 
$TRACES/snmp/snmpv1_get.pcap %INPUT $SCRIPTS/snmp-test.bro >out1 -# @TEST-EXEC: bro -b -r $TRACES/snmp/snmpv1_get_short.pcap %INPUT $SCRIPTS/snmp-test.bro >out2 -# @TEST-EXEC: bro -b -r $TRACES/snmp/snmpv1_set.pcap %INPUT $SCRIPTS/snmp-test.bro >out3 -# @TEST-EXEC: bro -b -r $TRACES/snmp/snmpv1_trap.pcap %INPUT $SCRIPTS/snmp-test.bro >out4 - -# @TEST-EXEC: btest-diff out1 -# @TEST-EXEC: btest-diff out2 -# @TEST-EXEC: btest-diff out3 -# @TEST-EXEC: btest-diff out4 - -@load base/protocols/snmp diff --git a/testing/btest/scripts/base/protocols/snmp/v1.zeek b/testing/btest/scripts/base/protocols/snmp/v1.zeek new file mode 100644 index 0000000000..6513d94177 --- /dev/null +++ b/testing/btest/scripts/base/protocols/snmp/v1.zeek @@ -0,0 +1,11 @@ +# @TEST-EXEC: zeek -b -r $TRACES/snmp/snmpv1_get.pcap %INPUT $SCRIPTS/snmp-test.zeek >out1 +# @TEST-EXEC: zeek -b -r $TRACES/snmp/snmpv1_get_short.pcap %INPUT $SCRIPTS/snmp-test.zeek >out2 +# @TEST-EXEC: zeek -b -r $TRACES/snmp/snmpv1_set.pcap %INPUT $SCRIPTS/snmp-test.zeek >out3 +# @TEST-EXEC: zeek -b -r $TRACES/snmp/snmpv1_trap.pcap %INPUT $SCRIPTS/snmp-test.zeek >out4 + +# @TEST-EXEC: btest-diff out1 +# @TEST-EXEC: btest-diff out2 +# @TEST-EXEC: btest-diff out3 +# @TEST-EXEC: btest-diff out4 + +@load base/protocols/snmp diff --git a/testing/btest/scripts/base/protocols/snmp/v2.bro b/testing/btest/scripts/base/protocols/snmp/v2.bro deleted file mode 100644 index a2b9885fbb..0000000000 --- a/testing/btest/scripts/base/protocols/snmp/v2.bro +++ /dev/null @@ -1,9 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/snmp/snmpv2_get.pcap %INPUT $SCRIPTS/snmp-test.bro >out1 -# @TEST-EXEC: bro -b -r $TRACES/snmp/snmpv2_get_bulk.pcap %INPUT $SCRIPTS/snmp-test.bro >out2 -# @TEST-EXEC: bro -b -r $TRACES/snmp/snmpv2_get_next.pcap %INPUT $SCRIPTS/snmp-test.bro >out3 - -# @TEST-EXEC: btest-diff out1 -# @TEST-EXEC: btest-diff out2 -# @TEST-EXEC: btest-diff out3 - -@load base/protocols/snmp diff --git a/testing/btest/scripts/base/protocols/snmp/v2.zeek b/testing/btest/scripts/base/protocols/snmp/v2.zeek new file mode 100644 index 0000000000..015d6446da --- /dev/null +++ b/testing/btest/scripts/base/protocols/snmp/v2.zeek @@ -0,0 +1,9 @@ +# @TEST-EXEC: zeek -b -r $TRACES/snmp/snmpv2_get.pcap %INPUT $SCRIPTS/snmp-test.zeek >out1 +# @TEST-EXEC: zeek -b -r $TRACES/snmp/snmpv2_get_bulk.pcap %INPUT $SCRIPTS/snmp-test.zeek >out2 +# @TEST-EXEC: zeek -b -r $TRACES/snmp/snmpv2_get_next.pcap %INPUT $SCRIPTS/snmp-test.zeek >out3 + +# @TEST-EXEC: btest-diff out1 +# @TEST-EXEC: btest-diff out2 +# @TEST-EXEC: btest-diff out3 + +@load base/protocols/snmp diff --git a/testing/btest/scripts/base/protocols/snmp/v3.bro b/testing/btest/scripts/base/protocols/snmp/v3.bro deleted file mode 100644 index 43edbdc2df..0000000000 --- a/testing/btest/scripts/base/protocols/snmp/v3.bro +++ /dev/null @@ -1,5 +0,0 @@ -# @TEST-EXEC: bro -b -r $TRACES/snmp/snmpv3_get_next.pcap %INPUT $SCRIPTS/snmp-test.bro >out1 - -# @TEST-EXEC: btest-diff out1 - -@load base/protocols/snmp diff --git a/testing/btest/scripts/base/protocols/snmp/v3.zeek b/testing/btest/scripts/base/protocols/snmp/v3.zeek new file mode 100644 index 0000000000..7d4cb53e72 --- /dev/null +++ b/testing/btest/scripts/base/protocols/snmp/v3.zeek @@ -0,0 +1,5 @@ +# @TEST-EXEC: zeek -b -r $TRACES/snmp/snmpv3_get_next.pcap %INPUT $SCRIPTS/snmp-test.zeek >out1 + +# @TEST-EXEC: btest-diff out1 + +@load base/protocols/snmp diff --git a/testing/btest/scripts/base/protocols/socks/socks-auth.bro b/testing/btest/scripts/base/protocols/socks/socks-auth.bro deleted 
file mode 100644 index d58e1b5801..0000000000 --- a/testing/btest/scripts/base/protocols/socks/socks-auth.bro +++ /dev/null @@ -1,11 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/socks-auth.pcap %INPUT -# @TEST-EXEC: btest-diff socks.log -# @TEST-EXEC: btest-diff tunnel.log - -@load base/protocols/socks - -redef SOCKS::default_capture_password = T; - -@TEST-START-NEXT - -@load base/protocols/socks diff --git a/testing/btest/scripts/base/protocols/socks/socks-auth.zeek b/testing/btest/scripts/base/protocols/socks/socks-auth.zeek new file mode 100644 index 0000000000..eabd4a6420 --- /dev/null +++ b/testing/btest/scripts/base/protocols/socks/socks-auth.zeek @@ -0,0 +1,11 @@ +# @TEST-EXEC: zeek -r $TRACES/socks-auth.pcap %INPUT +# @TEST-EXEC: btest-diff socks.log +# @TEST-EXEC: btest-diff tunnel.log + +@load base/protocols/socks + +redef SOCKS::default_capture_password = T; + +@TEST-START-NEXT + +@load base/protocols/socks diff --git a/testing/btest/scripts/base/protocols/socks/trace1.test b/testing/btest/scripts/base/protocols/socks/trace1.test index fb1d9ebaf2..900a962fef 100644 --- a/testing/btest/scripts/base/protocols/socks/trace1.test +++ b/testing/btest/scripts/base/protocols/socks/trace1.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/socks.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/socks.trace %INPUT # @TEST-EXEC: btest-diff socks.log # @TEST-EXEC: btest-diff tunnel.log diff --git a/testing/btest/scripts/base/protocols/socks/trace2.test b/testing/btest/scripts/base/protocols/socks/trace2.test index 5e3a449120..c9defb5f34 100644 --- a/testing/btest/scripts/base/protocols/socks/trace2.test +++ b/testing/btest/scripts/base/protocols/socks/trace2.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/socks-with-ssl.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/socks-with-ssl.trace %INPUT # @TEST-EXEC: btest-diff socks.log # @TEST-EXEC: btest-diff tunnel.log diff --git a/testing/btest/scripts/base/protocols/socks/trace3.test b/testing/btest/scripts/base/protocols/socks/trace3.test index c3b3b091eb..c83ad4fa87 100644 --- a/testing/btest/scripts/base/protocols/socks/trace3.test +++ b/testing/btest/scripts/base/protocols/socks/trace3.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -r $TRACES/tunnels/socks.pcap %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tunnels/socks.pcap %INPUT # @TEST-EXEC: btest-diff tunnel.log @load base/protocols/socks diff --git a/testing/btest/scripts/base/protocols/ssh/basic.test b/testing/btest/scripts/base/protocols/ssh/basic.test index 84b38a1f32..162ab9dd1f 100644 --- a/testing/btest/scripts/base/protocols/ssh/basic.test +++ b/testing/btest/scripts/base/protocols/ssh/basic.test @@ -1,6 +1,6 @@ # This tests some SSH connections and the output log. 
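The renames running through this patch are mechanical: the .bro suffix becomes .zeek, the bro binary in @TEST-EXEC lines becomes zeek, and bro_init() becomes zeek_init(). For reference, a minimal post-rename btest has this shape (the trace path and @load target below are placeholders for illustration, not files touched by this patch):

# @TEST-EXEC: zeek -b -r $TRACES/some/example.pcap %INPUT
# @TEST-EXEC: btest-diff .stdout

@load base/protocols/ssh

event zeek_init()
	{
	print "test started";
	}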
-# @TEST-EXEC: bro -r $TRACES/ssh/ssh.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/ssh/ssh.trace %INPUT # @TEST-EXEC: btest-diff ssh.log # @TEST-EXEC: btest-diff conn.log # @TEST-EXEC: btest-diff .stdout diff --git a/testing/btest/scripts/base/protocols/ssh/curve25519_kex.test b/testing/btest/scripts/base/protocols/ssh/curve25519_kex.test index 64641fe4af..ca13bda6ef 100644 --- a/testing/btest/scripts/base/protocols/ssh/curve25519_kex.test +++ b/testing/btest/scripts/base/protocols/ssh/curve25519_kex.test @@ -1,6 +1,6 @@ # This tests a successful login with pubkey using curve25519 as the KEX algorithm -# @TEST-EXEC: bro -b -r $TRACES/ssh/ssh_kex_curve25519.pcap %INPUT +# @TEST-EXEC: zeek -b -r $TRACES/ssh/ssh_kex_curve25519.pcap %INPUT # @TEST-EXEC: btest-diff ssh.log @load base/protocols/ssh \ No newline at end of file diff --git a/testing/btest/scripts/base/protocols/ssh/one-auth-fail-only.test b/testing/btest/scripts/base/protocols/ssh/one-auth-fail-only.test index abaa48fd35..e87a246957 100644 --- a/testing/btest/scripts/base/protocols/ssh/one-auth-fail-only.test +++ b/testing/btest/scripts/base/protocols/ssh/one-auth-fail-only.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -r $TRACES/ssh/sshguess.pcap %INPUT | sort >output +# @TEST-EXEC: zeek -C -r $TRACES/ssh/sshguess.pcap %INPUT | sort >output # @TEST-EXEC: btest-diff output event ssh_auth_attempted(c: connection, authenticated: bool) diff --git a/testing/btest/scripts/base/protocols/ssl/basic.test b/testing/btest/scripts/base/protocols/ssl/basic.test index 51eacfd572..918ecd55b7 100644 --- a/testing/btest/scripts/base/protocols/ssl/basic.test +++ b/testing/btest/scripts/base/protocols/ssl/basic.test @@ -1,6 +1,6 @@ # This tests a normal SSL connection and the log it outputs. -# @TEST-EXEC: bro -r $TRACES/tls/tls-conn-with-extensions.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/tls-conn-with-extensions.trace %INPUT # @TEST-EXEC: btest-diff ssl.log # @TEST-EXEC: btest-diff x509.log # @TEST-EXEC: test ! -f dpd.log diff --git a/testing/btest/scripts/base/protocols/ssl/common_name.test b/testing/btest/scripts/base/protocols/ssl/common_name.test index fa14e19045..32565b2ea7 100644 --- a/testing/btest/scripts/base/protocols/ssl/common_name.test +++ b/testing/btest/scripts/base/protocols/ssl/common_name.test @@ -1,7 +1,7 @@ # This tests a normal SSL connection and the log it outputs. -# @TEST-EXEC: bro -r $TRACES/tls/tls-conn-with-extensions.trace %INPUT -# @TEST-EXEC: bro -C -r $TRACES/tls/cert-no-cn.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/tls-conn-with-extensions.trace %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tls/cert-no-cn.pcap %INPUT # @TEST-EXEC: btest-diff .stdout event x509_certificate(f: fa_file, cert_ref: opaque of x509, cert: X509::Certificate) diff --git a/testing/btest/scripts/base/protocols/ssl/comp_methods.test b/testing/btest/scripts/base/protocols/ssl/comp_methods.test index fa24d4b47b..ae6b43e179 100644 --- a/testing/btest/scripts/base/protocols/ssl/comp_methods.test +++ b/testing/btest/scripts/base/protocols/ssl/comp_methods.test @@ -1,6 +1,6 @@ # This tests that the values sent for compression methods are correct. 
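The common_name.test hunk above keeps its x509_certificate handler unchanged; as a sketch of what such a handler can print (assuming only standard X509::Certificate record fields such as cert$subject, and the default scripts being loaded), it could look like:

event x509_certificate(f: fa_file, cert_ref: opaque of x509, cert: X509::Certificate)
	{
	# Print the subject so btest-diff can compare it against the baseline.
	print cert$subject;
	}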
-# @TEST-EXEC: bro -r $TRACES/tls/tls-conn-with-extensions.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/tls-conn-with-extensions.trace %INPUT # @TEST-EXEC: btest-diff .stdout event ssl_client_hello(c: connection, version: count, record_version: count, possible_ts: time, client_random: string, session_id: string, ciphers: index_vec, comp_methods: index_vec) diff --git a/testing/btest/scripts/base/protocols/ssl/cve-2015-3194.test b/testing/btest/scripts/base/protocols/ssl/cve-2015-3194.test index d2aa7b536f..ce405cb405 100644 --- a/testing/btest/scripts/base/protocols/ssl/cve-2015-3194.test +++ b/testing/btest/scripts/base/protocols/ssl/cve-2015-3194.test @@ -1,6 +1,6 @@ -# This tests if Bro does not crash when exposed to CVE-2015-3194 +# This tests if Zeek does not crash when exposed to CVE-2015-3194 -# @TEST-EXEC: bro -r $TRACES/tls/CVE-2015-3194.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/CVE-2015-3194.pcap %INPUT # @TEST-EXEC: btest-diff ssl.log -@load protocols/ssl/validate-certs.bro +@load protocols/ssl/validate-certs diff --git a/testing/btest/scripts/base/protocols/ssl/dhe.test b/testing/btest/scripts/base/protocols/ssl/dhe.test index f41cb70fab..df22cea9cc 100644 --- a/testing/btest/scripts/base/protocols/ssl/dhe.test +++ b/testing/btest/scripts/base/protocols/ssl/dhe.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tls/dhe.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/dhe.pcap %INPUT # @TEST-EXEC: btest-diff .stdout # @TEST-EXEC: btest-diff ssl.log diff --git a/testing/btest/scripts/base/protocols/ssl/dpd.test b/testing/btest/scripts/base/protocols/ssl/dpd.test index 1a16a10db4..f7f76a6e1a 100644 --- a/testing/btest/scripts/base/protocols/ssl/dpd.test +++ b/testing/btest/scripts/base/protocols/ssl/dpd.test @@ -1,15 +1,15 @@ -# @TEST-EXEC: bro -C -b -r $TRACES/tls/ssl-v2.trace %INPUT -# @TEST-EXEC: bro -b -r $TRACES/tls/ssl.v3.trace %INPUT -# @TEST-EXEC: bro -b -r $TRACES/tls/tls1.2.trace %INPUT -# @TEST-EXEC: bro -b -r $TRACES/tls/tls-early-alert.trace %INPUT -# @TEST-EXEC: bro -b -r $TRACES/tls/tls-13draft19-early-data.pcap %INPUT +# @TEST-EXEC: zeek -C -b -r $TRACES/tls/ssl-v2.trace %INPUT +# @TEST-EXEC: zeek -b -r $TRACES/tls/ssl.v3.trace %INPUT +# @TEST-EXEC: zeek -b -r $TRACES/tls/tls1.2.trace %INPUT +# @TEST-EXEC: zeek -b -r $TRACES/tls/tls-early-alert.trace %INPUT +# @TEST-EXEC: zeek -b -r $TRACES/tls/tls-13draft19-early-data.pcap %INPUT # @TEST-EXEC: btest-diff .stdout @load base/frameworks/dpd @load base/frameworks/signatures @load-sigs base/protocols/ssl/dpd.sig -event bro_init() +event zeek_init() { print "Start test run"; } diff --git a/testing/btest/scripts/base/protocols/ssl/dtls-no-dtls.test b/testing/btest/scripts/base/protocols/ssl/dtls-no-dtls.test new file mode 100644 index 0000000000..88667fca18 --- /dev/null +++ b/testing/btest/scripts/base/protocols/ssl/dtls-no-dtls.test @@ -0,0 +1,15 @@ +# This test checks that non-DTLS connections to which we attach the DTLS analyzer don't trigger tons of errors. 
+ +# @TEST-EXEC: zeek -C -r $TRACES/dns-txt-multiple.trace %INPUT +# @TEST-EXEC: btest-diff .stdout + +event zeek_init() + { + const add_ports = { 53/udp }; + Analyzer::register_for_ports(Analyzer::ANALYZER_DTLS, add_ports); + } + +event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count, reason: string) + { + print c$id, atype, reason; + } diff --git a/testing/btest/scripts/base/protocols/ssl/dtls-stun-dpd.test b/testing/btest/scripts/base/protocols/ssl/dtls-stun-dpd.test index d2437aac8b..b86ff75ee4 100644 --- a/testing/btest/scripts/base/protocols/ssl/dtls-stun-dpd.test +++ b/testing/btest/scripts/base/protocols/ssl/dtls-stun-dpd.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tls/webrtc-stun.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/webrtc-stun.pcap %INPUT # @TEST-EXEC: btest-diff ssl.log # @TEST-EXEC: touch dpd.log # @TEST-EXEC: btest-diff dpd.log diff --git a/testing/btest/scripts/base/protocols/ssl/dtls.test b/testing/btest/scripts/base/protocols/ssl/dtls.test index a1b2c74dd8..2f31758cbf 100644 --- a/testing/btest/scripts/base/protocols/ssl/dtls.test +++ b/testing/btest/scripts/base/protocols/ssl/dtls.test @@ -1,9 +1,9 @@ # This tests a normal SSL connection and the log it outputs. -# @TEST-EXEC: bro -r $TRACES/tls/dtls1_0.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/dtls1_0.pcap %INPUT # @TEST-EXEC: btest-diff ssl.log # @TEST-EXEC: btest-diff x509.log -# @TEST-EXEC: bro -r $TRACES/tls/dtls1_2.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/dtls1_2.pcap %INPUT # @TEST-EXEC: cp ssl.log ssl1_2.log # @TEST-EXEC: cp x509.log x5091_2.log # @TEST-EXEC: btest-diff ssl1_2.log diff --git a/testing/btest/scripts/base/protocols/ssl/ecdhe.test b/testing/btest/scripts/base/protocols/ssl/ecdhe.test index bd1bd2cb96..e200619013 100644 --- a/testing/btest/scripts/base/protocols/ssl/ecdhe.test +++ b/testing/btest/scripts/base/protocols/ssl/ecdhe.test @@ -1,3 +1,3 @@ -# @TEST-EXEC: bro -r $TRACES/tls/ecdhe.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/ecdhe.pcap %INPUT # @TEST-EXEC: btest-diff ssl.log # @TEST-EXEC: btest-diff x509.log diff --git a/testing/btest/scripts/base/protocols/ssl/ecdsa.test b/testing/btest/scripts/base/protocols/ssl/ecdsa.test index a2db7c2cb5..2ace638a41 100644 --- a/testing/btest/scripts/base/protocols/ssl/ecdsa.test +++ b/testing/btest/scripts/base/protocols/ssl/ecdsa.test @@ -1,3 +1,3 @@ -# @TEST-EXEC: bro -C -r $TRACES/tls/ecdsa-cert.pcap %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tls/ecdsa-cert.pcap %INPUT # @TEST-EXEC: btest-diff ssl.log # @TEST-EXEC: btest-diff x509.log diff --git a/testing/btest/scripts/base/protocols/ssl/fragment.test b/testing/btest/scripts/base/protocols/ssl/fragment.test index b01a78a07a..2ea87d8291 100644 --- a/testing/btest/scripts/base/protocols/ssl/fragment.test +++ b/testing/btest/scripts/base/protocols/ssl/fragment.test @@ -1,6 +1,6 @@ # Test a heavily fragmented tls connection -# @TEST-EXEC: cat $TRACES/tls/tls-fragmented-handshake.pcap.gz | gunzip | bro -r - %INPUT +# @TEST-EXEC: cat $TRACES/tls/tls-fragmented-handshake.pcap.gz | gunzip | zeek -r - %INPUT # @TEST-EXEC: btest-diff ssl.log # @TEST-EXEC: btest-diff .stdout diff --git a/testing/btest/scripts/base/protocols/ssl/handshake-events.test b/testing/btest/scripts/base/protocols/ssl/handshake-events.test index f73d268eef..0b45bebc02 100644 --- a/testing/btest/scripts/base/protocols/ssl/handshake-events.test +++ b/testing/btest/scripts/base/protocols/ssl/handshake-events.test @@ -1,6 +1,6 @@ # This tests events not covered by other tests -# @TEST-EXEC: bro -b 
-r $TRACES/tls/tls-conn-with-extensions.trace %INPUT +# @TEST-EXEC: zeek -b -r $TRACES/tls/tls-conn-with-extensions.trace %INPUT # @TEST-EXEC: btest-diff .stdout @load base/protocols/ssl diff --git a/testing/btest/scripts/base/protocols/ssl/keyexchange.test b/testing/btest/scripts/base/protocols/ssl/keyexchange.test index 6e1106ece7..252237f0dd 100644 --- a/testing/btest/scripts/base/protocols/ssl/keyexchange.test +++ b/testing/btest/scripts/base/protocols/ssl/keyexchange.test @@ -1,14 +1,14 @@ -# @TEST-EXEC: bro -r $TRACES/tls/dhe.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/dhe.pcap %INPUT # @TEST-EXEC: cat ssl.log > ssl-all.log -# @TEST-EXEC: bro -r $TRACES/tls/ecdhe.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/ecdhe.pcap %INPUT # @TEST-EXEC: cat ssl.log >> ssl-all.log -# @TEST-EXEC: bro -r $TRACES/tls/ssl.v3.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/ssl.v3.trace %INPUT # @TEST-EXEC: cat ssl.log >> ssl-all.log -# @TEST-EXEC: bro -r $TRACES/tls/tls1_1.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/tls1_1.pcap %INPUT # @TEST-EXEC: cat ssl.log >> ssl-all.log -# @TEST-EXEC: bro -r $TRACES/tls/dtls1_0.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/dtls1_0.pcap %INPUT # @TEST-EXEC: cat ssl.log >> ssl-all.log -# @TEST-EXEC: bro -r $TRACES/tls/dtls1_2.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/dtls1_2.pcap %INPUT # @TEST-EXEC: cat ssl.log >> ssl-all.log # @TEST-EXEC: btest-diff ssl-all.log @@ -16,7 +16,7 @@ @load base/protocols/ssl @load base/files/x509 -@load protocols/ssl/extract-certs-pem.bro +@load protocols/ssl/extract-certs-pem module SSL; diff --git a/testing/btest/scripts/base/protocols/ssl/ocsp-http-get.test b/testing/btest/scripts/base/protocols/ssl/ocsp-http-get.test index c8c8acc589..6b4b034c69 100644 --- a/testing/btest/scripts/base/protocols/ssl/ocsp-http-get.test +++ b/testing/btest/scripts/base/protocols/ssl/ocsp-http-get.test @@ -1,12 +1,12 @@ # This tests a normal OCSP request sent through HTTP GET -# @TEST-EXEC: bro -C -r $TRACES/tls/ocsp-http-get.pcap %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tls/ocsp-http-get.pcap %INPUT # @TEST-EXEC: btest-diff ocsp.log # @TEST-EXEC: btest-diff .stdout @load files/x509/log-ocsp -event bro_init() +event zeek_init() { Files::register_for_mime_type(Files::ANALYZER_OCSP_REQUEST, "application/ocsp-request"); Files::register_for_mime_type(Files::ANALYZER_OCSP_REPLY, "application/ocsp-response"); @@ -32,7 +32,7 @@ event ocsp_response_status(f: fa_file, status: string) print "ocsp_response_status", status; } -event ocsp_response_bytes(f: fa_file, resp_ref: opaque of ocsp_resp, status: string, version: count, responderId: string, producedAt: time, signatureAlgorithm: string, certs: x509_opaque_vector) +event ocsp_response_bytes(f: fa_file, status: string, version: count, responderId: string, producedAt: time, signatureAlgorithm: string, certs: x509_opaque_vector) { print "ocsp_response_bytes", status, version, responderId, producedAt, signatureAlgorithm; } diff --git a/testing/btest/scripts/base/protocols/ssl/ocsp-request-only.test b/testing/btest/scripts/base/protocols/ssl/ocsp-request-only.test index 05483717b0..5106a17c75 100644 --- a/testing/btest/scripts/base/protocols/ssl/ocsp-request-only.test +++ b/testing/btest/scripts/base/protocols/ssl/ocsp-request-only.test @@ -1,11 +1,11 @@ # This tests a OCSP request missing response -# @TEST-EXEC: bro -C -r $TRACES/tls/ocsp-request-only.pcap %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tls/ocsp-request-only.pcap %INPUT # @TEST-EXEC: btest-diff .stdout @load files/x509/log-ocsp -event 
bro_init() +event zeek_init() { Files::register_for_mime_type(Files::ANALYZER_OCSP_REQUEST, "application/ocsp-request"); Files::register_for_mime_type(Files::ANALYZER_OCSP_REPLY, "application/ocsp-response"); @@ -31,7 +31,7 @@ event ocsp_response_status(f: fa_file, status: string) print "ocsp_response_status", status; } -event ocsp_response_bytes(f: fa_file, resp_ref: opaque of ocsp_resp, status: string, version: count, responderId: string, producedAt: time, signatureAlgorithm: string, certs: x509_opaque_vector) +event ocsp_response_bytes(f: fa_file, status: string, version: count, responderId: string, producedAt: time, signatureAlgorithm: string, certs: x509_opaque_vector) { print "ocsp_response_bytes", status, version, responderId, producedAt, signatureAlgorithm; } diff --git a/testing/btest/scripts/base/protocols/ssl/ocsp-request-response.test b/testing/btest/scripts/base/protocols/ssl/ocsp-request-response.test index b95203dfd8..67f62e451d 100644 --- a/testing/btest/scripts/base/protocols/ssl/ocsp-request-response.test +++ b/testing/btest/scripts/base/protocols/ssl/ocsp-request-response.test @@ -1,12 +1,12 @@ # This tests a pair of normal OCSP request and response -# @TEST-EXEC: bro -C -r $TRACES/tls/ocsp-request-response.pcap %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tls/ocsp-request-response.pcap %INPUT # @TEST-EXEC: btest-diff ocsp.log # @TEST-EXEC: btest-diff .stdout @load files/x509/log-ocsp -event bro_init() +event zeek_init() { Files::register_for_mime_type(Files::ANALYZER_OCSP_REQUEST, "application/ocsp-request"); Files::register_for_mime_type(Files::ANALYZER_OCSP_REPLY, "application/ocsp-response"); @@ -32,7 +32,7 @@ event ocsp_response_status(f: fa_file, status: string) print "ocsp_response_status", status; } -event ocsp_response_bytes(f: fa_file, resp_ref: opaque of ocsp_resp, status: string, version: count, responderId: string, producedAt: time, signatureAlgorithm: string, certs: x509_opaque_vector) +event ocsp_response_bytes(f: fa_file, status: string, version: count, responderId: string, producedAt: time, signatureAlgorithm: string, certs: x509_opaque_vector) { print "ocsp_response_bytes", status, version, responderId, producedAt, signatureAlgorithm; } diff --git a/testing/btest/scripts/base/protocols/ssl/ocsp-response-only.test b/testing/btest/scripts/base/protocols/ssl/ocsp-response-only.test index 43dbf82583..568915d7aa 100644 --- a/testing/btest/scripts/base/protocols/ssl/ocsp-response-only.test +++ b/testing/btest/scripts/base/protocols/ssl/ocsp-response-only.test @@ -1,12 +1,12 @@ # This tests a normal OCSP response missing request -# @TEST-EXEC: bro -C -r $TRACES/tls/ocsp-response-only.pcap %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tls/ocsp-response-only.pcap %INPUT # @TEST-EXEC: btest-diff ocsp.log # @TEST-EXEC: btest-diff .stdout @load files/x509/log-ocsp -event bro_init() +event zeek_init() { Files::register_for_mime_type(Files::ANALYZER_OCSP_REQUEST, "application/ocsp-request"); Files::register_for_mime_type(Files::ANALYZER_OCSP_REPLY, "application/ocsp-response"); @@ -32,7 +32,7 @@ event ocsp_response_status(f: fa_file, status: string) print "ocsp_response_status", status; } -event ocsp_response_bytes(f: fa_file, resp_ref: opaque of ocsp_resp, status: string, version: count, responderId: string, producedAt: time, signatureAlgorithm: string, certs: x509_opaque_vector) +event ocsp_response_bytes(f: fa_file, status: string, version: count, responderId: string, producedAt: time, signatureAlgorithm: string, certs: x509_opaque_vector) { print "ocsp_response_bytes", 
status, version, responderId, producedAt, signatureAlgorithm; } diff --git a/testing/btest/scripts/base/protocols/ssl/ocsp-revoked.test b/testing/btest/scripts/base/protocols/ssl/ocsp-revoked.test index e4378135ad..e26bae59a5 100644 --- a/testing/btest/scripts/base/protocols/ssl/ocsp-revoked.test +++ b/testing/btest/scripts/base/protocols/ssl/ocsp-revoked.test @@ -1,12 +1,12 @@ # This tests OCSP response with revocation -# @TEST-EXEC: bro -C -r $TRACES/tls/ocsp-revoked.pcap %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tls/ocsp-revoked.pcap %INPUT # @TEST-EXEC: btest-diff ocsp.log # @TEST-EXEC: btest-diff .stdout @load files/x509/log-ocsp -event bro_init() +event zeek_init() { Files::register_for_mime_type(Files::ANALYZER_OCSP_REQUEST, "application/ocsp-request"); Files::register_for_mime_type(Files::ANALYZER_OCSP_REPLY, "application/ocsp-response"); @@ -32,7 +32,7 @@ event ocsp_response_status(f: fa_file, status: string) print "ocsp_response_status", status; } -event ocsp_response_bytes(f: fa_file, resp_ref: opaque of ocsp_resp, status: string, version: count, responderId: string, producedAt: time, signatureAlgorithm: string, certs: x509_opaque_vector) +event ocsp_response_bytes(f: fa_file, status: string, version: count, responderId: string, producedAt: time, signatureAlgorithm: string, certs: x509_opaque_vector) { print "ocsp_response_bytes", status, version, responderId, producedAt, signatureAlgorithm; } diff --git a/testing/btest/scripts/base/protocols/ssl/ocsp-stapling.test b/testing/btest/scripts/base/protocols/ssl/ocsp-stapling.test index 6424f263f1..3c338933aa 100644 --- a/testing/btest/scripts/base/protocols/ssl/ocsp-stapling.test +++ b/testing/btest/scripts/base/protocols/ssl/ocsp-stapling.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -r $TRACES/tls/ocsp-stapling.trace %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tls/ocsp-stapling.trace %INPUT # @TEST-EXEC: btest-diff .stdout redef SSL::root_certs += { diff --git a/testing/btest/scripts/base/protocols/ssl/signed_certificate_timestamp.test b/testing/btest/scripts/base/protocols/ssl/signed_certificate_timestamp.test index 7c7dc90e4c..e2201c3218 100644 --- a/testing/btest/scripts/base/protocols/ssl/signed_certificate_timestamp.test +++ b/testing/btest/scripts/base/protocols/ssl/signed_certificate_timestamp.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tls/signed_certificate_timestamp.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/signed_certificate_timestamp.pcap %INPUT # # The following file contains a tls 1.0 connection with a SCT in a TLS extension. # This is interesting because the digitally-signed struct in TLS 1.0 does not come @@ -7,7 +7,7 @@ # uses in the end. So this one does have a Signature/Hash alg, even if the protocol # itself does not carry it in the same struct. # -# @TEST-EXEC: bro -r $TRACES/tls/signed_certificate_timestamp_tls1_0.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/signed_certificate_timestamp_tls1_0.pcap %INPUT # @TEST-EXEC: btest-diff .stdout # @TEST-EXEC: test ! 
-f dpd.log diff --git a/testing/btest/scripts/base/protocols/ssl/tls-1.2-ciphers.test b/testing/btest/scripts/base/protocols/ssl/tls-1.2-ciphers.test index a904628acf..077aa15f1a 100644 --- a/testing/btest/scripts/base/protocols/ssl/tls-1.2-ciphers.test +++ b/testing/btest/scripts/base/protocols/ssl/tls-1.2-ciphers.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tls/tls1.2.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/tls1.2.trace %INPUT # @TEST-EXEC: btest-diff .stdout event ssl_client_hello(c: connection, version: count, record_version: count, possible_ts: time, client_random: string, session_id: string, ciphers: index_vec, comp_methods: index_vec) diff --git a/testing/btest/scripts/base/protocols/ssl/tls-1.2-handshake-failure.test b/testing/btest/scripts/base/protocols/ssl/tls-1.2-handshake-failure.test index 74acf3224a..6507e58793 100644 --- a/testing/btest/scripts/base/protocols/ssl/tls-1.2-handshake-failure.test +++ b/testing/btest/scripts/base/protocols/ssl/tls-1.2-handshake-failure.test @@ -1,2 +1,2 @@ -# @TEST-EXEC: bro -r $TRACES/tls/tls-1.2-handshake-failure.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/tls-1.2-handshake-failure.trace %INPUT # @TEST-EXEC: btest-diff ssl.log diff --git a/testing/btest/scripts/base/protocols/ssl/tls-1.2-random.test b/testing/btest/scripts/base/protocols/ssl/tls-1.2-random.test index 7f023927ac..b21fc4ee11 100644 --- a/testing/btest/scripts/base/protocols/ssl/tls-1.2-random.test +++ b/testing/btest/scripts/base/protocols/ssl/tls-1.2-random.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tls/tls1.2.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/tls1.2.trace %INPUT # @TEST-EXEC: btest-diff .stdout event ssl_client_hello(c: connection, version: count, record_version: count, possible_ts: time, client_random: string, session_id: string, ciphers: index_vec, comp_methods: index_vec) diff --git a/testing/btest/scripts/base/protocols/ssl/tls-1.2.test b/testing/btest/scripts/base/protocols/ssl/tls-1.2.test index 15a737c032..8e2189d9f6 100644 --- a/testing/btest/scripts/base/protocols/ssl/tls-1.2.test +++ b/testing/btest/scripts/base/protocols/ssl/tls-1.2.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tls/tls1.2.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/tls1.2.trace %INPUT # @TEST-EXEC: btest-diff ssl.log # @TEST-EXEC: btest-diff x509.log # @TEST-EXEC: btest-diff .stdout diff --git a/testing/btest/scripts/base/protocols/ssl/tls-extension-events.test b/testing/btest/scripts/base/protocols/ssl/tls-extension-events.test index b8f3d42242..99e9847fb4 100644 --- a/testing/btest/scripts/base/protocols/ssl/tls-extension-events.test +++ b/testing/btest/scripts/base/protocols/ssl/tls-extension-events.test @@ -1,5 +1,6 @@ -# @TEST-EXEC: bro -C -r $TRACES/tls/chrome-34-google.trace %INPUT -# @TEST-EXEC: bro -C -r $TRACES/tls/tls-13draft19-early-data.pcap %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tls/chrome-34-google.trace %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tls/tls-13draft19-early-data.pcap %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tls/tls13_psk_succesfull.pcap %INPUT # @TEST-EXEC: btest-diff .stdout event ssl_extension_elliptic_curves(c: connection, is_orig: bool, curves: index_vec) @@ -37,7 +38,7 @@ event ssl_extension_signature_algorithm(c: connection, is_orig: bool, signature_ event ssl_extension_supported_versions(c: connection, is_orig: bool, versions: index_vec) { - print "supported_versions(", c$id$orig_h, c$id$resp_h; + print "supported_versions", c$id$orig_h, c$id$resp_h; for ( i in versions ) print SSL::version_strings[versions[i]]; } 
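Stepping back to the OCSP test hunks above: they all drop the resp_ref: opaque of ocsp_resp parameter from ocsp_response_bytes. A handler written against the new signature can still reach the certificates, since each element of certs remains an opaque of x509; a sketch of that (assuming x509_parse() as the usual way to turn such an opaque into an X509::Certificate):

event ocsp_response_bytes(f: fa_file, status: string, version: count, responderId: string, producedAt: time, signatureAlgorithm: string, certs: x509_opaque_vector)
	{
	print "ocsp_response_bytes", status, version, responderId, producedAt, signatureAlgorithm;
	# Parse each embedded certificate on demand instead of via the removed opaque.
	for ( i in certs )
		print x509_parse(certs[i])$subject;
	}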
@@ -48,3 +49,13 @@ event ssl_extension_psk_key_exchange_modes(c: connection, is_orig: bool, modes: for ( i in modes ) print modes[i]; } + +event ssl_extension_pre_shared_key_client_hello(c: connection, is_orig: bool, identities: psk_identity_vec, binders: string_vec) + { + print "pre_shared_key client hello", c$id$orig_h, c$id$resp_h, identities, binders; + } + +event ssl_extension_pre_shared_key_server_hello(c: connection, is_orig: bool, selected_identity: count) + { + print "pre_shared_key server hello", c$id$orig_h, c$id$resp_h, selected_identity; + } diff --git a/testing/btest/scripts/base/protocols/ssl/tls13-experiment.test b/testing/btest/scripts/base/protocols/ssl/tls13-experiment.test index e074535692..f784ea0af0 100644 --- a/testing/btest/scripts/base/protocols/ssl/tls13-experiment.test +++ b/testing/btest/scripts/base/protocols/ssl/tls13-experiment.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -r $TRACES/tls/chrome-63.0.3211.0-canary-tls_experiment.pcap %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tls/chrome-63.0.3211.0-canary-tls_experiment.pcap %INPUT # @TEST-EXEC: btest-diff ssl.log # @TEST-EXEC: btest-diff .stdout diff --git a/testing/btest/scripts/base/protocols/ssl/tls13-version.test b/testing/btest/scripts/base/protocols/ssl/tls13-version.test index 9194c861e1..29c6da9261 100644 --- a/testing/btest/scripts/base/protocols/ssl/tls13-version.test +++ b/testing/btest/scripts/base/protocols/ssl/tls13-version.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -r $TRACES/tls/tls13draft23-chrome67.0.3368.0-canary.pcap %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tls/tls13draft23-chrome67.0.3368.0-canary.pcap %INPUT # @TEST-EXEC: btest-diff ssl.log # Test that we correctly parse the version out of the extension in an 1.3 connection diff --git a/testing/btest/scripts/base/protocols/ssl/tls13.test b/testing/btest/scripts/base/protocols/ssl/tls13.test index 5033b6ea01..d7db1626e4 100644 --- a/testing/btest/scripts/base/protocols/ssl/tls13.test +++ b/testing/btest/scripts/base/protocols/ssl/tls13.test @@ -1,10 +1,14 @@ -# @TEST-EXEC: bro -C -r $TRACES/tls/tls13draft16-chrome55.0.2879.0-canary-aborted.pcap %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tls/tls13draft16-chrome55.0.2879.0-canary-aborted.pcap %INPUT # @TEST-EXEC: cat ssl.log > ssl-out.log -# @TEST-EXEC: bro -C -r $TRACES/tls/tls13draft16-chrome55.0.2879.0-canary.pcap %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tls/tls13draft16-chrome55.0.2879.0-canary.pcap %INPUT # @TEST-EXEC: cat ssl.log >> ssl-out.log -# @TEST-EXEC: bro -C -r $TRACES/tls/tls13draft16-ff52.a01-aborted.pcap %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tls/tls13draft16-ff52.a01-aborted.pcap %INPUT # @TEST-EXEC: cat ssl.log >> ssl-out.log -# @TEST-EXEC: bro -C -r $TRACES/tls/tls13draft16-ff52.a01.pcap %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tls/tls13draft16-ff52.a01.pcap %INPUT +# @TEST-EXEC: cat ssl.log >> ssl-out.log +# @TEST-EXEC: zeek -C -r $TRACES/tls/tls13_psk_succesfull.pcap %INPUT +# @TEST-EXEC: cat ssl.log >> ssl-out.log +# @TEST-EXEC: zeek -C -r $TRACES/tls/hrr.pcap %INPUT # @TEST-EXEC: cat ssl.log >> ssl-out.log # @TEST-EXEC: btest-diff ssl-out.log # @TEST-EXEC: btest-diff .stdout diff --git a/testing/btest/scripts/base/protocols/ssl/tls1_1.test b/testing/btest/scripts/base/protocols/ssl/tls1_1.test index 885a047ebe..de3ed740b4 100644 --- a/testing/btest/scripts/base/protocols/ssl/tls1_1.test +++ b/testing/btest/scripts/base/protocols/ssl/tls1_1.test @@ -1,6 +1,6 @@ # This tests a normal SSL connection and the log it outputs. 
-# @TEST-EXEC: bro -r $TRACES/tls/tls1_1.pcap %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/tls1_1.pcap %INPUT # @TEST-EXEC: btest-diff ssl.log # @TEST-EXEC: btest-diff x509.log # @TEST-EXEC: test ! -f dpd.log diff --git a/testing/btest/scripts/base/protocols/ssl/x509-invalid-extension.test b/testing/btest/scripts/base/protocols/ssl/x509-invalid-extension.test index de0dc9e59f..05bac2d21b 100644 --- a/testing/btest/scripts/base/protocols/ssl/x509-invalid-extension.test +++ b/testing/btest/scripts/base/protocols/ssl/x509-invalid-extension.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -r $TRACES/tls/ocsp-stapling.trace %INPUT +# @TEST-EXEC: zeek -C -r $TRACES/tls/ocsp-stapling.trace %INPUT # @TEST-EXEC: btest-diff .stdout event x509_extension(f: fa_file, ext: X509::Extension) diff --git a/testing/btest/scripts/base/protocols/ssl/x509_extensions.test b/testing/btest/scripts/base/protocols/ssl/x509_extensions.test index 425afbb2c8..ee7fa103e4 100644 --- a/testing/btest/scripts/base/protocols/ssl/x509_extensions.test +++ b/testing/btest/scripts/base/protocols/ssl/x509_extensions.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/tls/tls1.2.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/tls/tls1.2.trace %INPUT # @TEST-EXEC: btest-diff .stdout event x509_extension(f: fa_file, extension: X509::Extension) diff --git a/testing/btest/scripts/base/protocols/syslog/missing-pri.bro b/testing/btest/scripts/base/protocols/syslog/missing-pri.bro deleted file mode 100644 index c33eb1638b..0000000000 --- a/testing/btest/scripts/base/protocols/syslog/missing-pri.bro +++ /dev/null @@ -1,4 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/syslog-missing-pri.trace %INPUT -# @TEST-EXEC: btest-diff syslog.log - -@load base/protocols/syslog diff --git a/testing/btest/scripts/base/protocols/syslog/missing-pri.zeek b/testing/btest/scripts/base/protocols/syslog/missing-pri.zeek new file mode 100644 index 0000000000..0382fa0aaf --- /dev/null +++ b/testing/btest/scripts/base/protocols/syslog/missing-pri.zeek @@ -0,0 +1,4 @@ +# @TEST-EXEC: zeek -r $TRACES/syslog-missing-pri.trace %INPUT +# @TEST-EXEC: btest-diff syslog.log + +@load base/protocols/syslog diff --git a/testing/btest/scripts/base/protocols/syslog/trace.test b/testing/btest/scripts/base/protocols/syslog/trace.test index 78b681a9d8..f4dba5c807 100644 --- a/testing/btest/scripts/base/protocols/syslog/trace.test +++ b/testing/btest/scripts/base/protocols/syslog/trace.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/syslog-single-udp.trace %INPUT +# @TEST-EXEC: zeek -r $TRACES/syslog-single-udp.trace %INPUT # @TEST-EXEC: btest-diff syslog.log @load base/protocols/syslog diff --git a/testing/btest/scripts/base/protocols/tcp/pending.bro b/testing/btest/scripts/base/protocols/tcp/pending.bro deleted file mode 100644 index 1a49f5d19b..0000000000 --- a/testing/btest/scripts/base/protocols/tcp/pending.bro +++ /dev/null @@ -1,7 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/tls/chrome-34-google.trace %INPUT -# @TEST-EXEC: btest-diff .stdout - -event connection_pending(c: connection) - { - print current_time(), "Connection pending", c$id, c$history; - } diff --git a/testing/btest/scripts/base/protocols/tcp/pending.zeek b/testing/btest/scripts/base/protocols/tcp/pending.zeek new file mode 100644 index 0000000000..8695f71b47 --- /dev/null +++ b/testing/btest/scripts/base/protocols/tcp/pending.zeek @@ -0,0 +1,7 @@ +# @TEST-EXEC: zeek -C -r $TRACES/tls/chrome-34-google.trace %INPUT +# @TEST-EXEC: btest-diff .stdout + +event connection_pending(c: connection) + { + print current_time(), "Connection 
pending", c$id, c$history; + } diff --git a/testing/btest/scripts/base/protocols/xmpp/client-dpd.test b/testing/btest/scripts/base/protocols/xmpp/client-dpd.test index 9c9cc29c8a..544b56a744 100644 --- a/testing/btest/scripts/base/protocols/xmpp/client-dpd.test +++ b/testing/btest/scripts/base/protocols/xmpp/client-dpd.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -b -r $TRACES/tls/xmpp-starttls.pcap %INPUT +# @TEST-EXEC: zeek -C -b -r $TRACES/tls/xmpp-starttls.pcap %INPUT # @TEST-EXEC: btest-diff ssl.log @load base/frameworks/dpd diff --git a/testing/btest/scripts/base/protocols/xmpp/server-dialback-dpd.test b/testing/btest/scripts/base/protocols/xmpp/server-dialback-dpd.test index 9483c0cca8..e398aed22e 100644 --- a/testing/btest/scripts/base/protocols/xmpp/server-dialback-dpd.test +++ b/testing/btest/scripts/base/protocols/xmpp/server-dialback-dpd.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -b -r $TRACES/tls/xmpp-dialback-starttls.pcap %INPUT +# @TEST-EXEC: zeek -C -b -r $TRACES/tls/xmpp-dialback-starttls.pcap %INPUT # @TEST-EXEC: btest-diff ssl.log @load base/frameworks/dpd diff --git a/testing/btest/scripts/base/protocols/xmpp/starttls.test b/testing/btest/scripts/base/protocols/xmpp/starttls.test index f046d49283..7cc4717e31 100644 --- a/testing/btest/scripts/base/protocols/xmpp/starttls.test +++ b/testing/btest/scripts/base/protocols/xmpp/starttls.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -C -b -r $TRACES/tls/xmpp-starttls.pcap %INPUT +# @TEST-EXEC: zeek -C -b -r $TRACES/tls/xmpp-starttls.pcap %INPUT # @TEST-EXEC: btest-diff conn.log # @TEST-EXEC: btest-diff ssl.log # @TEST-EXEC: btest-diff x509.log diff --git a/testing/btest/scripts/base/utils/active-http.test b/testing/btest/scripts/base/utils/active-http.test index 97d06448ca..ff80dc5bf2 100644 --- a/testing/btest/scripts/base/utils/active-http.test +++ b/testing/btest/scripts/base/utils/active-http.test @@ -3,9 +3,9 @@ # # @TEST-EXEC: btest-bg-run httpd python $SCRIPTS/httpd.py --max 2 --addr=127.0.0.1 # @TEST-EXEC: sleep 3 -# @TEST-EXEC: btest-bg-run bro bro -b %INPUT +# @TEST-EXEC: btest-bg-run zeek zeek -b %INPUT # @TEST-EXEC: btest-bg-wait 15 -# @TEST-EXEC: cat bro/.stdout | sort >output +# @TEST-EXEC: cat zeek/.stdout | sort >output # @TEST-EXEC: btest-diff output @load base/utils/active-http @@ -35,7 +35,7 @@ function test_request(label: string, req: ActiveHTTP::Request) } } -event bro_init() +event zeek_init() { test_request("test1", [$url="127.0.0.1:32123"]); test_request("test2", [$url="127.0.0.1:32123/empty", $method="POST"]); diff --git a/testing/btest/scripts/base/utils/addrs.test b/testing/btest/scripts/base/utils/addrs.test index 224fd9dc62..274887fbce 100644 --- a/testing/btest/scripts/base/utils/addrs.test +++ b/testing/btest/scripts/base/utils/addrs.test @@ -1,27 +1,58 @@ -# @TEST-EXEC: bro -b %INPUT > output +# @TEST-EXEC: zeek -b %INPUT > output # @TEST-EXEC: btest-diff output @load base/utils/addrs -event bro_init() +event zeek_init() { + print "============ test ipv4 regex (good strings)"; local ip = "0.0.0.0"; - - print "============ test ipv4 regex"; print ip == ipv4_addr_regex; print is_valid_ip(ip); + ip = "1.1.1.1"; print ip == ipv4_addr_regex; print is_valid_ip(ip); + + ip = "9.9.9.9"; + print ip == ipv4_addr_regex; + print is_valid_ip(ip); + + ip = "99.99.99.99"; + print ip == ipv4_addr_regex; + print is_valid_ip(ip); + + ip = "09.99.99.99"; + print ip == ipv4_addr_regex; + print is_valid_ip(ip); + + ip = "009.99.99.99"; + print ip == ipv4_addr_regex; + print is_valid_ip(ip); + ip = "255.255.255.255"; print 
ip == ipv4_addr_regex; print is_valid_ip(ip); + + print "============ bad ipv4 decimals"; ip = "255.255.255.256"; - print ip == ipv4_addr_regex; # the regex doesn't check for 0-255 - print is_valid_ip(ip); # but is_valid_ip() will + print ip == ipv4_addr_regex; + print is_valid_ip(ip); + + ip = "255.255.255.295"; + print ip == ipv4_addr_regex; + print is_valid_ip(ip); + + ip = "255.255.255.300"; + print ip == ipv4_addr_regex; + print is_valid_ip(ip); + + print "============ too many ipv4 decimals"; ip = "255.255.255.255.255"; print ip == ipv4_addr_regex; print is_valid_ip(ip); + + print "============ typical looking ipv4"; ip = "192.168.1.100"; print ip == ipv4_addr_regex; print is_valid_ip(ip); @@ -97,8 +128,16 @@ event bro_init() ip = "2001:db8:0:0:0:FFFF:192.168.0.256"; print is_valid_ip(ip); + # These have too many hextets ("::" must expand to at least one hextet) + print is_valid_ip("6:1:2::3:4:5:6:7"); + print is_valid_ip("6:1:2::3:4:5:6:7:8"); + print "============ test extract_ip_addresses()"; print extract_ip_addresses("this is 1.1.1.1 a test 2.2.2.2 string with ip addresses 3.3.3.3"); print extract_ip_addresses("this is 1.1.1.1 a test 0:0:0:0:0:0:0:0 string with ip addresses 3.3.3.3"); + # This will use the leading 6 from "IPv6" (maybe that's not intended + # by a person trying to parse such a string, but that's just what's going + # to happen; it's on them to deal). + print extract_ip_addresses("IPv6:1:2::3:4:5:6:7"); } diff --git a/testing/btest/scripts/base/utils/conn-ids.test b/testing/btest/scripts/base/utils/conn-ids.test index affe746e35..b44615b102 100644 --- a/testing/btest/scripts/base/utils/conn-ids.test +++ b/testing/btest/scripts/base/utils/conn-ids.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro %INPUT >output +# @TEST-EXEC: zeek %INPUT >output # @TEST-EXEC: btest-diff output # This is loaded by default. 
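Relating to the addrs.test additions above: the new cases exercise both the plain pattern match against ipv4_addr_regex and the stricter is_valid_ip() check, with the expected results kept in the test's baseline. A small standalone sketch of the same two calls (candidate strings taken from the test itself):

@load base/utils/addrs

event zeek_init()
	{
	local candidates = vector("192.168.1.100", "255.255.255.300", "255.255.255.255.255");
	for ( i in candidates )
		{
		# Shape check via the regex, then the full validity check.
		print candidates[i], candidates[i] == ipv4_addr_regex, is_valid_ip(candidates[i]);
		}
	}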
diff --git a/testing/btest/scripts/base/utils/decompose_uri.bro b/testing/btest/scripts/base/utils/decompose_uri.bro deleted file mode 100644 index 6ed30e7889..0000000000 --- a/testing/btest/scripts/base/utils/decompose_uri.bro +++ /dev/null @@ -1,33 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT > output -# @TEST-EXEC: btest-diff output - -@load base/utils/urls - -function dc(s: string) - { - print fmt("%s", s); - print fmt(" -> %s", decompose_uri(s)); - print ""; - } - -event bro_init() - { - dc("https://www.bro.org:42/documentation/faq.html?k1=v1&k2=v2"); - dc(""); - dc("https://"); - dc("https://www.bro.org"); - dc("https://www.bro.org/"); - dc("https://www.bro.org:42"); - dc("https://www.bro.org:42/"); - dc("https://www.bro.org/documentation"); - dc("https://www.bro.org/documentation/"); - dc("https://www.bro.org/documentation/faq"); - dc("https://www.bro.org/documentation/faq.html"); - dc("https://www.bro.org/documentation/faq.html?"); - dc("https://www.bro.org/documentation/faq.html?k=v"); - dc("https://www.bro.org/documentation/faq.html?k="); - dc("https://www.bro.org/documentation/faq.html?=v"); - dc("file:///documentation/faq.html?=v"); - dc("www.bro.org/?foo=bar"); - } - diff --git a/testing/btest/scripts/base/utils/decompose_uri.zeek b/testing/btest/scripts/base/utils/decompose_uri.zeek new file mode 100644 index 0000000000..30ba9cd245 --- /dev/null +++ b/testing/btest/scripts/base/utils/decompose_uri.zeek @@ -0,0 +1,33 @@ +# @TEST-EXEC: zeek -b %INPUT > output +# @TEST-EXEC: btest-diff output + +@load base/utils/urls + +function dc(s: string) + { + print fmt("%s", s); + print fmt(" -> %s", decompose_uri(s)); + print ""; + } + +event zeek_init() + { + dc("https://www.bro.org:42/documentation/faq.html?k1=v1&k2=v2"); + dc(""); + dc("https://"); + dc("https://www.bro.org"); + dc("https://www.bro.org/"); + dc("https://www.bro.org:42"); + dc("https://www.bro.org:42/"); + dc("https://www.bro.org/documentation"); + dc("https://www.bro.org/documentation/"); + dc("https://www.bro.org/documentation/faq"); + dc("https://www.bro.org/documentation/faq.html"); + dc("https://www.bro.org/documentation/faq.html?"); + dc("https://www.bro.org/documentation/faq.html?k=v"); + dc("https://www.bro.org/documentation/faq.html?k="); + dc("https://www.bro.org/documentation/faq.html?=v"); + dc("file:///documentation/faq.html?=v"); + dc("www.bro.org/?foo=bar"); + } + diff --git a/testing/btest/scripts/base/utils/dir.test b/testing/btest/scripts/base/utils/dir.test index 4cbb4a3c89..6043d54289 100644 --- a/testing/btest/scripts/base/utils/dir.test +++ b/testing/btest/scripts/base/utils/dir.test @@ -1,14 +1,14 @@ -# @TEST-EXEC: btest-bg-run bro bro -b ../dirtest.bro -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/next1 10 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: btest-bg-run zeek zeek -b ../dirtest.zeek +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/next1 10 || (btest-bg-wait -k 1 && false) # @TEST-EXEC: touch testdir/newone # @TEST-EXEC: rm testdir/bye -# @TEST-EXEC: $SCRIPTS/wait-for-file bro/next2 10 || (btest-bg-wait -k 1 && false) +# @TEST-EXEC: $SCRIPTS/wait-for-file zeek/next2 10 || (btest-bg-wait -k 1 && false) # @TEST-EXEC: touch testdir/bye # @TEST-EXEC: btest-bg-wait 20 # @TEST-EXEC: touch testdir/newone -# @TEST-EXEC: btest-diff bro/.stdout +# @TEST-EXEC: btest-diff zeek/.stdout -@TEST-START-FILE dirtest.bro +@TEST-START-FILE dirtest.zeek @load base/utils/dir redef exit_only_after_terminate = T; @@ -47,7 +47,7 @@ function new_file(fname: string) terminate(); } -event bro_init() +event zeek_init() { 
Dir::monitor("../testdir", new_file, .25sec); } diff --git a/testing/btest/scripts/base/utils/directions-and-hosts.test b/testing/btest/scripts/base/utils/directions-and-hosts.test index 92d1b48d3a..7e731aba2e 100644 --- a/testing/btest/scripts/base/utils/directions-and-hosts.test +++ b/testing/btest/scripts/base/utils/directions-and-hosts.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro %INPUT >output +# @TEST-EXEC: zeek %INPUT >output # @TEST-EXEC: btest-diff output # These are loaded by default. @@ -40,7 +40,7 @@ function test_dir(id: conn_id, d: Direction, expect: bool) result == expect ? "SUCCESS" : "FAIL"); } -event bro_init() +event zeek_init() { test_host(local_ip, LOCAL_HOSTS, T); test_host(local_ip, REMOTE_HOSTS, F); diff --git a/testing/btest/scripts/base/utils/exec.test b/testing/btest/scripts/base/utils/exec.test index 0b926df402..efa13c781c 100644 --- a/testing/btest/scripts/base/utils/exec.test +++ b/testing/btest/scripts/base/utils/exec.test @@ -1,8 +1,8 @@ -# @TEST-EXEC: btest-bg-run bro bro -b ../exectest.bro +# @TEST-EXEC: btest-bg-run zeek zeek -b ../exectest.zeek # @TEST-EXEC: btest-bg-wait 15 -# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff bro/.stdout +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff zeek/.stdout -@TEST-START-FILE exectest.bro +@TEST-START-FILE exectest.zeek @load base/utils/exec redef exit_only_after_terminate = T; @@ -26,7 +26,7 @@ function test_cmd(label: string, cmd: Exec::Command) } } -event bro_init() +event zeek_init() { test_cmd("test1", [$cmd="bash ../somescript.sh", $read_files=set("out1", "out2")]); diff --git a/testing/btest/scripts/base/utils/files.test b/testing/btest/scripts/base/utils/files.test index 3324522030..8410c50a1a 100644 --- a/testing/btest/scripts/base/utils/files.test +++ b/testing/btest/scripts/base/utils/files.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -r $TRACES/wikipedia.trace %INPUT >output +# @TEST-EXEC: zeek -r $TRACES/wikipedia.trace %INPUT >output # @TEST-EXEC: btest-diff output # This is loaded by default. 
@@ -12,9 +12,9 @@ event connection_established(c: connection) print generate_extraction_filename("", c, ""); } -event bro_init() +event zeek_init() { print extract_filename_from_content_disposition("attachment; filename=Economy"); print extract_filename_from_content_disposition("attachment; name=\"US-$ rates\""); print extract_filename_from_content_disposition("attachment; filename*=iso-8859-1'en'%A3%20rates"); - } \ No newline at end of file + } diff --git a/testing/btest/scripts/base/utils/hash_hrw.bro b/testing/btest/scripts/base/utils/hash_hrw.bro deleted file mode 100644 index 90f87f6f46..0000000000 --- a/testing/btest/scripts/base/utils/hash_hrw.bro +++ /dev/null @@ -1,57 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT > output -# @TEST-EXEC: btest-diff output - -@load base/utils/hash_hrw - -local pool = HashHRW::Pool(); -local alice = HashHRW::Site($id=0, $user_data="alice"); -local bob = HashHRW::Site($id=1, $user_data="bob"); -local charlie = HashHRW::Site($id=2, $user_data="charlie"); -local dave = HashHRW::Site($id=3, $user_data="dave"); -local eve = HashHRW::Site($id=4, $user_data="eve"); - -print HashHRW::add_site(pool, alice); -print HashHRW::add_site(pool, alice); -print HashHRW::add_site(pool, bob); -print HashHRW::add_site(pool, charlie); -print HashHRW::add_site(pool, dave); -print HashHRW::add_site(pool, eve); -print HashHRW::rem_site(pool, charlie); -print HashHRW::rem_site(pool, charlie); - -print HashHRW::get_site(pool, "one"); -print HashHRW::get_site(pool, "two"); -print HashHRW::get_site(pool, "three"); -print HashHRW::get_site(pool, "four"); -print HashHRW::get_site(pool, "four"); -print HashHRW::get_site(pool, "five"); -print HashHRW::get_site(pool, "six"); -print HashHRW::get_site(pool, 1); -print HashHRW::get_site(pool, 2); -print HashHRW::get_site(pool, 3); - -print HashHRW::rem_site(pool, alice); - -print HashHRW::get_site(pool, "one"); -print HashHRW::get_site(pool, "two"); -print HashHRW::get_site(pool, "three"); -print HashHRW::get_site(pool, "four"); -print HashHRW::get_site(pool, "four"); -print HashHRW::get_site(pool, "five"); -print HashHRW::get_site(pool, "six"); -print HashHRW::get_site(pool, 1); -print HashHRW::get_site(pool, 2); -print HashHRW::get_site(pool, 3); - -print HashHRW::add_site(pool, alice); - -print HashHRW::get_site(pool, "one"); -print HashHRW::get_site(pool, "two"); -print HashHRW::get_site(pool, "three"); -print HashHRW::get_site(pool, "four"); -print HashHRW::get_site(pool, "four"); -print HashHRW::get_site(pool, "five"); -print HashHRW::get_site(pool, "six"); -print HashHRW::get_site(pool, 1); -print HashHRW::get_site(pool, 2); -print HashHRW::get_site(pool, 3); diff --git a/testing/btest/scripts/base/utils/hash_hrw.zeek b/testing/btest/scripts/base/utils/hash_hrw.zeek new file mode 100644 index 0000000000..c77e1548fe --- /dev/null +++ b/testing/btest/scripts/base/utils/hash_hrw.zeek @@ -0,0 +1,57 @@ +# @TEST-EXEC: zeek -b %INPUT > output +# @TEST-EXEC: btest-diff output + +@load base/utils/hash_hrw + +local pool = HashHRW::Pool(); +local alice = HashHRW::Site($id=0, $user_data="alice"); +local bob = HashHRW::Site($id=1, $user_data="bob"); +local charlie = HashHRW::Site($id=2, $user_data="charlie"); +local dave = HashHRW::Site($id=3, $user_data="dave"); +local eve = HashHRW::Site($id=4, $user_data="eve"); + +print HashHRW::add_site(pool, alice); +print HashHRW::add_site(pool, alice); +print HashHRW::add_site(pool, bob); +print HashHRW::add_site(pool, charlie); +print HashHRW::add_site(pool, dave); +print HashHRW::add_site(pool, eve); +print 
HashHRW::rem_site(pool, charlie); +print HashHRW::rem_site(pool, charlie); + +print HashHRW::get_site(pool, "one"); +print HashHRW::get_site(pool, "two"); +print HashHRW::get_site(pool, "three"); +print HashHRW::get_site(pool, "four"); +print HashHRW::get_site(pool, "four"); +print HashHRW::get_site(pool, "five"); +print HashHRW::get_site(pool, "six"); +print HashHRW::get_site(pool, 1); +print HashHRW::get_site(pool, 2); +print HashHRW::get_site(pool, 3); + +print HashHRW::rem_site(pool, alice); + +print HashHRW::get_site(pool, "one"); +print HashHRW::get_site(pool, "two"); +print HashHRW::get_site(pool, "three"); +print HashHRW::get_site(pool, "four"); +print HashHRW::get_site(pool, "four"); +print HashHRW::get_site(pool, "five"); +print HashHRW::get_site(pool, "six"); +print HashHRW::get_site(pool, 1); +print HashHRW::get_site(pool, 2); +print HashHRW::get_site(pool, 3); + +print HashHRW::add_site(pool, alice); + +print HashHRW::get_site(pool, "one"); +print HashHRW::get_site(pool, "two"); +print HashHRW::get_site(pool, "three"); +print HashHRW::get_site(pool, "four"); +print HashHRW::get_site(pool, "four"); +print HashHRW::get_site(pool, "five"); +print HashHRW::get_site(pool, "six"); +print HashHRW::get_site(pool, 1); +print HashHRW::get_site(pool, 2); +print HashHRW::get_site(pool, 3); diff --git a/testing/btest/scripts/base/utils/json.test b/testing/btest/scripts/base/utils/json.test index 264151136a..8d34ed98b1 100644 --- a/testing/btest/scripts/base/utils/json.test +++ b/testing/btest/scripts/base/utils/json.test @@ -2,7 +2,7 @@ # test with no elements, with one element, and with more than one element. # Test that the "only_loggable" option works (output only record fields with # the &log attribute). -# @TEST-EXEC: bro %INPUT >output +# @TEST-EXEC: zeek %INPUT >output # @TEST-EXEC: btest-diff output type color: enum { Red, White, Blue }; @@ -16,7 +16,7 @@ type myrec2: record { m: myrec1 &log; }; -event bro_init() +event zeek_init() { # ##################################### # Test the basic (non-container) types: diff --git a/testing/btest/scripts/base/utils/numbers.test b/testing/btest/scripts/base/utils/numbers.test index c1a2fff8c8..f80b64c26a 100644 --- a/testing/btest/scripts/base/utils/numbers.test +++ b/testing/btest/scripts/base/utils/numbers.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro %INPUT >output +# @TEST-EXEC: zeek %INPUT >output # @TEST-EXEC: btest-diff output # This is loaded by default. diff --git a/testing/btest/scripts/base/utils/paths.test b/testing/btest/scripts/base/utils/paths.test index 8436d37b8b..09e8b96f97 100644 --- a/testing/btest/scripts/base/utils/paths.test +++ b/testing/btest/scripts/base/utils/paths.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro %INPUT >output +# @TEST-EXEC: zeek %INPUT >output # @TEST-EXEC: btest-diff output # This is loaded by default. 
@@ -41,18 +41,18 @@ print "==============================="; test_extract("\"/this/is/a/dir\" is current directory", "/this/is/a/dir"); test_extract("/this/is/a/dir is current directory", "/this/is/a/dir"); test_extract("/this/is/a/dir\\ is\\ current\\ directory", "/this/is/a/dir\\ is\\ current\\ directory"); -test_extract("hey, /foo/bar/baz.bro is a cool script", "/foo/bar/baz.bro"); +test_extract("hey, /foo/bar/baz.zeek is a cool script", "/foo/bar/baz.zeek"); test_extract("here's two dirs: /foo/bar and /foo/baz", "/foo/bar"); print "test build_path_compressed()"; print "==============================="; -print build_path_compressed("/home/bro/", "policy/somefile.bro"); -print build_path_compressed("/home/bro/", "/usr/local/bro/share/bro/somefile.bro"); -print build_path_compressed("/home/bro/", "/usr/local/bro/share/../../bro/somefile.bro"); +print build_path_compressed("/home/bro/", "policy/somefile.zeek"); +print build_path_compressed("/home/bro/", "/usr/local/bro/share/bro/somefile.zeek"); +print build_path_compressed("/home/bro/", "/usr/local/bro/share/../../bro/somefile.zeek"); print "==============================="; print "test build_full_path()"; print "==============================="; -print build_path("/home/bro/", "policy/somefile.bro"); -print build_path("/home/bro/", "/usr/local/bro/share/bro/somefile.bro"); +print build_path("/home/bro/", "policy/somefile.zeek"); +print build_path("/home/bro/", "/usr/local/bro/share/bro/somefile.zeek"); diff --git a/testing/btest/scripts/base/utils/pattern.test b/testing/btest/scripts/base/utils/pattern.test index 1cf5c49100..1c5ad227ef 100644 --- a/testing/btest/scripts/base/utils/pattern.test +++ b/testing/btest/scripts/base/utils/pattern.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro %INPUT >output +# @TEST-EXEC: zeek %INPUT >output # @TEST-EXEC: btest-diff output # This is loaded by default. diff --git a/testing/btest/scripts/base/utils/queue.test b/testing/btest/scripts/base/utils/queue.test index 344ea73f45..bad45a67ab 100644 --- a/testing/btest/scripts/base/utils/queue.test +++ b/testing/btest/scripts/base/utils/queue.test @@ -1,10 +1,10 @@ -# @TEST-EXEC: bro -b %INPUT > output +# @TEST-EXEC: zeek -b %INPUT > output # @TEST-EXEC: btest-diff output # This is loaded by default @load base/utils/queue -event bro_init() +event zeek_init() { local q = Queue::init([$max_len=2]); Queue::put(q, 1); @@ -30,4 +30,4 @@ event bro_init() Queue::get_vector(q2, test3); for ( i in test3 ) print fmt("String queue value: %s", test3[i]); - } \ No newline at end of file + } diff --git a/testing/btest/scripts/base/utils/site.test b/testing/btest/scripts/base/utils/site.test index cfd7dd2ceb..c97d98acbd 100644 --- a/testing/btest/scripts/base/utils/site.test +++ b/testing/btest/scripts/base/utils/site.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro %INPUT > output +# @TEST-EXEC: zeek %INPUT > output # @TEST-EXEC: btest-diff output # This is loaded by default. @@ -12,7 +12,7 @@ redef Site::local_admins += { [141.142.100.0/24] = b, }; -event bro_init() +event zeek_init() { print Site::get_emails(141.142.1.1); print Site::get_emails(141.142.100.100); diff --git a/testing/btest/scripts/base/utils/strings.test b/testing/btest/scripts/base/utils/strings.test index 77fe715def..9606ab3213 100644 --- a/testing/btest/scripts/base/utils/strings.test +++ b/testing/btest/scripts/base/utils/strings.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro %INPUT >output +# @TEST-EXEC: zeek %INPUT >output # @TEST-EXEC: btest-diff output # This is loaded by default. 
diff --git a/testing/btest/scripts/base/utils/thresholds.test b/testing/btest/scripts/base/utils/thresholds.test index 2e18cc3b63..1c56057090 100644 --- a/testing/btest/scripts/base/utils/thresholds.test +++ b/testing/btest/scripts/base/utils/thresholds.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro %INPUT >output +# @TEST-EXEC: zeek %INPUT >output # @TEST-EXEC: btest-diff output # This is loaded by default. diff --git a/testing/btest/scripts/base/utils/urls.test b/testing/btest/scripts/base/utils/urls.test index fd8c0a8622..666f805edb 100644 --- a/testing/btest/scripts/base/utils/urls.test +++ b/testing/btest/scripts/base/utils/urls.test @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro %INPUT >output +# @TEST-EXEC: zeek %INPUT >output # @TEST-EXEC: btest-diff output # This is loaded by default. diff --git a/testing/btest/scripts/check-test-all-policy.bro b/testing/btest/scripts/check-test-all-policy.bro deleted file mode 100644 index 9a9d120e6d..0000000000 --- a/testing/btest/scripts/check-test-all-policy.bro +++ /dev/null @@ -1,6 +0,0 @@ -# Makes sures test-all-policy.bro (which loads *all* other policy scripts) compiles correctly. -# -# @TEST-EXEC: bro %INPUT >output -# @TEST-EXEC: btest-diff output - -@load test-all-policy diff --git a/testing/btest/scripts/check-test-all-policy.zeek b/testing/btest/scripts/check-test-all-policy.zeek new file mode 100644 index 0000000000..19bfe40c08 --- /dev/null +++ b/testing/btest/scripts/check-test-all-policy.zeek @@ -0,0 +1,6 @@ +# Makes sures test-all-policy.zeek (which loads *all* other policy scripts) compiles correctly. +# +# @TEST-EXEC: zeek %INPUT >output +# @TEST-EXEC: btest-diff output + +@load test-all-policy diff --git a/testing/btest/scripts/policy/frameworks/files/extract-all.bro b/testing/btest/scripts/policy/frameworks/files/extract-all.bro deleted file mode 100644 index f54b2e299d..0000000000 --- a/testing/btest/scripts/policy/frameworks/files/extract-all.bro +++ /dev/null @@ -1,2 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/http/get.trace frameworks/files/extract-all-files -# @TEST-EXEC: grep -q EXTRACT files.log diff --git a/testing/btest/scripts/policy/frameworks/files/extract-all.zeek b/testing/btest/scripts/policy/frameworks/files/extract-all.zeek new file mode 100644 index 0000000000..b043e48830 --- /dev/null +++ b/testing/btest/scripts/policy/frameworks/files/extract-all.zeek @@ -0,0 +1,2 @@ +# @TEST-EXEC: zeek -r $TRACES/http/get.trace frameworks/files/extract-all-files +# @TEST-EXEC: grep -q EXTRACT files.log diff --git a/testing/btest/scripts/policy/frameworks/intel/removal.bro b/testing/btest/scripts/policy/frameworks/intel/removal.bro deleted file mode 100644 index 4d7e450da4..0000000000 --- a/testing/btest/scripts/policy/frameworks/intel/removal.bro +++ /dev/null @@ -1,46 +0,0 @@ - -# @TEST-EXEC: btest-bg-run broproc bro %INPUT -# @TEST-EXEC: btest-bg-wait -k 5 -# @TEST-EXEC: btest-diff broproc/intel.log - -@TEST-START-FILE intel.dat -#fields indicator indicator_type meta.source meta.remove -10.0.0.1 Intel::ADDR source1 T -10.0.0.2 Intel::ADDR source1 F -@TEST-END-FILE - -@load frameworks/intel/removal - -redef exit_only_after_terminate = T; -redef Intel::read_files += { "../intel.dat" }; -redef enum Intel::Where += { SOMEWHERE }; - -hook Intel::filter_item(item: Intel::Item) - { - if ( item$indicator_type == Intel::ADDR && - Site::is_local_addr(to_addr(item$indicator)) ) - break; - } - -event do_it() - { - Intel::seen([$host=10.0.0.1, - $where=SOMEWHERE]); - Intel::seen([$host=10.0.0.2, - $where=SOMEWHERE]); - } - -global log_lines = 0; -event 
Intel::log_intel(rec: Intel::Info) - { - ++log_lines; - if ( log_lines == 1 ) - terminate(); - } - -event bro_init() &priority=-10 - { - Intel::insert([$indicator="10.0.0.1", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]); - Intel::insert([$indicator="10.0.0.2", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]); - schedule 1sec { do_it() }; - } \ No newline at end of file diff --git a/testing/btest/scripts/policy/frameworks/intel/removal.zeek b/testing/btest/scripts/policy/frameworks/intel/removal.zeek new file mode 100644 index 0000000000..fe2938e711 --- /dev/null +++ b/testing/btest/scripts/policy/frameworks/intel/removal.zeek @@ -0,0 +1,46 @@ + +# @TEST-EXEC: btest-bg-run zeekproc zeek %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: btest-diff zeekproc/intel.log + +@TEST-START-FILE intel.dat +#fields indicator indicator_type meta.source meta.remove +10.0.0.1 Intel::ADDR source1 T +10.0.0.2 Intel::ADDR source1 F +@TEST-END-FILE + +@load frameworks/intel/removal + +redef exit_only_after_terminate = T; +redef Intel::read_files += { "../intel.dat" }; +redef enum Intel::Where += { SOMEWHERE }; + +hook Intel::filter_item(item: Intel::Item) + { + if ( item$indicator_type == Intel::ADDR && + Site::is_local_addr(to_addr(item$indicator)) ) + break; + } + +event do_it() + { + Intel::seen([$host=10.0.0.1, + $where=SOMEWHERE]); + Intel::seen([$host=10.0.0.2, + $where=SOMEWHERE]); + } + +global log_lines = 0; +event Intel::log_intel(rec: Intel::Info) + { + ++log_lines; + if ( log_lines == 1 ) + terminate(); + } + +event zeek_init() &priority=-10 + { + Intel::insert([$indicator="10.0.0.1", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]); + Intel::insert([$indicator="10.0.0.2", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]); + schedule 1sec { do_it() }; + } diff --git a/testing/btest/scripts/policy/frameworks/intel/seen/certs.bro b/testing/btest/scripts/policy/frameworks/intel/seen/certs.bro deleted file mode 100644 index 8571784d9a..0000000000 --- a/testing/btest/scripts/policy/frameworks/intel/seen/certs.bro +++ /dev/null @@ -1,29 +0,0 @@ -# @TEST-EXEC: bro -Cr $TRACES/tls/ecdsa-cert.pcap %INPUT -# @TEST-EXEC: cat intel.log > intel-all.log -# @TEST-EXEC: bro -r $TRACES/tls/ssl.v3.trace %INPUT -# @TEST-EXEC: cat intel.log >> intel-all.log -# @TEST-EXEC: btest-diff intel-all.log - -@TEST-START-FILE intel.dat -#fields indicator indicator_type meta.source meta.desc meta.url -www.pantz.org Intel::DOMAIN source1 test entry http://some-data-distributor.com/100000 -www.dresdner-privat.de Intel::DOMAIN source1 test entry http://some-data-distributor.com/100000 -2c322ae2b7fe91391345e070b63668978bb1c9da Intel::CERT_HASH source1 test entry http://some-data-distributor.com/100000 -@TEST-END-FILE - -@load base/frameworks/intel -@load base/protocols/ssl -@load frameworks/intel/seen - -redef Intel::read_files += { "intel.dat" }; - -event bro_init() - { - suspend_processing(); - } - -event Input::end_of_data(name: string, source: string) - { - continue_processing(); - } - diff --git a/testing/btest/scripts/policy/frameworks/intel/seen/certs.zeek b/testing/btest/scripts/policy/frameworks/intel/seen/certs.zeek new file mode 100644 index 0000000000..bd9abdf452 --- /dev/null +++ b/testing/btest/scripts/policy/frameworks/intel/seen/certs.zeek @@ -0,0 +1,29 @@ +# @TEST-EXEC: zeek -Cr $TRACES/tls/ecdsa-cert.pcap %INPUT +# @TEST-EXEC: cat intel.log > intel-all.log +# @TEST-EXEC: zeek -r $TRACES/tls/ssl.v3.trace %INPUT +# @TEST-EXEC: cat intel.log >> intel-all.log +# @TEST-EXEC: 
btest-diff intel-all.log + +@TEST-START-FILE intel.dat +#fields indicator indicator_type meta.source meta.desc meta.url +www.pantz.org Intel::DOMAIN source1 test entry http://some-data-distributor.com/100000 +www.dresdner-privat.de Intel::DOMAIN source1 test entry http://some-data-distributor.com/100000 +2c322ae2b7fe91391345e070b63668978bb1c9da Intel::CERT_HASH source1 test entry http://some-data-distributor.com/100000 +@TEST-END-FILE + +@load base/frameworks/intel +@load base/protocols/ssl +@load frameworks/intel/seen + +redef Intel::read_files += { "intel.dat" }; + +event zeek_init() + { + suspend_processing(); + } + +event Input::end_of_data(name: string, source: string) + { + continue_processing(); + } + diff --git a/testing/btest/scripts/policy/frameworks/intel/seen/smb.bro b/testing/btest/scripts/policy/frameworks/intel/seen/smb.bro deleted file mode 100644 index 5dd594953b..0000000000 --- a/testing/btest/scripts/policy/frameworks/intel/seen/smb.bro +++ /dev/null @@ -1,22 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/smb/smb2readwrite.pcap %INPUT -# @TEST-EXEC: btest-diff intel.log - -@load base/frameworks/intel -@load frameworks/intel/seen - -@TEST-START-FILE intel.dat -#fields indicator indicator_type meta.source meta.desc meta.url -pythonfile Intel::FILE_NAME source1 test entry http://some-data-distributor.com/100000 -@TEST-END-FILE - -redef Intel::read_files += { "intel.dat" }; - -event bro_init() - { - suspend_processing(); - } - -event Input::end_of_data(name: string, source: string) - { - continue_processing(); - } diff --git a/testing/btest/scripts/policy/frameworks/intel/seen/smb.zeek b/testing/btest/scripts/policy/frameworks/intel/seen/smb.zeek new file mode 100644 index 0000000000..ad87bf8955 --- /dev/null +++ b/testing/btest/scripts/policy/frameworks/intel/seen/smb.zeek @@ -0,0 +1,22 @@ +# @TEST-EXEC: zeek -C -r $TRACES/smb/smb2readwrite.pcap %INPUT +# @TEST-EXEC: btest-diff intel.log + +@load base/frameworks/intel +@load frameworks/intel/seen + +@TEST-START-FILE intel.dat +#fields indicator indicator_type meta.source meta.desc meta.url +pythonfile Intel::FILE_NAME source1 test entry http://some-data-distributor.com/100000 +@TEST-END-FILE + +redef Intel::read_files += { "intel.dat" }; + +event zeek_init() + { + suspend_processing(); + } + +event Input::end_of_data(name: string, source: string) + { + continue_processing(); + } diff --git a/testing/btest/scripts/policy/frameworks/intel/seen/smtp.bro b/testing/btest/scripts/policy/frameworks/intel/seen/smtp.bro deleted file mode 100644 index fd21e0f73a..0000000000 --- a/testing/btest/scripts/policy/frameworks/intel/seen/smtp.bro +++ /dev/null @@ -1,35 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/smtp-multi-addr.pcap %INPUT -# @TEST-EXEC: btest-diff intel.log - -@TEST-START-FILE intel.dat -#fields indicator indicator_type meta.source meta.desc meta.url -jan.grashoefer@gmail.com Intel::EMAIL source1 test entry http://some-data-distributor.com/100000 -jan.grashoefer@cern.ch Intel::EMAIL source1 test entry http://some-data-distributor.com/100000 -jan.grashofer@cern.ch Intel::EMAIL source1 test entry http://some-data-distributor.com/100000 -addr-spec@example.com Intel::EMAIL source1 test entry http://some-data-distributor.com/100000 -angle-addr@example.com Intel::EMAIL source1 test entry http://some-data-distributor.com/100000 -name-addr@example.com Intel::EMAIL source1 test entry http://some-data-distributor.com/100000 -@TEST-END-FILE - -@load base/frameworks/intel -@load frameworks/intel/seen - -redef Intel::read_files += { "intel.dat" }; 
- -event bro_init() - { - suspend_processing(); - } - -event Input::end_of_data(name: string, source: string) - { - continue_processing(); - } - -event SMTP::log_smtp(rec: SMTP::Info) - { - for ( adr in rec$to ) - { - print fmt("Addr: '%s'", adr); - } - } diff --git a/testing/btest/scripts/policy/frameworks/intel/seen/smtp.zeek b/testing/btest/scripts/policy/frameworks/intel/seen/smtp.zeek new file mode 100644 index 0000000000..ca144d3a55 --- /dev/null +++ b/testing/btest/scripts/policy/frameworks/intel/seen/smtp.zeek @@ -0,0 +1,35 @@ +# @TEST-EXEC: zeek -r $TRACES/smtp-multi-addr.pcap %INPUT +# @TEST-EXEC: btest-diff intel.log + +@TEST-START-FILE intel.dat +#fields indicator indicator_type meta.source meta.desc meta.url +jan.grashoefer@gmail.com Intel::EMAIL source1 test entry http://some-data-distributor.com/100000 +jan.grashoefer@cern.ch Intel::EMAIL source1 test entry http://some-data-distributor.com/100000 +jan.grashofer@cern.ch Intel::EMAIL source1 test entry http://some-data-distributor.com/100000 +addr-spec@example.com Intel::EMAIL source1 test entry http://some-data-distributor.com/100000 +angle-addr@example.com Intel::EMAIL source1 test entry http://some-data-distributor.com/100000 +name-addr@example.com Intel::EMAIL source1 test entry http://some-data-distributor.com/100000 +@TEST-END-FILE + +@load base/frameworks/intel +@load frameworks/intel/seen + +redef Intel::read_files += { "intel.dat" }; + +event zeek_init() + { + suspend_processing(); + } + +event Input::end_of_data(name: string, source: string) + { + continue_processing(); + } + +event SMTP::log_smtp(rec: SMTP::Info) + { + for ( adr in rec$to ) + { + print fmt("Addr: '%s'", adr); + } + } diff --git a/testing/btest/scripts/policy/frameworks/intel/whitelisting.bro b/testing/btest/scripts/policy/frameworks/intel/whitelisting.bro deleted file mode 100644 index 53acd49aa9..0000000000 --- a/testing/btest/scripts/policy/frameworks/intel/whitelisting.bro +++ /dev/null @@ -1,39 +0,0 @@ -# @TEST-EXEC: bro -Cr $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: btest-diff intel.log - -#@TEST-START-FILE intel.dat -#fields indicator indicator_type meta.source meta.desc meta.url -upload.wikimedia.org Intel::DOMAIN source1 somehow bad http://some-data-distributor.com/1 -meta.wikimedia.org Intel::DOMAIN source1 also bad http://some-data-distributor.com/1 -#@TEST-END-FILE - -#@TEST-START-FILE whitelist.dat -#fields indicator indicator_type meta.source meta.desc meta.whitelist meta.url -meta.wikimedia.org Intel::DOMAIN source2 also bad T http://some-data-distributor.com/1 -#@TEST-END-FILE - -@load base/frameworks/intel -@load frameworks/intel/whitelist -@load frameworks/intel/seen - -redef Intel::read_files += { - "intel.dat", - "whitelist.dat", -}; - -global total_files_read = 0; - -event bro_init() - { - suspend_processing(); - } - -event Input::end_of_data(name: string, source: string) - { - # Wait until both intel files are read. 
- if ( /^intel-/ in name && (++total_files_read == 2) ) - { - continue_processing(); - } - } - diff --git a/testing/btest/scripts/policy/frameworks/intel/whitelisting.zeek b/testing/btest/scripts/policy/frameworks/intel/whitelisting.zeek new file mode 100644 index 0000000000..de8e28c7d4 --- /dev/null +++ b/testing/btest/scripts/policy/frameworks/intel/whitelisting.zeek @@ -0,0 +1,39 @@ +# @TEST-EXEC: zeek -Cr $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-diff intel.log + +#@TEST-START-FILE intel.dat +#fields indicator indicator_type meta.source meta.desc meta.url +upload.wikimedia.org Intel::DOMAIN source1 somehow bad http://some-data-distributor.com/1 +meta.wikimedia.org Intel::DOMAIN source1 also bad http://some-data-distributor.com/1 +#@TEST-END-FILE + +#@TEST-START-FILE whitelist.dat +#fields indicator indicator_type meta.source meta.desc meta.whitelist meta.url +meta.wikimedia.org Intel::DOMAIN source2 also bad T http://some-data-distributor.com/1 +#@TEST-END-FILE + +@load base/frameworks/intel +@load frameworks/intel/whitelist +@load frameworks/intel/seen + +redef Intel::read_files += { + "intel.dat", + "whitelist.dat", +}; + +global total_files_read = 0; + +event zeek_init() + { + suspend_processing(); + } + +event Input::end_of_data(name: string, source: string) + { + # Wait until both intel files are read. + if ( /^intel-/ in name && (++total_files_read == 2) ) + { + continue_processing(); + } + } + diff --git a/testing/btest/scripts/policy/frameworks/netcontrol/catch-and-release-forgotten.zeek b/testing/btest/scripts/policy/frameworks/netcontrol/catch-and-release-forgotten.zeek new file mode 100644 index 0000000000..040f4e1426 --- /dev/null +++ b/testing/btest/scripts/policy/frameworks/netcontrol/catch-and-release-forgotten.zeek @@ -0,0 +1,27 @@ +# @TEST-EXEC: zeek -r $TRACES/smtp.trace %INPUT +# @TEST-EXEC: btest-diff netcontrol_catch_release.log +# @TEST-EXEC: btest-diff .stdout + +@load base/frameworks/netcontrol +@load policy/frameworks/netcontrol/catch-and-release + +redef NetControl::catch_release_intervals = vector(1sec, 2sec, 2sec); + +event NetControl::init() + { + local netcontrol_debug = NetControl::create_debug(T); + NetControl::activate(netcontrol_debug, 0); + } + +global pc: count = 0; + +event new_packet(c: connection, p: pkt_hdr) + { + if ( ++pc == 1 ) + NetControl::drop_address_catch_release(10.0.0.1); + } + +event NetControl::catch_release_forgotten(a: addr, bi: NetControl::BlockInfo) + { + print "Forgotten: ", a, bi; + } diff --git a/testing/btest/scripts/policy/frameworks/netcontrol/catch-and-release.zeek b/testing/btest/scripts/policy/frameworks/netcontrol/catch-and-release.zeek new file mode 100644 index 0000000000..433be6a593 --- /dev/null +++ b/testing/btest/scripts/policy/frameworks/netcontrol/catch-and-release.zeek @@ -0,0 +1,63 @@ +# @TEST-EXEC: zeek -r $TRACES/tls/ecdhe.pcap %INPUT +# @TEST-EXEC: TEST_DIFF_CANONIFIER='grep -v ^# | $SCRIPTS/diff-remove-timestamps' btest-diff netcontrol.log +# @TEST-EXEC: btest-diff netcontrol_catch_release.log + +@load base/frameworks/netcontrol +@load policy/frameworks/netcontrol/catch-and-release + +event NetControl::init() + { + local netcontrol_debug = NetControl::create_debug(T); + NetControl::activate(netcontrol_debug, 0); + } + +global i: count = 0; + +event connection_established(c: connection) + { + local id = c$id; + NetControl::drop_address_catch_release(id$orig_h); + # second one should be ignored because duplicate + NetControl::drop_address_catch_release(id$orig_h); + } + +event 
NetControl::rule_added(r: NetControl::Rule, p: NetControl::PluginState, msg: string &default="") + { + if ( ++i == 6 ) + return; + + # delete directly, without notifying anything. + NetControl::delete_rule(r$id, "testing"); + NetControl::catch_release_seen(subnet_to_addr(r$entity$ip)); + } + +@TEST-START-NEXT + +@load base/frameworks/netcontrol +@load policy/frameworks/netcontrol/catch-and-release + +event NetControl::init() + { + local netcontrol_debug = NetControl::create_debug(T); + NetControl::activate(netcontrol_debug, 0); + } + +global i: count = 0; + +event connection_established(c: connection) + { + local id = c$id; + NetControl::drop_address(id$orig_h, 2min); + NetControl::drop_address_catch_release(id$orig_h, "test drop"); + } + +event NetControl::rule_added(r: NetControl::Rule, p: NetControl::PluginState, msg: string &default="") + { + if ( ++i == 3 ) + return; + + # delete directly, without notifying anything. + NetControl::delete_rule(r$id); + NetControl::catch_release_seen(subnet_to_addr(r$entity$ip)); + } + diff --git a/testing/btest/scripts/policy/frameworks/software/version-changes.bro b/testing/btest/scripts/policy/frameworks/software/version-changes.bro deleted file mode 100644 index c6d2433236..0000000000 --- a/testing/btest/scripts/policy/frameworks/software/version-changes.bro +++ /dev/null @@ -1,40 +0,0 @@ -# @TEST-EXEC: bro -b %INPUT -# @TEST-EXEC: btest-diff software.log -# @TEST-EXEC: btest-diff notice.log - -@load base/frameworks/software -@load policy/frameworks/software/version-changes - -const fake_software_name = "my_fake_software"; -redef Software::asset_tracking = ALL_HOSTS; -redef Software::interesting_version_changes += {fake_software_name}; - -global versions: vector of string = vector("1.0.0", "1.1.0", "1.2.0", "1.0.0"); -global version_index = 0; -global c = 0; - -event new_software() - { - local v = versions[version_index]; - local cid = conn_id($orig_h = 127.0.0.1, $orig_p = 22/tcp, - $resp_h = 127.0.0.1, $resp_p = 22/tcp); - local si = Software::Info($name=fake_software_name, - $unparsed_version=fmt("%s %s", - fake_software_name, v), - $host=127.0.0.1); - Software::found(cid, si); - - ++version_index; - ++c; - - if ( version_index >= |versions| ) - version_index = 0; - - if ( c < 10 ) - event new_software(); - } - -event bro_init() - { - event new_software(); - } diff --git a/testing/btest/scripts/policy/frameworks/software/version-changes.zeek b/testing/btest/scripts/policy/frameworks/software/version-changes.zeek new file mode 100644 index 0000000000..9f168fb502 --- /dev/null +++ b/testing/btest/scripts/policy/frameworks/software/version-changes.zeek @@ -0,0 +1,40 @@ +# @TEST-EXEC: zeek -b %INPUT +# @TEST-EXEC: btest-diff software.log +# @TEST-EXEC: btest-diff notice.log + +@load base/frameworks/software +@load policy/frameworks/software/version-changes + +const fake_software_name = "my_fake_software"; +redef Software::asset_tracking = ALL_HOSTS; +redef Software::interesting_version_changes += {fake_software_name}; + +global versions: vector of string = vector("1.0.0", "1.1.0", "1.2.0", "1.0.0"); +global version_index = 0; +global c = 0; + +event new_software() + { + local v = versions[version_index]; + local cid = conn_id($orig_h = 127.0.0.1, $orig_p = 22/tcp, + $resp_h = 127.0.0.1, $resp_p = 22/tcp); + local si = Software::Info($name=fake_software_name, + $unparsed_version=fmt("%s %s", + fake_software_name, v), + $host=127.0.0.1); + Software::found(cid, si); + + ++version_index; + ++c; + + if ( version_index >= |versions| ) + version_index = 0; 
+ + if ( c < 10 ) + event new_software(); + } + +event zeek_init() + { + event new_software(); + } diff --git a/testing/btest/scripts/policy/frameworks/software/vulnerable.bro b/testing/btest/scripts/policy/frameworks/software/vulnerable.bro deleted file mode 100644 index 2ea7009a21..0000000000 --- a/testing/btest/scripts/policy/frameworks/software/vulnerable.bro +++ /dev/null @@ -1,23 +0,0 @@ -# @TEST-EXEC: bro %INPUT -# @TEST-EXEC: btest-diff notice.log - -@load frameworks/software/vulnerable - -redef Software::asset_tracking = ALL_HOSTS; - -global java_1_6_vuln: Software::VulnerableVersionRange = [$max=[$major=1,$minor=6,$minor2=0,$minor3=43]]; -global java_1_7_vuln: Software::VulnerableVersionRange = [$min=[$major=1,$minor=7], $max=[$major=1,$minor=7,$minor2=0,$minor3=20]]; -redef Software::vulnerable_versions += { - ["Java"] = set(java_1_6_vuln, java_1_7_vuln) -}; - -event bro_init() - { - Software::found([$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=4.3.2.1, $resp_p=80/tcp], - [$name="Java", $host=1.2.3.4, $version=[$major=1, $minor=7, $minor2=0, $minor3=15]]); - Software::found([$orig_h=1.2.3.5, $orig_p=1234/tcp, $resp_h=4.3.2.1, $resp_p=80/tcp], - [$name="Java", $host=1.2.3.5, $version=[$major=1, $minor=6, $minor2=0, $minor3=43]]); - Software::found([$orig_h=1.2.3.6, $orig_p=1234/tcp, $resp_h=4.3.2.1, $resp_p=80/tcp], - [$name="Java", $host=1.2.3.6, $version=[$major=1, $minor=6, $minor2=0, $minor3=50]]); - - } diff --git a/testing/btest/scripts/policy/frameworks/software/vulnerable.zeek b/testing/btest/scripts/policy/frameworks/software/vulnerable.zeek new file mode 100644 index 0000000000..4d36bbf3f4 --- /dev/null +++ b/testing/btest/scripts/policy/frameworks/software/vulnerable.zeek @@ -0,0 +1,23 @@ +# @TEST-EXEC: zeek %INPUT +# @TEST-EXEC: btest-diff notice.log + +@load frameworks/software/vulnerable + +redef Software::asset_tracking = ALL_HOSTS; + +global java_1_6_vuln: Software::VulnerableVersionRange = [$max=[$major=1,$minor=6,$minor2=0,$minor3=43]]; +global java_1_7_vuln: Software::VulnerableVersionRange = [$min=[$major=1,$minor=7], $max=[$major=1,$minor=7,$minor2=0,$minor3=20]]; +redef Software::vulnerable_versions += { + ["Java"] = set(java_1_6_vuln, java_1_7_vuln) +}; + +event zeek_init() + { + Software::found([$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=4.3.2.1, $resp_p=80/tcp], + [$name="Java", $host=1.2.3.4, $version=[$major=1, $minor=7, $minor2=0, $minor3=15]]); + Software::found([$orig_h=1.2.3.5, $orig_p=1234/tcp, $resp_h=4.3.2.1, $resp_p=80/tcp], + [$name="Java", $host=1.2.3.5, $version=[$major=1, $minor=6, $minor2=0, $minor3=43]]); + Software::found([$orig_h=1.2.3.6, $orig_p=1234/tcp, $resp_h=4.3.2.1, $resp_p=80/tcp], + [$name="Java", $host=1.2.3.6, $version=[$major=1, $minor=6, $minor2=0, $minor3=50]]); + + } diff --git a/testing/btest/scripts/policy/misc/dump-events.bro b/testing/btest/scripts/policy/misc/dump-events.bro deleted file mode 100644 index 33c9c97534..0000000000 --- a/testing/btest/scripts/policy/misc/dump-events.bro +++ /dev/null @@ -1,18 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/smtp.trace policy/misc/dump-events.bro %INPUT >all-events.log -# @TEST-EXEC: bro -r $TRACES/smtp.trace policy/misc/dump-events.bro %INPUT DumpEvents::include_args=F >all-events-no-args.log -# @TEST-EXEC: bro -r $TRACES/smtp.trace policy/misc/dump-events.bro %INPUT DumpEvents::include=/smtp_/ >smtp-events.log -# -# @TEST-EXEC: btest-diff all-events.log -# @TEST-EXEC: btest-diff all-events-no-args.log -# @TEST-EXEC: btest-diff smtp-events.log - -# There is some kind of race condition 
between the MD5 and SHA1 events, which are added -# by the SSL parser. Just remove MD5, this is not important for this test. - -event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=-5 - { - if ( ! c?$ssl ) - return; - - Files::remove_analyzer(f, Files::ANALYZER_MD5); - } diff --git a/testing/btest/scripts/policy/misc/dump-events.zeek b/testing/btest/scripts/policy/misc/dump-events.zeek new file mode 100644 index 0000000000..bc017c6533 --- /dev/null +++ b/testing/btest/scripts/policy/misc/dump-events.zeek @@ -0,0 +1,18 @@ +# @TEST-EXEC: zeek -r $TRACES/smtp.trace policy/misc/dump-events %INPUT >all-events.log +# @TEST-EXEC: zeek -r $TRACES/smtp.trace policy/misc/dump-events %INPUT DumpEvents::include_args=F >all-events-no-args.log +# @TEST-EXEC: zeek -r $TRACES/smtp.trace policy/misc/dump-events %INPUT DumpEvents::include=/smtp_/ >smtp-events.log +# +# @TEST-EXEC: btest-diff all-events.log +# @TEST-EXEC: btest-diff all-events-no-args.log +# @TEST-EXEC: btest-diff smtp-events.log + +# There is some kind of race condition between the MD5 and SHA1 events, which are added +# by the SSL parser. Just remove MD5, this is not important for this test. + +event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=-5 + { + if ( ! c?$ssl ) + return; + + Files::remove_analyzer(f, Files::ANALYZER_MD5); + } diff --git a/testing/btest/scripts/policy/misc/weird-stats-cluster.bro b/testing/btest/scripts/policy/misc/weird-stats-cluster.bro deleted file mode 100644 index 140bb3b006..0000000000 --- a/testing/btest/scripts/policy/misc/weird-stats-cluster.bro +++ /dev/null @@ -1,86 +0,0 @@ -# @TEST-PORT: BROKER_PORT1 -# @TEST-PORT: BROKER_PORT2 -# @TEST-PORT: BROKER_PORT3 -# -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. 
CLUSTER_NODE=worker-2 bro %INPUT -# @TEST-EXEC: btest-bg-wait 20 - -# @TEST-EXEC: btest-diff manager-1/weird_stats.log - -@TEST-START-FILE cluster-layout.bro -redef Cluster::nodes = { - ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], - ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], - ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], -}; -@TEST-END-FILE - -@load misc/weird-stats - -redef Cluster::retry_interval = 1sec; -redef Broker::default_listen_retry = 1sec; -redef Broker::default_connect_retry = 1sec; - -redef Log::enable_local_logging = T; -redef Log::default_rotation_interval = 0secs; -redef WeirdStats::weird_stat_interval = 5secs; - -event terminate_me() - { - terminate(); - } - -event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) - { - terminate(); - } - -event ready_again() - { - Reporter::net_weird("weird1"); - schedule 5secs { terminate_me() }; - } - -event ready_for_data() - { - local n = 0; - - if ( Cluster::node == "worker-1" ) - { - while ( n < 1000 ) - { - Reporter::net_weird("weird1"); - ++n; - } - - Reporter::net_weird("weird3"); - } - else if ( Cluster::node == "worker-2" ) - { - while ( n < 1000 ) - { - Reporter::net_weird("weird1"); - Reporter::net_weird("weird2"); - ++n; - } - } - - schedule 5secs { ready_again() }; - } - - -@if ( Cluster::local_node_type() == Cluster::MANAGER ) - -global peer_count = 0; - -event Cluster::node_up(name: string, id: string) - { - ++peer_count; - - if ( peer_count == 2 ) - Broker::publish(Cluster::worker_topic, ready_for_data); - } - -@endif diff --git a/testing/btest/scripts/policy/misc/weird-stats-cluster.zeek b/testing/btest/scripts/policy/misc/weird-stats-cluster.zeek new file mode 100644 index 0000000000..c0e83f08ed --- /dev/null +++ b/testing/btest/scripts/policy/misc/weird-stats-cluster.zeek @@ -0,0 +1,86 @@ +# @TEST-PORT: BROKER_PORT1 +# @TEST-PORT: BROKER_PORT2 +# @TEST-PORT: BROKER_PORT3 +# +# @TEST-EXEC: btest-bg-run manager-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=manager-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-1 ZEEKPATH=$ZEEKPATH:.. CLUSTER_NODE=worker-1 zeek %INPUT +# @TEST-EXEC: btest-bg-run worker-2 ZEEKPATH=$ZEEKPATH:.. 
CLUSTER_NODE=worker-2 zeek %INPUT +# @TEST-EXEC: btest-bg-wait 20 + +# @TEST-EXEC: btest-diff manager-1/weird_stats.log + +@TEST-START-FILE cluster-layout.zeek +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT1"))], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT2")), $manager="manager-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=to_port(getenv("BROKER_PORT3")), $manager="manager-1", $interface="eth1"], +}; +@TEST-END-FILE + +@load misc/weird-stats + +redef Cluster::retry_interval = 1sec; +redef Broker::default_listen_retry = 1sec; +redef Broker::default_connect_retry = 1sec; + +redef Log::enable_local_logging = T; +redef Log::default_rotation_interval = 0secs; +redef WeirdStats::weird_stat_interval = 5secs; + +event terminate_me() + { + terminate(); + } + +event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) + { + terminate(); + } + +event ready_again() + { + Reporter::net_weird("weird1"); + schedule 5secs { terminate_me() }; + } + +event ready_for_data() + { + local n = 0; + + if ( Cluster::node == "worker-1" ) + { + while ( n < 1000 ) + { + Reporter::net_weird("weird1"); + ++n; + } + + Reporter::net_weird("weird3"); + } + else if ( Cluster::node == "worker-2" ) + { + while ( n < 1000 ) + { + Reporter::net_weird("weird1"); + Reporter::net_weird("weird2"); + ++n; + } + } + + schedule 5secs { ready_again() }; + } + + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + +global peer_count = 0; + +event Cluster::node_up(name: string, id: string) + { + ++peer_count; + + if ( peer_count == 2 ) + Broker::publish(Cluster::worker_topic, ready_for_data); + } + +@endif diff --git a/testing/btest/scripts/policy/misc/weird-stats.bro b/testing/btest/scripts/policy/misc/weird-stats.bro deleted file mode 100644 index b26fce8e47..0000000000 --- a/testing/btest/scripts/policy/misc/weird-stats.bro +++ /dev/null @@ -1,32 +0,0 @@ -# @TEST-EXEC: btest-bg-run bro bro %INPUT -# @TEST-EXEC: btest-bg-wait 20 -# @TEST-EXEC: btest-diff bro/weird_stats.log - -@load misc/weird-stats.bro - -redef exit_only_after_terminate = T; -redef WeirdStats::weird_stat_interval = 5sec; - -event die() - { - terminate(); - } - -event gen_weirds(n: count, done: bool &default = F) - { - while ( n != 0 ) - { - Reporter::net_weird("my_weird"); - --n; - } - - if ( done ) - schedule 5sec { die() }; - } - -event bro_init() - { - event gen_weirds(1000); - schedule 7.5sec { gen_weirds(2000) } ; - schedule 12.5sec { gen_weirds(10, T) } ; - } diff --git a/testing/btest/scripts/policy/misc/weird-stats.zeek b/testing/btest/scripts/policy/misc/weird-stats.zeek new file mode 100644 index 0000000000..0caeb960fe --- /dev/null +++ b/testing/btest/scripts/policy/misc/weird-stats.zeek @@ -0,0 +1,32 @@ +# @TEST-EXEC: btest-bg-run zeek zeek %INPUT +# @TEST-EXEC: btest-bg-wait 20 +# @TEST-EXEC: btest-diff zeek/weird_stats.log + +@load misc/weird-stats + +redef exit_only_after_terminate = T; +redef WeirdStats::weird_stat_interval = 5sec; + +event die() + { + terminate(); + } + +event gen_weirds(n: count, done: bool &default = F) + { + while ( n != 0 ) + { + Reporter::net_weird("my_weird"); + --n; + } + + if ( done ) + schedule 5sec { die() }; + } + +event zeek_init() + { + event gen_weirds(1000); + schedule 7.5sec { gen_weirds(2000) } ; + schedule 12.5sec { gen_weirds(10, T) } ; + } diff --git a/testing/btest/scripts/policy/protocols/conn/known-hosts.bro 
b/testing/btest/scripts/policy/protocols/conn/known-hosts.bro deleted file mode 100644 index 677cfa9f3d..0000000000 --- a/testing/btest/scripts/policy/protocols/conn/known-hosts.bro +++ /dev/null @@ -1,20 +0,0 @@ -# A basic test of the known-hosts script's logging and asset_tracking options - -# @TEST-EXEC: bro -r $TRACES/wikipedia.trace %INPUT Known::host_tracking=LOCAL_HOSTS -# @TEST-EXEC: mv known_hosts.log knownhosts-local.log -# @TEST-EXEC: btest-diff knownhosts-local.log - -# @TEST-EXEC: bro -r $TRACES/wikipedia.trace %INPUT Known::host_tracking=REMOTE_HOSTS -# @TEST-EXEC: mv known_hosts.log knownhosts-remote.log -# @TEST-EXEC: btest-diff knownhosts-remote.log - -# @TEST-EXEC: bro -r $TRACES/wikipedia.trace %INPUT Known::host_tracking=ALL_HOSTS -# @TEST-EXEC: mv known_hosts.log knownhosts-all.log -# @TEST-EXEC: btest-diff knownhosts-all.log - -# @TEST-EXEC: bro -r $TRACES/wikipedia.trace %INPUT Known::host_tracking=NO_HOSTS -# @TEST-EXEC: test '!' -e known_hosts.log - -@load protocols/conn/known-hosts - -redef Site::local_nets += {141.142.0.0/16}; diff --git a/testing/btest/scripts/policy/protocols/conn/known-hosts.zeek b/testing/btest/scripts/policy/protocols/conn/known-hosts.zeek new file mode 100644 index 0000000000..cdb3fa5058 --- /dev/null +++ b/testing/btest/scripts/policy/protocols/conn/known-hosts.zeek @@ -0,0 +1,20 @@ +# A basic test of the known-hosts script's logging and asset_tracking options + +# @TEST-EXEC: zeek -r $TRACES/wikipedia.trace %INPUT Known::host_tracking=LOCAL_HOSTS +# @TEST-EXEC: mv known_hosts.log knownhosts-local.log +# @TEST-EXEC: btest-diff knownhosts-local.log + +# @TEST-EXEC: zeek -r $TRACES/wikipedia.trace %INPUT Known::host_tracking=REMOTE_HOSTS +# @TEST-EXEC: mv known_hosts.log knownhosts-remote.log +# @TEST-EXEC: btest-diff knownhosts-remote.log + +# @TEST-EXEC: zeek -r $TRACES/wikipedia.trace %INPUT Known::host_tracking=ALL_HOSTS +# @TEST-EXEC: mv known_hosts.log knownhosts-all.log +# @TEST-EXEC: btest-diff knownhosts-all.log + +# @TEST-EXEC: zeek -r $TRACES/wikipedia.trace %INPUT Known::host_tracking=NO_HOSTS +# @TEST-EXEC: test '!' -e known_hosts.log + +@load protocols/conn/known-hosts + +redef Site::local_nets += {141.142.0.0/16}; diff --git a/testing/btest/scripts/policy/protocols/conn/known-services.bro b/testing/btest/scripts/policy/protocols/conn/known-services.bro deleted file mode 100644 index ab787b6bd4..0000000000 --- a/testing/btest/scripts/policy/protocols/conn/known-services.bro +++ /dev/null @@ -1,20 +0,0 @@ -# A basic test of the known-services script's logging and asset_tracking options - -# @TEST-EXEC: bro -r $TRACES/var-services-std-ports.trace %INPUT Known::service_tracking=LOCAL_HOSTS -# @TEST-EXEC: mv known_services.log knownservices-local.log -# @TEST-EXEC: btest-diff knownservices-local.log - -# @TEST-EXEC: bro -r $TRACES/var-services-std-ports.trace %INPUT Known::service_tracking=REMOTE_HOSTS -# @TEST-EXEC: mv known_services.log knownservices-remote.log -# @TEST-EXEC: btest-diff knownservices-remote.log - -# @TEST-EXEC: bro -r $TRACES/var-services-std-ports.trace %INPUT Known::service_tracking=ALL_HOSTS -# @TEST-EXEC: mv known_services.log knownservices-all.log -# @TEST-EXEC: btest-diff knownservices-all.log - -# @TEST-EXEC: bro -r $TRACES/var-services-std-ports.trace %INPUT Known::service_tracking=NO_HOSTS -# @TEST-EXEC: test '!' 
-e known_services.log - -@load protocols/conn/known-services - -redef Site::local_nets += {172.16.238.0/24}; diff --git a/testing/btest/scripts/policy/protocols/conn/known-services.zeek b/testing/btest/scripts/policy/protocols/conn/known-services.zeek new file mode 100644 index 0000000000..3c34adadc9 --- /dev/null +++ b/testing/btest/scripts/policy/protocols/conn/known-services.zeek @@ -0,0 +1,20 @@ +# A basic test of the known-services script's logging and asset_tracking options + +# @TEST-EXEC: zeek -r $TRACES/var-services-std-ports.trace %INPUT Known::service_tracking=LOCAL_HOSTS +# @TEST-EXEC: mv known_services.log knownservices-local.log +# @TEST-EXEC: btest-diff knownservices-local.log + +# @TEST-EXEC: zeek -r $TRACES/var-services-std-ports.trace %INPUT Known::service_tracking=REMOTE_HOSTS +# @TEST-EXEC: mv known_services.log knownservices-remote.log +# @TEST-EXEC: btest-diff knownservices-remote.log + +# @TEST-EXEC: zeek -r $TRACES/var-services-std-ports.trace %INPUT Known::service_tracking=ALL_HOSTS +# @TEST-EXEC: mv known_services.log knownservices-all.log +# @TEST-EXEC: btest-diff knownservices-all.log + +# @TEST-EXEC: zeek -r $TRACES/var-services-std-ports.trace %INPUT Known::service_tracking=NO_HOSTS +# @TEST-EXEC: test '!' -e known_services.log + +@load protocols/conn/known-services + +redef Site::local_nets += {172.16.238.0/24}; diff --git a/testing/btest/scripts/policy/protocols/conn/mac-logging.bro b/testing/btest/scripts/policy/protocols/conn/mac-logging.bro deleted file mode 100644 index a3cfbf768f..0000000000 --- a/testing/btest/scripts/policy/protocols/conn/mac-logging.bro +++ /dev/null @@ -1,14 +0,0 @@ -# A basic test of the mac logging script - -# @TEST-EXEC: bro -b -C -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: mv conn.log conn1.log -# @TEST-EXEC: bro -b -C -r $TRACES/radiotap.pcap %INPUT -# @TEST-EXEC: mv conn.log conn2.log -# @TEST-EXEC: bro -b -C -r $TRACES/llc.pcap %INPUT -# @TEST-EXEC: mv conn.log conn3.log -# -# @TEST-EXEC: btest-diff conn1.log -# @TEST-EXEC: btest-diff conn2.log -# @TEST-EXEC: btest-diff conn3.log - -@load protocols/conn/mac-logging diff --git a/testing/btest/scripts/policy/protocols/conn/mac-logging.zeek b/testing/btest/scripts/policy/protocols/conn/mac-logging.zeek new file mode 100644 index 0000000000..78b1ce9f4c --- /dev/null +++ b/testing/btest/scripts/policy/protocols/conn/mac-logging.zeek @@ -0,0 +1,14 @@ +# A basic test of the mac logging script + +# @TEST-EXEC: zeek -b -C -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: mv conn.log conn1.log +# @TEST-EXEC: zeek -b -C -r $TRACES/radiotap.pcap %INPUT +# @TEST-EXEC: mv conn.log conn2.log +# @TEST-EXEC: zeek -b -C -r $TRACES/llc.pcap %INPUT +# @TEST-EXEC: mv conn.log conn3.log +# +# @TEST-EXEC: btest-diff conn1.log +# @TEST-EXEC: btest-diff conn2.log +# @TEST-EXEC: btest-diff conn3.log + +@load protocols/conn/mac-logging diff --git a/testing/btest/scripts/policy/protocols/conn/vlan-logging.bro b/testing/btest/scripts/policy/protocols/conn/vlan-logging.bro deleted file mode 100644 index 1711eba71d..0000000000 --- a/testing/btest/scripts/policy/protocols/conn/vlan-logging.bro +++ /dev/null @@ -1,6 +0,0 @@ -# A basic test of the vlan logging script - -# @TEST-EXEC: bro -r $TRACES/q-in-q.trace %INPUT -# @TEST-EXEC: btest-diff conn.log - -@load protocols/conn/vlan-logging diff --git a/testing/btest/scripts/policy/protocols/conn/vlan-logging.zeek b/testing/btest/scripts/policy/protocols/conn/vlan-logging.zeek new file mode 100644 index 0000000000..6ee809af52 --- /dev/null +++ 
b/testing/btest/scripts/policy/protocols/conn/vlan-logging.zeek @@ -0,0 +1,6 @@ +# A basic test of the vlan logging script + +# @TEST-EXEC: zeek -r $TRACES/q-in-q.trace %INPUT +# @TEST-EXEC: btest-diff conn.log + +@load protocols/conn/vlan-logging diff --git a/testing/btest/scripts/policy/protocols/dns/inverse-request.bro b/testing/btest/scripts/policy/protocols/dns/inverse-request.bro deleted file mode 100644 index d695060707..0000000000 --- a/testing/btest/scripts/policy/protocols/dns/inverse-request.bro +++ /dev/null @@ -1,4 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/dns-inverse-query.trace %INPUT -# @TEST-EXEC: test ! -e dns.log - -@load protocols/dns/auth-addl diff --git a/testing/btest/scripts/policy/protocols/dns/inverse-request.zeek b/testing/btest/scripts/policy/protocols/dns/inverse-request.zeek new file mode 100644 index 0000000000..770386072c --- /dev/null +++ b/testing/btest/scripts/policy/protocols/dns/inverse-request.zeek @@ -0,0 +1,4 @@ +# @TEST-EXEC: zeek -r $TRACES/dns-inverse-query.trace %INPUT +# @TEST-EXEC: test ! -e dns.log + +@load protocols/dns/auth-addl diff --git a/testing/btest/scripts/policy/protocols/http/flash-version.bro b/testing/btest/scripts/policy/protocols/http/flash-version.bro deleted file mode 100644 index 9357295c3c..0000000000 --- a/testing/btest/scripts/policy/protocols/http/flash-version.bro +++ /dev/null @@ -1,8 +0,0 @@ -# @TEST-EXEC: bro -r ${TRACES}/http/flash-version.trace %INPUT -# @TEST-EXEC: btest-diff software.log - -@load protocols/http/software -@load protocols/http/software-browser-plugins - -redef Software::asset_tracking = ALL_HOSTS; - diff --git a/testing/btest/scripts/policy/protocols/http/flash-version.zeek b/testing/btest/scripts/policy/protocols/http/flash-version.zeek new file mode 100644 index 0000000000..e2ad2ebf3b --- /dev/null +++ b/testing/btest/scripts/policy/protocols/http/flash-version.zeek @@ -0,0 +1,8 @@ +# @TEST-EXEC: zeek -r ${TRACES}/http/flash-version.trace %INPUT +# @TEST-EXEC: btest-diff software.log + +@load protocols/http/software +@load protocols/http/software-browser-plugins + +redef Software::asset_tracking = ALL_HOSTS; + diff --git a/testing/btest/scripts/policy/protocols/http/header-names.bro b/testing/btest/scripts/policy/protocols/http/header-names.bro deleted file mode 100644 index 30b1de7fdb..0000000000 --- a/testing/btest/scripts/policy/protocols/http/header-names.bro +++ /dev/null @@ -1,5 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/wikipedia.trace %INPUT -# @TEST-EXEC: btest-diff http.log - -@load protocols/http/header-names -redef HTTP::log_server_header_names=T; diff --git a/testing/btest/scripts/policy/protocols/http/header-names.zeek b/testing/btest/scripts/policy/protocols/http/header-names.zeek new file mode 100644 index 0000000000..5422c8e9e2 --- /dev/null +++ b/testing/btest/scripts/policy/protocols/http/header-names.zeek @@ -0,0 +1,5 @@ +# @TEST-EXEC: zeek -r $TRACES/wikipedia.trace %INPUT +# @TEST-EXEC: btest-diff http.log + +@load protocols/http/header-names +redef HTTP::log_server_header_names=T; diff --git a/testing/btest/scripts/policy/protocols/http/test-sql-injection-regex.bro b/testing/btest/scripts/policy/protocols/http/test-sql-injection-regex.bro deleted file mode 100644 index 2e82eb9dfb..0000000000 --- a/testing/btest/scripts/policy/protocols/http/test-sql-injection-regex.bro +++ /dev/null @@ -1,113 +0,0 @@ -# @TEST-EXEC: bro %INPUT > output -# @TEST-EXEC: btest-diff output - -@load protocols/http/detect-sqli - -event bro_init () -{ - local positive_matches: set[string]; - local 
negative_matches: set[string]; - - add positive_matches["/index.asp?ID='+convert(int,convert(varchar,0x7b5d))+'"]; - add positive_matches["/index.asp?ID='+cASt(somefield as int)+'"]; - add positive_matches["/index.asp?ID=1'+139+'0"]; - add positive_matches["/index.asp?ID='+139+'0"]; - add positive_matches["/index.php?blah=123'/*blooblah*/;select * from something;--"]; - add positive_matches["/index.cfm?ID=3%' and '%'='"]; - add positive_matches["/index.php?mac=\" OR whatever LIKE \"%"]; - add positive_matches["/index.cfm?ID=3;declare @d int;--"]; - add positive_matches["/index.cfm?subjID=12;create table t_jiaozhu(jiaozhu varchar(200))"]; - add positive_matches["/index.cfm?subjID=12%' and(char(94)+user+char(94))>0 and '%'='"]; - add positive_matches["/index.cgi?cgi_state=view&ARF_ID=1+(642*truncate(log10(10),0))"]; - add positive_matches["/index.cgi?view=1 regexp IF((ascii(substring(version(),6,1))>>(0)&1),char(42),1) AND 1=1"]; - add positive_matches["/index.cfm?News=203 and char(124)+db_name()+char(124)=0 --"]; - add positive_matches["/index.php?action=&type=view&s=&id=-1' UNION SELECT 0,252381211,0,0,0,0,0/*"]; - add positive_matches["/index.php?x=browse&category='UNION SELECT '1','2','pixelpost_category_sql_injection.nasl','1183412908','5'/*"]; - add positive_matches["/index.php?id='UNION/**/SELECT/**/0,0,1648909705,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0/*"]; - add positive_matches["/index.php?id=-1/**/UNION/**/ALL/**/SELECT/**/1,0x7430705038755A7A20616E64207870726F67206F776E616765,convert(concat((SELECT/**/svalue/**/from/**/sconfig/**/where/**/soption=0x61646D696E5F6E616D65),0x3a,(SELECT/**/svalue/**/from/**/sconfig/**/where/**/soption=0x61646D696E5F70617373))/**/using/**/latin1),4,5,6,7,8,9/*"]; - add positive_matches["/index.jsp?arfID=5 AND ascii(lower(substring((SELECT TOP 1 name from sysobjects WHERE xtype=â™Uâ™), 1,1)))>109"]; - add positive_matches["/?main_menu=10&sub_menu=2&id=-1 union select aes_decrypt(aes_encrypt(LOAD_FILE('/etc/passwd'),0x70),0x70)/*"]; - add positive_matches["/index.asp?file=50' and 1=1 and ''='"]; - add positive_matches["/index.php?cat=999 UNION SELECT null,CONCAT(666,CHAR(58),user_pass,CHAR(58),666,CHAR(58)),null,null,null FROM wp_users where id=1/*"]; - add positive_matches["/index.asp?authornumber=1);insert into SubjectTable(Sub_id, SubjectName, display) values (666, 'ChkQualysRprt', 1); --"]; - add positive_matches["/index.php?ID=60 and (select unicode(substring(isNull(cast(db_name() as varchar(8000)),char(32)),29,1)))"]; - add positive_matches["/index.php?sort=all&&active=NO' union select 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0/* and '1'='1"]; - add positive_matches["/index.php?sort=all&&active=no' and 1=2 union select 1,'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1/* and '1'='1"]; - add positive_matches["/index.php?sort=all&&active=no' and (select count(table_name) from user_tables)>0 and '1'='1"]; - add positive_matches["/index.php?id=22 /*!49999 and 1=2*/-- and 1=1"]; - add positive_matches["/index.php?ID=59 and (select count(table_name) from user_tables)>0 and 1=1"]; - add positive_matches["/index.php?ID=60 and exists (select * from [news])"]; - - # These are not detected currently. 
- #add positive_matches["/index.asp?ARF_ID=(1/(1-(asc(mid(now(),18,1))\(2^7) mod 2)))"]; - #add positive_matches["/index.php' and 1=convert(int,(select top 1 table_name from information_schema.tables))--sp_password"]; - #add positive_matches["/index.php?id=873 and user=0--"]; - #add positive_matches["?id=1;+if+(1=1)+waitfor+delay+'00:00:01'--9"]; - #add positive_matches["?id=1+and+if(1=1,BENCHMARK(728000,MD5(0x41)),0)9"]; - - # The positive_matches below are from the mod_security evasion challenge. - # All supported attacks are uncommented. - # http://blog.spiderlabs.com/2011/07/modsecurity-sql-injection-challenge-lessons-learned.html - add positive_matches["/index.asp?id=100&arftype=46' XoR '8'='8"]; - #add positive_matches[unescape_URI("/testphp.vulnweb.com/artists.php?artist=0+div+1+union%23foo*%2F*bar%0D%0Aselect%23foo%0D%0A1%2C2%2Ccurrent_user")]; - #add positive_matches[unescape_URI("/index.php?hUserId=22768&FromDate=a1%27+or&ToDate=%3C%3Eamount+and%27&sendbutton1=Get+Statement")]; - #add positive_matches["after=1 AND (select DCount(last(username)&after=1&after=1) from users where username='ad1min')&before=d"]; - #add positive_matches["hUserId=22768&FromDate=1&ToDate=1'UNION/*!0SELECT user,2,3,4,5,6,7,8,9/*!0from/*!0mysql.user/*-&sendbutton1=Get+Statement"]; - add positive_matches[unescape_URI("/test.php?artist=-2%20div%201%20union%20all%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah
%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsf
af%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaafv%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%0Aselect%200x00,%200x41%20like/*!31337table_name*/,3%20from%20information_schema.tables%20limit%201")]; ; - #add positive_matches[unescape_URI("/test.php?artist=%40%40new%20union%23sqlmapsqlmap...%0Aselect%201,2,database%23sqlmap%0A%28%29 ")]; - add positive_matches[unescape_URI("/test.php?artist=-2%20div%201%20union%20all%23hack%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%23%0A%23fabuloso%23great%23%0Aselect%200x00%2C%200x41%20not%20like%2F*%2100000table_name*%2F%2C3%20f
rom%20information_schema.tables%20limit%201")]; - add positive_matches[unescape_URI("/test.php?artist=1%0bAND(SELECT%0b1%20FROM%20mysql.x)")]; - - add negative_matches["/index.asp?db=a9h&jid=JHE&scope=site"]; - add negative_matches["/blah/?q=?q=archive+title=Read the older content in our archive"]; - add negative_matches["/blah/?q=?q= title=Return to the main page"]; - add negative_matches["/index.pl?http://search.ebscohost.com.proxy.lib.ohio-state.edu/direct.asp?db=s3h&jid=22EG&scope=site"]; - add negative_matches["/search?q=eugene svirsky&spell=1&access=p&output=xml_no_dtd&ie=UTF-8&client=default_frontend&site=default_collection&proxystylesheet=default_frontend"]; - add negative_matches["/index.htm?List=metadata+)++((munson)metadata+)+)"]; - add negative_matches["/index?Z=300x250&s=299359&_salt=523454521`54&B=10&u=http://ad.doubleclick.net/adi/answ.science/;dcopt=ist;kw=biased+sample;tid=2735125;scat=health;scat=business;pcat=science;pos=1;tile=1;sz=300x250;csrc=2451;csrc=2191;csrc=2665;csrc=2750;or&r=0"]; - add negative_matches["/index.php?sid=FirstSearch:AveryIndex&genre=article&issn=1590-1394&isbn=&atitle=Paesaggio+artificiale:+una+cava+diventa+parco+urbano+=++Artificial+landscape:+a+quarry+becomes+an+urban+park&title=Metamorfosi&issue=66&spage=58&epage=60&date=2007-05&sici=1590-1394(200705/06)66<58:PAUCDP>2.0.TX;2-C&id=doi:&pid=858994226+858994226fsapp13-52547-fhscgzal-jqsb44&url_ver=Z39.88-2004&rfr_id=info:sid/firstsearch.oclc.org:AveryIndex&rft_val_fmt=info:ofi/fmt:kev:mtx:journal&req_dat=fsapp13-52547-fhscgzal-jqsb44&rfe_dat=858994226+858994226&rft_id=urn:ISSN:1590-1394&rft.atitle=Paesaggio+artificiale:+una+cava+diventa+parco+urbano+=++Artificial+landscape:+a+quarry+becomes+an+urban+park&rft.jtitle=Metamorfosi&rft.date=2007-05&rft.issue=66&rft.spage=58&rft.epage=60&rft.issn=1590-1394&rft.genre=article&rft.sici=1590-1394(200705/06)66<58:PAUCDP>2.0.TX;2-C"]; - add negative_matches["/index?body=linker&reqidx=00012345(2005)L.349"]; - add negative_matches["/index.jsp?SortField=Score&SortOrder=desc&ResultCount=25&maxdoc=100&coll1=&coll2=ieeecnfs&coll3=ieecnfs&coll4=&coll5=&coll6=&coll7=&coll8=&srchres=0&history=yes&queryText=((curran)metadata)&oldqrytext=(~~simon+curran~~++metadata)++(4389466++punumber)&radiobutton=cit"]; - add negative_matches["/index.php?action=uid=32651(makessc) gid=32652(makessc) groups=32652(makessc)"]; - add negative_matches["/index.cgi?t=event&id=3947&year=2007&week=13&wday=3&rt=n&hour=13&min=30&lengthmin=90&title=771 (4) Biomedical Instrumentation - J. 
Liu&data=&startyear=2007&startweek=13&startwday=3&duration=1&alval=&altype=&alchk=&strike=0&todo=0&mail=0&lock=0&priv=0"]; - add negative_matches["/index.php?site=EagleTribunePublishingCompany&adSpace=ROS&size=468x60&type=horiz&requestID='+((new Date()).getTime() 2147483648) + Math.random()+'"]; - add negative_matches["/blah?callback=google.language.callbacks.id100&context=22&q=) or articles from the online magazine archive will need to log in, in order to access the content they have purchased.&langpair=|en&key=notsupplied&v=1.0"]; - add negative_matches["/blah?hl=en&rlz=1T4DDWQ_enUS432US432&q=\"andrew+foobar\""]; - add negative_matches["/index.cfm?filename=32423411.GP4&ip=1.2.3.4&id_num=0063&proj_num=2906&sheet_name=2 AND 3 FLR&sheet_num=2E&path=L:\ARF\DATA\13000\95013889.GP4"]; - add negative_matches["/index.pl\?supersite=stations&station=ABCD&path='+location.pathname+'&'+location.search.substring(1)+'\\\"\\"]; - add negative_matches["/ntpagetag.gif?js=1&ts=123412341234.568&lc=http://a.b.org/default.aspx?mode=js#&rs=1440x900&cd=32&ln=en&tz=GMT -04:00&jv=1&ets=123412341234.623&select_challenge_from_gallery=1&ci=RCC00000000"]; - - # These are still being matched accidentally. - #add negative_matches["/A-B-C-D/inc/foobar.php?img=1179681280a b c d arf union.jpg"]; - #add negative_matches["/test,+soviet+union&searchscope=7&SORT=DZ/test,+soviet+union&foobar=7"]; - #add negative_matches["/search?hl=en&q=fee union western"]; - #add negative_matches["/search?hl=en&q=ceiling drop tile"]; - #add negative_matches["/index/hmm.gif?utmdt=Record > Create a Graph"]; - #add negative_matches["/index.php?test='||\x0aTO_CHAR(foo_bar.Foo_Bar_ID)||"]; - - print "If anything besides this line prints out, there is a problem."; - for ( test in positive_matches ) - { - if ( HTTP::match_sql_injection_uri !in test ) - print fmt("Missed: %s", test ); - } - print ""; - for ( test in negative_matches ) - { - if ( HTTP::match_sql_injection_uri in test ) - print fmt("False Positive: %s", test); - } - -} diff --git a/testing/btest/scripts/policy/protocols/http/test-sql-injection-regex.zeek b/testing/btest/scripts/policy/protocols/http/test-sql-injection-regex.zeek new file mode 100644 index 0000000000..129acde477 --- /dev/null +++ b/testing/btest/scripts/policy/protocols/http/test-sql-injection-regex.zeek @@ -0,0 +1,113 @@ +# @TEST-EXEC: zeek %INPUT > output +# @TEST-EXEC: btest-diff output + +@load protocols/http/detect-sqli + +event zeek_init() + { + local positive_matches: set[string]; + local negative_matches: set[string]; + + add positive_matches["/index.asp?ID='+convert(int,convert(varchar,0x7b5d))+'"]; + add positive_matches["/index.asp?ID='+cASt(somefield as int)+'"]; + add positive_matches["/index.asp?ID=1'+139+'0"]; + add positive_matches["/index.asp?ID='+139+'0"]; + add positive_matches["/index.php?blah=123'/*blooblah*/;select * from something;--"]; + add positive_matches["/index.cfm?ID=3%' and '%'='"]; + add positive_matches["/index.php?mac=\" OR whatever LIKE \"%"]; + add positive_matches["/index.cfm?ID=3;declare @d int;--"]; + add positive_matches["/index.cfm?subjID=12;create table t_jiaozhu(jiaozhu varchar(200))"]; + add positive_matches["/index.cfm?subjID=12%' and(char(94)+user+char(94))>0 and '%'='"]; + add positive_matches["/index.cgi?cgi_state=view&ARF_ID=1+(642*truncate(log10(10),0))"]; + add positive_matches["/index.cgi?view=1 regexp IF((ascii(substring(version(),6,1))>>(0)&1),char(42),1) AND 1=1"]; + add positive_matches["/index.cfm?News=203 and char(124)+db_name()+char(124)=0 --"]; + add 
positive_matches["/index.php?action=&type=view&s=&id=-1' UNION SELECT 0,252381211,0,0,0,0,0/*"]; + add positive_matches["/index.php?x=browse&category='UNION SELECT '1','2','pixelpost_category_sql_injection.nasl','1183412908','5'/*"]; + add positive_matches["/index.php?id='UNION/**/SELECT/**/0,0,1648909705,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0/*"]; + add positive_matches["/index.php?id=-1/**/UNION/**/ALL/**/SELECT/**/1,0x7430705038755A7A20616E64207870726F67206F776E616765,convert(concat((SELECT/**/svalue/**/from/**/sconfig/**/where/**/soption=0x61646D696E5F6E616D65),0x3a,(SELECT/**/svalue/**/from/**/sconfig/**/where/**/soption=0x61646D696E5F70617373))/**/using/**/latin1),4,5,6,7,8,9/*"]; + add positive_matches["/index.jsp?arfID=5 AND ascii(lower(substring((SELECT TOP 1 name from sysobjects WHERE xtype=â™Uâ™), 1,1)))>109"]; + add positive_matches["/?main_menu=10&sub_menu=2&id=-1 union select aes_decrypt(aes_encrypt(LOAD_FILE('/etc/passwd'),0x70),0x70)/*"]; + add positive_matches["/index.asp?file=50' and 1=1 and ''='"]; + add positive_matches["/index.php?cat=999 UNION SELECT null,CONCAT(666,CHAR(58),user_pass,CHAR(58),666,CHAR(58)),null,null,null FROM wp_users where id=1/*"]; + add positive_matches["/index.asp?authornumber=1);insert into SubjectTable(Sub_id, SubjectName, display) values (666, 'ChkQualysRprt', 1); --"]; + add positive_matches["/index.php?ID=60 and (select unicode(substring(isNull(cast(db_name() as varchar(8000)),char(32)),29,1)))"]; + add positive_matches["/index.php?sort=all&&active=NO' union select 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0/* and '1'='1"]; + add positive_matches["/index.php?sort=all&&active=no' and 1=2 union select 1,'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1/* and '1'='1"]; + add positive_matches["/index.php?sort=all&&active=no' and (select count(table_name) from user_tables)>0 and '1'='1"]; + add positive_matches["/index.php?id=22 /*!49999 and 1=2*/-- and 1=1"]; + add positive_matches["/index.php?ID=59 and (select count(table_name) from user_tables)>0 and 1=1"]; + add positive_matches["/index.php?ID=60 and exists (select * from [news])"]; + + # These are not detected currently. + #add positive_matches["/index.asp?ARF_ID=(1/(1-(asc(mid(now(),18,1))\(2^7) mod 2)))"]; + #add positive_matches["/index.php' and 1=convert(int,(select top 1 table_name from information_schema.tables))--sp_password"]; + #add positive_matches["/index.php?id=873 and user=0--"]; + #add positive_matches["?id=1;+if+(1=1)+waitfor+delay+'00:00:01'--9"]; + #add positive_matches["?id=1+and+if(1=1,BENCHMARK(728000,MD5(0x41)),0)9"]; + + # The positive_matches below are from the mod_security evasion challenge. + # All supported attacks are uncommented. 
+ # http://blog.spiderlabs.com/2011/07/modsecurity-sql-injection-challenge-lessons-learned.html + add positive_matches["/index.asp?id=100&arftype=46' XoR '8'='8"]; + #add positive_matches[unescape_URI("/testphp.vulnweb.com/artists.php?artist=0+div+1+union%23foo*%2F*bar%0D%0Aselect%23foo%0D%0A1%2C2%2Ccurrent_user")]; + #add positive_matches[unescape_URI("/index.php?hUserId=22768&FromDate=a1%27+or&ToDate=%3C%3Eamount+and%27&sendbutton1=Get+Statement")]; + #add positive_matches["after=1 AND (select DCount(last(username)&after=1&after=1) from users where username='ad1min')&before=d"]; + #add positive_matches["hUserId=22768&FromDate=1&ToDate=1'UNION/*!0SELECT user,2,3,4,5,6,7,8,9/*!0from/*!0mysql.user/*-&sendbutton1=Get+Statement"]; + add positive_matches[unescape_URI("/test.php?artist=-2%20div%201%20union%20all%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsa
fdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yea%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%23yeaah%0A%23yeah%20babc%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23f
afsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaf%23%0A%23fdsafdsafa%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaafv%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%23fafsfaaf%0Aselect%200x00,%200x41%20like/*!31337table_name*/,3%20from%20information_schema.tables%20limit%201")]; ; + #add positive_matches[unescape_URI("/test.php?artist=%40%40new%20union%23sqlmapsqlmap...%0Aselect%201,2,database%23sqlmap%0A%28%29 ")]; + add positive_matches[unescape_URI("/test.php?artist=-2%20div%201%20union%20all%23hack%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23hpys%20player%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%0A%23fabuloso%23modsec%0A%23hpys%20player%0A%23fabuloso%23great%23%0A%23fabuloso%23great%23%0Aselect%200x00%2C%200x41%20not%20like%2F*%2100000table_name*%2F%2C3%20from%20information_schema.tables%20limit%201")]; + add positive_matches[unescape_URI("/test.php?artist=1%0bAND(SELECT%0b1%20FROM%20mysql.x)")]; + + add negative_matches["/index.asp?db=a9h&jid=JHE&scope=site"]; + add negative_matches["/blah/?q=?q=archive+title=Read the older content in our archive"]; + add negative_matches["/blah/?q=?q= title=Return to the main page"]; + add negative_matches["/index.pl?http://search.ebscohost.com.proxy.lib.ohio-state.edu/direct.asp?db=s3h&jid=22EG&scope=site"]; + add negative_matches["/search?q=eugene 
svirsky&spell=1&access=p&output=xml_no_dtd&ie=UTF-8&client=default_frontend&site=default_collection&proxystylesheet=default_frontend"]; + add negative_matches["/index.htm?List=metadata+)++((munson)metadata+)+)"]; + add negative_matches["/index?Z=300x250&s=299359&_salt=523454521`54&B=10&u=http://ad.doubleclick.net/adi/answ.science/;dcopt=ist;kw=biased+sample;tid=2735125;scat=health;scat=business;pcat=science;pos=1;tile=1;sz=300x250;csrc=2451;csrc=2191;csrc=2665;csrc=2750;or&r=0"]; + add negative_matches["/index.php?sid=FirstSearch:AveryIndex&genre=article&issn=1590-1394&isbn=&atitle=Paesaggio+artificiale:+una+cava+diventa+parco+urbano+=++Artificial+landscape:+a+quarry+becomes+an+urban+park&title=Metamorfosi&issue=66&spage=58&epage=60&date=2007-05&sici=1590-1394(200705/06)66<58:PAUCDP>2.0.TX;2-C&id=doi:&pid=858994226+858994226fsapp13-52547-fhscgzal-jqsb44&url_ver=Z39.88-2004&rfr_id=info:sid/firstsearch.oclc.org:AveryIndex&rft_val_fmt=info:ofi/fmt:kev:mtx:journal&req_dat=fsapp13-52547-fhscgzal-jqsb44&rfe_dat=858994226+858994226&rft_id=urn:ISSN:1590-1394&rft.atitle=Paesaggio+artificiale:+una+cava+diventa+parco+urbano+=++Artificial+landscape:+a+quarry+becomes+an+urban+park&rft.jtitle=Metamorfosi&rft.date=2007-05&rft.issue=66&rft.spage=58&rft.epage=60&rft.issn=1590-1394&rft.genre=article&rft.sici=1590-1394(200705/06)66<58:PAUCDP>2.0.TX;2-C"]; + add negative_matches["/index?body=linker&reqidx=00012345(2005)L.349"]; + add negative_matches["/index.jsp?SortField=Score&SortOrder=desc&ResultCount=25&maxdoc=100&coll1=&coll2=ieeecnfs&coll3=ieecnfs&coll4=&coll5=&coll6=&coll7=&coll8=&srchres=0&history=yes&queryText=((curran)metadata)&oldqrytext=(~~simon+curran~~++metadata)++(4389466++punumber)&radiobutton=cit"]; + add negative_matches["/index.php?action=uid=32651(makessc) gid=32652(makessc) groups=32652(makessc)"]; + add negative_matches["/index.cgi?t=event&id=3947&year=2007&week=13&wday=3&rt=n&hour=13&min=30&lengthmin=90&title=771 (4) Biomedical Instrumentation - J. Liu&data=&startyear=2007&startweek=13&startwday=3&duration=1&alval=&altype=&alchk=&strike=0&todo=0&mail=0&lock=0&priv=0"]; + add negative_matches["/index.php?site=EagleTribunePublishingCompany&adSpace=ROS&size=468x60&type=horiz&requestID='+((new Date()).getTime() 2147483648) + Math.random()+'"]; + add negative_matches["/blah?callback=google.language.callbacks.id100&context=22&q=) or articles from the online magazine archive will need to log in, in order to access the content they have purchased.&langpair=|en&key=notsupplied&v=1.0"]; + add negative_matches["/blah?hl=en&rlz=1T4DDWQ_enUS432US432&q=\"andrew+foobar\""]; + add negative_matches["/index.cfm?filename=32423411.GP4&ip=1.2.3.4&id_num=0063&proj_num=2906&sheet_name=2 AND 3 FLR&sheet_num=2E&path=L:\ARF\DATA\13000\95013889.GP4"]; + add negative_matches["/index.pl\?supersite=stations&station=ABCD&path='+location.pathname+'&'+location.search.substring(1)+'\\\"\\"]; + add negative_matches["/ntpagetag.gif?js=1&ts=123412341234.568&lc=http://a.b.org/default.aspx?mode=js#&rs=1440x900&cd=32&ln=en&tz=GMT -04:00&jv=1&ets=123412341234.623&select_challenge_from_gallery=1&ci=RCC00000000"]; + + # These are still being matched accidentally. 
+ #add negative_matches["/A-B-C-D/inc/foobar.php?img=1179681280a b c d arf union.jpg"]; + #add negative_matches["/test,+soviet+union&searchscope=7&SORT=DZ/test,+soviet+union&foobar=7"]; + #add negative_matches["/search?hl=en&q=fee union western"]; + #add negative_matches["/search?hl=en&q=ceiling drop tile"]; + #add negative_matches["/index/hmm.gif?utmdt=Record > Create a Graph"]; + #add negative_matches["/index.php?test='||\x0aTO_CHAR(foo_bar.Foo_Bar_ID)||"]; + + print "If anything besides this line prints out, there is a problem."; + for ( test in positive_matches ) + { + if ( HTTP::match_sql_injection_uri !in test ) + print fmt("Missed: %s", test ); + } + print ""; + for ( test in negative_matches ) + { + if ( HTTP::match_sql_injection_uri in test ) + print fmt("False Positive: %s", test); + } + +} diff --git a/testing/btest/scripts/policy/protocols/krb/ticket-logging.bro b/testing/btest/scripts/policy/protocols/krb/ticket-logging.bro deleted file mode 100644 index 0bc0a33d5d..0000000000 --- a/testing/btest/scripts/policy/protocols/krb/ticket-logging.bro +++ /dev/null @@ -1,6 +0,0 @@ -# This test makes sure that krb ticket hashes are logged correctly. - -# @TEST-EXEC: bro -b -r $TRACES/krb/auth.trace %INPUT -# @TEST-EXEC: btest-diff kerberos.log - -@load protocols/krb/ticket-logging diff --git a/testing/btest/scripts/policy/protocols/krb/ticket-logging.zeek b/testing/btest/scripts/policy/protocols/krb/ticket-logging.zeek new file mode 100644 index 0000000000..f537e5146d --- /dev/null +++ b/testing/btest/scripts/policy/protocols/krb/ticket-logging.zeek @@ -0,0 +1,6 @@ +# This test makes sure that krb ticket hashes are logged correctly. + +# @TEST-EXEC: zeek -b -r $TRACES/krb/auth.trace %INPUT +# @TEST-EXEC: btest-diff kerberos.log + +@load protocols/krb/ticket-logging diff --git a/testing/btest/scripts/policy/protocols/ssh/detect-bruteforcing.bro b/testing/btest/scripts/policy/protocols/ssh/detect-bruteforcing.bro deleted file mode 100644 index e28ebf5b49..0000000000 --- a/testing/btest/scripts/policy/protocols/ssh/detect-bruteforcing.bro +++ /dev/null @@ -1,5 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/ssh/sshguess.pcap %INPUT -# @TEST-EXEC: btest-diff notice.log - -@load protocols/ssh/detect-bruteforcing -redef SSH::password_guesses_limit=10; diff --git a/testing/btest/scripts/policy/protocols/ssh/detect-bruteforcing.zeek b/testing/btest/scripts/policy/protocols/ssh/detect-bruteforcing.zeek new file mode 100644 index 0000000000..583c8ae0a5 --- /dev/null +++ b/testing/btest/scripts/policy/protocols/ssh/detect-bruteforcing.zeek @@ -0,0 +1,5 @@ +# @TEST-EXEC: zeek -C -r $TRACES/ssh/sshguess.pcap %INPUT +# @TEST-EXEC: btest-diff notice.log + +@load protocols/ssh/detect-bruteforcing +redef SSH::password_guesses_limit=10; diff --git a/testing/btest/scripts/policy/protocols/ssl/expiring-certs.bro b/testing/btest/scripts/policy/protocols/ssl/expiring-certs.bro deleted file mode 100644 index 9278e11de0..0000000000 --- a/testing/btest/scripts/policy/protocols/ssl/expiring-certs.bro +++ /dev/null @@ -1,7 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/tls/tls-expired-cert.trace %INPUT -# @TEST-EXEC: btest-diff notice.log - -@load protocols/ssl/expiring-certs - -redef SSL::notify_certs_expiration = ALL_HOSTS; - diff --git a/testing/btest/scripts/policy/protocols/ssl/expiring-certs.zeek b/testing/btest/scripts/policy/protocols/ssl/expiring-certs.zeek new file mode 100644 index 0000000000..16591d560c --- /dev/null +++ b/testing/btest/scripts/policy/protocols/ssl/expiring-certs.zeek @@ -0,0 +1,7 @@ +# @TEST-EXEC: 
zeek -r $TRACES/tls/tls-expired-cert.trace %INPUT +# @TEST-EXEC: btest-diff notice.log + +@load protocols/ssl/expiring-certs + +redef SSL::notify_certs_expiration = ALL_HOSTS; + diff --git a/testing/btest/scripts/policy/protocols/ssl/extract-certs-pem.bro b/testing/btest/scripts/policy/protocols/ssl/extract-certs-pem.bro deleted file mode 100644 index ad99e2e143..0000000000 --- a/testing/btest/scripts/policy/protocols/ssl/extract-certs-pem.bro +++ /dev/null @@ -1,6 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/tls/ssl.v3.trace %INPUT -# @TEST-EXEC: btest-diff certs-remote.pem - -@load protocols/ssl/extract-certs-pem - -redef SSL::extract_certs_pem = ALL_HOSTS; diff --git a/testing/btest/scripts/policy/protocols/ssl/extract-certs-pem.zeek b/testing/btest/scripts/policy/protocols/ssl/extract-certs-pem.zeek new file mode 100644 index 0000000000..660181942e --- /dev/null +++ b/testing/btest/scripts/policy/protocols/ssl/extract-certs-pem.zeek @@ -0,0 +1,6 @@ +# @TEST-EXEC: zeek -r $TRACES/tls/ssl.v3.trace %INPUT +# @TEST-EXEC: btest-diff certs-remote.pem + +@load protocols/ssl/extract-certs-pem + +redef SSL::extract_certs_pem = ALL_HOSTS; diff --git a/testing/btest/scripts/policy/protocols/ssl/heartbleed.bro b/testing/btest/scripts/policy/protocols/ssl/heartbleed.bro deleted file mode 100644 index 52137adbd0..0000000000 --- a/testing/btest/scripts/policy/protocols/ssl/heartbleed.bro +++ /dev/null @@ -1,21 +0,0 @@ -# TEST-EXEC: bro -C -r $TRACES/tls/heartbleed.pcap %INPUT -# TEST-EXEC: mv notice.log notice-heartbleed.log -# TEST-EXEC: btest-diff notice-heartbleed.log - -# @TEST-EXEC: bro -C -r $TRACES/tls/heartbleed-success.pcap %INPUT -# @TEST-EXEC: mv notice.log notice-heartbleed-success.log -# @TEST-EXEC: btest-diff notice-heartbleed-success.log - -# @TEST-EXEC: bro -C -r $TRACES/tls/heartbleed-encrypted.pcap %INPUT -# @TEST-EXEC: mv notice.log notice-encrypted.log -# @TEST-EXEC: btest-diff notice-encrypted.log - -# @TEST-EXEC: bro -C -r $TRACES/tls/heartbleed-encrypted-success.pcap %INPUT -# @TEST-EXEC: mv notice.log notice-encrypted-success.log -# @TEST-EXEC: btest-diff notice-encrypted-success.log - -# @TEST-EXEC: bro -C -r $TRACES/tls/heartbleed-encrypted-short.pcap %INPUT -# @TEST-EXEC: mv notice.log notice-encrypted-short.log -# @TEST-EXEC: btest-diff notice-encrypted-short.log - -@load protocols/ssl/heartbleed diff --git a/testing/btest/scripts/policy/protocols/ssl/heartbleed.zeek b/testing/btest/scripts/policy/protocols/ssl/heartbleed.zeek new file mode 100644 index 0000000000..233dfd82c4 --- /dev/null +++ b/testing/btest/scripts/policy/protocols/ssl/heartbleed.zeek @@ -0,0 +1,22 @@ +# @TEST-EXEC: zeek -C -r $TRACES/tls/heartbleed.pcap %INPUT +# @TEST-EXEC: mv notice.log notice-heartbleed.log + +# @TEST-EXEC: zeek -C -r $TRACES/tls/heartbleed-success.pcap %INPUT +# @TEST-EXEC: mv notice.log notice-heartbleed-success.log + +# @TEST-EXEC: zeek -C -r $TRACES/tls/heartbleed-encrypted.pcap %INPUT +# @TEST-EXEC: mv notice.log notice-encrypted.log + +# @TEST-EXEC: zeek -C -r $TRACES/tls/heartbleed-encrypted-success.pcap %INPUT +# @TEST-EXEC: mv notice.log notice-encrypted-success.log + +# @TEST-EXEC: zeek -C -r $TRACES/tls/heartbleed-encrypted-short.pcap %INPUT +# @TEST-EXEC: mv notice.log notice-encrypted-short.log + +# @TEST-EXEC: btest-diff notice-heartbleed.log +# @TEST-EXEC: btest-diff notice-heartbleed-success.log +# @TEST-EXEC: btest-diff notice-encrypted.log +# @TEST-EXEC: btest-diff notice-encrypted-success.log +# @TEST-EXEC: btest-diff notice-encrypted-short.log + +@load 
protocols/ssl/heartbleed diff --git a/testing/btest/scripts/policy/protocols/ssl/known-certs.bro b/testing/btest/scripts/policy/protocols/ssl/known-certs.bro deleted file mode 100644 index f5ff187164..0000000000 --- a/testing/btest/scripts/policy/protocols/ssl/known-certs.bro +++ /dev/null @@ -1,9 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/tls/google-duplicate.trace %INPUT -# @TEST-EXEC: btest-diff ssl.log -# @TEST-EXEC: btest-diff x509.log -# @TEST-EXEC: btest-diff known_certs.log - -@load protocols/ssl/known-certs - -redef Known::cert_tracking = ALL_HOSTS; - diff --git a/testing/btest/scripts/policy/protocols/ssl/known-certs.zeek b/testing/btest/scripts/policy/protocols/ssl/known-certs.zeek new file mode 100644 index 0000000000..e3a586b292 --- /dev/null +++ b/testing/btest/scripts/policy/protocols/ssl/known-certs.zeek @@ -0,0 +1,9 @@ +# @TEST-EXEC: zeek -r $TRACES/tls/google-duplicate.trace %INPUT +# @TEST-EXEC: btest-diff ssl.log +# @TEST-EXEC: btest-diff x509.log +# @TEST-EXEC: btest-diff known_certs.log + +@load protocols/ssl/known-certs + +redef Known::cert_tracking = ALL_HOSTS; + diff --git a/testing/btest/scripts/policy/protocols/ssl/log-hostcerts-only.bro b/testing/btest/scripts/policy/protocols/ssl/log-hostcerts-only.bro deleted file mode 100644 index 37f9f7592b..0000000000 --- a/testing/btest/scripts/policy/protocols/ssl/log-hostcerts-only.bro +++ /dev/null @@ -1,4 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/tls/google-duplicate.trace %INPUT -# @TEST-EXEC: btest-diff x509.log - -@load protocols/ssl/log-hostcerts-only diff --git a/testing/btest/scripts/policy/protocols/ssl/log-hostcerts-only.zeek b/testing/btest/scripts/policy/protocols/ssl/log-hostcerts-only.zeek new file mode 100644 index 0000000000..25d830acb0 --- /dev/null +++ b/testing/btest/scripts/policy/protocols/ssl/log-hostcerts-only.zeek @@ -0,0 +1,4 @@ +# @TEST-EXEC: zeek -r $TRACES/tls/google-duplicate.trace %INPUT +# @TEST-EXEC: btest-diff x509.log + +@load protocols/ssl/log-hostcerts-only diff --git a/testing/btest/scripts/policy/protocols/ssl/validate-certs-no-cache.bro b/testing/btest/scripts/policy/protocols/ssl/validate-certs-no-cache.bro deleted file mode 100644 index 4a3ec44468..0000000000 --- a/testing/btest/scripts/policy/protocols/ssl/validate-certs-no-cache.bro +++ /dev/null @@ -1,6 +0,0 @@ -# @TEST-EXEC: bro -C -r $TRACES/tls/missing-intermediate.pcap $SCRIPTS/external-ca-list.bro %INPUT -# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-x509-names | $SCRIPTS/diff-remove-timestamps" btest-diff ssl.log - -@load protocols/ssl/validate-certs.bro - -redef SSL::ssl_cache_intermediate_ca = F; diff --git a/testing/btest/scripts/policy/protocols/ssl/validate-certs-no-cache.zeek b/testing/btest/scripts/policy/protocols/ssl/validate-certs-no-cache.zeek new file mode 100644 index 0000000000..cb5d72a0d9 --- /dev/null +++ b/testing/btest/scripts/policy/protocols/ssl/validate-certs-no-cache.zeek @@ -0,0 +1,6 @@ +# @TEST-EXEC: zeek -C -r $TRACES/tls/missing-intermediate.pcap $SCRIPTS/external-ca-list.zeek %INPUT +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-x509-names | $SCRIPTS/diff-remove-timestamps" btest-diff ssl.log + +@load protocols/ssl/validate-certs + +redef SSL::ssl_cache_intermediate_ca = F; diff --git a/testing/btest/scripts/policy/protocols/ssl/validate-certs.bro b/testing/btest/scripts/policy/protocols/ssl/validate-certs.bro deleted file mode 100644 index 9a00919643..0000000000 --- a/testing/btest/scripts/policy/protocols/ssl/validate-certs.bro +++ /dev/null @@ -1,7 +0,0 @@ -# @TEST-EXEC: bro -r 
$TRACES/tls/tls-expired-cert.trace $SCRIPTS/external-ca-list.bro %INPUT -# @TEST-EXEC: cat ssl.log > ssl-all.log -# @TEST-EXEC: bro -C -r $TRACES/tls/missing-intermediate.pcap $SCRIPTS/external-ca-list.bro %INPUT -# @TEST-EXEC: cat ssl.log >> ssl-all.log -# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-x509-names | $SCRIPTS/diff-remove-timestamps" btest-diff ssl-all.log - -@load protocols/ssl/validate-certs.bro diff --git a/testing/btest/scripts/policy/protocols/ssl/validate-certs.zeek b/testing/btest/scripts/policy/protocols/ssl/validate-certs.zeek new file mode 100644 index 0000000000..434b3b020b --- /dev/null +++ b/testing/btest/scripts/policy/protocols/ssl/validate-certs.zeek @@ -0,0 +1,7 @@ +# @TEST-EXEC: zeek -r $TRACES/tls/tls-expired-cert.trace $SCRIPTS/external-ca-list.zeek %INPUT +# @TEST-EXEC: cat ssl.log > ssl-all.log +# @TEST-EXEC: zeek -C -r $TRACES/tls/missing-intermediate.pcap $SCRIPTS/external-ca-list.zeek %INPUT +# @TEST-EXEC: cat ssl.log >> ssl-all.log +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-x509-names | $SCRIPTS/diff-remove-timestamps" btest-diff ssl-all.log + +@load protocols/ssl/validate-certs diff --git a/testing/btest/scripts/policy/protocols/ssl/validate-ocsp.bro b/testing/btest/scripts/policy/protocols/ssl/validate-ocsp.bro deleted file mode 100644 index 4e53a46b02..0000000000 --- a/testing/btest/scripts/policy/protocols/ssl/validate-ocsp.bro +++ /dev/null @@ -1,10 +0,0 @@ -# @TEST-EXEC: bro $SCRIPTS/external-ca-list.bro -C -r $TRACES/tls/ocsp-stapling.trace %INPUT -# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-x509-names | $SCRIPTS/diff-remove-timestamps" btest-diff ssl.log -# @TEST-EXEC: bro $SCRIPTS/external-ca-list.bro -C -r $TRACES/tls/ocsp-stapling-twimg.trace %INPUT -# @TEST-EXEC: mv ssl.log ssl-twimg.log -# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-x509-names | $SCRIPTS/diff-remove-timestamps" btest-diff ssl-twimg.log -# @TEST-EXEC: bro $SCRIPTS/external-ca-list.bro -C -r $TRACES/tls/ocsp-stapling-digicert.trace %INPUT -# @TEST-EXEC: mv ssl.log ssl-digicert.log -# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-x509-names | $SCRIPTS/diff-remove-timestamps" btest-diff ssl-digicert.log - -@load protocols/ssl/validate-ocsp diff --git a/testing/btest/scripts/policy/protocols/ssl/validate-ocsp.zeek b/testing/btest/scripts/policy/protocols/ssl/validate-ocsp.zeek new file mode 100644 index 0000000000..948fa38b01 --- /dev/null +++ b/testing/btest/scripts/policy/protocols/ssl/validate-ocsp.zeek @@ -0,0 +1,10 @@ +# @TEST-EXEC: zeek $SCRIPTS/external-ca-list.zeek -C -r $TRACES/tls/ocsp-stapling.trace %INPUT +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-x509-names | $SCRIPTS/diff-remove-timestamps" btest-diff ssl.log +# @TEST-EXEC: zeek $SCRIPTS/external-ca-list.zeek -C -r $TRACES/tls/ocsp-stapling-twimg.trace %INPUT +# @TEST-EXEC: mv ssl.log ssl-twimg.log +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-x509-names | $SCRIPTS/diff-remove-timestamps" btest-diff ssl-twimg.log +# @TEST-EXEC: zeek $SCRIPTS/external-ca-list.zeek -C -r $TRACES/tls/ocsp-stapling-digicert.trace %INPUT +# @TEST-EXEC: mv ssl.log ssl-digicert.log +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-x509-names | $SCRIPTS/diff-remove-timestamps" btest-diff ssl-digicert.log + +@load protocols/ssl/validate-ocsp diff --git a/testing/btest/scripts/policy/protocols/ssl/validate-sct.bro b/testing/btest/scripts/policy/protocols/ssl/validate-sct.bro deleted file mode 100644 index 0e6065f937..0000000000 --- 
a/testing/btest/scripts/policy/protocols/ssl/validate-sct.bro +++ /dev/null @@ -1,21 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/tls/signed_certificate_timestamp.pcap $SCRIPTS/external-ca-list.bro %INPUT -# @TEST-EXEC: cat ssl.log > ssl-all.log -# @TEST-EXEC: bro -r $TRACES/tls/signed_certificate_timestamp-2.pcap $SCRIPTS/external-ca-list.bro %INPUT -# @TEST-EXEC: cat ssl.log >> ssl-all.log -# @TEST-EXEC: btest-diff .stdout -# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-x509-names | $SCRIPTS/diff-remove-timestamps" btest-diff ssl-all.log - -@load protocols/ssl/validate-sct.bro - -module SSL; - -event ssl_established(c: connection) - { - print c$ssl$ct_proofs; - for ( i in c$ssl$ct_proofs ) - { - local proof = c$ssl$ct_proofs[i]; - local log = SSL::ct_logs[proof$logid]; - print log$description, proof$valid; - } - } diff --git a/testing/btest/scripts/policy/protocols/ssl/validate-sct.zeek b/testing/btest/scripts/policy/protocols/ssl/validate-sct.zeek new file mode 100644 index 0000000000..7d2ac86865 --- /dev/null +++ b/testing/btest/scripts/policy/protocols/ssl/validate-sct.zeek @@ -0,0 +1,21 @@ +# @TEST-EXEC: zeek -r $TRACES/tls/signed_certificate_timestamp.pcap $SCRIPTS/external-ca-list.zeek %INPUT +# @TEST-EXEC: cat ssl.log > ssl-all.log +# @TEST-EXEC: zeek -r $TRACES/tls/signed_certificate_timestamp-2.pcap $SCRIPTS/external-ca-list.zeek %INPUT +# @TEST-EXEC: cat ssl.log >> ssl-all.log +# @TEST-EXEC: btest-diff .stdout +# @TEST-EXEC: TEST_DIFF_CANONIFIER="$SCRIPTS/diff-remove-x509-names | $SCRIPTS/diff-remove-timestamps" btest-diff ssl-all.log + +@load protocols/ssl/validate-sct + +module SSL; + +event ssl_established(c: connection) + { + print c$ssl$ct_proofs; + for ( i in c$ssl$ct_proofs ) + { + local proof = c$ssl$ct_proofs[i]; + local log = SSL::ct_logs[proof$logid]; + print log$description, proof$valid; + } + } diff --git a/testing/btest/scripts/policy/protocols/ssl/weak-keys.bro b/testing/btest/scripts/policy/protocols/ssl/weak-keys.bro deleted file mode 100644 index f4d51f8016..0000000000 --- a/testing/btest/scripts/policy/protocols/ssl/weak-keys.bro +++ /dev/null @@ -1,12 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/tls/dhe.pcap %INPUT -# @TEST-EXEC: cp notice.log notice-out.log -# @TEST-EXEC: bro -r $TRACES/tls/ssl-v2.trace %INPUT -# @TEST-EXEC: cat notice.log >> notice-out.log -# @TEST-EXEC: bro -r $TRACES/tls/ssl.v3.trace %INPUT -# @TEST-EXEC: cat notice.log >> notice-out.log -# @TEST-EXEC: btest-diff notice-out.log - -@load protocols/ssl/weak-keys - -redef SSL::notify_weak_keys = ALL_HOSTS; -redef SSL::notify_minimal_key_length = 4096; diff --git a/testing/btest/scripts/policy/protocols/ssl/weak-keys.zeek b/testing/btest/scripts/policy/protocols/ssl/weak-keys.zeek new file mode 100644 index 0000000000..efc9aebf12 --- /dev/null +++ b/testing/btest/scripts/policy/protocols/ssl/weak-keys.zeek @@ -0,0 +1,12 @@ +# @TEST-EXEC: zeek -r $TRACES/tls/dhe.pcap %INPUT +# @TEST-EXEC: cp notice.log notice-out.log +# @TEST-EXEC: zeek -r $TRACES/tls/ssl-v2.trace %INPUT +# @TEST-EXEC: cat notice.log >> notice-out.log +# @TEST-EXEC: zeek -r $TRACES/tls/ssl.v3.trace %INPUT +# @TEST-EXEC: cat notice.log >> notice-out.log +# @TEST-EXEC: btest-diff notice-out.log + +@load protocols/ssl/weak-keys + +redef SSL::notify_weak_keys = ALL_HOSTS; +redef SSL::notify_minimal_key_length = 4096; diff --git a/testing/btest/scripts/site/local-compat.test b/testing/btest/scripts/site/local-compat.test index 3eb189e639..1627b00523 100644 --- a/testing/btest/scripts/site/local-compat.test +++ 
b/testing/btest/scripts/site/local-compat.test @@ -1,14 +1,14 @@ -# @TEST-EXEC: bro local-`cat $DIST/VERSION | sed 's/\([0-9].[0-9]\).*/\1/g'`.bro +# @TEST-EXEC: zeek local-`cat $DIST/VERSION | sed 's/\([0-9].[0-9]\).*/\1/g'`.bro # This tests the compatibility of the past release's site/local.bro -# script with the current version of Bro. If the test fails because +# script with the current version of Zeek. If the test fails because # it doesn't find the right file, that means everything stayed # compatible between releases, so just add a TEST-START-FILE with -# the contents the latest Bro version's site/local.bro script. +# the contents the latest Zeek version's site/local.zeek script. # If the test fails while loading the old local.bro, it usually # indicates a note will need to be made in NEWS explaining to users # how to migrate to the new version and this test's TEST-START-FILE -# should be updated with the latest contents of site/local.bro. +# should be updated with the latest contents of site/local.zeek. @TEST-START-FILE local-2.6.bro ##! Local site policy. Customize as appropriate. diff --git a/testing/btest/scripts/site/local.test b/testing/btest/scripts/site/local.test index e2058417cd..158cc7f8c0 100644 --- a/testing/btest/scripts/site/local.test +++ b/testing/btest/scripts/site/local.test @@ -1,3 +1,3 @@ -# @TEST-EXEC: bro %INPUT +# @TEST-EXEC: zeek %INPUT @load local \ No newline at end of file diff --git a/testing/btest/signatures/bad-eval-condition.bro b/testing/btest/signatures/bad-eval-condition.bro deleted file mode 100644 index 2b3fef76fe..0000000000 --- a/testing/btest/signatures/bad-eval-condition.bro +++ /dev/null @@ -1,22 +0,0 @@ -# @TEST-EXEC-FAIL: bro -r $TRACES/ftp/ipv4.trace %INPUT -# @TEST-EXEC: btest-diff .stderr - -@load-sigs blah.sig - -@TEST-START-FILE blah.sig -signature blah - { - ip-proto == tcp - src-port == 21 - payload /.*/ - eval mark_conn - } -@TEST-END-FILE - -# wrong function signature for use with signature 'eval' conditions -# needs to be reported -function mark_conn(state: signature_state): bool - { - add state$conn$service["blah"]; - return T; - } diff --git a/testing/btest/signatures/bad-eval-condition.zeek b/testing/btest/signatures/bad-eval-condition.zeek new file mode 100644 index 0000000000..d64cb4cba4 --- /dev/null +++ b/testing/btest/signatures/bad-eval-condition.zeek @@ -0,0 +1,22 @@ +# @TEST-EXEC-FAIL: zeek -r $TRACES/ftp/ipv4.trace %INPUT +# @TEST-EXEC: btest-diff .stderr + +@load-sigs blah.sig + +@TEST-START-FILE blah.sig +signature blah + { + ip-proto == tcp + src-port == 21 + payload /.*/ + eval mark_conn + } +@TEST-END-FILE + +# wrong function signature for use with signature 'eval' conditions +# needs to be reported +function mark_conn(state: signature_state): bool + { + add state$conn$service["blah"]; + return T; + } diff --git a/testing/btest/signatures/dpd.bro b/testing/btest/signatures/dpd.bro deleted file mode 100644 index 39f1b01294..0000000000 --- a/testing/btest/signatures/dpd.bro +++ /dev/null @@ -1,54 +0,0 @@ -# @TEST-EXEC: bro -b -s myftp -r $TRACES/ftp/ipv4.trace %INPUT >dpd-ipv4.out -# @TEST-EXEC: bro -b -s myftp -r $TRACES/ftp/ipv6.trace %INPUT >dpd-ipv6.out -# @TEST-EXEC: bro -b -r $TRACES/ftp/ipv4.trace %INPUT >nosig-ipv4.out -# @TEST-EXEC: bro -b -r $TRACES/ftp/ipv6.trace %INPUT >nosig-ipv6.out -# @TEST-EXEC: btest-diff dpd-ipv4.out -# @TEST-EXEC: btest-diff dpd-ipv6.out -# @TEST-EXEC: btest-diff nosig-ipv4.out -# @TEST-EXEC: btest-diff nosig-ipv6.out - -# DPD based on 'ip-proto' and 'payload' signatures should
be independent -# of IP protocol. - -@TEST-START-FILE myftp.sig -signature my_ftp_client { - ip-proto == tcp - payload /(|.*[\n\r]) *[uU][sS][eE][rR] / - tcp-state originator - event "matched my_ftp_client" -} - -signature my_ftp_server { - ip-proto == tcp - payload /[\n\r ]*(120|220)[^0-9].*[\n\r] *(230|331)[^0-9]/ - tcp-state responder - requires-reverse-signature my_ftp_client - enable "ftp" - event "matched my_ftp_server" -} -@TEST-END-FILE - -@load base/utils/addrs - -event bro_init() - { - # no analyzer attached to any port by default, depends entirely on sigs - print "|Analyzer::all_registered_ports()|", |Analyzer::all_registered_ports()|; - } - -event signature_match(state: signature_state, msg: string, data: string) - { - print fmt("signature_match %s - %s", state$conn$id, msg); - } - -event ftp_request(c: connection, command: string, arg: string) - { - print fmt("ftp_request %s:%s - %s %s", addr_to_uri(c$id$orig_h), - port_to_count(c$id$orig_p), command, arg); - } - -event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool) - { - print fmt("ftp_reply %s:%s - %s %s", addr_to_uri(c$id$resp_h), - port_to_count(c$id$resp_p), code, msg); - } diff --git a/testing/btest/signatures/dpd.zeek b/testing/btest/signatures/dpd.zeek new file mode 100644 index 0000000000..16e7f19724 --- /dev/null +++ b/testing/btest/signatures/dpd.zeek @@ -0,0 +1,54 @@ +# @TEST-EXEC: zeek -b -s myftp -r $TRACES/ftp/ipv4.trace %INPUT >dpd-ipv4.out +# @TEST-EXEC: zeek -b -s myftp -r $TRACES/ftp/ipv6.trace %INPUT >dpd-ipv6.out +# @TEST-EXEC: zeek -b -r $TRACES/ftp/ipv4.trace %INPUT >nosig-ipv4.out +# @TEST-EXEC: zeek -b -r $TRACES/ftp/ipv6.trace %INPUT >nosig-ipv6.out +# @TEST-EXEC: btest-diff dpd-ipv4.out +# @TEST-EXEC: btest-diff dpd-ipv6.out +# @TEST-EXEC: btest-diff nosig-ipv4.out +# @TEST-EXEC: btest-diff nosig-ipv6.out + +# DPD based on 'ip-proto' and 'payload' signatures should be independent +# of IP protocol. 
+ +@TEST-START-FILE myftp.sig +signature my_ftp_client { + ip-proto == tcp + payload /(|.*[\n\r]) *[uU][sS][eE][rR] / + tcp-state originator + event "matched my_ftp_client" +} + +signature my_ftp_server { + ip-proto == tcp + payload /[\n\r ]*(120|220)[^0-9].*[\n\r] *(230|331)[^0-9]/ + tcp-state responder + requires-reverse-signature my_ftp_client + enable "ftp" + event "matched my_ftp_server" +} +@TEST-END-FILE + +@load base/utils/addrs + +event zeek_init() + { + # no analyzer attached to any port by default, depends entirely on sigs + print "|Analyzer::all_registered_ports()|", |Analyzer::all_registered_ports()|; + } + +event signature_match(state: signature_state, msg: string, data: string) + { + print fmt("signature_match %s - %s", state$conn$id, msg); + } + +event ftp_request(c: connection, command: string, arg: string) + { + print fmt("ftp_request %s:%s - %s %s", addr_to_uri(c$id$orig_h), + port_to_count(c$id$orig_p), command, arg); + } + +event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool) + { + print fmt("ftp_reply %s:%s - %s %s", addr_to_uri(c$id$resp_h), + port_to_count(c$id$resp_p), code, msg); + } diff --git a/testing/btest/signatures/dst-ip-cidr-v4.bro b/testing/btest/signatures/dst-ip-cidr-v4.bro deleted file mode 100644 index e86a746e54..0000000000 --- a/testing/btest/signatures/dst-ip-cidr-v4.bro +++ /dev/null @@ -1,17 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/ntp.pcap %INPUT >output -# @TEST-EXEC: btest-diff output - -@TEST-START-FILE a.sig -signature foo { - dst-ip == 17.0.0.0/8 - ip-proto == udp - event "match" -} -@TEST-END-FILE - -event signature_match(state: signature_state, msg: string, data: string) - { - print "match", state$sig_id; - } - -@load-sigs ./a.sig diff --git a/testing/btest/signatures/dst-ip-cidr-v4.zeek b/testing/btest/signatures/dst-ip-cidr-v4.zeek new file mode 100644 index 0000000000..9c80a9148a --- /dev/null +++ b/testing/btest/signatures/dst-ip-cidr-v4.zeek @@ -0,0 +1,17 @@ +# @TEST-EXEC: zeek -r $TRACES/ntp.pcap %INPUT >output +# @TEST-EXEC: btest-diff output + +@TEST-START-FILE a.sig +signature foo { + dst-ip == 17.0.0.0/8 + ip-proto == udp + event "match" +} +@TEST-END-FILE + +event signature_match(state: signature_state, msg: string, data: string) + { + print "match", state$sig_id; + } + +@load-sigs ./a.sig diff --git a/testing/btest/signatures/dst-ip-header-condition-v4-masks.bro b/testing/btest/signatures/dst-ip-header-condition-v4-masks.bro deleted file mode 100644 index dc5b0f48b8..0000000000 --- a/testing/btest/signatures/dst-ip-header-condition-v4-masks.bro +++ /dev/null @@ -1,71 +0,0 @@ -# @TEST-EXEC: bro -b -s dst-ip-eq -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-eq.out -# @TEST-EXEC: bro -b -s dst-ip-eq-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-eq-nomatch.out -# @TEST-EXEC: bro -b -s dst-ip-eq-list -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-eq-list.out - -# @TEST-EXEC: bro -b -s dst-ip-ne -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-ne.out -# @TEST-EXEC: bro -b -s dst-ip-ne-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-ne-nomatch.out -# @TEST-EXEC: bro -b -s dst-ip-ne-list -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-ne-list.out -# @TEST-EXEC: bro -b -s dst-ip-ne-list-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-ne-list-nomatch.out - -# @TEST-EXEC: btest-diff dst-ip-eq.out -# @TEST-EXEC: btest-diff dst-ip-eq-nomatch.out -# @TEST-EXEC: btest-diff dst-ip-eq-list.out - -# @TEST-EXEC: 
btest-diff dst-ip-ne.out -# @TEST-EXEC: btest-diff dst-ip-ne-nomatch.out -# @TEST-EXEC: btest-diff dst-ip-ne-list.out -# @TEST-EXEC: btest-diff dst-ip-ne-list-nomatch.out - -@TEST-START-FILE dst-ip-eq.sig -signature id { - dst-ip == 192.168.1.0/24 - event "dst-ip-eq" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-eq-nomatch.sig -signature id { - dst-ip == 10.0.0.0/8 - event "dst-ip-eq-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-eq-list.sig -signature id { - dst-ip == 10.0.0.0/8,[fe80::0]/16,192.168.1.0/24 - event "dst-ip-eq-list" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-ne.sig -signature id { - dst-ip != 10.0.0.0/8 - event "dst-ip-ne" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-ne-nomatch.sig -signature id { - dst-ip != 192.168.1.0/24 - event "dst-ip-ne-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-ne-list.sig -signature id { - dst-ip != 10.0.0.0/8,[fe80::0]/16 - event "dst-ip-ne-list" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-ne-list-nomatch.sig -signature id { - dst-ip != 10.0.0.0/8,[fe80::0]/16,192.168.1.0/24 - event "dst-ip-ne-list-nomatch" -} -@TEST-END-FILE - -event signature_match(state: signature_state, msg: string, data: string) - { - print fmt("signature_match %s - %s", state$conn$id, msg); - } diff --git a/testing/btest/signatures/dst-ip-header-condition-v4-masks.zeek b/testing/btest/signatures/dst-ip-header-condition-v4-masks.zeek new file mode 100644 index 0000000000..9389f11df2 --- /dev/null +++ b/testing/btest/signatures/dst-ip-header-condition-v4-masks.zeek @@ -0,0 +1,71 @@ +# @TEST-EXEC: zeek -b -s dst-ip-eq -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-eq.out +# @TEST-EXEC: zeek -b -s dst-ip-eq-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-eq-nomatch.out +# @TEST-EXEC: zeek -b -s dst-ip-eq-list -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-eq-list.out + +# @TEST-EXEC: zeek -b -s dst-ip-ne -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-ne.out +# @TEST-EXEC: zeek -b -s dst-ip-ne-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-ne-nomatch.out +# @TEST-EXEC: zeek -b -s dst-ip-ne-list -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-ne-list.out +# @TEST-EXEC: zeek -b -s dst-ip-ne-list-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-ne-list-nomatch.out + +# @TEST-EXEC: btest-diff dst-ip-eq.out +# @TEST-EXEC: btest-diff dst-ip-eq-nomatch.out +# @TEST-EXEC: btest-diff dst-ip-eq-list.out + +# @TEST-EXEC: btest-diff dst-ip-ne.out +# @TEST-EXEC: btest-diff dst-ip-ne-nomatch.out +# @TEST-EXEC: btest-diff dst-ip-ne-list.out +# @TEST-EXEC: btest-diff dst-ip-ne-list-nomatch.out + +@TEST-START-FILE dst-ip-eq.sig +signature id { + dst-ip == 192.168.1.0/24 + event "dst-ip-eq" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-eq-nomatch.sig +signature id { + dst-ip == 10.0.0.0/8 + event "dst-ip-eq-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-eq-list.sig +signature id { + dst-ip == 10.0.0.0/8,[fe80::0]/16,192.168.1.0/24 + event "dst-ip-eq-list" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-ne.sig +signature id { + dst-ip != 10.0.0.0/8 + event "dst-ip-ne" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-ne-nomatch.sig +signature id { + dst-ip != 192.168.1.0/24 + event "dst-ip-ne-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-ne-list.sig +signature id { + dst-ip != 10.0.0.0/8,[fe80::0]/16 + event "dst-ip-ne-list" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-ne-list-nomatch.sig +signature id { + dst-ip != 
10.0.0.0/8,[fe80::0]/16,192.168.1.0/24 + event "dst-ip-ne-list-nomatch" +} +@TEST-END-FILE + +event signature_match(state: signature_state, msg: string, data: string) + { + print fmt("signature_match %s - %s", state$conn$id, msg); + } diff --git a/testing/btest/signatures/dst-ip-header-condition-v4.bro b/testing/btest/signatures/dst-ip-header-condition-v4.bro deleted file mode 100644 index 0d0d3e644c..0000000000 --- a/testing/btest/signatures/dst-ip-header-condition-v4.bro +++ /dev/null @@ -1,71 +0,0 @@ -# @TEST-EXEC: bro -b -s dst-ip-eq -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-eq.out -# @TEST-EXEC: bro -b -s dst-ip-eq-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-eq-nomatch.out -# @TEST-EXEC: bro -b -s dst-ip-eq-list -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-eq-list.out - -# @TEST-EXEC: bro -b -s dst-ip-ne -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-ne.out -# @TEST-EXEC: bro -b -s dst-ip-ne-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-ne-nomatch.out -# @TEST-EXEC: bro -b -s dst-ip-ne-list -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-ne-list.out -# @TEST-EXEC: bro -b -s dst-ip-ne-list-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-ne-list-nomatch.out - -# @TEST-EXEC: btest-diff dst-ip-eq.out -# @TEST-EXEC: btest-diff dst-ip-eq-nomatch.out -# @TEST-EXEC: btest-diff dst-ip-eq-list.out - -# @TEST-EXEC: btest-diff dst-ip-ne.out -# @TEST-EXEC: btest-diff dst-ip-ne-nomatch.out -# @TEST-EXEC: btest-diff dst-ip-ne-list.out -# @TEST-EXEC: btest-diff dst-ip-ne-list-nomatch.out - -@TEST-START-FILE dst-ip-eq.sig -signature id { - dst-ip == 192.168.1.101 - event "dst-ip-eq" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-eq-nomatch.sig -signature id { - dst-ip == 10.0.0.1 - event "dst-ip-eq-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-eq-list.sig -signature id { - dst-ip == 10.0.0.1,10.0.0.2,[fe80::1],192.168.1.101 - event "dst-ip-eq-list" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-ne.sig -signature id { - dst-ip != 10.0.0.1 - event "dst-ip-ne" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-ne-nomatch.sig -signature id { - dst-ip != 192.168.1.101 - event "dst-ip-ne-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-ne-list.sig -signature id { - dst-ip != 10.0.0.1,10.0.0.2,10.0.0.3,[fe80::1] - event "dst-ip-ne-list" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-ne-list-nomatch.sig -signature id { - dst-ip != 10.0.0.1,10.0.0.2,10.0.0.3,[fe80::1],192.168.1.101 - event "dst-ip-ne-list-nomatch" -} -@TEST-END-FILE - -event signature_match(state: signature_state, msg: string, data: string) - { - print fmt("signature_match %s - %s", state$conn$id, msg); - } diff --git a/testing/btest/signatures/dst-ip-header-condition-v4.zeek b/testing/btest/signatures/dst-ip-header-condition-v4.zeek new file mode 100644 index 0000000000..b04d6c30ca --- /dev/null +++ b/testing/btest/signatures/dst-ip-header-condition-v4.zeek @@ -0,0 +1,71 @@ +# @TEST-EXEC: zeek -b -s dst-ip-eq -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-eq.out +# @TEST-EXEC: zeek -b -s dst-ip-eq-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-eq-nomatch.out +# @TEST-EXEC: zeek -b -s dst-ip-eq-list -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-eq-list.out + +# @TEST-EXEC: zeek -b -s dst-ip-ne -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-ne.out +# @TEST-EXEC: zeek -b -s dst-ip-ne-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT 
>dst-ip-ne-nomatch.out +# @TEST-EXEC: zeek -b -s dst-ip-ne-list -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-ne-list.out +# @TEST-EXEC: zeek -b -s dst-ip-ne-list-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >dst-ip-ne-list-nomatch.out + +# @TEST-EXEC: btest-diff dst-ip-eq.out +# @TEST-EXEC: btest-diff dst-ip-eq-nomatch.out +# @TEST-EXEC: btest-diff dst-ip-eq-list.out + +# @TEST-EXEC: btest-diff dst-ip-ne.out +# @TEST-EXEC: btest-diff dst-ip-ne-nomatch.out +# @TEST-EXEC: btest-diff dst-ip-ne-list.out +# @TEST-EXEC: btest-diff dst-ip-ne-list-nomatch.out + +@TEST-START-FILE dst-ip-eq.sig +signature id { + dst-ip == 192.168.1.101 + event "dst-ip-eq" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-eq-nomatch.sig +signature id { + dst-ip == 10.0.0.1 + event "dst-ip-eq-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-eq-list.sig +signature id { + dst-ip == 10.0.0.1,10.0.0.2,[fe80::1],192.168.1.101 + event "dst-ip-eq-list" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-ne.sig +signature id { + dst-ip != 10.0.0.1 + event "dst-ip-ne" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-ne-nomatch.sig +signature id { + dst-ip != 192.168.1.101 + event "dst-ip-ne-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-ne-list.sig +signature id { + dst-ip != 10.0.0.1,10.0.0.2,10.0.0.3,[fe80::1] + event "dst-ip-ne-list" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-ne-list-nomatch.sig +signature id { + dst-ip != 10.0.0.1,10.0.0.2,10.0.0.3,[fe80::1],192.168.1.101 + event "dst-ip-ne-list-nomatch" +} +@TEST-END-FILE + +event signature_match(state: signature_state, msg: string, data: string) + { + print fmt("signature_match %s - %s", state$conn$id, msg); + } diff --git a/testing/btest/signatures/dst-ip-header-condition-v6-masks.bro b/testing/btest/signatures/dst-ip-header-condition-v6-masks.bro deleted file mode 100644 index d82a76e78d..0000000000 --- a/testing/btest/signatures/dst-ip-header-condition-v6-masks.bro +++ /dev/null @@ -1,71 +0,0 @@ -# @TEST-EXEC: bro -b -s dst-ip-eq -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-eq.out -# @TEST-EXEC: bro -b -s dst-ip-eq-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-eq-nomatch.out -# @TEST-EXEC: bro -b -s dst-ip-eq-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-eq-list.out - -# @TEST-EXEC: bro -b -s dst-ip-ne -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-ne.out -# @TEST-EXEC: bro -b -s dst-ip-ne-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-ne-nomatch.out -# @TEST-EXEC: bro -b -s dst-ip-ne-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-ne-list.out -# @TEST-EXEC: bro -b -s dst-ip-ne-list-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-ne-list-nomatch.out - -# @TEST-EXEC: btest-diff dst-ip-eq.out -# @TEST-EXEC: btest-diff dst-ip-eq-nomatch.out -# @TEST-EXEC: btest-diff dst-ip-eq-list.out - -# @TEST-EXEC: btest-diff dst-ip-ne.out -# @TEST-EXEC: btest-diff dst-ip-ne-nomatch.out -# @TEST-EXEC: btest-diff dst-ip-ne-list.out -# @TEST-EXEC: btest-diff dst-ip-ne-list-nomatch.out - -@TEST-START-FILE dst-ip-eq.sig -signature id { - dst-ip == [2001:4f8:4:7:2e0:81ff:fe52:0]/112 - event "dst-ip-eq" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-eq-nomatch.sig -signature id { - dst-ip == [fe80::0]/16 - event "dst-ip-eq-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-eq-list.sig -signature id { - dst-ip == 10.0.0.0/8,[fe80::0]/16,[2001:4f8:4:7:2e0:81ff:fe52:0]/112 - event "dst-ip-eq-list" -} -@TEST-END-FILE - -@TEST-START-FILE 
dst-ip-ne.sig -signature id { - dst-ip != [2001:4f8:4:7:2e0:81ff:fe52:0]/120 - event "dst-ip-ne" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-ne-nomatch.sig -signature id { - dst-ip != [2001:4f8:4:7:2e0:81ff:fe52:0]/112 - event "dst-ip-ne-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-ne-list.sig -signature id { - dst-ip != 10.0.0.0/8,[fe80::0]/16 - event "dst-ip-ne-list" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-ne-list-nomatch.sig -signature id { - dst-ip != 10.0.0.0/8,[fe80::1]/16,[2001:4f8:4:7:2e0:81ff:fe52:0]/112 - event "dst-ip-ne-list-nomatch" -} -@TEST-END-FILE - -event signature_match(state: signature_state, msg: string, data: string) - { - print fmt("signature_match %s - %s", state$conn$id, msg); - } diff --git a/testing/btest/signatures/dst-ip-header-condition-v6-masks.zeek b/testing/btest/signatures/dst-ip-header-condition-v6-masks.zeek new file mode 100644 index 0000000000..9de148eb87 --- /dev/null +++ b/testing/btest/signatures/dst-ip-header-condition-v6-masks.zeek @@ -0,0 +1,71 @@ +# @TEST-EXEC: zeek -b -s dst-ip-eq -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-eq.out +# @TEST-EXEC: zeek -b -s dst-ip-eq-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-eq-nomatch.out +# @TEST-EXEC: zeek -b -s dst-ip-eq-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-eq-list.out + +# @TEST-EXEC: zeek -b -s dst-ip-ne -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-ne.out +# @TEST-EXEC: zeek -b -s dst-ip-ne-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-ne-nomatch.out +# @TEST-EXEC: zeek -b -s dst-ip-ne-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-ne-list.out +# @TEST-EXEC: zeek -b -s dst-ip-ne-list-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-ne-list-nomatch.out + +# @TEST-EXEC: btest-diff dst-ip-eq.out +# @TEST-EXEC: btest-diff dst-ip-eq-nomatch.out +# @TEST-EXEC: btest-diff dst-ip-eq-list.out + +# @TEST-EXEC: btest-diff dst-ip-ne.out +# @TEST-EXEC: btest-diff dst-ip-ne-nomatch.out +# @TEST-EXEC: btest-diff dst-ip-ne-list.out +# @TEST-EXEC: btest-diff dst-ip-ne-list-nomatch.out + +@TEST-START-FILE dst-ip-eq.sig +signature id { + dst-ip == [2001:4f8:4:7:2e0:81ff:fe52:0]/112 + event "dst-ip-eq" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-eq-nomatch.sig +signature id { + dst-ip == [fe80::0]/16 + event "dst-ip-eq-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-eq-list.sig +signature id { + dst-ip == 10.0.0.0/8,[fe80::0]/16,[2001:4f8:4:7:2e0:81ff:fe52:0]/112 + event "dst-ip-eq-list" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-ne.sig +signature id { + dst-ip != [2001:4f8:4:7:2e0:81ff:fe52:0]/120 + event "dst-ip-ne" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-ne-nomatch.sig +signature id { + dst-ip != [2001:4f8:4:7:2e0:81ff:fe52:0]/112 + event "dst-ip-ne-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-ne-list.sig +signature id { + dst-ip != 10.0.0.0/8,[fe80::0]/16 + event "dst-ip-ne-list" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-ne-list-nomatch.sig +signature id { + dst-ip != 10.0.0.0/8,[fe80::1]/16,[2001:4f8:4:7:2e0:81ff:fe52:0]/112 + event "dst-ip-ne-list-nomatch" +} +@TEST-END-FILE + +event signature_match(state: signature_state, msg: string, data: string) + { + print fmt("signature_match %s - %s", state$conn$id, msg); + } diff --git a/testing/btest/signatures/dst-ip-header-condition-v6.bro b/testing/btest/signatures/dst-ip-header-condition-v6.bro deleted file mode 100644 index e629fb4462..0000000000 --- 
a/testing/btest/signatures/dst-ip-header-condition-v6.bro +++ /dev/null @@ -1,71 +0,0 @@ -# @TEST-EXEC: bro -b -s dst-ip-eq -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-eq.out -# @TEST-EXEC: bro -b -s dst-ip-eq-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-eq-nomatch.out -# @TEST-EXEC: bro -b -s dst-ip-eq-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-eq-list.out - -# @TEST-EXEC: bro -b -s dst-ip-ne -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-ne.out -# @TEST-EXEC: bro -b -s dst-ip-ne-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-ne-nomatch.out -# @TEST-EXEC: bro -b -s dst-ip-ne-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-ne-list.out -# @TEST-EXEC: bro -b -s dst-ip-ne-list-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-ne-list-nomatch.out - -# @TEST-EXEC: btest-diff dst-ip-eq.out -# @TEST-EXEC: btest-diff dst-ip-eq-nomatch.out -# @TEST-EXEC: btest-diff dst-ip-eq-list.out - -# @TEST-EXEC: btest-diff dst-ip-ne.out -# @TEST-EXEC: btest-diff dst-ip-ne-nomatch.out -# @TEST-EXEC: btest-diff dst-ip-ne-list.out -# @TEST-EXEC: btest-diff dst-ip-ne-list-nomatch.out - -@TEST-START-FILE dst-ip-eq.sig -signature id { - dst-ip == [2001:4f8:4:7:2e0:81ff:fe52:9a6b] - event "dst-ip-eq" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-eq-nomatch.sig -signature id { - dst-ip == 10.0.0.1 - event "dst-ip-eq-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-eq-list.sig -signature id { - dst-ip == 10.0.0.1,10.0.0.2,[fe80::1],[2001:4f8:4:7:2e0:81ff:fe52:9a6b] - event "dst-ip-eq-list" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-ne.sig -signature id { - dst-ip != 10.0.0.1 - event "dst-ip-ne" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-ne-nomatch.sig -signature id { - dst-ip != [2001:4f8:4:7:2e0:81ff:fe52:9a6b] - event "dst-ip-ne-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-ne-list.sig -signature id { - dst-ip != 10.0.0.1,10.0.0.2,10.0.0.3,[fe80::1] - event "dst-ip-ne-list" -} -@TEST-END-FILE - -@TEST-START-FILE dst-ip-ne-list-nomatch.sig -signature id { - dst-ip != 10.0.0.1,10.0.0.2,10.0.0.3,[fe80::1],[2001:4f8:4:7:2e0:81ff:fe52:9a6b] - event "dst-ip-ne-list-nomatch" -} -@TEST-END-FILE - -event signature_match(state: signature_state, msg: string, data: string) - { - print fmt("signature_match %s - %s", state$conn$id, msg); - } diff --git a/testing/btest/signatures/dst-ip-header-condition-v6.zeek b/testing/btest/signatures/dst-ip-header-condition-v6.zeek new file mode 100644 index 0000000000..5bd64f8fc1 --- /dev/null +++ b/testing/btest/signatures/dst-ip-header-condition-v6.zeek @@ -0,0 +1,71 @@ +# @TEST-EXEC: zeek -b -s dst-ip-eq -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-eq.out +# @TEST-EXEC: zeek -b -s dst-ip-eq-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-eq-nomatch.out +# @TEST-EXEC: zeek -b -s dst-ip-eq-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-eq-list.out + +# @TEST-EXEC: zeek -b -s dst-ip-ne -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-ne.out +# @TEST-EXEC: zeek -b -s dst-ip-ne-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-ne-nomatch.out +# @TEST-EXEC: zeek -b -s dst-ip-ne-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-ne-list.out +# @TEST-EXEC: zeek -b -s dst-ip-ne-list-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-ip-ne-list-nomatch.out + +# @TEST-EXEC: btest-diff dst-ip-eq.out +# @TEST-EXEC: btest-diff dst-ip-eq-nomatch.out +# 
@TEST-EXEC: btest-diff dst-ip-eq-list.out + +# @TEST-EXEC: btest-diff dst-ip-ne.out +# @TEST-EXEC: btest-diff dst-ip-ne-nomatch.out +# @TEST-EXEC: btest-diff dst-ip-ne-list.out +# @TEST-EXEC: btest-diff dst-ip-ne-list-nomatch.out + +@TEST-START-FILE dst-ip-eq.sig +signature id { + dst-ip == [2001:4f8:4:7:2e0:81ff:fe52:9a6b] + event "dst-ip-eq" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-eq-nomatch.sig +signature id { + dst-ip == 10.0.0.1 + event "dst-ip-eq-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-eq-list.sig +signature id { + dst-ip == 10.0.0.1,10.0.0.2,[fe80::1],[2001:4f8:4:7:2e0:81ff:fe52:9a6b] + event "dst-ip-eq-list" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-ne.sig +signature id { + dst-ip != 10.0.0.1 + event "dst-ip-ne" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-ne-nomatch.sig +signature id { + dst-ip != [2001:4f8:4:7:2e0:81ff:fe52:9a6b] + event "dst-ip-ne-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-ne-list.sig +signature id { + dst-ip != 10.0.0.1,10.0.0.2,10.0.0.3,[fe80::1] + event "dst-ip-ne-list" +} +@TEST-END-FILE + +@TEST-START-FILE dst-ip-ne-list-nomatch.sig +signature id { + dst-ip != 10.0.0.1,10.0.0.2,10.0.0.3,[fe80::1],[2001:4f8:4:7:2e0:81ff:fe52:9a6b] + event "dst-ip-ne-list-nomatch" +} +@TEST-END-FILE + +event signature_match(state: signature_state, msg: string, data: string) + { + print fmt("signature_match %s - %s", state$conn$id, msg); + } diff --git a/testing/btest/signatures/dst-port-header-condition.bro b/testing/btest/signatures/dst-port-header-condition.bro deleted file mode 100644 index 08ba07b0de..0000000000 --- a/testing/btest/signatures/dst-port-header-condition.bro +++ /dev/null @@ -1,164 +0,0 @@ -# @TEST-EXEC: bro -b -s dst-port-eq -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >dst-port-eq.out -# @TEST-EXEC: bro -b -s dst-port-eq-nomatch -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >dst-port-eq-nomatch.out -# @TEST-EXEC: bro -b -s dst-port-eq-list -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >dst-port-eq-list.out -# @TEST-EXEC: bro -b -s dst-port-eq -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-eq-ip6.out - -# @TEST-EXEC: bro -b -s dst-port-ne -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-ne.out -# @TEST-EXEC: bro -b -s dst-port-ne-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-ne-nomatch.out -# @TEST-EXEC: bro -b -s dst-port-ne-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-ne-list.out -# @TEST-EXEC: bro -b -s dst-port-ne-list-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-ne-list-nomatch.out - -# @TEST-EXEC: bro -b -s dst-port-lt -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-lt.out -# @TEST-EXEC: bro -b -s dst-port-lt-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-lt-nomatch.out -# @TEST-EXEC: bro -b -s dst-port-lte1 -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-lte1.out -# @TEST-EXEC: bro -b -s dst-port-lte2 -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-lte2.out -# @TEST-EXEC: bro -b -s dst-port-lte-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-lte-nomatch.out - -# @TEST-EXEC: bro -b -s dst-port-gt -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-gt.out -# @TEST-EXEC: bro -b -s dst-port-gt-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-gt-nomatch.out -# @TEST-EXEC: bro -b -s dst-port-gte1 -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-gte1.out -# @TEST-EXEC: bro -b -s 
dst-port-gte2 -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-gte2.out -# @TEST-EXEC: bro -b -s dst-port-gte-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-gte-nomatch.out - -# @TEST-EXEC: btest-diff dst-port-eq.out -# @TEST-EXEC: btest-diff dst-port-eq-nomatch.out -# @TEST-EXEC: btest-diff dst-port-eq-list.out -# @TEST-EXEC: btest-diff dst-port-eq-ip6.out -# @TEST-EXEC: btest-diff dst-port-ne.out -# @TEST-EXEC: btest-diff dst-port-ne-nomatch.out -# @TEST-EXEC: btest-diff dst-port-ne-list.out -# @TEST-EXEC: btest-diff dst-port-ne-list-nomatch.out -# @TEST-EXEC: btest-diff dst-port-lt.out -# @TEST-EXEC: btest-diff dst-port-lt-nomatch.out -# @TEST-EXEC: btest-diff dst-port-lte1.out -# @TEST-EXEC: btest-diff dst-port-lte2.out -# @TEST-EXEC: btest-diff dst-port-lte-nomatch.out -# @TEST-EXEC: btest-diff dst-port-gt.out -# @TEST-EXEC: btest-diff dst-port-gt-nomatch.out -# @TEST-EXEC: btest-diff dst-port-gte1.out -# @TEST-EXEC: btest-diff dst-port-gte2.out -# @TEST-EXEC: btest-diff dst-port-gte-nomatch.out - -@TEST-START-FILE dst-port-eq.sig -signature id { - dst-port == 13000 - event "dst-port-eq" -} -@TEST-END-FILE - -@TEST-START-FILE dst-port-eq-nomatch.sig -signature id { - dst-port == 22 - event "dst-port-eq-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE dst-port-eq-list.sig -signature id { - dst-port == 22,23,24,13000 - event "dst-port-eq-list" -} -@TEST-END-FILE - -@TEST-START-FILE dst-port-ne.sig -signature id { - dst-port != 22 - event "dst-port-ne" -} -@TEST-END-FILE - -@TEST-START-FILE dst-port-ne-nomatch.sig -signature id { - dst-port != 13000 - event "dst-port-ne-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE dst-port-ne-list.sig -signature id { - dst-port != 22,23,24,25 - event "dst-port-ne-list" -} -@TEST-END-FILE - -@TEST-START-FILE dst-port-ne-list-nomatch.sig -signature id { - dst-port != 22,23,24,25,13000 - event "dst-port-ne-list-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE dst-port-lt.sig -signature id { - dst-port < 13001 - event "dst-port-lt" -} -@TEST-END-FILE - -@TEST-START-FILE dst-port-lt-nomatch.sig -signature id { - dst-port < 13000 - event "dst-port-lt-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE dst-port-lte1.sig -signature id { - dst-port <= 13000 - event "dst-port-lte1" -} -@TEST-END-FILE - -@TEST-START-FILE dst-port-lte2.sig -signature id { - dst-port <= 13001 - event "dst-port-lte2" -} -@TEST-END-FILE - -@TEST-START-FILE dst-port-lte-nomatch.sig -signature id { - dst-port <= 12999 - event "dst-port-lte-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE dst-port-gt.sig -signature id { - dst-port > 12999 - event "dst-port-gt" -} -@TEST-END-FILE - -@TEST-START-FILE dst-port-gt-nomatch.sig -signature id { - dst-port > 13000 - event "dst-port-gt-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE dst-port-gte1.sig -signature id { - dst-port >= 13000 - event "dst-port-gte1" -} -@TEST-END-FILE - -@TEST-START-FILE dst-port-gte2.sig -signature id { - dst-port >= 12999 - event "dst-port-gte2" -} -@TEST-END-FILE - -@TEST-START-FILE dst-port-gte-nomatch.sig -signature id { - dst-port >= 13001 - event "dst-port-gte-nomatch" -} -@TEST-END-FILE - -event signature_match(state: signature_state, msg: string, data: string) - { - print fmt("signature_match %s - %s", state$conn$id, msg); - } diff --git a/testing/btest/signatures/dst-port-header-condition.zeek b/testing/btest/signatures/dst-port-header-condition.zeek new file mode 100644 index 0000000000..5f2f880d79 --- /dev/null +++ b/testing/btest/signatures/dst-port-header-condition.zeek @@ -0,0 
+1,164 @@ +# @TEST-EXEC: zeek -b -s dst-port-eq -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >dst-port-eq.out +# @TEST-EXEC: zeek -b -s dst-port-eq-nomatch -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >dst-port-eq-nomatch.out +# @TEST-EXEC: zeek -b -s dst-port-eq-list -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >dst-port-eq-list.out +# @TEST-EXEC: zeek -b -s dst-port-eq -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-eq-ip6.out + +# @TEST-EXEC: zeek -b -s dst-port-ne -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-ne.out +# @TEST-EXEC: zeek -b -s dst-port-ne-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-ne-nomatch.out +# @TEST-EXEC: zeek -b -s dst-port-ne-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-ne-list.out +# @TEST-EXEC: zeek -b -s dst-port-ne-list-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-ne-list-nomatch.out + +# @TEST-EXEC: zeek -b -s dst-port-lt -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-lt.out +# @TEST-EXEC: zeek -b -s dst-port-lt-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-lt-nomatch.out +# @TEST-EXEC: zeek -b -s dst-port-lte1 -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-lte1.out +# @TEST-EXEC: zeek -b -s dst-port-lte2 -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-lte2.out +# @TEST-EXEC: zeek -b -s dst-port-lte-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-lte-nomatch.out + +# @TEST-EXEC: zeek -b -s dst-port-gt -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-gt.out +# @TEST-EXEC: zeek -b -s dst-port-gt-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-gt-nomatch.out +# @TEST-EXEC: zeek -b -s dst-port-gte1 -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-gte1.out +# @TEST-EXEC: zeek -b -s dst-port-gte2 -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-gte2.out +# @TEST-EXEC: zeek -b -s dst-port-gte-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >dst-port-gte-nomatch.out + +# @TEST-EXEC: btest-diff dst-port-eq.out +# @TEST-EXEC: btest-diff dst-port-eq-nomatch.out +# @TEST-EXEC: btest-diff dst-port-eq-list.out +# @TEST-EXEC: btest-diff dst-port-eq-ip6.out +# @TEST-EXEC: btest-diff dst-port-ne.out +# @TEST-EXEC: btest-diff dst-port-ne-nomatch.out +# @TEST-EXEC: btest-diff dst-port-ne-list.out +# @TEST-EXEC: btest-diff dst-port-ne-list-nomatch.out +# @TEST-EXEC: btest-diff dst-port-lt.out +# @TEST-EXEC: btest-diff dst-port-lt-nomatch.out +# @TEST-EXEC: btest-diff dst-port-lte1.out +# @TEST-EXEC: btest-diff dst-port-lte2.out +# @TEST-EXEC: btest-diff dst-port-lte-nomatch.out +# @TEST-EXEC: btest-diff dst-port-gt.out +# @TEST-EXEC: btest-diff dst-port-gt-nomatch.out +# @TEST-EXEC: btest-diff dst-port-gte1.out +# @TEST-EXEC: btest-diff dst-port-gte2.out +# @TEST-EXEC: btest-diff dst-port-gte-nomatch.out + +@TEST-START-FILE dst-port-eq.sig +signature id { + dst-port == 13000 + event "dst-port-eq" +} +@TEST-END-FILE + +@TEST-START-FILE dst-port-eq-nomatch.sig +signature id { + dst-port == 22 + event "dst-port-eq-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE dst-port-eq-list.sig +signature id { + dst-port == 22,23,24,13000 + event "dst-port-eq-list" +} +@TEST-END-FILE + +@TEST-START-FILE dst-port-ne.sig +signature id { + dst-port != 22 + event "dst-port-ne" +} +@TEST-END-FILE + +@TEST-START-FILE dst-port-ne-nomatch.sig +signature id { + dst-port != 13000 + event "dst-port-ne-nomatch" +} +@TEST-END-FILE + 
+@TEST-START-FILE dst-port-ne-list.sig +signature id { + dst-port != 22,23,24,25 + event "dst-port-ne-list" +} +@TEST-END-FILE + +@TEST-START-FILE dst-port-ne-list-nomatch.sig +signature id { + dst-port != 22,23,24,25,13000 + event "dst-port-ne-list-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE dst-port-lt.sig +signature id { + dst-port < 13001 + event "dst-port-lt" +} +@TEST-END-FILE + +@TEST-START-FILE dst-port-lt-nomatch.sig +signature id { + dst-port < 13000 + event "dst-port-lt-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE dst-port-lte1.sig +signature id { + dst-port <= 13000 + event "dst-port-lte1" +} +@TEST-END-FILE + +@TEST-START-FILE dst-port-lte2.sig +signature id { + dst-port <= 13001 + event "dst-port-lte2" +} +@TEST-END-FILE + +@TEST-START-FILE dst-port-lte-nomatch.sig +signature id { + dst-port <= 12999 + event "dst-port-lte-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE dst-port-gt.sig +signature id { + dst-port > 12999 + event "dst-port-gt" +} +@TEST-END-FILE + +@TEST-START-FILE dst-port-gt-nomatch.sig +signature id { + dst-port > 13000 + event "dst-port-gt-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE dst-port-gte1.sig +signature id { + dst-port >= 13000 + event "dst-port-gte1" +} +@TEST-END-FILE + +@TEST-START-FILE dst-port-gte2.sig +signature id { + dst-port >= 12999 + event "dst-port-gte2" +} +@TEST-END-FILE + +@TEST-START-FILE dst-port-gte-nomatch.sig +signature id { + dst-port >= 13001 + event "dst-port-gte-nomatch" +} +@TEST-END-FILE + +event signature_match(state: signature_state, msg: string, data: string) + { + print fmt("signature_match %s - %s", state$conn$id, msg); + } diff --git a/testing/btest/signatures/eval-condition-no-return-value.bro b/testing/btest/signatures/eval-condition-no-return-value.bro deleted file mode 100644 index b1a4f5781f..0000000000 --- a/testing/btest/signatures/eval-condition-no-return-value.bro +++ /dev/null @@ -1,20 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/ftp/ipv4.trace %INPUT -# @TEST-EXEC: btest-diff .stdout -# @TEST-EXEC: btest-diff .stderr - -@load-sigs blah.sig - -@TEST-START-FILE blah.sig -signature blah - { - ip-proto == tcp - src-port == 21 - payload /.*/ - eval mark_conn - } -@TEST-END-FILE - -function mark_conn(state: signature_state, data: string): bool - { - print "Called"; - } diff --git a/testing/btest/signatures/eval-condition-no-return-value.zeek b/testing/btest/signatures/eval-condition-no-return-value.zeek new file mode 100644 index 0000000000..88a8e57ca1 --- /dev/null +++ b/testing/btest/signatures/eval-condition-no-return-value.zeek @@ -0,0 +1,20 @@ +# @TEST-EXEC: zeek -r $TRACES/ftp/ipv4.trace %INPUT +# @TEST-EXEC: btest-diff .stdout +# @TEST-EXEC: btest-diff .stderr + +@load-sigs blah.sig + +@TEST-START-FILE blah.sig +signature blah + { + ip-proto == tcp + src-port == 21 + payload /.*/ + eval mark_conn + } +@TEST-END-FILE + +function mark_conn(state: signature_state, data: string): bool + { + print "Called"; + } diff --git a/testing/btest/signatures/eval-condition.bro b/testing/btest/signatures/eval-condition.bro deleted file mode 100644 index a14003b691..0000000000 --- a/testing/btest/signatures/eval-condition.bro +++ /dev/null @@ -1,20 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/ftp/ipv4.trace %INPUT -# @TEST-EXEC: btest-diff conn.log - -@load-sigs blah.sig - -@TEST-START-FILE blah.sig -signature blah - { - ip-proto == tcp - src-port == 21 - payload /.*/ - eval mark_conn - } -@TEST-END-FILE - -function mark_conn(state: signature_state, data: string): bool - { - add state$conn$service["blah"]; - return T; - } diff 
--git a/testing/btest/signatures/eval-condition.zeek b/testing/btest/signatures/eval-condition.zeek new file mode 100644 index 0000000000..fe2db7482b --- /dev/null +++ b/testing/btest/signatures/eval-condition.zeek @@ -0,0 +1,20 @@ +# @TEST-EXEC: zeek -r $TRACES/ftp/ipv4.trace %INPUT +# @TEST-EXEC: btest-diff conn.log + +@load-sigs blah.sig + +@TEST-START-FILE blah.sig +signature blah + { + ip-proto == tcp + src-port == 21 + payload /.*/ + eval mark_conn + } +@TEST-END-FILE + +function mark_conn(state: signature_state, data: string): bool + { + add state$conn$service["blah"]; + return T; + } diff --git a/testing/btest/signatures/header-header-condition.bro b/testing/btest/signatures/header-header-condition.bro deleted file mode 100644 index ad78ba4513..0000000000 --- a/testing/btest/signatures/header-header-condition.bro +++ /dev/null @@ -1,78 +0,0 @@ -# @TEST-EXEC: bro -b -s ip -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >ip.out -# @TEST-EXEC: bro -b -s ip-mask -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >ip-mask.out -# @TEST-EXEC: bro -b -s ip6 -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >ip6.out -# @TEST-EXEC: bro -b -s udp -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >udp.out -# @TEST-EXEC: bro -b -s tcp -r $TRACES/chksums/ip4-tcp-good-chksum.pcap %INPUT >tcp.out -# @TEST-EXEC: bro -b -s icmp -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >icmp.out -# @TEST-EXEC: bro -b -s icmp6 -r $TRACES/chksums/ip6-icmp6-good-chksum.pcap %INPUT >icmp6.out -# @TEST-EXEC: bro -b -s val-mask -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >val-mask.out - -# @TEST-EXEC: btest-diff ip.out -# @TEST-EXEC: btest-diff ip-mask.out -# @TEST-EXEC: btest-diff ip6.out -# @TEST-EXEC: btest-diff udp.out -# @TEST-EXEC: btest-diff tcp.out -# @TEST-EXEC: btest-diff icmp.out -# @TEST-EXEC: btest-diff icmp6.out -# @TEST-EXEC: btest-diff val-mask.out - -@TEST-START-FILE ip.sig -signature id { - header ip[10:1] == 0x7c - event "ip" -} -@TEST-END-FILE - -@TEST-START-FILE ip-mask.sig -signature id { - header ip[16:4] == 127.0.0.0/24 - event "ip-mask" -} -@TEST-END-FILE - -@TEST-START-FILE ip6.sig -signature id { - header ip6[10:1] == 0x04 - event "ip6" -} -@TEST-END-FILE - -@TEST-START-FILE udp.sig -signature id { - header udp[2:1] == 0x32 - event "udp" -} -@TEST-END-FILE - -@TEST-START-FILE tcp.sig -signature id { - header tcp[3:4] == 0x50000000 - event "tcp" -} -@TEST-END-FILE - -@TEST-START-FILE icmp.sig -signature id { - header icmp[2:2] == 0xf7ff - event "icmp" -} -@TEST-END-FILE - -@TEST-START-FILE icmp6.sig -signature id { - header icmp6[0:1] == 0x80 - event "icmp6" -} -@TEST-END-FILE - -@TEST-START-FILE val-mask.sig -signature id { - header udp[2:1] & 0x0f == 0x02 - event "val-mask" -} -@TEST-END-FILE - -event signature_match(state: signature_state, msg: string, data: string) - { - print fmt("signature_match %s - %s", state$conn$id, msg); - } diff --git a/testing/btest/signatures/header-header-condition.zeek b/testing/btest/signatures/header-header-condition.zeek new file mode 100644 index 0000000000..545a9fdf40 --- /dev/null +++ b/testing/btest/signatures/header-header-condition.zeek @@ -0,0 +1,78 @@ +# @TEST-EXEC: zeek -b -s ip -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >ip.out +# @TEST-EXEC: zeek -b -s ip-mask -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >ip-mask.out +# @TEST-EXEC: zeek -b -s ip6 -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >ip6.out +# @TEST-EXEC: zeek -b -s udp -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >udp.out +# @TEST-EXEC: zeek 
-b -s tcp -r $TRACES/chksums/ip4-tcp-good-chksum.pcap %INPUT >tcp.out +# @TEST-EXEC: zeek -b -s icmp -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >icmp.out +# @TEST-EXEC: zeek -b -s icmp6 -r $TRACES/chksums/ip6-icmp6-good-chksum.pcap %INPUT >icmp6.out +# @TEST-EXEC: zeek -b -s val-mask -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >val-mask.out + +# @TEST-EXEC: btest-diff ip.out +# @TEST-EXEC: btest-diff ip-mask.out +# @TEST-EXEC: btest-diff ip6.out +# @TEST-EXEC: btest-diff udp.out +# @TEST-EXEC: btest-diff tcp.out +# @TEST-EXEC: btest-diff icmp.out +# @TEST-EXEC: btest-diff icmp6.out +# @TEST-EXEC: btest-diff val-mask.out + +@TEST-START-FILE ip.sig +signature id { + header ip[10:1] == 0x7c + event "ip" +} +@TEST-END-FILE + +@TEST-START-FILE ip-mask.sig +signature id { + header ip[16:4] == 127.0.0.0/24 + event "ip-mask" +} +@TEST-END-FILE + +@TEST-START-FILE ip6.sig +signature id { + header ip6[10:1] == 0x04 + event "ip6" +} +@TEST-END-FILE + +@TEST-START-FILE udp.sig +signature id { + header udp[2:1] == 0x32 + event "udp" +} +@TEST-END-FILE + +@TEST-START-FILE tcp.sig +signature id { + header tcp[3:4] == 0x50000000 + event "tcp" +} +@TEST-END-FILE + +@TEST-START-FILE icmp.sig +signature id { + header icmp[2:2] == 0xf7ff + event "icmp" +} +@TEST-END-FILE + +@TEST-START-FILE icmp6.sig +signature id { + header icmp6[0:1] == 0x80 + event "icmp6" +} +@TEST-END-FILE + +@TEST-START-FILE val-mask.sig +signature id { + header udp[2:1] & 0x0f == 0x02 + event "val-mask" +} +@TEST-END-FILE + +event signature_match(state: signature_state, msg: string, data: string) + { + print fmt("signature_match %s - %s", state$conn$id, msg); + } diff --git a/testing/btest/signatures/id-lookup.bro b/testing/btest/signatures/id-lookup.bro deleted file mode 100644 index f055e73725..0000000000 --- a/testing/btest/signatures/id-lookup.bro +++ /dev/null @@ -1,27 +0,0 @@ -# @TEST-EXEC: bro -b -s id -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >id.out -# @TEST-EXEC: btest-diff id.out - -@TEST-START-FILE id.sig -signature id { - ip-proto == udp_proto_number - event "id" -} - -signature idtable { - dst-ip == mynets - event "idtable" -} -@TEST-END-FILE - -const udp_proto_number = 17; - -const mynets: set[subnet] = { - 192.168.1.0/24, - 10.0.0.0/8, - 127.0.0.0/24 -}; - -event signature_match(state: signature_state, msg: string, data: string) - { - print fmt("signature_match %s - %s", state$conn$id, msg); - } diff --git a/testing/btest/signatures/id-lookup.zeek b/testing/btest/signatures/id-lookup.zeek new file mode 100644 index 0000000000..a100b0a624 --- /dev/null +++ b/testing/btest/signatures/id-lookup.zeek @@ -0,0 +1,27 @@ +# @TEST-EXEC: zeek -b -s id -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >id.out +# @TEST-EXEC: btest-diff id.out + +@TEST-START-FILE id.sig +signature id { + ip-proto == udp_proto_number + event "id" +} + +signature idtable { + dst-ip == mynets + event "idtable" +} +@TEST-END-FILE + +const udp_proto_number = 17; + +const mynets: set[subnet] = { + 192.168.1.0/24, + 10.0.0.0/8, + 127.0.0.0/24 +}; + +event signature_match(state: signature_state, msg: string, data: string) + { + print fmt("signature_match %s - %s", state$conn$id, msg); + } diff --git a/testing/btest/signatures/ip-proto-header-condition.bro b/testing/btest/signatures/ip-proto-header-condition.bro deleted file mode 100644 index 52d58ea223..0000000000 --- a/testing/btest/signatures/ip-proto-header-condition.bro +++ /dev/null @@ -1,48 +0,0 @@ -# @TEST-EXEC: bro -b -s tcp -r $TRACES/chksums/ip4-tcp-good-chksum.pcap 
%INPUT >tcp_in_ip4.out -# @TEST-EXEC: bro -b -s udp -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >udp_in_ip4.out -# @TEST-EXEC: bro -b -s icmp -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >icmp_in_ip4.out -# @TEST-EXEC: bro -b -s tcp -r $TRACES/chksums/ip6-tcp-good-chksum.pcap %INPUT >tcp_in_ip6.out -# @TEST-EXEC: bro -b -s udp -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >udp_in_ip6.out -# @TEST-EXEC: bro -b -s icmp6 -r $TRACES/chksums/ip6-icmp6-good-chksum.pcap %INPUT >icmp6_in_ip6.out -# @TEST-EXEC: bro -b -s icmp -r $TRACES/chksums/ip6-icmp6-good-chksum.pcap %INPUT >nomatch.out - -# @TEST-EXEC: btest-diff tcp_in_ip4.out -# @TEST-EXEC: btest-diff udp_in_ip4.out -# @TEST-EXEC: btest-diff icmp_in_ip4.out -# @TEST-EXEC: btest-diff tcp_in_ip6.out -# @TEST-EXEC: btest-diff udp_in_ip6.out -# @TEST-EXEC: btest-diff icmp6_in_ip6.out -# @TEST-EXEC: btest-diff nomatch.out - -@TEST-START-FILE tcp.sig -signature tcp_transport { - ip-proto == tcp - event "tcp" -} -@TEST-END-FILE - -@TEST-START-FILE udp.sig -signature udp_transport { - ip-proto == udp - event "udp" -} -@TEST-END-FILE - -@TEST-START-FILE icmp.sig -signature icmp_transport { - ip-proto == icmp - event "icmp" -} -@TEST-END-FILE - -@TEST-START-FILE icmp6.sig -signature icmp6_transport { - ip-proto == icmp6 - event "icmp6" -} -@TEST-END-FILE - -event signature_match(state: signature_state, msg: string, data: string) - { - print fmt("signature_match %s - %s", state$conn$id, msg); - } diff --git a/testing/btest/signatures/ip-proto-header-condition.zeek b/testing/btest/signatures/ip-proto-header-condition.zeek new file mode 100644 index 0000000000..bbaf865f06 --- /dev/null +++ b/testing/btest/signatures/ip-proto-header-condition.zeek @@ -0,0 +1,48 @@ +# @TEST-EXEC: zeek -b -s tcp -r $TRACES/chksums/ip4-tcp-good-chksum.pcap %INPUT >tcp_in_ip4.out +# @TEST-EXEC: zeek -b -s udp -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >udp_in_ip4.out +# @TEST-EXEC: zeek -b -s icmp -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >icmp_in_ip4.out +# @TEST-EXEC: zeek -b -s tcp -r $TRACES/chksums/ip6-tcp-good-chksum.pcap %INPUT >tcp_in_ip6.out +# @TEST-EXEC: zeek -b -s udp -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >udp_in_ip6.out +# @TEST-EXEC: zeek -b -s icmp6 -r $TRACES/chksums/ip6-icmp6-good-chksum.pcap %INPUT >icmp6_in_ip6.out +# @TEST-EXEC: zeek -b -s icmp -r $TRACES/chksums/ip6-icmp6-good-chksum.pcap %INPUT >nomatch.out + +# @TEST-EXEC: btest-diff tcp_in_ip4.out +# @TEST-EXEC: btest-diff udp_in_ip4.out +# @TEST-EXEC: btest-diff icmp_in_ip4.out +# @TEST-EXEC: btest-diff tcp_in_ip6.out +# @TEST-EXEC: btest-diff udp_in_ip6.out +# @TEST-EXEC: btest-diff icmp6_in_ip6.out +# @TEST-EXEC: btest-diff nomatch.out + +@TEST-START-FILE tcp.sig +signature tcp_transport { + ip-proto == tcp + event "tcp" +} +@TEST-END-FILE + +@TEST-START-FILE udp.sig +signature udp_transport { + ip-proto == udp + event "udp" +} +@TEST-END-FILE + +@TEST-START-FILE icmp.sig +signature icmp_transport { + ip-proto == icmp + event "icmp" +} +@TEST-END-FILE + +@TEST-START-FILE icmp6.sig +signature icmp6_transport { + ip-proto == icmp6 + event "icmp6" +} +@TEST-END-FILE + +event signature_match(state: signature_state, msg: string, data: string) + { + print fmt("signature_match %s - %s", state$conn$id, msg); + } diff --git a/testing/btest/signatures/load-sigs.bro b/testing/btest/signatures/load-sigs.bro deleted file mode 100644 index 3e08338f2c..0000000000 --- a/testing/btest/signatures/load-sigs.bro +++ /dev/null @@ -1,21 +0,0 @@ -# A test of signature 
loading using @load-sigs. - -# @TEST-EXEC: bro -C -r $TRACES/wikipedia.trace %INPUT >output -# @TEST-EXEC: btest-diff output - -@load-sigs ./subdir/mysigs.sig - -event signature_match(state: signature_state, msg: string, data: string) - { - print state$conn$id; - print msg; - print data; - } - -@TEST-START-FILE subdir/mysigs.sig -signature my-sig { -ip-proto == tcp -payload /GET \/images/ -event "works" -} -@TEST-END-FILE diff --git a/testing/btest/signatures/load-sigs.zeek b/testing/btest/signatures/load-sigs.zeek new file mode 100644 index 0000000000..d57630ec14 --- /dev/null +++ b/testing/btest/signatures/load-sigs.zeek @@ -0,0 +1,21 @@ +# A test of signature loading using @load-sigs. + +# @TEST-EXEC: zeek -C -r $TRACES/wikipedia.trace %INPUT >output +# @TEST-EXEC: btest-diff output + +@load-sigs ./subdir/mysigs.sig + +event signature_match(state: signature_state, msg: string, data: string) + { + print state$conn$id; + print msg; + print data; + } + +@TEST-START-FILE subdir/mysigs.sig +signature my-sig { +ip-proto == tcp +payload /GET \/images/ +event "works" +} +@TEST-END-FILE diff --git a/testing/btest/signatures/src-ip-header-condition-v4-masks.bro b/testing/btest/signatures/src-ip-header-condition-v4-masks.bro deleted file mode 100644 index 1e272c81ee..0000000000 --- a/testing/btest/signatures/src-ip-header-condition-v4-masks.bro +++ /dev/null @@ -1,71 +0,0 @@ -# @TEST-EXEC: bro -b -s src-ip-eq -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-eq.out -# @TEST-EXEC: bro -b -s src-ip-eq-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-eq-nomatch.out -# @TEST-EXEC: bro -b -s src-ip-eq-list -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-eq-list.out - -# @TEST-EXEC: bro -b -s src-ip-ne -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-ne.out -# @TEST-EXEC: bro -b -s src-ip-ne-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-ne-nomatch.out -# @TEST-EXEC: bro -b -s src-ip-ne-list -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-ne-list.out -# @TEST-EXEC: bro -b -s src-ip-ne-list-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-ne-list-nomatch.out - -# @TEST-EXEC: btest-diff src-ip-eq.out -# @TEST-EXEC: btest-diff src-ip-eq-nomatch.out -# @TEST-EXEC: btest-diff src-ip-eq-list.out - -# @TEST-EXEC: btest-diff src-ip-ne.out -# @TEST-EXEC: btest-diff src-ip-ne-nomatch.out -# @TEST-EXEC: btest-diff src-ip-ne-list.out -# @TEST-EXEC: btest-diff src-ip-ne-list-nomatch.out - -@TEST-START-FILE src-ip-eq.sig -signature id { - src-ip == 192.168.1.0/24 - event "src-ip-eq" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-eq-nomatch.sig -signature id { - src-ip == 10.0.0.0/8 - event "src-ip-eq-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-eq-list.sig -signature id { - src-ip == 10.0.0.0/8,[fe80::0]/16,192.168.1.0/24 - event "src-ip-eq-list" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-ne.sig -signature id { - src-ip != 10.0.0.0/8 - event "src-ip-ne" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-ne-nomatch.sig -signature id { - src-ip != 192.168.1.0/24 - event "src-ip-ne-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-ne-list.sig -signature id { - src-ip != 10.0.0.0/8,[fe80::0]/16 - event "src-ip-ne-list" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-ne-list-nomatch.sig -signature id { - src-ip != 10.0.0.0/8,[fe80::0]/16,192.168.1.0/24 - event "src-ip-ne-list-nomatch" -} -@TEST-END-FILE - -event signature_match(state: signature_state, msg: string, data: string) - { - print fmt("signature_match 
%s - %s", state$conn$id, msg); - } diff --git a/testing/btest/signatures/src-ip-header-condition-v4-masks.zeek b/testing/btest/signatures/src-ip-header-condition-v4-masks.zeek new file mode 100644 index 0000000000..9c34853c8a --- /dev/null +++ b/testing/btest/signatures/src-ip-header-condition-v4-masks.zeek @@ -0,0 +1,71 @@ +# @TEST-EXEC: zeek -b -s src-ip-eq -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-eq.out +# @TEST-EXEC: zeek -b -s src-ip-eq-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-eq-nomatch.out +# @TEST-EXEC: zeek -b -s src-ip-eq-list -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-eq-list.out + +# @TEST-EXEC: zeek -b -s src-ip-ne -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-ne.out +# @TEST-EXEC: zeek -b -s src-ip-ne-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-ne-nomatch.out +# @TEST-EXEC: zeek -b -s src-ip-ne-list -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-ne-list.out +# @TEST-EXEC: zeek -b -s src-ip-ne-list-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-ne-list-nomatch.out + +# @TEST-EXEC: btest-diff src-ip-eq.out +# @TEST-EXEC: btest-diff src-ip-eq-nomatch.out +# @TEST-EXEC: btest-diff src-ip-eq-list.out + +# @TEST-EXEC: btest-diff src-ip-ne.out +# @TEST-EXEC: btest-diff src-ip-ne-nomatch.out +# @TEST-EXEC: btest-diff src-ip-ne-list.out +# @TEST-EXEC: btest-diff src-ip-ne-list-nomatch.out + +@TEST-START-FILE src-ip-eq.sig +signature id { + src-ip == 192.168.1.0/24 + event "src-ip-eq" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-eq-nomatch.sig +signature id { + src-ip == 10.0.0.0/8 + event "src-ip-eq-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-eq-list.sig +signature id { + src-ip == 10.0.0.0/8,[fe80::0]/16,192.168.1.0/24 + event "src-ip-eq-list" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-ne.sig +signature id { + src-ip != 10.0.0.0/8 + event "src-ip-ne" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-ne-nomatch.sig +signature id { + src-ip != 192.168.1.0/24 + event "src-ip-ne-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-ne-list.sig +signature id { + src-ip != 10.0.0.0/8,[fe80::0]/16 + event "src-ip-ne-list" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-ne-list-nomatch.sig +signature id { + src-ip != 10.0.0.0/8,[fe80::0]/16,192.168.1.0/24 + event "src-ip-ne-list-nomatch" +} +@TEST-END-FILE + +event signature_match(state: signature_state, msg: string, data: string) + { + print fmt("signature_match %s - %s", state$conn$id, msg); + } diff --git a/testing/btest/signatures/src-ip-header-condition-v4.bro b/testing/btest/signatures/src-ip-header-condition-v4.bro deleted file mode 100644 index 746e41a4be..0000000000 --- a/testing/btest/signatures/src-ip-header-condition-v4.bro +++ /dev/null @@ -1,71 +0,0 @@ -# @TEST-EXEC: bro -b -s src-ip-eq -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-eq.out -# @TEST-EXEC: bro -b -s src-ip-eq-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-eq-nomatch.out -# @TEST-EXEC: bro -b -s src-ip-eq-list -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-eq-list.out - -# @TEST-EXEC: bro -b -s src-ip-ne -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-ne.out -# @TEST-EXEC: bro -b -s src-ip-ne-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-ne-nomatch.out -# @TEST-EXEC: bro -b -s src-ip-ne-list -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-ne-list.out -# @TEST-EXEC: bro -b -s src-ip-ne-list-nomatch -r 
$TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-ne-list-nomatch.out - -# @TEST-EXEC: btest-diff src-ip-eq.out -# @TEST-EXEC: btest-diff src-ip-eq-nomatch.out -# @TEST-EXEC: btest-diff src-ip-eq-list.out - -# @TEST-EXEC: btest-diff src-ip-ne.out -# @TEST-EXEC: btest-diff src-ip-ne-nomatch.out -# @TEST-EXEC: btest-diff src-ip-ne-list.out -# @TEST-EXEC: btest-diff src-ip-ne-list-nomatch.out - -@TEST-START-FILE src-ip-eq.sig -signature id { - src-ip == 192.168.1.100 - event "src-ip-eq" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-eq-nomatch.sig -signature id { - src-ip == 10.0.0.1 - event "src-ip-eq-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-eq-list.sig -signature id { - src-ip == 10.0.0.1,10.0.0.2,[fe80::1],192.168.1.100 - event "src-ip-eq-list" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-ne.sig -signature id { - src-ip != 10.0.0.1 - event "src-ip-ne" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-ne-nomatch.sig -signature id { - src-ip != 192.168.1.100 - event "src-ip-ne-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-ne-list.sig -signature id { - src-ip != 10.0.0.1,10.0.0.2,10.0.0.3,[fe80::1] - event "src-ip-ne-list" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-ne-list-nomatch.sig -signature id { - src-ip != 10.0.0.1,10.0.0.2,10.0.0.3,[fe80::1],192.168.1.100 - event "src-ip-ne-list-nomatch" -} -@TEST-END-FILE - -event signature_match(state: signature_state, msg: string, data: string) - { - print fmt("signature_match %s - %s", state$conn$id, msg); - } diff --git a/testing/btest/signatures/src-ip-header-condition-v4.zeek b/testing/btest/signatures/src-ip-header-condition-v4.zeek new file mode 100644 index 0000000000..3eaa73ce9c --- /dev/null +++ b/testing/btest/signatures/src-ip-header-condition-v4.zeek @@ -0,0 +1,71 @@ +# @TEST-EXEC: zeek -b -s src-ip-eq -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-eq.out +# @TEST-EXEC: zeek -b -s src-ip-eq-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-eq-nomatch.out +# @TEST-EXEC: zeek -b -s src-ip-eq-list -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-eq-list.out + +# @TEST-EXEC: zeek -b -s src-ip-ne -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-ne.out +# @TEST-EXEC: zeek -b -s src-ip-ne-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-ne-nomatch.out +# @TEST-EXEC: zeek -b -s src-ip-ne-list -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-ne-list.out +# @TEST-EXEC: zeek -b -s src-ip-ne-list-nomatch -r $TRACES/chksums/ip4-icmp-good-chksum.pcap %INPUT >src-ip-ne-list-nomatch.out + +# @TEST-EXEC: btest-diff src-ip-eq.out +# @TEST-EXEC: btest-diff src-ip-eq-nomatch.out +# @TEST-EXEC: btest-diff src-ip-eq-list.out + +# @TEST-EXEC: btest-diff src-ip-ne.out +# @TEST-EXEC: btest-diff src-ip-ne-nomatch.out +# @TEST-EXEC: btest-diff src-ip-ne-list.out +# @TEST-EXEC: btest-diff src-ip-ne-list-nomatch.out + +@TEST-START-FILE src-ip-eq.sig +signature id { + src-ip == 192.168.1.100 + event "src-ip-eq" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-eq-nomatch.sig +signature id { + src-ip == 10.0.0.1 + event "src-ip-eq-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-eq-list.sig +signature id { + src-ip == 10.0.0.1,10.0.0.2,[fe80::1],192.168.1.100 + event "src-ip-eq-list" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-ne.sig +signature id { + src-ip != 10.0.0.1 + event "src-ip-ne" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-ne-nomatch.sig +signature id { + src-ip != 192.168.1.100 + event "src-ip-ne-nomatch" +} +@TEST-END-FILE + 
+@TEST-START-FILE src-ip-ne-list.sig +signature id { + src-ip != 10.0.0.1,10.0.0.2,10.0.0.3,[fe80::1] + event "src-ip-ne-list" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-ne-list-nomatch.sig +signature id { + src-ip != 10.0.0.1,10.0.0.2,10.0.0.3,[fe80::1],192.168.1.100 + event "src-ip-ne-list-nomatch" +} +@TEST-END-FILE + +event signature_match(state: signature_state, msg: string, data: string) + { + print fmt("signature_match %s - %s", state$conn$id, msg); + } diff --git a/testing/btest/signatures/src-ip-header-condition-v6-masks.bro b/testing/btest/signatures/src-ip-header-condition-v6-masks.bro deleted file mode 100644 index 3c4fbf5526..0000000000 --- a/testing/btest/signatures/src-ip-header-condition-v6-masks.bro +++ /dev/null @@ -1,71 +0,0 @@ -# @TEST-EXEC: bro -b -s src-ip-eq -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-eq.out -# @TEST-EXEC: bro -b -s src-ip-eq-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-eq-nomatch.out -# @TEST-EXEC: bro -b -s src-ip-eq-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-eq-list.out - -# @TEST-EXEC: bro -b -s src-ip-ne -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-ne.out -# @TEST-EXEC: bro -b -s src-ip-ne-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-ne-nomatch.out -# @TEST-EXEC: bro -b -s src-ip-ne-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-ne-list.out -# @TEST-EXEC: bro -b -s src-ip-ne-list-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-ne-list-nomatch.out - -# @TEST-EXEC: btest-diff src-ip-eq.out -# @TEST-EXEC: btest-diff src-ip-eq-nomatch.out -# @TEST-EXEC: btest-diff src-ip-eq-list.out - -# @TEST-EXEC: btest-diff src-ip-ne.out -# @TEST-EXEC: btest-diff src-ip-ne-nomatch.out -# @TEST-EXEC: btest-diff src-ip-ne-list.out -# @TEST-EXEC: btest-diff src-ip-ne-list-nomatch.out - -@TEST-START-FILE src-ip-eq.sig -signature id { - src-ip == [2001:4f8:4:7:2e0:81ff:fe52:0]/112 - event "src-ip-eq" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-eq-nomatch.sig -signature id { - src-ip == [fe80::0]/16 - event "src-ip-eq-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-eq-list.sig -signature id { - src-ip == 10.0.0.0/8,[fe80::0]/16,[2001:4f8:4:7:2e0:81ff:fe52:0]/112 - event "src-ip-eq-list" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-ne.sig -signature id { - src-ip != [2001:4f8:4:7:2e0:81ff:fe52:0]/120 - event "src-ip-ne" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-ne-nomatch.sig -signature id { - src-ip != [2001:4f8:4:7:2e0:81ff:fe52:0]/112 - event "src-ip-ne-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-ne-list.sig -signature id { - src-ip != 10.0.0.0/8,[fe80::0]/16 - event "src-ip-ne-list" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-ne-list-nomatch.sig -signature id { - src-ip != 10.0.0.0/8,[fe80::1]/16,[2001:4f8:4:7:2e0:81ff:fe52:0]/112 - event "src-ip-ne-list-nomatch" -} -@TEST-END-FILE - -event signature_match(state: signature_state, msg: string, data: string) - { - print fmt("signature_match %s - %s", state$conn$id, msg); - } diff --git a/testing/btest/signatures/src-ip-header-condition-v6-masks.zeek b/testing/btest/signatures/src-ip-header-condition-v6-masks.zeek new file mode 100644 index 0000000000..ad5ca917a9 --- /dev/null +++ b/testing/btest/signatures/src-ip-header-condition-v6-masks.zeek @@ -0,0 +1,71 @@ +# @TEST-EXEC: zeek -b -s src-ip-eq -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-eq.out +# @TEST-EXEC: zeek -b -s src-ip-eq-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT 
>src-ip-eq-nomatch.out +# @TEST-EXEC: zeek -b -s src-ip-eq-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-eq-list.out + +# @TEST-EXEC: zeek -b -s src-ip-ne -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-ne.out +# @TEST-EXEC: zeek -b -s src-ip-ne-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-ne-nomatch.out +# @TEST-EXEC: zeek -b -s src-ip-ne-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-ne-list.out +# @TEST-EXEC: zeek -b -s src-ip-ne-list-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-ne-list-nomatch.out + +# @TEST-EXEC: btest-diff src-ip-eq.out +# @TEST-EXEC: btest-diff src-ip-eq-nomatch.out +# @TEST-EXEC: btest-diff src-ip-eq-list.out + +# @TEST-EXEC: btest-diff src-ip-ne.out +# @TEST-EXEC: btest-diff src-ip-ne-nomatch.out +# @TEST-EXEC: btest-diff src-ip-ne-list.out +# @TEST-EXEC: btest-diff src-ip-ne-list-nomatch.out + +@TEST-START-FILE src-ip-eq.sig +signature id { + src-ip == [2001:4f8:4:7:2e0:81ff:fe52:0]/112 + event "src-ip-eq" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-eq-nomatch.sig +signature id { + src-ip == [fe80::0]/16 + event "src-ip-eq-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-eq-list.sig +signature id { + src-ip == 10.0.0.0/8,[fe80::0]/16,[2001:4f8:4:7:2e0:81ff:fe52:0]/112 + event "src-ip-eq-list" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-ne.sig +signature id { + src-ip != [2001:4f8:4:7:2e0:81ff:fe52:0]/120 + event "src-ip-ne" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-ne-nomatch.sig +signature id { + src-ip != [2001:4f8:4:7:2e0:81ff:fe52:0]/112 + event "src-ip-ne-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-ne-list.sig +signature id { + src-ip != 10.0.0.0/8,[fe80::0]/16 + event "src-ip-ne-list" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-ne-list-nomatch.sig +signature id { + src-ip != 10.0.0.0/8,[fe80::1]/16,[2001:4f8:4:7:2e0:81ff:fe52:0]/112 + event "src-ip-ne-list-nomatch" +} +@TEST-END-FILE + +event signature_match(state: signature_state, msg: string, data: string) + { + print fmt("signature_match %s - %s", state$conn$id, msg); + } diff --git a/testing/btest/signatures/src-ip-header-condition-v6.bro b/testing/btest/signatures/src-ip-header-condition-v6.bro deleted file mode 100644 index 613a3dd4c1..0000000000 --- a/testing/btest/signatures/src-ip-header-condition-v6.bro +++ /dev/null @@ -1,71 +0,0 @@ -# @TEST-EXEC: bro -b -s src-ip-eq -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-eq.out -# @TEST-EXEC: bro -b -s src-ip-eq-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-eq-nomatch.out -# @TEST-EXEC: bro -b -s src-ip-eq-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-eq-list.out - -# @TEST-EXEC: bro -b -s src-ip-ne -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-ne.out -# @TEST-EXEC: bro -b -s src-ip-ne-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-ne-nomatch.out -# @TEST-EXEC: bro -b -s src-ip-ne-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-ne-list.out -# @TEST-EXEC: bro -b -s src-ip-ne-list-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-ne-list-nomatch.out - -# @TEST-EXEC: btest-diff src-ip-eq.out -# @TEST-EXEC: btest-diff src-ip-eq-nomatch.out -# @TEST-EXEC: btest-diff src-ip-eq-list.out - -# @TEST-EXEC: btest-diff src-ip-ne.out -# @TEST-EXEC: btest-diff src-ip-ne-nomatch.out -# @TEST-EXEC: btest-diff src-ip-ne-list.out -# @TEST-EXEC: btest-diff src-ip-ne-list-nomatch.out - -@TEST-START-FILE src-ip-eq.sig -signature id { - src-ip == 
[2001:4f8:4:7:2e0:81ff:fe52:ffff] - event "src-ip-eq" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-eq-nomatch.sig -signature id { - src-ip == 10.0.0.1 - event "src-ip-eq-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-eq-list.sig -signature id { - src-ip == 10.0.0.1,10.0.0.2,[fe80::1],[2001:4f8:4:7:2e0:81ff:fe52:ffff] - event "src-ip-eq-list" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-ne.sig -signature id { - src-ip != 10.0.0.1 - event "src-ip-ne" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-ne-nomatch.sig -signature id { - src-ip != [2001:4f8:4:7:2e0:81ff:fe52:ffff] - event "src-ip-ne-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-ne-list.sig -signature id { - src-ip != 10.0.0.1,10.0.0.2,10.0.0.3,[fe80::1] - event "src-ip-ne-list" -} -@TEST-END-FILE - -@TEST-START-FILE src-ip-ne-list-nomatch.sig -signature id { - src-ip != 10.0.0.1,10.0.0.2,10.0.0.3,[fe80::1],[2001:4f8:4:7:2e0:81ff:fe52:ffff] - event "src-ip-ne-list-nomatch" -} -@TEST-END-FILE - -event signature_match(state: signature_state, msg: string, data: string) - { - print fmt("signature_match %s - %s", state$conn$id, msg); - } diff --git a/testing/btest/signatures/src-ip-header-condition-v6.zeek b/testing/btest/signatures/src-ip-header-condition-v6.zeek new file mode 100644 index 0000000000..6ada9db299 --- /dev/null +++ b/testing/btest/signatures/src-ip-header-condition-v6.zeek @@ -0,0 +1,71 @@ +# @TEST-EXEC: zeek -b -s src-ip-eq -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-eq.out +# @TEST-EXEC: zeek -b -s src-ip-eq-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-eq-nomatch.out +# @TEST-EXEC: zeek -b -s src-ip-eq-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-eq-list.out + +# @TEST-EXEC: zeek -b -s src-ip-ne -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-ne.out +# @TEST-EXEC: zeek -b -s src-ip-ne-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-ne-nomatch.out +# @TEST-EXEC: zeek -b -s src-ip-ne-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-ne-list.out +# @TEST-EXEC: zeek -b -s src-ip-ne-list-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-ip-ne-list-nomatch.out + +# @TEST-EXEC: btest-diff src-ip-eq.out +# @TEST-EXEC: btest-diff src-ip-eq-nomatch.out +# @TEST-EXEC: btest-diff src-ip-eq-list.out + +# @TEST-EXEC: btest-diff src-ip-ne.out +# @TEST-EXEC: btest-diff src-ip-ne-nomatch.out +# @TEST-EXEC: btest-diff src-ip-ne-list.out +# @TEST-EXEC: btest-diff src-ip-ne-list-nomatch.out + +@TEST-START-FILE src-ip-eq.sig +signature id { + src-ip == [2001:4f8:4:7:2e0:81ff:fe52:ffff] + event "src-ip-eq" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-eq-nomatch.sig +signature id { + src-ip == 10.0.0.1 + event "src-ip-eq-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-eq-list.sig +signature id { + src-ip == 10.0.0.1,10.0.0.2,[fe80::1],[2001:4f8:4:7:2e0:81ff:fe52:ffff] + event "src-ip-eq-list" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-ne.sig +signature id { + src-ip != 10.0.0.1 + event "src-ip-ne" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-ne-nomatch.sig +signature id { + src-ip != [2001:4f8:4:7:2e0:81ff:fe52:ffff] + event "src-ip-ne-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-ne-list.sig +signature id { + src-ip != 10.0.0.1,10.0.0.2,10.0.0.3,[fe80::1] + event "src-ip-ne-list" +} +@TEST-END-FILE + +@TEST-START-FILE src-ip-ne-list-nomatch.sig +signature id { + src-ip != 10.0.0.1,10.0.0.2,10.0.0.3,[fe80::1],[2001:4f8:4:7:2e0:81ff:fe52:ffff] + event "src-ip-ne-list-nomatch" +} +@TEST-END-FILE 
+ +event signature_match(state: signature_state, msg: string, data: string) + { + print fmt("signature_match %s - %s", state$conn$id, msg); + } diff --git a/testing/btest/signatures/src-port-header-condition.bro b/testing/btest/signatures/src-port-header-condition.bro deleted file mode 100644 index ea9e08ce2b..0000000000 --- a/testing/btest/signatures/src-port-header-condition.bro +++ /dev/null @@ -1,164 +0,0 @@ -# @TEST-EXEC: bro -b -s src-port-eq -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >src-port-eq.out -# @TEST-EXEC: bro -b -s src-port-eq-nomatch -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >src-port-eq-nomatch.out -# @TEST-EXEC: bro -b -s src-port-eq-list -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >src-port-eq-list.out -# @TEST-EXEC: bro -b -s src-port-eq -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-eq-ip6.out - -# @TEST-EXEC: bro -b -s src-port-ne -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-ne.out -# @TEST-EXEC: bro -b -s src-port-ne-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-ne-nomatch.out -# @TEST-EXEC: bro -b -s src-port-ne-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-ne-list.out -# @TEST-EXEC: bro -b -s src-port-ne-list-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-ne-list-nomatch.out - -# @TEST-EXEC: bro -b -s src-port-lt -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-lt.out -# @TEST-EXEC: bro -b -s src-port-lt-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-lt-nomatch.out -# @TEST-EXEC: bro -b -s src-port-lte1 -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-lte1.out -# @TEST-EXEC: bro -b -s src-port-lte2 -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-lte2.out -# @TEST-EXEC: bro -b -s src-port-lte-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-lte-nomatch.out - -# @TEST-EXEC: bro -b -s src-port-gt -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-gt.out -# @TEST-EXEC: bro -b -s src-port-gt-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-gt-nomatch.out -# @TEST-EXEC: bro -b -s src-port-gte1 -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-gte1.out -# @TEST-EXEC: bro -b -s src-port-gte2 -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-gte2.out -# @TEST-EXEC: bro -b -s src-port-gte-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-gte-nomatch.out - -# @TEST-EXEC: btest-diff src-port-eq.out -# @TEST-EXEC: btest-diff src-port-eq-nomatch.out -# @TEST-EXEC: btest-diff src-port-eq-list.out -# @TEST-EXEC: btest-diff src-port-eq-ip6.out -# @TEST-EXEC: btest-diff src-port-ne.out -# @TEST-EXEC: btest-diff src-port-ne-nomatch.out -# @TEST-EXEC: btest-diff src-port-ne-list.out -# @TEST-EXEC: btest-diff src-port-ne-list-nomatch.out -# @TEST-EXEC: btest-diff src-port-lt.out -# @TEST-EXEC: btest-diff src-port-lt-nomatch.out -# @TEST-EXEC: btest-diff src-port-lte1.out -# @TEST-EXEC: btest-diff src-port-lte2.out -# @TEST-EXEC: btest-diff src-port-lte-nomatch.out -# @TEST-EXEC: btest-diff src-port-gt.out -# @TEST-EXEC: btest-diff src-port-gt-nomatch.out -# @TEST-EXEC: btest-diff src-port-gte1.out -# @TEST-EXEC: btest-diff src-port-gte2.out -# @TEST-EXEC: btest-diff src-port-gte-nomatch.out - -@TEST-START-FILE src-port-eq.sig -signature id { - src-port == 30000 - event "src-port-eq" -} -@TEST-END-FILE - -@TEST-START-FILE src-port-eq-nomatch.sig -signature id { - src-port == 22 - event "src-port-eq-nomatch" -} 
-@TEST-END-FILE - -@TEST-START-FILE src-port-eq-list.sig -signature id { - src-port == 22,23,24,30000 - event "src-port-eq-list" -} -@TEST-END-FILE - -@TEST-START-FILE src-port-ne.sig -signature id { - src-port != 22 - event "src-port-ne" -} -@TEST-END-FILE - -@TEST-START-FILE src-port-ne-nomatch.sig -signature id { - src-port != 30000 - event "src-port-ne-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE src-port-ne-list.sig -signature id { - src-port != 22,23,24,25 - event "src-port-ne-list" -} -@TEST-END-FILE - -@TEST-START-FILE src-port-ne-list-nomatch.sig -signature id { - src-port != 22,23,24,25,30000 - event "src-port-ne-list-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE src-port-lt.sig -signature id { - src-port < 30001 - event "src-port-lt" -} -@TEST-END-FILE - -@TEST-START-FILE src-port-lt-nomatch.sig -signature id { - src-port < 30000 - event "src-port-lt-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE src-port-lte1.sig -signature id { - src-port <= 30000 - event "src-port-lte1" -} -@TEST-END-FILE - -@TEST-START-FILE src-port-lte2.sig -signature id { - src-port <= 30001 - event "src-port-lte2" -} -@TEST-END-FILE - -@TEST-START-FILE src-port-lte-nomatch.sig -signature id { - src-port <= 29999 - event "src-port-lte-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE src-port-gt.sig -signature id { - src-port > 29999 - event "src-port-gt" -} -@TEST-END-FILE - -@TEST-START-FILE src-port-gt-nomatch.sig -signature id { - src-port > 30000 - event "src-port-gt-nomatch" -} -@TEST-END-FILE - -@TEST-START-FILE src-port-gte1.sig -signature id { - src-port >= 30000 - event "src-port-gte1" -} -@TEST-END-FILE - -@TEST-START-FILE src-port-gte2.sig -signature id { - src-port >= 29999 - event "src-port-gte2" -} -@TEST-END-FILE - -@TEST-START-FILE src-port-gte-nomatch.sig -signature id { - src-port >= 30001 - event "src-port-gte-nomatch" -} -@TEST-END-FILE - -event signature_match(state: signature_state, msg: string, data: string) - { - print fmt("signature_match %s - %s", state$conn$id, msg); - } diff --git a/testing/btest/signatures/src-port-header-condition.zeek b/testing/btest/signatures/src-port-header-condition.zeek new file mode 100644 index 0000000000..841817e79c --- /dev/null +++ b/testing/btest/signatures/src-port-header-condition.zeek @@ -0,0 +1,175 @@ +# @TEST-EXEC: zeek -b -s src-port-eq -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >src-port-eq.out +# @TEST-EXEC: zeek -b -s src-port-eq-nomatch -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >src-port-eq-nomatch.out +# @TEST-EXEC: zeek -b -s src-port-eq-list -r $TRACES/chksums/ip4-udp-good-chksum.pcap %INPUT >src-port-eq-list.out +# @TEST-EXEC: zeek -b -s src-port-eq -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-eq-ip6.out + +# @TEST-EXEC: zeek -b -s src-port-ne -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-ne.out +# @TEST-EXEC: zeek -b -s src-port-ne-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-ne-nomatch.out +# @TEST-EXEC: zeek -b -s src-port-ne-list -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-ne-list.out +# @TEST-EXEC: zeek -b -s src-port-ne-list-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-ne-list-nomatch.out + +# @TEST-EXEC: zeek -b -s src-port-lt -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-lt.out +# @TEST-EXEC: zeek -b -s src-port-lt-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-lt-nomatch.out +# @TEST-EXEC: zeek -b -s src-port-lte1 -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT 
>src-port-lte1.out +# @TEST-EXEC: zeek -b -s src-port-lte2 -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-lte2.out +# @TEST-EXEC: zeek -b -s src-port-lte-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-lte-nomatch.out + +# @TEST-EXEC: zeek -b -s src-port-gt -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-gt.out +# @TEST-EXEC: zeek -b -s src-port-gt-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-gt-nomatch.out +# @TEST-EXEC: zeek -b -s src-port-gte1 -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-gte1.out +# @TEST-EXEC: zeek -b -s src-port-gte2 -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-gte2.out +# @TEST-EXEC: zeek -b -s src-port-gte-nomatch -r $TRACES/chksums/ip6-udp-good-chksum.pcap %INPUT >src-port-gte-nomatch.out + +# @TEST-EXEC: zeek -b -s src-port-range -r $TRACES/udp-multiple-source-ports.pcap %INPUT >src-port-range.out + +# @TEST-EXEC: btest-diff src-port-eq.out +# @TEST-EXEC: btest-diff src-port-eq-nomatch.out +# @TEST-EXEC: btest-diff src-port-eq-list.out +# @TEST-EXEC: btest-diff src-port-eq-ip6.out +# @TEST-EXEC: btest-diff src-port-ne.out +# @TEST-EXEC: btest-diff src-port-ne-nomatch.out +# @TEST-EXEC: btest-diff src-port-ne-list.out +# @TEST-EXEC: btest-diff src-port-ne-list-nomatch.out +# @TEST-EXEC: btest-diff src-port-lt.out +# @TEST-EXEC: btest-diff src-port-lt-nomatch.out +# @TEST-EXEC: btest-diff src-port-lte1.out +# @TEST-EXEC: btest-diff src-port-lte2.out +# @TEST-EXEC: btest-diff src-port-lte-nomatch.out +# @TEST-EXEC: btest-diff src-port-gt.out +# @TEST-EXEC: btest-diff src-port-gt-nomatch.out +# @TEST-EXEC: btest-diff src-port-gte1.out +# @TEST-EXEC: btest-diff src-port-gte2.out +# @TEST-EXEC: btest-diff src-port-gte-nomatch.out + +# @TEST-EXEC: btest-diff src-port-range.out + +@TEST-START-FILE src-port-eq.sig +signature id { + src-port == 30000 + event "src-port-eq" +} +@TEST-END-FILE + +@TEST-START-FILE src-port-eq-nomatch.sig +signature id { + src-port == 22 + event "src-port-eq-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE src-port-eq-list.sig +signature id { + src-port == 22,23,24,30000 + event "src-port-eq-list" +} +@TEST-END-FILE + +@TEST-START-FILE src-port-ne.sig +signature id { + src-port != 22 + event "src-port-ne" +} +@TEST-END-FILE + +@TEST-START-FILE src-port-ne-nomatch.sig +signature id { + src-port != 30000 + event "src-port-ne-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE src-port-ne-list.sig +signature id { + src-port != 22,23,24,25 + event "src-port-ne-list" +} +@TEST-END-FILE + +@TEST-START-FILE src-port-ne-list-nomatch.sig +signature id { + src-port != 22,23,24,25,30000 + event "src-port-ne-list-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE src-port-lt.sig +signature id { + src-port < 30001 + event "src-port-lt" +} +@TEST-END-FILE + +@TEST-START-FILE src-port-lt-nomatch.sig +signature id { + src-port < 30000 + event "src-port-lt-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE src-port-lte1.sig +signature id { + src-port <= 30000 + event "src-port-lte1" +} +@TEST-END-FILE + +@TEST-START-FILE src-port-lte2.sig +signature id { + src-port <= 30001 + event "src-port-lte2" +} +@TEST-END-FILE + +@TEST-START-FILE src-port-lte-nomatch.sig +signature id { + src-port <= 29999 + event "src-port-lte-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE src-port-gt.sig +signature id { + src-port > 29999 + event "src-port-gt" +} +@TEST-END-FILE + +@TEST-START-FILE src-port-gt-nomatch.sig +signature id { + src-port > 30000 + event "src-port-gt-nomatch" +} 
+@TEST-END-FILE + +@TEST-START-FILE src-port-gte1.sig +signature id { + src-port >= 30000 + event "src-port-gte1" +} +@TEST-END-FILE + +@TEST-START-FILE src-port-gte2.sig +signature id { + src-port >= 29999 + event "src-port-gte2" +} +@TEST-END-FILE + +@TEST-START-FILE src-port-gte-nomatch.sig +signature id { + src-port >= 30001 + event "src-port-gte-nomatch" +} +@TEST-END-FILE + +@TEST-START-FILE src-port-range.sig +signature id { + src-port == 29997-29999,30001-30002,30003 + event "src-port-range" +} +@TEST-END-FILE + +event signature_match(state: signature_state, msg: string, data: string) + { + print fmt("signature_match %s - %s", state$conn$id, msg); + } diff --git a/testing/btest/signatures/udp-packetwise-insensitive.zeek b/testing/btest/signatures/udp-packetwise-insensitive.zeek new file mode 100644 index 0000000000..a87971d5c8 --- /dev/null +++ b/testing/btest/signatures/udp-packetwise-insensitive.zeek @@ -0,0 +1,53 @@ +# @TEST-EXEC: zeek -r $TRACES/udp-signature-test.pcap %INPUT | sort >out +# @TEST-EXEC: btest-diff out + +@load-sigs test.sig + +@TEST-START-FILE test.sig +signature xxxx { + ip-proto = udp + payload /xXxX/i + event "Found XXXX" +} + +signature axxxx { + ip-proto = udp + payload /^xxxx/i + event "Found ^XXXX" +} + +signature sxxxx { + ip-proto = udp + payload /.*xxXx/i + event "Found .*XXXX" +} + +signature yyyy { + ip-proto = udp + payload /YYYY/i + event "Found YYYY" +} + +signature ayyyy { + ip-proto = udp + payload /^YYYY/i + event "Found ^YYYY" +} + +signature syyyy { + ip-proto = udp + payload /.*YYYY/i + event "Found .*YYYY" +} + +signature nope { + ip-proto = udp + payload /.*nope/i + event "Found .*nope" +} +@TEST-END-FILE + +event signature_match(state: signature_state, msg: string, data: string) + { + print "signature match", msg, data; + } diff --git a/testing/btest/signatures/udp-packetwise-match.bro b/testing/btest/signatures/udp-packetwise-match.bro deleted file mode 100644 index 706b632dd7..0000000000 --- a/testing/btest/signatures/udp-packetwise-match.bro +++ /dev/null @@ -1,53 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/udp-signature-test.pcap %INPUT | sort >out -# @TEST-EXEC: btest-diff out - -@load-sigs test.sig - -@TEST-START-FILE test.sig -signature xxxx { - ip-proto = udp - payload /XXXX/ - event "Found XXXX" -} - -signature axxxx { - ip-proto = udp - payload /^XXXX/ - event "Found ^XXXX" -} - -signature sxxxx { - ip-proto = udp - payload /.*XXXX/ - event "Found .*XXXX" -} - -signature yyyy { - ip-proto = udp - payload /YYYY/ - event "Found YYYY" -} - -signature ayyyy { - ip-proto = udp - payload /^YYYY/ - event "Found ^YYYY" -} - -signature syyyy { - ip-proto = udp - payload /.*YYYY/ - event "Found .*YYYY" -} - -signature nope { - ip-proto = udp - payload /.*nope/ - event "Found .*nope" -} -@TEST-END-FILE - -event signature_match(state: signature_state, msg: string, data: string) - { - print "signature match", msg, data; - } diff --git a/testing/btest/signatures/udp-packetwise-match.zeek b/testing/btest/signatures/udp-packetwise-match.zeek new file mode 100644 index 0000000000..feb531c37c --- /dev/null +++ b/testing/btest/signatures/udp-packetwise-match.zeek @@ -0,0 +1,53 @@ +# @TEST-EXEC: zeek -r $TRACES/udp-signature-test.pcap %INPUT | sort >out +# @TEST-EXEC: btest-diff out + +@load-sigs test.sig + +@TEST-START-FILE test.sig +signature xxxx { + ip-proto = udp + payload /XXXX/ + event "Found XXXX" +} + +signature axxxx { + ip-proto = udp + payload /^XXXX/ + event "Found ^XXXX" +} + +signature sxxxx { + ip-proto = udp + payload /.*XXXX/ + event 
"Found .*XXXX" +} + +signature yyyy { + ip-proto = udp + payload /YYYY/ + event "Found YYYY" +} + +signature ayyyy { + ip-proto = udp + payload /^YYYY/ + event "Found ^YYYY" +} + +signature syyyy { + ip-proto = udp + payload /.*YYYY/ + event "Found .*YYYY" +} + +signature nope { + ip-proto = udp + payload /.*nope/ + event "Found .*nope" +} +@TEST-END-FILE + +event signature_match(state: signature_state, msg: string, data: string) + { + print "signature match", msg, data; + } diff --git a/testing/btest/signatures/udp-payload-size.bro b/testing/btest/signatures/udp-payload-size.bro deleted file mode 100644 index efc5411feb..0000000000 --- a/testing/btest/signatures/udp-payload-size.bro +++ /dev/null @@ -1,23 +0,0 @@ -# @TEST-EXEC: bro -r $TRACES/ntp.pcap %INPUT >output -# @TEST-EXEC: btest-diff output - -@TEST-START-FILE a.sig -signature foo1 { - ip-proto == udp - payload-size < 1 - event "match" -} - -signature foo2 { - ip-proto == udp - payload-size > 0 - event "match" -} -@TEST-END-FILE - -event signature_match(state: signature_state, msg: string, data: string) - { - print "match", state$sig_id; - } - -@load-sigs ./a.sig diff --git a/testing/btest/signatures/udp-payload-size.zeek b/testing/btest/signatures/udp-payload-size.zeek new file mode 100644 index 0000000000..c1c6a6d49b --- /dev/null +++ b/testing/btest/signatures/udp-payload-size.zeek @@ -0,0 +1,23 @@ +# @TEST-EXEC: zeek -r $TRACES/ntp.pcap %INPUT >output +# @TEST-EXEC: btest-diff output + +@TEST-START-FILE a.sig +signature foo1 { + ip-proto == udp + payload-size < 1 + event "match" +} + +signature foo2 { + ip-proto == udp + payload-size > 0 + event "match" +} +@TEST-END-FILE + +event signature_match(state: signature_state, msg: string, data: string) + { + print "match", state$sig_id; + } + +@load-sigs ./a.sig diff --git a/testing/coverage/README b/testing/coverage/README index d1352640f2..cc21827817 100644 --- a/testing/coverage/README +++ b/testing/coverage/README @@ -1,5 +1,5 @@ -On a Bro build configured with --enable-coverage, this script produces a code -coverage report after Bro has been invoked. The intended application of this +On a Zeek build configured with --enable-coverage, this script produces a code +coverage report after Zeek has been invoked. The intended application of this script is after the btest testsuite has run. This combination (btests first, coverage computation afterward) happens automatically when running "make" in the testing directory. This script puts .gcov files (which are included in diff --git a/testing/coverage/code_coverage.sh b/testing/coverage/code_coverage.sh index 758b2fa915..79999abe19 100755 --- a/testing/coverage/code_coverage.sh +++ b/testing/coverage/code_coverage.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # -# On a Bro build configured with --enable-coverage, this script -# produces a code coverage report after Bro has been invoked. The +# On a Zeek build configured with --enable-coverage, this script +# produces a code coverage report after Zeek has been invoked. The # intended application of this script is after the btest testsuite has # run. This combination (btests first, coverage computation afterward) # happens automatically when running "make" in the testing directory. @@ -12,7 +12,7 @@ # 1. Run test suite # 2. Check for .gcda files existing. # 3a. Run gcov (-p to preserve path) -# 3b. Prune .gcov files for objects outside of the Bro tree +# 3b. Prune .gcov files for objects outside of the Zeek tree # 4a. Analyze .gcov files generated and create summary file # 4b. 
Send .gcov files to appropriate path # @@ -52,7 +52,7 @@ function check_file_coverage { function check_group_coverage { DATA="$1" # FILE CONTAINING COVERAGE DATA - SRC_FOLDER="$2" # WHERE BRO WAS COMPILED + SRC_FOLDER="$2" # WHERE ZEEK WAS COMPILED OUTPUT="$3" # Prints all the relevant directories @@ -117,9 +117,9 @@ else exit 1 fi -# 3b. Prune gcov files that fall outside of the Bro tree: +# 3b. Prune gcov files that fall outside of the Zeek tree: # Look for files containing gcov's slash substitution character "#" -# and remove any that don't contain the Bro path root. +# and remove any that don't contain the Zeek path root. echo -n "Pruning out-of-tree coverage files... " PREFIX=$(echo "$BASE" | sed 's|/|#|g') for i in "$TMP"/*#*.gcov; do diff --git a/testing/coverage/lcov_html.sh b/testing/coverage/lcov_html.sh index c729b2145c..f17e583e2c 100755 --- a/testing/coverage/lcov_html.sh +++ b/testing/coverage/lcov_html.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # -# On a Bro build configured with --enable-coverage, this script -# produces a code coverage report in HTML format after Bro has been invoked. The +# On a Zeek build configured with --enable-coverage, this script +# produces a code coverage report in HTML format after Zeek has been invoked. The # intended application of this script is after the btest testsuite has run. # This depends on lcov to run. diff --git a/testing/external/README b/testing/external/README index ee6d71979e..6ab327b581 100644 --- a/testing/external/README +++ b/testing/external/README @@ -2,9 +2,9 @@ Test Suite for Large Trace Files ================================ -This test-suite runs more complex Bro configurations on larger trace +This test-suite runs more complex Zeek configurations on larger trace files, and compares the results to a pre-established baseline. Due to -their size, both traces and baseline are not part of the main Bro +their size, both traces and baseline are not part of the main Zeek repository but kept externally. In addition to the publically provided files, one can also add a local set to the test-suite for running on private traces. @@ -60,7 +60,7 @@ To update a test's baseline, first run ``btest`` in update mode: .. console: - > cd bro-testing + > cd zeek-testing > btest -u tests/test-you-want-to-update Then use ``git`` to commit the changes and push the changes upstream diff --git a/testing/external/commit-hash.zeek-testing b/testing/external/commit-hash.zeek-testing index 029d39391b..b9c51b74bb 100644 --- a/testing/external/commit-hash.zeek-testing +++ b/testing/external/commit-hash.zeek-testing @@ -1 +1 @@ -37f541404be417d5b092b8b36c7c1c84d2f307e9 +3db517fc4e1cfb1f0050b65eee4fd1b61ba5a461 diff --git a/testing/external/commit-hash.zeek-testing-private b/testing/external/commit-hash.zeek-testing-private index a99b5e8d7b..098a51358b 100644 --- a/testing/external/commit-hash.zeek-testing-private +++ b/testing/external/commit-hash.zeek-testing-private @@ -1 +1 @@ -de8e378210cacc599d8e59e1204286f7fe9cbc1b +e485d5c6ce4407c9b62880e075b1ba86d8d563cd diff --git a/testing/external/scripts/diff-all b/testing/external/scripts/diff-all index d51f3b294f..0caa5078be 100755 --- a/testing/external/scripts/diff-all +++ b/testing/external/scripts/diff-all @@ -27,7 +27,7 @@ for i in `echo $files_cwd $files_baseline | sort | uniq`; do if [[ "$i" == "reporter.log" ]]; then # Do not diff the reporter.log if it only complains about missing # GeoIP support. - if ! egrep -v "^#|Bro was not configured for GeoIP support" $i; then + if ! 
egrep -v "^#|Zeek was not configured for GeoIP support" $i; then continue fi fi diff --git a/testing/external/scripts/external-ca-list.bro b/testing/external/scripts/external-ca-list.bro deleted file mode 120000 index a52a9be196..0000000000 --- a/testing/external/scripts/external-ca-list.bro +++ /dev/null @@ -1 +0,0 @@ -../../scripts/external-ca-list.bro \ No newline at end of file diff --git a/testing/external/scripts/external-ca-list.zeek b/testing/external/scripts/external-ca-list.zeek new file mode 120000 index 0000000000..a50808a16d --- /dev/null +++ b/testing/external/scripts/external-ca-list.zeek @@ -0,0 +1 @@ +../../scripts/external-ca-list.zeek \ No newline at end of file diff --git a/testing/external/scripts/perftools-adapt-paths b/testing/external/scripts/perftools-adapt-paths index cfecd39993..cbfaa610ab 100755 --- a/testing/external/scripts/perftools-adapt-paths +++ b/testing/external/scripts/perftools-adapt-paths @@ -5,6 +5,6 @@ # # Returns an exit code > 0 if there's a leak. -cat $1 | sed "s#bro *\"\./#../../../build/src/bro \".tmp/$TEST_NAME/#g" | sed 's/ *--gv//g' >$1.tmp && mv $1.tmp $1 +cat $1 | sed "s#zeek *\"\./#../../../build/src/zeek \".tmp/$TEST_NAME/#g" | sed 's/ *--gv//g' >$1.tmp && mv $1.tmp $1 grep -qv "detected leaks of" $1 diff --git a/testing/external/scripts/skel/test.skeleton b/testing/external/scripts/skel/test.skeleton index a76f3d4d09..aa32e72e7a 100644 --- a/testing/external/scripts/skel/test.skeleton +++ b/testing/external/scripts/skel/test.skeleton @@ -1,4 +1,4 @@ -# @TEST-EXEC: zcat $TRACES/trace.gz | bro -r - %INPUT +# @TEST-EXEC: zcat $TRACES/trace.gz | zeek -r - %INPUT # @TEST-EXEC: $SCRIPTS/diff-all '*.log' @load testing-setup diff --git a/testing/external/scripts/testing-setup.bro b/testing/external/scripts/testing-setup.bro deleted file mode 100644 index a56a72aee5..0000000000 --- a/testing/external/scripts/testing-setup.bro +++ /dev/null @@ -1,14 +0,0 @@ -# Sets some testing specific options. - -@load external-ca-list.bro - -@ifdef ( SMTP::never_calc_md5 ) - # MDD5s can depend on libmagic output. - redef SMTP::never_calc_md5 = T; -@endif - -@ifdef ( LogAscii::use_json ) - # Don't start logging everything as JSON. - # (json-logs.bro activates this). - redef LogAscii::use_json = F; -@endif diff --git a/testing/external/scripts/testing-setup.zeek b/testing/external/scripts/testing-setup.zeek new file mode 100644 index 0000000000..18e7c4783f --- /dev/null +++ b/testing/external/scripts/testing-setup.zeek @@ -0,0 +1,14 @@ +# Sets some testing specific options. + +@load external-ca-list + +@ifdef ( SMTP::never_calc_md5 ) + # MDD5s can depend on libmagic output. + redef SMTP::never_calc_md5 = T; +@endif + +@ifdef ( LogAscii::use_json ) + # Don't start logging everything as JSON. + # (json-logs.zeek activates this). 
+ redef LogAscii::use_json = F; +@endif diff --git a/testing/external/subdir-btest.cfg b/testing/external/subdir-btest.cfg index 39aaead17a..3e0514301c 100644 --- a/testing/external/subdir-btest.cfg +++ b/testing/external/subdir-btest.cfg @@ -6,8 +6,8 @@ IgnoreDirs = .svn CVS .tmp IgnoreFiles = *.tmp *.swp #* *.trace .gitignore *.skeleton [environment] -BROPATH=`bash -c %(testbase)s/../../../build/bro-path-dev`:%(testbase)s/../scripts -BRO_SEED_FILE=%(testbase)s/../random.seed +ZEEKPATH=`bash -c %(testbase)s/../../../build/zeek-path-dev`:%(testbase)s/../scripts +ZEEK_SEED_FILE=%(testbase)s/../random.seed TZ=UTC LC_ALL=C PATH=%(testbase)s/../../../build/src:%(testbase)s/../../../aux/btest:%(testbase)s/../../scripts:%(default_path)s @@ -18,7 +18,7 @@ SCRIPTS=%(testbase)s/../scripts SCRIPTS_LOCAL=%(testbase)s/scripts DIST=%(testbase)s/../../.. BUILD=%(testbase)s/../../../build -BRO_PROFILER_FILE=%(testbase)s/.tmp/script-coverage/XXXXXX -BRO_DNS_FAKE=1 +ZEEK_PROFILER_FILE=%(testbase)s/.tmp/script-coverage/XXXXXX +ZEEK_DNS_FAKE=1 # For fedora 21 - they disable MD5 for certificate verification and need setting an environment variable to permit it. OPENSSL_ENABLE_MD5_VERIFY=1 diff --git a/testing/scripts/coverage-calc b/testing/scripts/coverage-calc index df12e0c86f..3645f57144 100755 --- a/testing/scripts/coverage-calc +++ b/testing/scripts/coverage-calc @@ -1,12 +1,12 @@ #! /usr/bin/env python -# This script aggregates many files containing Bro script coverage information +# This script aggregates many files containing Zeek script coverage information # into a single file and reports the overall coverage information. Usage: # # coverage-calc